//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <cstdio>
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
  struct Expression {
    enum ExpressionOpcode { ADD, FADD, SUB, FSUB, MUL, FMUL,
                            UDIV, SDIV, FDIV, UREM, SREM,
                            FREM, SHL, LSHR, ASHR, AND, OR, XOR, ICMPEQ,
                            ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
                            ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
                            FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
                            FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
                            FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
                            SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
                            FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
                            PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, CONSTANT,
                            INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type *type;
    Function *function;
    SmallVector<uint32_t, 4> varargs;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis *AA;
      MemoryDependenceAnalysis *MD;
      DominatorTree *DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(BinaryOperator* BO);
      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression::ExpressionOpcode getOpcode(CastInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(Constant* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);

      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}
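
// A sketch of the intended use (hypothetical client code, not part of this
// file): two values receive the same number iff the table can prove they
// compute the same result.
//
//   ValueTable VN;
//   VN.setAliasAnalysis(AA);              // required before numbering calls
//   uint32_t N1 = VN.lookup_or_add(X);
//   uint32_t N2 = VN.lookup_or_add(Y);
//   if (N1 == N2) { ... }                 // X and Y are equivalent expressions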

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9));

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
  static bool isPod() { return true; }
};
}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//
Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) {
  switch(BO->getOpcode()) {
  default: // THIS SHOULD NEVER HAPPEN
    llvm_unreachable("Binary operator with unknown opcode?");
  case Instruction::Add:  return Expression::ADD;
  case Instruction::FAdd: return Expression::FADD;
  case Instruction::Sub:  return Expression::SUB;
  case Instruction::FSub: return Expression::FSUB;
  case Instruction::Mul:  return Expression::MUL;
  case Instruction::FMul: return Expression::FMUL;
  case Instruction::UDiv: return Expression::UDIV;
  case Instruction::SDiv: return Expression::SDIV;
  case Instruction::FDiv: return Expression::FDIV;
  case Instruction::URem: return Expression::UREM;
  case Instruction::SRem: return Expression::SREM;
  case Instruction::FRem: return Expression::FREM;
  case Instruction::Shl:  return Expression::SHL;
  case Instruction::LShr: return Expression::LSHR;
  case Instruction::AShr: return Expression::ASHR;
  case Instruction::And:  return Expression::AND;
  case Instruction::Or:   return Expression::OR;
  case Instruction::Xor:  return Expression::XOR;
  }
}

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) {
  switch(C->getOpcode()) {
  default: // THIS SHOULD NEVER HAPPEN
    llvm_unreachable("Cast operator with unknown opcode?");
  case Instruction::Trunc:    return Expression::TRUNC;
  case Instruction::ZExt:     return Expression::ZEXT;
  case Instruction::SExt:     return Expression::SEXT;
  case Instruction::FPToUI:   return Expression::FPTOUI;
  case Instruction::FPToSI:   return Expression::FPTOSI;
  case Instruction::UIToFP:   return Expression::UITOFP;
  case Instruction::SIToFP:   return Expression::SITOFP;
  case Instruction::FPTrunc:  return Expression::FPTRUNC;
  case Instruction::FPExt:    return Expression::FPEXT;
  case Instruction::PtrToInt: return Expression::PTRTOINT;
  case Instruction::IntToPtr: return Expression::INTTOPTR;
  case Instruction::BitCast:  return Expression::BITCAST;
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;

  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = getOpcode(BO);

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}
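
/// lookup_or_add_call - Number a call instruction.  Calls that provably do
/// not access memory are numbered structurally like ordinary expressions.
/// Read-only calls may reuse the number of an identical call that memdep
/// proves reaches them unmodified (locally or through a single dominating
/// non-local dependency).  Calls that may write memory always receive a
/// fresh value number.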
uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumOperands() != C->getNumOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 1; i < C->getNumOperands(); ++i) {
        uint32_t c_vn = lookup_or_add(C->getOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const MemoryDependenceAnalysis::NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->second.isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->second.isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->second.getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->first, C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumOperands() != C->getNumOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 1; i < C->getNumOperands(); ++i) {
      uint32_t c_vn = lookup_or_add(C->getOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value.  Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}
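
// Each basic block gets a ValueNumberScope whose parent pointer refers to the
// scope of the block's dominator-tree parent, so a number-to-leader lookup
// (see GVN::lookupNumber below) walks up the dominator tree until it finds an
// available value with that number.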

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool nopre = false, bool noloads = false)
      : FunctionPass(&ID), NoPRE(nopre), NoLoads(noloads), MD(0) { }

  private:
    bool NoPRE;
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // FIXME: eliminate or document these better.
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file.
FunctionPass *llvm::createGVNPass(bool NoPRE, bool NoLoads) {
  return new GVN(NoPRE, NoLoads);
}

static RegisterPass<GVN> X("gvn",
                           "Global Value Numbering");

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  printf("{\n");
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    printf("%d\n", I->first);
    I->second->dump();
  }
  printf("}\n");
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return ConstVal;
  return 0;
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
/// map is actually a tri-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
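///
/// For example, in a diamond CFG A -> {B, C} -> D where the value is computed
/// in A, querying D first records D as speculatively available (2) and then
/// recurses into B and C; once both prove available through A, the optimistic
/// assumption for D stands.  If a predecessor instead proves unavailable, the
/// SpeculationFailure path below clears any blocks that were marked available
/// only because of that speculation.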
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  while (!BBWorklist.empty()) {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  }

  return false;
}

/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy) ||
      isa<StructType>(StoredVal->getType()) ||
      isa<ArrayType>(StoredVal->getType()))
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}
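
// For example, an i64 store can feed an i32 load of the same address (the
// wide value is truncated), but an i16 store cannot feed an i32 load, and
// first-class structs and arrays are never coerced.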

/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (isa<PointerType>(StoredValTy) && isa<PointerType>(LoadedTy)) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (isa<PointerType>(StoredValTy)) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (isa<PointerType>(TypeToCastTo))
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (isa<PointerType>(LoadedTy))
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (isa<PointerType>(StoredValTy)) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!isa<IntegerType>(StoredValTy)) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (isa<PointerType>(LoadedTy))
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}
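
// For example, given
//   %p = getelementptr {i32, i32, i32}* %base, i32 0, i32 2
// this returns %base with Offset = 8, the offset of the third struct field.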

/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(LoadInst *L, Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (isa<StructType>(L->getType()) || isa<ArrayType>(L->getType()))
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(L->getPointerOperand(), LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.
  if (LoadOffset == StoreOffset) {
#if 0
    errs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *L->getPointerOperand() << "\n"
           << "Load Offs  = " << LoadOffset << " - " << *L << "\n\n";
    errs() << "'" << L->getParent()->getParent()->getName() << "'"
           << *L->getParent();
#endif
  }

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614.  Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(L->getType());

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;

  bool isAAFailure = false;
  if (StoreOffset < LoadOffset) {
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  } else {
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
  }

  if (isAAFailure) {
#if 0
    errs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *L->getPointerOperand() << "\n"
           << "Load Offs  = " << LoadOffset << " - " << *L << "\n\n";
    errs() << "'" << L->getParent()->getParent()->getName() << "'"
           << *L->getParent();
#endif
    return -1;
  }

  // If the load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to
  // be valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
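
// For example, if a clobbering i32 store writes bytes [4,8) past some base
// pointer and the load reads an i16 at bytes [6,8), the load is completely
// covered and this returns 2: its bits begin two bytes into the stored value.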

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(LoadInst *L, StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (isa<StructType>(DepSI->getOperand(0)->getType()) ||
      isa<ArrayType>(DepSI->getOperand(0)->getType()))
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(L, StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(LoadInst *L, MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(L, MI->getDest(), MemSizeInBits, TD);

  // Unhandled memcpy/memmove.
  return -1;
}
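
// E.g. an i16 load at offset 2 into the bytes written by "memset(p, 1, 8)"
// is analyzed exactly like a load from a wide store, while memcpy/memmove
// dependencies are always rejected here.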

/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (isa<PointerType>(SrcVal->getType()))
    SrcVal = new PtrToIntInst(SrcVal, TD.getIntPtrType(Ctx), "tmp", InsertPt);
  if (!isa<IntegerType>(SrcVal->getType()))
    SrcVal = new BitCastInst(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                             "tmp", InsertPt);

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = BinaryOperator::CreateLShr(SrcVal,
                ConstantInt::get(SrcVal->getType(), ShiftAmt), "tmp", InsertPt);

  if (LoadSize != StoreSize)
    SrcVal = new TruncInst(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                           "tmp", InsertPt);

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Only memset is analyzable above; AnalyzeLoadFromClobberingMemInst rejects
  // memcpy/memmove dependencies, so they should never reach this point.
  llvm_unreachable("Unhandled memory intrinsic!");
  return 0;
}
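
// The splat loop above doubles the filled bytes whenever it can: for a 4-byte
// load of a memset with byte value 0xAB it computes
// 0xAB -> 0xABAB -> 0xABABABAB in two shift+or steps.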

namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;
  /// V - The value that is live out of the block.
  Value *V;
  /// Offset - The byte offset in V that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.V = V;
    Res.Offset = Offset;
    return Res;
  }
};

}

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     AliasAnalysis *AA) {
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI);

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    BasicBlock *BB = ValuesPerBlock[i].BB;
    Value *AvailableVal = ValuesPerBlock[i].V;
    unsigned Offset = ValuesPerBlock[i].Offset;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    if (AvailableVal->getType() != LoadTy) {
      assert(TD && "Need target data to handle type mismatch case");
      AvailableVal = GetStoreValueForLoad(AvailableVal, Offset, LoadTy,
                                          BB->getTerminator(), *TD);

      DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\n"
                   << *ValuesPerBlock[i].V << '\n'
                   << *AvailableVal << '\n' << "\n\n\n");
    }

    SSAUpdate.AddAvailableValue(BB, AvailableVal);
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (isa<PointerType>(V->getType()))
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}
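
// For example, if the loaded value is available as %a out of %BB1 and as %b
// out of %BB2, and both branch to the load's block, SSAUpdater materializes
//   %merge = phi [ %a, %BB1 ], [ %b, %BB2 ]
// in the load's block, and %merge is returned to replace the load.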

static bool isLifetimeStart(Instruction *Inst) {
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<MemoryDependenceAnalysis::NonLocalDepEntry, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(errs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].second.isClobber()) {
    DEBUG(
      errs() << "GVN: non-local load ";
      WriteAsOperand(errs(), LI);
      errs() << " is clobbered by " << *Deps[0].second.getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].first;
    MemDepResult DepInfo = Deps[i].second;

    if (DepInfo.isClobber()) {
      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD) {
          int Offset = AnalyzeLoadFromClobberingStore(LI, DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                           DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI, DepMI, *TD);
          if (Offset != -1) {
            Value *AvailVal = GetMemInstValueForLoad(DepMI, Offset,
                                                     LI->getType(),
                                                     DepBB->getTerminator(),
                                                     *TD);
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                                AvailVal));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the loaded value is larger or equal to the value we need, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // out.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(errs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (isa<PointerType>(V->getType()))
      MD->invalidateCachedPointerInfo(V);
    toErase.push_back(LI);
    NumGVNLoad++;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).
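  //
  // For example, in
  //     BB1: %x = load %p      BB2: ...
  //              \             /
  //         LoadBB: %y = load %p
  // the value is available out of BB1 but not BB2, so we insert a single copy
  // of the load at the end of BB2 and merge the two with a phi in LoadBB.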

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (!TmpBB) // If we haven't found any, bail now.
      return false;
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // If we have a repl set with LI itself in it, this means we have a loop where
  // at least one of the values is LI.  Since this means that we won't be able
  // to eliminate LI even if we insert uses in the other predecessors, we will
  // end up increasing code size.  Reject this by scanning for LI.
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    if (ValuesPerBlock[i].V == LI)
      return false;

  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
      if (Instruction *I = dyn_cast<Instruction>(ValuesPerBlock[i].V))
        // A "hot" instruction is in some loop (because it dominates its
        // dependent instruction).
        if (DT->dominates(LI, I)) {
          isHot = true;
          break;
        }

    // We are interested only in "hot" instructions.  We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Okay, we have some hope :).  Check to see if the loaded value is fully
  // available in all but one predecessor.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that block.
  BasicBlock *UnavailablePred = 0;

  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      continue;

    // If this load is not available in multiple predecessors, reject it.
    if (UnavailablePred && UnavailablePred != *PI)
      return false;
    UnavailablePred = *PI;
  }

  assert(UnavailablePred != 0 &&
         "Fully available value should be eliminated above!");

  // We don't currently handle critical edges :(
  if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
    DEBUG(errs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
                 << UnavailablePred->getName() << "': " << *LI << '\n');
    return false;
  }

  // Do PHI translation to get its value in the predecessor if necessary.  The
  // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
  //
  // FIXME: This may insert a computation, but we don't tell scalar GVN
  // optimization stuff about it.  How do we do this?
  SmallVector<Instruction*, 8> NewInsts;
  Value *LoadPtr = 0;

  // If all preds have a single successor, then we know it is safe to insert
  // the load on the pred (?!?), so we can insert code to materialize the
  // pointer if it is not available.
  if (allSingleSucc) {
    LoadPtr = MD->InsertPHITranslatedPointer(LI->getOperand(0), LoadBB,
                                             UnavailablePred, TD, *DT,
                                             NewInsts);
  } else {
    LoadPtr = MD->GetAvailablePHITranslatedValue(LI->getOperand(0), LoadBB,
                                                 UnavailablePred, TD, *DT);
  }

  // Assign value numbers to these new instructions.
  for (SmallVector<Instruction*, 8>::iterator NI = NewInsts.begin(),
       NE = NewInsts.end(); NI != NE; ++NI) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(*NI);
  }

  // If we couldn't find or insert a computation of this phi translated value,
  // we fail PRE.
  if (LoadPtr == 0) {
    DEBUG(errs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                 << *LI->getOperand(0) << "\n");
    return false;
  }

  // Make sure it is valid to move this load here.  We have to watch out for:
  //  @1 = getelementptr (i8* p, ...
  //  test p and branch if == 0
  //  load @1
  // It is valid to have the getelementptr before the test, even if p can be 0,
  // as getelementptr only does address arithmetic.
  // If we are not pushing the value through any multiple-successor blocks
  // we do not have this case.  Otherwise, check that the load is safe to
  // put anywhere; this can be improved, but should be conservatively safe.
  if (!allSingleSucc &&
      // FIXME: REEVALUATE THIS.
      !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator())) {
    assert(NewInsts.empty() && "Should not have inserted instructions");
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors.
  // Do it.
  DEBUG(errs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          errs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                LI->getAlignment(),
                                UnavailablePred->getTerminator());

  // Add the newly created load.
  ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                      NewLoad));

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (isa<PointerType>(V->getType()))
    MD->invalidateCachedPointerInfo(V);
  toErase.push_back(LI);
  NumPRELoad++;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could do that by recognizing if the clobber instructions are obviously
    // a common base + constant offset, and if the previous store (or memset)
    // completely covers this load.  This sort of thing can happen in bitfield
    // access code.
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L, DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can forward
    // a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L, DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,*TD);
      }
    }

    if (AvailVal) {
      DEBUG(errs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
                   << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (isa<PointerType>(AvailVal->getType()))
        MD->invalidateCachedPointerInfo(AvailVal);
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }

    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      errs() << "GVN: load ";
      WriteAsOperand(errs(), L);
      Instruction *I = Dep.getInst();
      errs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(errs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      }
      else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (isa<PointerType>(StoredVal->getType()))
      MD->invalidateCachedPointerInfo(StoredVal);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(errs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      }
      else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (isa<PointerType>(DepLI->getType()))
      MD->invalidateCachedPointerInfo(DepLI);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading for a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load occurs right after a lifetime begin, then the loaded value
  // is undefined.
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }
  }

  return false;
}

Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
    if (I != Locals->table.end())
      return I->second;
    Locals = Locals->parent;
  }

  return 0;
}

/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);
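
    // A conditional branch makes its condition known along each edge: it is
    // true in the true successor and false in the false successor.  When we
    // fully control a successor (it has a single predecessor), record the
    // corresponding constant as the leader for the condition's value number.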
    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());

    return false;

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }
  // Collapse PHI nodes.
  if (PHINode *p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && isa<PointerType>(constVal->getType()))
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }
  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && isa<PointerType>(repl->getType()))
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}
/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;
  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
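  // (E.g. a block whose sole predecessor ends in an unconditional branch to
  // it can be folded into that predecessor, leaving one straight-line block.)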
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;  // Advance before a possible merge so the iterator stays valid.
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) NumGVNBlocks++;

    Changed |= removedBlock;
  }
  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(errs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }

  // FIXME: Should perform GVN again after PRE does something. PRE can move
  // computations into blocks where they become fully redundant. Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();
  return Changed;
}
bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation: remember whether BI is at the block start,
    // and otherwise step back one, since BI itself may be about to be erased.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVector<Instruction*, 8>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(errs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    // Resume scanning just past the deleted instructions.
    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}
/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;
      // Only simple, side-effect-free scalar computations are candidates.
      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      uint32_t ValNo = VN.lookup(CurInst);
      // Look for the predecessors for PRE opportunities. We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other. We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
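      // The target shape, sketched (names are illustrative):
      //            entry
      //            /    \
      //        pred1    pred2
      //      %v = a+b    (a+b inserted here by PRE)
      //            \    /
      //             join       ; a+b recomputed here => PRE candidate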
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (*PI == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(*PI)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
          localAvail[*PI]->table.find(ValNo);
        if (predV == localAvail[*PI]->table.end()) {
          PREPred = *PI;
          NumWithout++;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[*PI] = predV->second;
          NumWith++;
        }
      }
      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;
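      // (NumWithout == 1 with NumWith >= 1 means exactly one predecessor
      // lacks the value and at least one already has it, so a single
      // inserted copy makes the expression fully redundant at the join.)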
      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = 0;
      for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
           i != e; ++i)
        if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
          SuccNum = i;
          break;
        }

      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }
      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them. Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor. This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        // Verify before freeing the clone; checking afterward would touch
        // freed memory.
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }
      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      NumGVNPRE++;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI)
        Phi->addIncoming(predMap[*PI], *PI);
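      // The join block now computes the value as (sketch, names illustrative):
      //   %v.pre-phi = phi i32 [ %v, %pred1 ], [ %v.pre, %PREPred ]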
      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && isa<PointerType>(Phi->getType()))
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(errs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }
  // Split the critical edges we recorded; the PRE opportunities they blocked
  // will be picked up on the next iteration.
  for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
       I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
    SplitCriticalEdge(I->first, I->second, this);

  return Changed || !toSplit.empty();
}
/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();
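  // Rebuild the per-block availability scopes: each block's scope is chained
  // to its immediate dominator's scope, so lookups see dominating values.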
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
        new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }
  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}
void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}
/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scopes to make sure the instruction isn't
  // ferreted away in any of them.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
           II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II)
        assert(II->second != Inst && "Inst still in value numbering scope!");

      VNS = VNS->parent;
    }
  }
}