//===- GVN.cpp - Eliminate redundant values and loads --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false));
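
// These flags gate the PRE phases below: scalar PRE, load PRE, and the more
// aggressive full load PRE. Each is an ordinary cl::opt boolean, so load PRE
// can be disabled from the command line with, e.g.,
// "opt -gvn -enable-load-pre=false".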
//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers. It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };
    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }
    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };
  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;
      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(Constant* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}
namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9)) +
            hash * 37;

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
            hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}
//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//
Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}
Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}
Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}
Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}
Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}
//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//
/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}
uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumOperands() != C->getNumOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 1; i < C->getNumOperands(); ++i) {
        uint32_t c_vn = lookup_or_add(C->getOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber! Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies. If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumOperands() != C->getNumOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 1; i < C->getNumOperands(); ++i) {
      uint32_t c_vn = lookup_or_add(C->getOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}
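
// For example, given a readonly function @f, in:
//   %x = call i32 @f(i32 %a)
//   %y = call i32 @f(i32 %a)   ; no may-write instructions in between
// memdep reports the first call as the def of the second, every operand pair
// gets identical value numbers, and %y is assigned %x's value number.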
/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or :
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}
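
// For example, in:
//   %a = add i32 %x, %y
//   %b = add i32 %x, %y
// both adds build the same Expression (same opcode, type, and operand value
// numbers), so expressionNumbering hands out one number for both; later
// processing can then replace %b with %a.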
/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}
/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}
/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}
/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}
//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}
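
// A block's scope is expected to be chained to the scope of its immediate
// dominator (the pass driver sets the parent pointers up that way), so a
// lookup that misses locally walks up the dominator tree: anything available
// in a dominator is available here as well.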
namespace {
  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
      : FunctionPass(&ID), NoLoads(noloads), MD(0) { }

  private:
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }
    // Helper functions.
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}
// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

static RegisterPass<GVN> X("gvn",
                           "Global Value Numbering");
void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
      errs() << I->first << "\n";
      I->second->dump();
  }
  errs() << "}\n";
}
static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}
Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}
/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}
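
// For example, for a loop header H with preheader P (value available) and
// latch L: the query for H inserts H speculatively (state 2), the recursive
// query for L finds H in state 2, bumps it to state 3, and succeeds. Had P
// been unavailable instead, SpeculationFailure would have walked H's
// successors and cleared the optimistic entries back to 0.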
/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}
/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value. LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }
  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it. If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset. Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}
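
// For example, for "%q = getelementptr {i32, i32}* %p, i32 0, i32 1" this
// returns %p and advances Offset by 4, the second field's offset in the
// struct layout.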
/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove). This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up. This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias. AA must have gotten confused.
  // FIXME: Study to see if/when this happens. One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load. In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it. We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation. Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
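
// For example, an i64 store at byte offset 16 from the common base fully
// covers an i32 load at offset 20, so the function returns 4: the load reads
// bytes 4..7 of the stored value.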
/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getOperand(0)->getType()->isStructTy() ||
      DepSI->getOperand(0)->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}
static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory. In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}
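
// For example, if the clobber is a memcpy whose source is a constant global
// @G, a load of i32 at byte offset 4 within the copied range constant-folds
// to the i32 found 4 bytes into @G's initializer.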
/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias. Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load. Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
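
// For example, forwarding "store i64 %v" into a 2-byte load at Offset 2 on a
// little-endian target emits:
//   %t = lshr i64 %v, 16
//   %x = trunc i64 %t to i16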
/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }
  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}
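
// For example, forwarding "memset(%p, i8 %c, 1234)" into a 4-byte load splats
// the byte by repeated doubling:
//   %v  = zext i8 %c to i32
//   %s1 = shl i32 %v, 8
//   %v2 = or i32 %v, %s1     ; two bytes set
//   %s2 = shl i32 %v2, 16
//   %v4 = or i32 %v2, %s2    ; four bytes set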
namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

}
/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI);

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}
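
// For example, if %v1 is the available value in one predecessor and %v2 in
// the other, SSAUpdater materializes "%m = phi [ %v1, ... ], [ %v2, ... ]"
// in the load's block, and %m is the value returned to replace the load.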
static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}
/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about. Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block. Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;
  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same as
      // the pointer operand of the load if PHI translation occurs. Make sure
      // to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from the
      // store.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                           DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                         S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }
  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    ++NumGVNLoad;
    return true;
  }
  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;
  // If we have a repl set with LI itself in it, this means we have a loop where
  // at least one of the values is LI. Since this means that we won't be able
  // to eliminate LI even if we insert uses in the other predecessors, we will
  // end up increasing code size. Reject this by scanning for LI.
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    if (ValuesPerBlock[i].isSimpleValue() &&
        ValuesPerBlock[i].getSimpleValue() == LI) {
      // Skip cases where LI is the only definition, even for EnableFullLoadPRE.
      if (!EnableFullLoadPRE || e == 1)
        return false;
    }
  }

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }
  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }
  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }
  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");
  if (!EnableFullLoadPRE) {
    // If this load is unavailable in multiple predecessors, reject it.
    // FIXME: If we could restructure the CFG, we could make a common pred with
    // all the preds that don't have an available LI and insert a new load into
    // that one block.
    if (NumUnavailablePreds != 1)
      return false;
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
    //
    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getOperand(0), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getOperand(0) << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here. We have to watch out for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can be 0,
    // as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case. Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }
  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                  LI->getAlignment(),
                                  UnavailablePred->getTerminator());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  ++NumPRELoad;
  return true;
}
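
// For example, in a diamond where the loaded value is only available from one
// arm, load PRE inserts a reload at the end of the other arm's predecessor;
// the two values then meet in a phi and the original load becomes fully
// redundant.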
/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could do that by recognizing if the clobber instructions are obviously
    // a common base + constant offset, and if the previous store (or memset)
    // completely covers this load. This sort of thing can happen in bitfield
    // access code.
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can forward
    // a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,*TD);
      }
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
            << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }

    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    VN.erase(L);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }
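
  // Hypothetical instance of the coercion above (not from this file):
  //   store i32 %x, i32* %P
  //   %f = load float* %Q     ; memdep proved %Q must-aliases %P
  // The stored i32 %x satisfies the float load via a bitcast, so the load
  // itself can be deleted.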

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (DepLI->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(DepLI);
    VN.erase(L);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading for a fresh allocation with no
  // intervening stores, for example.
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    VN.erase(L);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }
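
  // Hypothetical instance of the case above (not from this file):
  //   %p = alloca i32
  //   %v = load i32* %p       ; no intervening store, so %v is undef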

  // If this load occurs right after a lifetime begin, the loaded value is
  // undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      VN.erase(L);
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }
  }

  return false;
}

/// lookupNumber - Search the dominator-scoped availability tables for the
/// given value number, starting at BB and walking up the scope chain.
Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator TI = Locals->table.find(num);
    if (TI != Locals->table.end())
      return TI->second;
    Locals = Locals->parent;
  }

  return 0;
}
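
// Sketch of the scope chain walked above, for a hypothetical dominator tree:
//   entry { VN 5 -> %a }
//     `-- bb1 { VN 9 -> %b }   ; lookupNumber(bb1, 5) climbs up and finds %a
// Each ValueNumberScope's parent is the scope of its immediate dominator.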

/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());
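
    // Illustrative example (hypothetical IR): after
    //   br i1 %cond, label %T, label %F
    // a redundant use of %cond can be folded to 'true' inside %T and to
    // 'false' inside %F, provided each successor has no other predecessors.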

    return false;

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }

  // Collapse PHI nodes.
  if (PHINode *p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && constVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && repl->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}

/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) NumGVNBlocks++;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }

  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}
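
// Usage note (illustrative): this pass is normally created via
// createGVNPass() or scheduled from the command line, e.g. "opt -gvn foo.ll".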

bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look for the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
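      //
      // Hypothetical sketch of the diamond being targeted:
      //   [pred A: has x+y]    [pred B: lacks x+y]  <- copy inserted here
      //              \              /
      //           [CurrentBlock: x+y]  <- becomes a phi of the two copies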
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (*PI == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(*PI)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
                                            localAvail[*PI]->table.find(ValNo);
        if (predV == localAvail[*PI]->table.end()) {
          PREPred = *PI;
          NumWithout++;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[*PI] = predV->second;
          NumWith++;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      NumGVNPRE++;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));

      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI)
        Phi->addIncoming(predMap[*PI], *PI);

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}
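
// For reference (illustrative): an edge A->C is critical when A has multiple
// successors and C has multiple predecessors, e.g.
//   A: br i1 %c, label %B, label %C   ; A has two successors
//   C: ...                            ; C is also reachable from elsewhere
// No instruction can be placed "on" such an edge until it is split.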

/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
                   new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}

/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
         I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
             II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}