//===- GVN.cpp - Eliminate redundant values and loads --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//
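//
// As a quick illustration (a hypothetical example, not taken from the test
// suite), GVN rewrites
//
//     %a = add i32 %x, %y
//     %b = add i32 %x, %y   ; same opcode, same operand value numbers
//     %c = mul i32 %a, %b
//
// into
//
//     %a = add i32 %x, %y
//     %c = mul i32 %a, %a
//
// because both adds map to the same Expression and receive the same value
// number.
//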
#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false));
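// A minimal usage sketch (assuming the standard 'opt' driver): each flag
// above can be toggled from the command line, e.g.
//
//     opt -gvn -enable-pre=false -enable-load-pre=false in.bc -o out.bc
//
// runs GVN with both scalar PRE and load PRE disabled.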
//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };
    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    /*bool operator!=(const Expression &other) const {
      return !(*this == other);
    }*/
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9));

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}
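// Note on the two sentinel opcodes above: DenseMap requires every key type to
// reserve two distinct values that clients never insert, the "empty" key and
// the "tombstone" key (used to mark erased buckets).  Expression satisfies
// this with the EMPTY and TOMBSTONE opcodes, which is also why operator==
// handles them before comparing any other fields.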
//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//
Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}
Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  CallSite CS(C);
  for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;

  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}
//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}
uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}
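// Worked example (hypothetical IR, assuming @f is marked 'readonly'): in
//
//     %r1 = call i32 @f(i32 %x)
//     %r2 = call i32 @f(i32 %x)   ; no intervening write to memory @f reads
//
// memdep reports %r1 as the defining dependency of %r2; the argument value
// numbers match, so both calls receive the same value number and the second
// call becomes redundant.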
/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}
/// lookup - Returns the value number of the specified value.  Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}
//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}
namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
        : FunctionPass(ID), NoLoads(noloads), MD(0) { }

  private:
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS(GVN, "gvn", "Global Value Numbering", false, false);
void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}
static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(*UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}
/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
/// map is actually a tri-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}
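// Tri-state example (hypothetical CFG): with blocks A -> B -> C and the value
// known available only in A, querying C first marks C as speculatively
// available (2), then recurses through B to A; A is known available (1), so
// the speculation holds and C is reported fully available.  If A had not
// produced the value, SpeculationFailure would walk C's successors and clear
// any availability that was derived from the speculation.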
/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}
/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
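// Coercion example (hypothetical, little-endian, 64-bit pointers): a stored
// i8* reloaded as i64 becomes a single ptrtoint; a stored i64 reloaded as i32
// takes the "smaller load" path: at offset 0 no shift is needed, so the value
// is simply truncated to i32.  On a big-endian target the same reload would
// first lshr the i64 by 32 bits to move the addressed bytes into the low half.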
/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}
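// Decomposition example (hypothetical IR): for
//
//     %p = getelementptr { i32, i32 }* %base, i32 0, i32 1
//
// the struct layout gives element 1 an offset of 4 bytes, so this returns
// %base with Offset += 4.  Bitcasts are looked through without affecting the
// offset.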
/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.  One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;

  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
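// Offset example (hypothetical): a 4-byte store at base+4 (StoreOffset 4,
// StoreSize 4) clobbering a 1-byte load at base+6 (LoadOffset 6, LoadSize 1)
// fully contains the load, so this returns 2: the load reads byte 2 of the
// stored value.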
/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getOperand(0)->getType()->isStructTy() ||
      DepSI->getOperand(0)->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}
static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}
/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
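// Arithmetic example (hypothetical, little-endian): forwarding byte 2 of a
// stored i32 to an i8 load gives ShiftAmt = 2*8 = 16, so the emitted IR is
// roughly
//
//     %t0 = lshr i32 %stored, 16
//     %t1 = trunc i32 %t0 to i8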
/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}
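// Splat example (hypothetical): for an i32 load fed by memset(p, %ch, n), the
// loop above doubles the number of set bytes per iteration: 0x000000ch, then
// or'd with (val << 8) -> 0x0000chch, then or'd with (val << 16) ->
// 0xchchchch, i.e. two iterations instead of three single-byte inserts.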
namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

}
/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}
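// PHI construction example (hypothetical CFG): if the loaded value is %v1 in
// predecessor BB1 and %v2 in predecessor BB2, SSAUpdater places
//
//     %merge = phi i32 [ %v1, %BB1 ], [ %v2, %BB2 ]
//
// at the join block, and the load is replaced by %merge.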
static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}
/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same as
      // the pointer operand of the load if PHI translation occurs.  Make sure
      // to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from the
      // stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                           DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the loaded value is larger or equal to the value we need, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    ++NumGVNLoad;
    return true;
  }
  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated.  Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // If we have a repl set with LI itself in it, this means we have a loop where
  // at least one of the values is LI.  Since this means that we won't be able
  // to eliminate LI even if we insert uses in the other predecessors, we will
  // end up increasing code size.  Reject this by scanning for LI.
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    if (ValuesPerBlock[i].isSimpleValue() &&
        ValuesPerBlock[i].getSimpleValue() == LI) {
      // Skip cases where LI is the only definition, even for EnableFullLoadPRE.
      if (!EnableFullLoadPRE || e == 1)
        return false;
    }
  }

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks))
      continue;
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }
  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }
  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");
  if (!EnableFullLoadPRE) {
    // If this load is unavailable in multiple predecessors, reject it.
    // FIXME: If we could restructure the CFG, we could make a common pred with
    // all the preds that don't have an available LI and insert a new load into
    // that one block.
    if (NumUnavailablePreds != 1)
      return false;
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.  The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getOperand(0), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getOperand(0) << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here.  We have to watch out for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can be 0,
    // as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case.  Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                  LI->getAlignment(),
                                  UnavailablePred->getTerminator());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  ++NumPRELoad;
  return true;
}
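// Load PRE example (hypothetical CFG): given predecessors P1 and P2 of the
// block containing 'load %p', where the value is available in P1 (say, via a
// dominating store) but not in P2, the code above inserts '%lv.pre = load %p'
// at the end of P2 and merges the two values with a phi, making the original
// load fully redundant.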
1735 /// processLoad - Attempt to eliminate a load, first by eliminating it
1736 /// locally, and then attempting non-local elimination if that fails.
1737 bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
1741 if (L->isVolatile())
1744 // ... to a pointer that has been loaded from before...
1745 MemDepResult Dep = MD->getDependency(L);
1747 // If the value isn't available, don't do anything!
1748 if (Dep.isClobber()) {
1749 // Check to see if we have something like this:
1750 // store i32 123, i32* %P
1751 // %A = bitcast i32* %P to i8*
1752 // %B = gep i8* %A, i32 1
1755 // We could do that by recognizing if the clobber instructions are obviously
1756 // a common base + constant offset, and if the previous store (or memset)
1757 // completely covers this load. This sort of thing can happen in bitfield
1759 Value *AvailVal = 0;
1760 if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
1761 if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
1762 int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
1763 L->getPointerOperand(),
1766 AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
1767 L->getType(), L, *TD);
1770 // If the clobbering value is a memset/memcpy/memmove, see if we can forward
1771 // a value on from it.
1772 if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
1773 if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
1774 int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
1775 L->getPointerOperand(),
1778 AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,*TD);
    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
                   << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }
    DEBUG(
      // Fast-print the dependency; using operator<< on the instruction
      // would be too slow.
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }
  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);
  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type. See if we know how to reuse the stored
    // value (depending on its type).
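    //
    // For example (hypothetical values; %P0 and %P1 must-alias):
    //   store float %f, float* %P0
    //   %v = load i32* %P1
    // can reuse %f for %v through a float-to-i32 bitcast.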
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove the load!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }
  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually
    // have the same type. See if we know how to reuse the previously loaded
    // value (depending on its type).
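    //
    // For example (hypothetical): if the earlier load read the address as an
    // i32 and this one reads it as a float, the old value can be reused via
    // an i32-to-float bitcast instead of reloading.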
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove the load!
    L->replaceAllUsesWith(AvailableVal);
    if (DepLI->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(DepLI);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }
  // If this load really doesn't depend on anything, then we must be loading
  // an undef value. This can happen when loading from a fresh allocation
  // with no intervening stores, for example.
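  //
  // For example (illustrative):
  //   %p = alloca i32
  //   %v = load i32* %p        ; nothing was stored to %p, so %v is undef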
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }
  // If this load occurs right after a lifetime.start intrinsic, then the
  // loaded value is undefined.
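  //
  // For example (illustrative):
  //   call void @llvm.lifetime.start(i64 4, i8* %p)
  //   %v = load i32* %p        ; no store since the lifetime marker, so %v
  //                            ; is undef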
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }
  }

  return false;
}
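/// lookupNumber - Find a value with the given value number that is available
/// in block BB: check BB's own scope first, then walk up the parent
/// (dominator) scopes. Returns null if no such value is available.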
Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator FI = Locals->table.find(num);
    if (FI != Locals->table.end())
      return FI->second;
    Locals = Locals->parent;
  }

  return 0;
}
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }
  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());
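    // For example, after "br i1 %c, label %T, label %F", %c is known to be
    // true in %T and false in %F whenever each successor has no other
    // predecessor.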
    return false;

  // Allocations and terminators are always uniquely numbered, so we can save
  // time and memory by failing them fast.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }
  // Collapse PHI nodes.
  if (PHINode *p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);
    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && constVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);
      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
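  // (NextNum was read before numbering I, so Num == NextNum exactly when I
  // was assigned a fresh number rather than matching an existing expression.)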
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && repl->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}
/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);
  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }
  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something. PRE can move
  // computations into blocks where they become fully redundant. Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}
bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}
/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
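///
/// For example (schematic CFG; names hypothetical):
///
///        entry
///        /   \            "%x = add i32 %a, %b" is computed in 'then' and
///     then   else         again in 'merge', but not in 'else'. PRE inserts
///        \   /            a copy of the add at the end of 'else' and
///        merge            replaces the add in 'merge' with a PHI of the
///                         two predecessors' values.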
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;
    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      // We don't currently value number ANY inline asm calls.
      if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
        if (CallI->isInlineAsm())
          continue;
      uint32_t ValNo = VN.lookup(CurInst);

      // Scan the predecessors for PRE opportunities. We're only trying to
      // solve the basic diamond case, where a value is computed in the
      // successor and in one predecessor but not the other. We also
      // explicitly disallow cases where the successor is its own
      // predecessor, because they're more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (P == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(P)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
          localAvail[P]->table.find(ValNo);
        if (predV == localAvail[P]->table.end()) {
          PREPred = P;
          ++NumWithout;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[P] = predV->second;
          ++NumWith;
        }
      }
      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across indirect branches.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
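      // (A critical edge runs from a block with several successors to a
      // block with several predecessors; hoisting into the predecessor of
      // such an edge would compute the value on paths that don't need it,
      // so the edge must be split first.)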
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }
      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them. Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }
      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor. This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      ++NumGVNPRE;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        Phi->addIncoming(predMap[P], P);
      }

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);
      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}
/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;

  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}
/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();
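  // Build one ValueNumberScope per block, chained to the scope of the
  // block's immediate dominator, so that availability lookups can walk up
  // the dominator tree.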
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
        new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}
void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}
/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
         I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
             II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }
      VNS = VNS->parent;
    }
  }
}