//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> EnableFullLoadPRE("enable-full-load-pre", cl::init(false));
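
// Being standard cl::opt switches, these can be toggled when experimenting,
// e.g. (illustrative invocation, not a documented interface):
//   opt -gvn -enable-load-pre=false -stats input.bc -o output.bc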

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Function *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(Constant* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);

      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9));

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));

  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));

  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));

  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));

  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));

  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));

  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));

  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));

  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);

  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);

  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumOperands() != C->getNumOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 1; i < C->getNumOperands(); ++i) {
        uint32_t c_vn = lookup_or_add(C->getOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumOperands() != C->getNumOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 1; i < C->getNumOperands(); ++i) {
      uint32_t c_vn = lookup_or_add(C->getOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}
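
// Sketch of the effect for a readonly call (hypothetical IR, not from a real
// test): given
//   %a = call i32 @f(i32 %x)
//   %b = call i32 @f(i32 %x)
// with memdep reporting %a as the defining dependency of %b and all operands
// numbering equal, %b receives %a's value number and becomes removable.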

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}
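
// For example, after numbering
//   %x = add i32 %a, %b
//   %y = add i32 %a, %b
// both instructions build the same Expression (same opcode, type, and operand
// value numbers), so %y maps to %x's number and is a redundancy candidate.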

/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool nopre = false, bool noloads = false)
      : FunctionPass(&ID), NoPRE(nopre), NoLoads(noloads), MD(0) { }

  private:
    bool NoPRE;
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoPRE, bool NoLoads) {
  return new GVN(NoPRE, NoLoads);
}

static RegisterPass<GVN> X("gvn",
                           "Global Value Numbering");

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
      errs() << I->first << "\n";
      I->second->dump();
  }
  errs() << "}\n";
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
/// map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either.  Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block.  We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to 1.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks.  This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}
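
// A sketch of the speculation on a hypothetical loop: querying the header H
// inserts H as 2 (speculated), then recurses into its predecessors; the latch
// reaches H again, finds the speculative entry, and upgrades it to 3. If the
// preheader is fully available (1), the speculation is consistent and H is
// reported fully available; otherwise the SpeculationFailure path un-marks H
// and every block whose availability was derived from it.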

/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy) ||
      isa<StructType>(StoredVal->getType()) ||
      isa<ArrayType>(StoredVal->getType()))
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}
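
// Illustratively: an i64 store can feed an i32 load from the same address
// (the store covers all the loaded bits), an i16 store cannot feed an i32
// load, and struct/array types are rejected outright.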

/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value.  LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (isa<PointerType>(StoredValTy) && isa<PointerType>(LoadedTy)) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (isa<PointerType>(StoredValTy)) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (isa<PointerType>(TypeToCastTo))
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (isa<PointerType>(LoadedTy))
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it.  If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (isa<PointerType>(StoredValTy)) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!isa<IntegerType>(StoredValTy)) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (isa<PointerType>(LoadedTy))
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
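
// A before/after sketch for the same-size case (hypothetical IR): a stored
// i8* feeding a load of i32* is reused directly as
//   %v = bitcast i8* %p to i32*
// while a wider store feeding a narrower load goes through the lshr/trunc
// sequence emitted at the end of this function.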

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset.  Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}
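
// For example (hypothetical types): for
//   %p = getelementptr {i32, i32}* %base, i32 0, i32 1
// this returns %base and adds the field-1 offset (4 bytes with a typical
// TargetData) into Offset.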

/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove).  This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up.  This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them.  We need to be able to bitcast to integer.
  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy))
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias.  AA must have gotten confused.
  // FIXME: Study to see if/when this happens.
  if (LoadOffset == StoreOffset) {
#if 0
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
  }

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load.  In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3;  // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset) {
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  } else {
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
  }
  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it.  We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation.  Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
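
// Worked example (illustrative): a 16-byte (128-bit) store at Base+0
// clobbering a 4-byte load at Base+8 returns 8, i.e. the load reads bytes
// 8..11 of the stored value, which the callers below then extract.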

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (isa<StructType>(DepSI->getOperand(0)->getType()) ||
      isa<ArrayType>(DepSI->getOperand(0)->getType()))
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory.  In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}
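
// Illustrative case this handles (hypothetical IR): a memcpy of 32 bytes from
// a constant global @g into %buf, followed by an i32 load from %buf+4, folds
// the load into a constant read of @g at byte offset 4.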

/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.  This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias.  Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load.  Convert
  // to an integer type to start with.
  if (isa<PointerType>(SrcVal->getType()))
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!isa<IntegerType>(SrcVal->getType()))
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}
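
// Splat sketch for LoadSize == 4: starting from the zero-extended byte B, the
// doubling branch computes B | (B << 8), then ORs that with itself shifted by
// 16, producing the full four-byte memset pattern in two iterations.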

namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type.  This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};
}

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI.  This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI);

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (isa<PointerType>(V->getType()))
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}
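
// For example, if the value reaches the load as %a out of %pred1 and %b out
// of %pred2 (hypothetical names), SSAUpdater materializes
//   %phi = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
// in the load's block and returns it as the replacement value.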

static bool isLifetimeStart(Instruction *Inst) {
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about.  Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block.  Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc).  Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same as
      // the pointer operand of the load if PHI translation occurs.  Make sure
      // to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from the
      // stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                           DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value.  Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (isa<PointerType>(V->getType()))
      MD->invalidateCachedPointerInfo(V);
    toErase.push_back(LI);
    NumGVNLoad++;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // If we have a repl set with LI itself in it, this means we have a loop where
  // at least one of the values is LI.  Since this means that we won't be able
  // to eliminate LI even if we insert uses in the other predecessors, we will
  // end up increasing code size.  Reject this by scanning for LI.
  if (!EnableFullLoadPRE) {
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
      if (ValuesPerBlock[i].isSimpleValue() &&
          ValuesPerBlock[i].getSimpleValue() == LI)
        return false;
  }

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;
    // We don't currently handle critical edges :(
    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
            << Pred->getName() << "': " << *LI << '\n');
      return false;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");
  if (!EnableFullLoadPRE) {
    // If this load is unavailable in multiple predecessors, reject it.
    // FIXME: If we could restructure the CFG, we could make a common pred with
    // all the preds that don't have an available LI and insert a new load into
    // that one block.
    if (NumUnavailablePreds != 1)
      return false;
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary.  The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getOperand(0), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred);
      LoadPtr = Address.getAddr();

      // Make sure the value is live in the predecessor.
      if (Instruction *Inst = dyn_cast_or_null<Instruction>(LoadPtr))
        if (!DT->dominates(Inst->getParent(), UnavailablePred))
          LoadPtr = 0;
    }

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getOperand(0) << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here.  We have to watch out for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can be 0,
    // as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case.  Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map.  However, in doing so, we risk getting into
    // ordering issues.  If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                  LI->getAlignment(),
                                  UnavailablePred->getTerminator());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (isa<PointerType>(V->getType()))
    MD->invalidateCachedPointerInfo(V);
  toErase.push_back(LI);
  NumPRELoad++;
  return true;
}
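
// Load PRE sketch on a hypothetical diamond: if the value is available on one
// path into the join block but not the other, a single reload is inserted at
// the end of the predecessor that lacks it, and ConstructSSAForLoadSet then
// joins both copies with a phi, making the original load fully redundant.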

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could do that by recognizing if the clobber instructions are obviously
    // a common base + constant offset, and if the previous store (or memset)
    // completely covers this load.  This sort of thing can happen in bitfield
    // access code.
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can forward
    // a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,*TD);
      }
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
            << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (isa<PointerType>(AvailVal->getType()))
        MD->invalidateCachedPointerInfo(AvailVal);
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }

    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);
1789 Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (isa<PointerType>(StoredVal->getType()))
      MD->invalidateCachedPointerInfo(StoredVal);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }
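  // Likewise for a depending load (invented names): in
  //   %a = load i32* %P
  //   %b = load i32* %P
  // the second load is fully redundant, and every use of %b can simply be
  // rewritten to use %a (through a coercion when the types differ).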
  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (isa<PointerType>(DepLI->getType()))
      MD->invalidateCachedPointerInfo(DepLI);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }
  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading from a fresh allocation with no
  // intervening stores, for example.
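  // For instance (invented names):
  //   %P = alloca i32
  //   %v = load i32* %P   ; nothing was stored to %P yet, so %v is undef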
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }
  // If this load occurs right after a lifetime.start intrinsic for the same
  // pointer, then the loaded value is undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }
  }

  return false;
}
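/// lookupNumber - Find a value with the given value number, starting in BB's
/// scope and walking up the chain of parent scopes, which mirrors the
/// dominator tree; any hit is therefore available in a dominating block.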
Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator TI = Locals->table.find(num);
    if (TI != Locals->table.end())
      return TI->second;
    Locals = Locals->parent;
  }
  return 0;
}
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }
  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;
    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());
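    // For illustration (invented names): after "br i1 %cmp, label %T, label %F",
    // where %T has no other predecessors, an instruction in %T that receives
    // the same value number as %cmp (say, a re-computed "icmp eq i32 %a, %b")
    // is replaced by "true" through the fast-path lookup below.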
    return false;

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }
  // Collapse PHI nodes.
  if (PHINode *p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && isa<PointerType>(constVal->getType()))
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }
  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
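  // For illustration (invented names): if a dominating block already computed
  // "%x = add i32 %a, %b" and this instruction is another "add i32 %a, %b",
  // both receive the same value number, lookupNumber finds %x, and the
  // redundant add is replaced by %x and queued for deletion.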
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && isa<PointerType>(repl->getType()))
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}
/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);
  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
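  // For example (invented label), a block %bb whose single predecessor ends
  // in "br label %bb" is folded into that predecessor by
  // MergeBlockIntoPredecessor, so later phases see one straight-line block
  // instead of two.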
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) NumGVNBlocks++;

    Changed |= removedBlock;
  }
  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }
  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}
bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }
    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation: remember whether we are at the start of
    // the block, and otherwise back up before erasing.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}
/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      uint32_t ValNo = VN.lookup(CurInst);
      // Look at the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
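      // The target shape, with invented labels:
      //
      //          entry
      //          /   \
      //       bb.a   bb.b    value available in bb.a but not in bb.b:
      //          \   /       insert one copy into bb.b, then merge the two
      //          join        with a phi at the top of the join block.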
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (*PI == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(*PI)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
          localAvail[*PI]->table.find(ValNo);
        if (predV == localAvail[*PI]->table.end()) {
          PREPred = *PI;
          NumWithout++;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[*PI] = predV->second;
          NumWith++;
        }
      }
      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;
      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = 0;
      for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
           i != e; ++i)
        if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
          SuccNum = i;
          break;
        }

      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }
      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }
      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        delete PREInstr;
        DEBUG(verifyRemoved(PREInstr));
        continue;
      }
      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      NumGVNPRE++;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI)
        Phi->addIncoming(predMap[*PI], *PI);

      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;
      CurInst->replaceAllUsesWith(Phi);
      if (MD && isa<PointerType>(Phi->getType()))
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }
  for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
       I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
    SplitCriticalEdge(I->first, I->second, this);

  // Splitting an edge counts as a change: the PRE it unblocks can happen on
  // the next round.
  return Changed || !toSplit.empty();
}
/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Recreate the value-number scopes, one per block, each parented to the
  // scope of the block's immediate dominator.
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
        new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }
  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}
void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}
/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
         I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
             II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II)
        assert(II->second != Inst && "Inst still in value numbering scope!");

      VNS = VNS->parent;
    }
  }
}