//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
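// Illustrative usage (an assumption about the 'opt' driver, not something this
// file exercises): load PRE could be toggled off with, e.g.:
//   opt -gvn -enable-load-pre=false input.bc -o output.bc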
//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers. It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type *type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
  };
  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis *AA;
      MemoryDependenceAnalysis *MD;
      DominatorTree *DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(Constant* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}
namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9));

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}
//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//
Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}
Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;

  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}
//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}
uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumOperands() != C->getNumOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 1; i < C->getNumOperands(); ++i) {
        uint32_t c_vn = lookup_or_add(C->getOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber! Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies. If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with the local case above.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumOperands() != C->getNumOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 1; i < C->getNumOperands(); ++i) {
      uint32_t c_vn = lookup_or_add(C->getOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;
  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}
/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}
/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}
//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}
namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool nopre = false, bool noloads = false)
      : FunctionPass(&ID), NoPRE(nopre), NoLoads(noloads), MD(0) { }

  private:
    bool NoPRE;
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better.
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoPRE, bool NoLoads) {
  return new GVN(NoPRE, NoLoads);
}

static RegisterPass<GVN> X("gvn",
                           "Global Value Numbering");
void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
      errs() << I->first << "\n";
      I->second->dump();
  }
  errs() << "}\n";
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;

  return 0;
}
/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}
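// A minimal worked example of the speculation protocol above, assuming a
// diamond CFG where only the left predecessor defines the value:
//
//       [entry]         A query on [merge] optimistically inserts 2 for it
//       /     \         and recurses: [left] is fully available (1), but the
//   [left]   [right]    chain [right] -> [entry] reaches a block with no
//       \     /         available predecessors, so SpeculationFailure clears
//       [merge]         the speculative entries back to 0 and the query
//                       returns false.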
/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform it. We need to be able to bitcast to integer.
  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy) ||
      isa<StructType>(StoredVal->getType()) ||
      isa<ArrayType>(StoredVal->getType()))
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}
/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory,
/// and then a load from a must-aliased pointer of a different type, try to
/// coerce the stored value. LoadedTy is the type of the load we want to
/// replace and InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (isa<PointerType>(StoredValTy) && isa<PointerType>(LoadedTy)) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (isa<PointerType>(StoredValTy)) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (isa<PointerType>(TypeToCastTo))
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (isa<PointerType>(LoadedTy))
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it. If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (isa<PointerType>(StoredValTy)) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!isa<IntegerType>(StoredValTy)) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (isa<PointerType>(LoadedTy))
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
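// As an illustration (a sketch of what the casts above produce, not literal
// pass output), coercing a stored i64 to feed an i16 load on a little-endian
// target conceptually emits:
//   %tmp = trunc i64 %stored to i16       ; low bits feed the load
// while a big-endian target first shifts the interesting bits down:
//   %shr = lshr i64 %stored, 48
//   %tmp = trunc i64 %shr to i16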
/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset. Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}
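// For example (illustrative IR, hypothetical values): given
//   %p = getelementptr {i32, i32}* %base, i32 0, i32 1
//   %q = bitcast i32* %p to i8*
// a query on %q looks through the bitcast and the GEP and returns %base with
// Offset == 4, assuming a target where the second struct field sits at byte
// offset 4.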
/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove). This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't must-alias.
///
/// Check this case to see if there is anything more we can do before we give
/// up. This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform it. We need to be able to bitcast to integer.
  if (isa<StructType>(LoadTy) || isa<ArrayType>(LoadTy))
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias. AA must have gotten confused.
  // FIXME: Study to see if/when this happens.
  if (LoadOffset == StoreOffset) {
#if 0
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
  }

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load. In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3; // Convert to bytes.
  LoadSize >>= 3;

  bool isAAFailure = false;
  if (StoreOffset < LoadOffset) {
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  } else {
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;
  }

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it. We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation. Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
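// Worked example (byte offsets relative to a shared base): an 8-byte store
// covering bytes [0,8) clobbering a 2-byte load at offset 6 returns 6, since
// the load reads bytes [6,8) of the stored value. A 2-byte load at offset 7
// would need byte 8, which the store does not provide, so it returns -1.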
/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (isa<StructType>(DepSI->getOperand(0)->getType()) ||
      isa<ArrayType>(DepSI->getOperand(0)->getType()))
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory. In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}
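// Sketch of the memcpy case handled above (hypothetical IR, intrinsic
// signature elided):
//   @G = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
//   ; ... memcpy of 16 bytes from @G into %buf ...
//   %v = load i32* %p      ; where %p is %buf plus 4 bytes
// The load is answered by constant folding a load of @G at byte offset 4,
// yielding i32 2.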
/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't must-alias. Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = TD.getTypeSizeInBits(SrcVal->getType())/8;
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load. Convert
  // to an integer type to start with.
  if (isa<PointerType>(SrcVal->getType()))
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!isa<IntegerType>(SrcVal->getType()))
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
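// For instance (a sketch, assuming a little-endian target): extracting the
// i8 at Offset 2 from a stored i32 emits
//   %tmp  = lshr i32 %stored, 16
//   %tmp1 = trunc i32 %tmp to i8
// whereas a big-endian target would shift by (4 - 1 - 2) * 8 == 8 bits
// instead.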
/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // See if we can constant fold a load from the constant with the offset
  // applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}
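// Byte-splat sketch for the memset path above (illustrative SSA names):
// forwarding an i32 load from memset(P, %x, len) doubles the initialized
// bytes each round:
//   %v1 = zext i8 %x to i32     ; one byte set
//   %s1 = shl i32 %v1, 8
//   %v2 = or i32 %v1, %s1       ; two bytes set
//   %s2 = shl i32 %v2, 16
//   %v4 = or i32 %v2, %s2       ; all four bytes set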
namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  enum ValType {
    SimpleVal, // A simple value, possibly accessed at an offset.
    MemIntrin  // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

}
/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI);

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (isa<PointerType>(V->getType()))
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}
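// Conceptually (an illustrative sketch): if block %left makes "%a" available
// via a store and block %right provides an earlier load "%b", the SSAUpdater
// materializes
//   %merge = phi i32 [ %a, %left ], [ %b, %right ]
// at the join point, and that value replaces LI.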
static bool isLifetimeStart(Instruction *Inst) {
  if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}
/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about. Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block. Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl; also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same
      // as the pointer operand of the load if PHI translation occurs. Make
      // sure to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the
      // load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (isa<PointerType>(V->getType()))
      MD->invalidateCachedPointerInfo(V);
    toErase.push_back(LI);
    ++NumGVNLoad;
    return true;
  }
  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (!TmpBB) // If we haven't found any, bail now.
      return false;
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // If we have a repl set with LI itself in it, this means we have a loop where
  // at least one of the values is LI. Since this means that we won't be able
  // to eliminate LI even if we insert uses in the other predecessors, we will
  // end up increasing code size. Reject this by scanning for LI.
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    if (ValuesPerBlock[i].isSimpleValue() &&
        ValuesPerBlock[i].getSimpleValue() == LI)
      return false;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Okay, we have some hope :). Check to see if the loaded value is fully
  // available in all but one predecessor.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  BasicBlock *UnavailablePred = 0;

  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      continue;

    // If this load is not available in multiple predecessors, reject it.
    if (UnavailablePred && UnavailablePred != *PI)
      return false;
    UnavailablePred = *PI;
  }

  assert(UnavailablePred != 0 &&
         "Fully available value should be eliminated above!");

  // We don't currently handle critical edges :(
  if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
    DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
                 << UnavailablePred->getName() << "': " << *LI << '\n');
    return false;
  }

  // Do PHI translation to get its value in the predecessor if necessary. The
  // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
  //
  SmallVector<Instruction*, 8> NewInsts;

  // If all preds have a single successor, then we know it is safe to insert
  // the load on the pred (?!?), so we can insert code to materialize the
  // pointer if it is not available.
  PHITransAddr Address(LI->getOperand(0), TD);
  Value *LoadPtr = 0;
  if (allSingleSucc) {
    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                *DT, NewInsts);
  } else {
    Address.PHITranslateValue(LoadBB, UnavailablePred);
    LoadPtr = Address.getAddr();

    // Make sure the value is live in the predecessor.
    if (Instruction *Inst = dyn_cast_or_null<Instruction>(LoadPtr))
      if (!DT->dominates(Inst->getParent(), UnavailablePred))
        LoadPtr = 0;
  }

  // If we couldn't find or insert a computation of this phi translated value,
  // we fail PRE.
  if (LoadPtr == 0) {
    assert(NewInsts.empty() && "Shouldn't insert insts on failure");
    DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                 << *LI->getOperand(0) << "\n");
    return false;
  }

  // Assign value numbers to these new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  // Make sure it is valid to move this load here. We have to watch out for:
  //  @1 = getelementptr (i8* p, ...
  //  test p and branch if == 0
  //  load @1
  // It is valid to have the getelementptr before the test, even if p can be 0,
  // as getelementptr only does address arithmetic.
  // If we are not pushing the value through any multiple-successor blocks
  // we do not have this case. Otherwise, check that the load is safe to
  // put anywhere; this can be improved, but should be conservatively safe.
  if (!allSingleSucc &&
      // FIXME: REEVALUATE THIS.
      !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator())) {
    assert(NewInsts.empty() && "Should not have inserted instructions");
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                LI->getAlignment(),
                                UnavailablePred->getTerminator());

  // Add the newly created load.
  ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                      NewLoad));

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (isa<PointerType>(V->getType()))
    MD->invalidateCachedPointerInfo(V);
  toErase.push_back(LI);
  ++NumPRELoad;
  return true;
}
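// The net effect is classic load PRE (sketched, not literal pass output):
//
//   before:                             after:
//     pred1: %v = load i32* %p            pred1: %v = load i32* %p
//     pred2: (no load)                    pred2: %v.pre = load i32* %p
//     join:  %x = load i32* %p            join:  %x = phi i32 [ %v, %pred1 ],
//                                                             [ %v.pre, %pred2 ]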
1688 /// processLoad - Attempt to eliminate a load, first by eliminating it
1689 /// locally, and then attempting non-local elimination if that fails.
1690 bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
1694 if (L->isVolatile())
1697 // ... to a pointer that has been loaded from before...
1698 MemDepResult Dep = MD->getDependency(L);
1700 // If the value isn't available, don't do anything!
1701 if (Dep.isClobber()) {
1702 // Check to see if we have something like this:
1703 // store i32 123, i32* %P
1704 // %A = bitcast i32* %P to i8*
1705 // %B = gep i8* %A, i32 1
1708 // We could do that by recognizing if the clobber instructions are obviously
1709 // a common base + constant offset, and if the previous store (or memset)
1710 // completely covers this load. This sort of thing can happen in bitfield
1712 Value *AvailVal = 0;
1713 if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
1714 if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
1715 int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
1716 L->getPointerOperand(),
1719 AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
1720 L->getType(), L, *TD);
1723 // If the clobbering value is a memset/memcpy/memmove, see if we can forward
1724 // a value on from it.
1725 if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
1726 if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
1727 int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
1728 L->getPointerOperand(),
1731 AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,*TD);
1736 DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
1737 << *AvailVal << '\n' << *L << "\n\n\n");
1739 // Replace the load!
1740 L->replaceAllUsesWith(AvailVal);
1741 if (isa<PointerType>(AvailVal->getType()))
1742 MD->invalidateCachedPointerInfo(AvailVal);
1743 toErase.push_back(L);
1749 // fast print dep, using operator<< on instruction would be too slow
1750 dbgs() << "GVN: load ";
1751 WriteAsOperand(dbgs(), L);
1752 Instruction *I = Dep.getInst();
1753 dbgs() << " is clobbered by " << *I << '\n';
1758 // If it is defined in another block, try harder.
1759 if (Dep.isNonLocal())
1760 return processNonLocalLoad(L, toErase);
1762 Instruction *DepInst = Dep.getInst();
1763 if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
1764 Value *StoredVal = DepSI->getOperand(0);
1766 // The store and load are to a must-aliased pointer, but they may not
1767 // actually have the same type. See if we know how to reuse the stored
1768 // value (depending on its type).
1769 const TargetData *TD = 0;
1770 if (StoredVal->getType() != L->getType()) {
1771 if ((TD = getAnalysisIfAvailable<TargetData>())) {
1772 StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
1777 DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
1778 << '\n' << *L << "\n\n\n");
1785 L->replaceAllUsesWith(StoredVal);
1786 if (isa<PointerType>(StoredVal->getType()))
1787 MD->invalidateCachedPointerInfo(StoredVal);
1788 toErase.push_back(L);
  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else {
        return false;
      }
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (isa<PointerType>(DepLI->getType()))
      MD->invalidateCachedPointerInfo(DepLI);
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }
  // If this load really doesn't depend on anything, then we must be loading
  // an undef value.  This can happen when loading from a fresh allocation
  // with no intervening stores, for example:
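  //   %p = alloca i32
  //   %v = load i32* %p        ; no store in between, so %v is undef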
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    toErase.push_back(L);
    NumGVNLoad++;
    return true;
  }
  // If this load occurs right after a lifetime begin intrinsic, then the
  // loaded value is undefined as well.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      toErase.push_back(L);
      NumGVNLoad++;
      return true;
    }
  }

  return false;
}
Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  // Walk up the chain of dominating scopes until we find the number or run
  // out of parents.
  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator I = Locals->table.find(num);
    if (I != Locals->table.end())
      return I->second;
    Locals = Locals->parent;
  }

  return 0;
}
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }
  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;
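    // A conditional branch makes its condition a known constant along each
    // out-edge.  For example (block names are illustrative), after
    //   br i1 %cmp, label %bb.true, label %bb.false
    // %cmp is true in %bb.true and false in %bb.false, provided each
    // successor has no other predecessors.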
    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());

    return false;
  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast-failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }
  // Collapse PHI nodes.
  if (PHINode *p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);

    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && isa<PointerType>(constVal->getType()))
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }
  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  // Perform fast-path value-number-based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
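    // A dominating block already computed an expression with this value
    // number (e.g. an earlier "add i32 %a, %b"), so this instruction is
    // fully redundant; rewrite all uses to the earlier value.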
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && isa<PointerType>(repl->getType()))
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}
/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);
  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) NumGVNBlocks++;

    Changed |= removedBlock;
  }
  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }

  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}
bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVector<Instruction*, 8>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}
/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;
    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      // Skip instructions PRE cannot handle: allocas, terminators, phis,
      // void-typed instructions, anything that touches memory, and debug
      // intrinsics.
      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;
2064 // Look for the predecessors for PRE opportunities. We're
2065 // only trying to solve the basic diamond case, where
2066 // a value is computed in the successor and one predecessor,
2067 // but not the other. We also explicitly disallow cases
2068 // where the successor is its own predecessor, because they're
2069 // more complicated to get right.
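      // Illustrative diamond (block and value names are made up):
      //   entry:  br i1 %c, label %left, label %right
      //   left:   %v1 = add i32 %a, %b
      //           br label %merge
      //   right:  br label %merge
      //   merge:  %v2 = add i32 %a, %b
      // Here %v2 is available in %left but not in %right; PRE inserts the
      // add into %right and replaces %v2 with a phi of the two values.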
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (*PI == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(*PI)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
          localAvail[*PI]->table.find(ValNo);
        if (predV == localAvail[*PI]->table.end()) {
          PREPred = *PI;
          NumWithout++;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[*PI] = predV->second;
          NumWith++;
        }
      }
      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across indirect branches.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
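      // (Splitting first is what makes the insertion safe: an instruction
      // placed in PREPred itself would also execute along PREPred's other
      // out-edges, where its value may be unwanted.)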
      unsigned SuccNum = 0;
      for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
           i != e; ++i)
        if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
          SuccNum = i;
          break;
        }

      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }
      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      NumGVNPRE++;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));
      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI)
        Phi->addIncoming(predMap[*PI], *PI);
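      // In the diamond example above, the join block would now begin with
      //   %v2.pre-phi = phi i32 [ %v1, %left ], [ %v2.pre, %right ]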
      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && isa<PointerType>(Phi->getType()))
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }
  for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
       I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
    SplitCriticalEdge(I->first, I->second, this);

  return Changed || !toSplit.empty();
}
/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
        new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }
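  // Each block's scope is chained to its immediate dominator's scope, so a
  // lookupNumber query in a block also sees values recorded in any of its
  // dominators.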
  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}
void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}
/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scopes to make sure the instruction isn't
  // ferreted away in one of them.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
           II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}