X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTransforms%2FScalar%2FGVN.cpp;h=5564dbde1bfb311849d0648a87cf488780cc1adf;hb=2e9f0b1a323848f6d548c1e9eb16ece6a3881dbd;hp=c966311c9e6be453d5cdb8f4000f3a4fbaeaba55;hpb=051a950000e21935165db56695e35bade668193b;p=oota-llvm.git

diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index c966311c9e6..5564dbde1bf 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -10,6 +10,9 @@
 // This pass performs global value numbering to eliminate fully redundant
 // instructions.  It also performs simple dead load elimination.
 //
+// Note that this pass does the value numbering itself, it does not use the
+// ValueNumbering analysis passes.
+//
 //===----------------------------------------------------------------------===//
 
 #define DEBUG_TYPE "gvn"
@@ -18,11 +21,8 @@
 #include "llvm/Constants.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/Function.h"
-#include "llvm/IntrinsicInst.h"
 #include "llvm/Instructions.h"
-#include "llvm/ParameterAttributes.h"
 #include "llvm/Value.h"
-#include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DepthFirstIterator.h"
 #include "llvm/ADT/SmallPtrSet.h"
@@ -35,21 +35,15 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Target/TargetData.h"
-#include <list>
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
 using namespace llvm;
 
 STATISTIC(NumGVNInstr, "Number of instructions deleted");
 STATISTIC(NumGVNLoad,  "Number of loads deleted");
-STATISTIC(NumMemSetInfer, "Number of memsets inferred");
+STATISTIC(NumGVNPRE,   "Number of instructions PRE'd");
 
-namespace {
-  cl::opt<bool>
-  FormMemSet("form-memset-from-stores",
-             cl::desc("Transform straight-line stores to memsets"),
-             cl::init(true), cl::Hidden);
-}
+static cl::opt<bool> EnablePRE("enable-pre",
+                               cl::init(false), cl::Hidden);
 
 //===----------------------------------------------------------------------===//
 //                         ValueTable Class
 //===----------------------------------------------------------------------===//
 
@@ -69,8 +63,8 @@ namespace {
                             FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
                             SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
                             FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
-                            PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, EMPTY,
-                            TOMBSTONE };
+                            PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, CONSTANT,
+                            EMPTY, TOMBSTONE };
 
     ExpressionOpcode opcode;
     const Type* type;
@@ -143,6 +137,8 @@ namespace {
       DenseMap<Value*, uint32_t> valueNumbering;
       DenseMap<Expression, uint32_t> expressionNumbering;
       AliasAnalysis* AA;
+      MemoryDependenceAnalysis* MD;
+      DominatorTree* DT;
 
       uint32_t nextValueNumber;
 
@@ -158,6 +154,7 @@ namespace {
       Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
       Expression create_expression(CallInst* C);
+      Expression create_expression(Constant* C);
     public:
       ValueTable() : nextValueNumber(1) { }
       uint32_t lookup_or_add(Value* V);
@@ -167,7 +164,8 @@ namespace {
       void erase(Value* v);
       unsigned size();
       void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
-      uint32_t hash_operand(Value* v);
+      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
+      void setDomTree(DominatorTree* D) { DT = D; }
   };
 }
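
// A minimal standalone sketch (not part of this patch) of the numbering
// scheme ValueTable implements: an instruction is reduced to a key of
// (opcode, operand value numbers), and syntactically distinct instructions
// that reduce to the same key receive the same number. All names below are
// illustrative, not LLVM APIs.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <tuple>

int main() {
  typedef std::tuple<std::string, uint32_t, uint32_t> ExprKey;
  std::map<ExprKey, uint32_t> exprNumbering;
  std::map<std::string, uint32_t> valueNumbering;
  uint32_t next = 1;

  // %a and %b are leaves: they get fresh numbers.
  valueNumbering["%a"] = next++;
  valueNumbering["%b"] = next++;

  // %x = add %a, %b  and  %y = add %a, %b  hash to the same key, so %y is
  // assigned %x's number -- that is the redundancy GVN removes.
  for (std::string inst : {"%x", "%y"}) {
    ExprKey key("add", valueNumbering["%a"], valueNumbering["%b"]);
    auto it = exprNumbering.find(key);
    uint32_t vn = (it != exprNumbering.end()) ? it->second
                                              : (exprNumbering[key] = next++);
    valueNumbering[inst] = vn;
    std::cout << inst << " -> VN " << vn << "\n"; // both print VN 3
  }
}
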
@@ -235,7 +233,7 @@ Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) {
 }
 
 Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
-  if (isa<ICmpInst>(C)) {
+  if (isa<ICmpInst>(C) || isa<VICmpInst>(C)) {
     switch (C->getPredicate()) {
     default:  // THIS SHOULD NEVER HAPPEN
       assert(0 && "Comparison with unknown predicate?");
@@ -251,7 +249,7 @@ Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
     case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
     }
   }
-  assert(isa<FCmpInst>(C) && "Unknown compare");
+  assert((isa<FCmpInst>(C) || isa<VFCmpInst>(C)) && "Unknown compare");
   switch (C->getPredicate()) {
   default: // THIS SHOULD NEVER HAPPEN
     assert(0 && "Comparison with unknown predicate?");
@@ -291,14 +289,6 @@ Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) {
   }
 }
 
-uint32_t ValueTable::hash_operand(Value* v) {
-  if (CallInst* CI = dyn_cast<CallInst>(v))
-    if (!AA->doesNotAccessMemory(CI))
-      return nextValueNumber++;
-
-  return lookup_or_add(v);
-}
-
 Expression ValueTable::create_expression(CallInst* C) {
   Expression e;
 
@@ -311,7 +301,7 @@ Expression ValueTable::create_expression(CallInst* C) {
 
   for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
        I != E; ++I)
-    e.varargs.push_back(hash_operand(*I));
+    e.varargs.push_back(lookup_or_add(*I));
 
   return e;
 }
@@ -319,8 +309,8 @@ Expression ValueTable::create_expression(CallInst* C) {
 Expression ValueTable::create_expression(BinaryOperator* BO) {
   Expression e;
-  e.firstVN = hash_operand(BO->getOperand(0));
-  e.secondVN = hash_operand(BO->getOperand(1));
+  e.firstVN = lookup_or_add(BO->getOperand(0));
+  e.secondVN = lookup_or_add(BO->getOperand(1));
   e.thirdVN = 0;
   e.function = 0;
   e.type = BO->getType();
@@ -332,8 +322,8 @@ Expression ValueTable::create_expression(BinaryOperator* BO) {
 Expression ValueTable::create_expression(CmpInst* C) {
   Expression e;
-  e.firstVN = hash_operand(C->getOperand(0));
-  e.secondVN = hash_operand(C->getOperand(1));
+  e.firstVN = lookup_or_add(C->getOperand(0));
+  e.secondVN = lookup_or_add(C->getOperand(1));
   e.thirdVN = 0;
   e.function = 0;
   e.type = C->getType();
@@ -345,7 +335,7 @@ Expression ValueTable::create_expression(CmpInst* C) {
 Expression ValueTable::create_expression(CastInst* C) {
   Expression e;
-  e.firstVN = hash_operand(C->getOperand(0));
+  e.firstVN = lookup_or_add(C->getOperand(0));
   e.secondVN = 0;
   e.thirdVN = 0;
   e.function = 0;
@@ -358,9 +348,9 @@ Expression ValueTable::create_expression(CastInst* C) {
 Expression ValueTable::create_expression(ShuffleVectorInst* S) {
   Expression e;
-  e.firstVN = hash_operand(S->getOperand(0));
-  e.secondVN = hash_operand(S->getOperand(1));
-  e.thirdVN = hash_operand(S->getOperand(2));
+  e.firstVN = lookup_or_add(S->getOperand(0));
+  e.secondVN = lookup_or_add(S->getOperand(1));
+  e.thirdVN = lookup_or_add(S->getOperand(2));
   e.function = 0;
   e.type = S->getType();
   e.opcode = Expression::SHUFFLE;
@@ -371,8 +361,8 @@ Expression ValueTable::create_expression(ShuffleVectorInst* S) {
 Expression ValueTable::create_expression(ExtractElementInst* E) {
   Expression e;
-  e.firstVN = hash_operand(E->getOperand(0));
-  e.secondVN = hash_operand(E->getOperand(1));
+  e.firstVN = lookup_or_add(E->getOperand(0));
+  e.secondVN = lookup_or_add(E->getOperand(1));
   e.thirdVN = 0;
   e.function = 0;
   e.type = E->getType();
@@ -384,9 +374,9 @@ Expression ValueTable::create_expression(ExtractElementInst* E) {
 Expression ValueTable::create_expression(InsertElementInst* I) {
   Expression e;
-  e.firstVN = hash_operand(I->getOperand(0));
-  e.secondVN = hash_operand(I->getOperand(1));
-  e.thirdVN = hash_operand(I->getOperand(2));
+  e.firstVN = lookup_or_add(I->getOperand(0));
+  e.secondVN = lookup_or_add(I->getOperand(1));
+  e.thirdVN = lookup_or_add(I->getOperand(2));
   e.function = 0;
   e.type = I->getType();
   e.opcode = Expression::INSERT;
@@ -397,9 +387,9 @@ Expression ValueTable::create_expression(InsertElementInst* I) {
 Expression ValueTable::create_expression(SelectInst* I) {
   Expression e;
-  e.firstVN = hash_operand(I->getCondition());
-  e.secondVN = hash_operand(I->getTrueValue());
-  e.thirdVN = hash_operand(I->getFalseValue());
+  e.firstVN = lookup_or_add(I->getCondition());
+  e.secondVN = lookup_or_add(I->getTrueValue());
+  e.thirdVN = lookup_or_add(I->getFalseValue());
   e.function = 0;
   e.type = I->getType();
   e.opcode = Expression::SELECT;
@@ -409,8 +399,8 @@ Expression ValueTable::create_expression(GetElementPtrInst* G) {
   Expression e;
-  
-  e.firstVN = hash_operand(G->getPointerOperand());
+  
+  e.firstVN = lookup_or_add(G->getPointerOperand());
   e.secondVN = 0;
   e.thirdVN = 0;
   e.function = 0;
@@ -419,7 +409,7 @@ Expression ValueTable::create_expression(GetElementPtrInst* G) {
 
   for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
        I != E; ++I)
-    e.varargs.push_back(hash_operand(*I));
+    e.varargs.push_back(lookup_or_add(*I));
 
   return e;
 }
@@ -428,6 +418,11 @@ Expression ValueTable::create_expression(GetElementPtrInst* G) {
 //===----------------------------------------------------------------------===//
 //                     ValueTable External Functions
 //===----------------------------------------------------------------------===//
 
+/// add - Insert a value into the table with a specified value number.
+void ValueTable::add(Value* V, uint32_t num) {
+  valueNumbering.insert(std::make_pair(V, num));
+}
+
 /// lookup_or_add - Returns the value number for the specified value, assigning
 /// it a new number if it did not have one before.
 uint32_t ValueTable::lookup_or_add(Value* V) {
@@ -436,7 +431,7 @@ uint32_t ValueTable::lookup_or_add(Value* V) {
     return VI->second;
 
   if (CallInst* C = dyn_cast<CallInst>(V)) {
-    if (AA->onlyReadsMemory(C)) { // includes doesNotAccessMemory
+    if (AA->doesNotAccessMemory(C)) {
       Expression e = create_expression(C);
 
       DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
@@ -449,6 +444,104 @@ uint32_t ValueTable::lookup_or_add(Value* V) {
         return nextValueNumber++;
       }
+    } else if (AA->onlyReadsMemory(C)) {
+      Expression e = create_expression(C);
+      
+      if (expressionNumbering.find(e) == expressionNumbering.end()) {
+        expressionNumbering.insert(std::make_pair(e, nextValueNumber));
+        valueNumbering.insert(std::make_pair(V, nextValueNumber));
+        return nextValueNumber++;
+      }
+      
+      Instruction* local_dep = MD->getDependency(C);
+      
+      if (local_dep == MemoryDependenceAnalysis::None) {
+        valueNumbering.insert(std::make_pair(V, nextValueNumber));
+        return nextValueNumber++;
+      } else if (local_dep != MemoryDependenceAnalysis::NonLocal) {
+        if (!isa<CallInst>(local_dep)) {
+          valueNumbering.insert(std::make_pair(V, nextValueNumber));
+          return nextValueNumber++;
+        }
+        
+        CallInst* local_cdep = cast<CallInst>(local_dep);
+        
+        if (local_cdep->getCalledFunction() != C->getCalledFunction() ||
+            local_cdep->getNumOperands() != C->getNumOperands()) {
+          valueNumbering.insert(std::make_pair(V, nextValueNumber));
+          return nextValueNumber++;
+        } else if (!C->getCalledFunction()) {
+          valueNumbering.insert(std::make_pair(V, nextValueNumber));
+          return nextValueNumber++;
+        } else {
+          for (unsigned i = 1; i < C->getNumOperands(); ++i) {
+            uint32_t c_vn = lookup_or_add(C->getOperand(i));
+            uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
+            if (c_vn != cd_vn) {
+              valueNumbering.insert(std::make_pair(V, nextValueNumber));
+              return nextValueNumber++;
+            }
+          }
+          
+          uint32_t v = lookup_or_add(local_cdep);
+          valueNumbering.insert(std::make_pair(V, v));
+          return v;
+        }
+      }
+      
+      
+      DenseMap<BasicBlock*, Value*> deps;
+      MD->getNonLocalDependency(C, deps);
+      CallInst* cdep = 0;
+      
+      for (DenseMap<BasicBlock*, Value*>::iterator I = deps.begin(),
+           E = deps.end(); I != E; ++I) {
+        if (I->second == MemoryDependenceAnalysis::None) {
+          valueNumbering.insert(std::make_pair(V, nextValueNumber));
+
+          return nextValueNumber++;
+        } else if (I->second != MemoryDependenceAnalysis::NonLocal) {
+          if (DT->properlyDominates(I->first, C->getParent())) {
+            if (CallInst* CD = dyn_cast<CallInst>(I->second))
+              cdep = CD;
+            else {
+              valueNumbering.insert(std::make_pair(V, nextValueNumber));
+              return nextValueNumber++;
+            }
+          } else {
+            valueNumbering.insert(std::make_pair(V, nextValueNumber));
+            return nextValueNumber++;
+          }
+        }
+      }
+      
+      if (!cdep) {
+        valueNumbering.insert(std::make_pair(V, nextValueNumber));
+        return nextValueNumber++;
+      }
+      
+      if (cdep->getCalledFunction() != C->getCalledFunction() ||
+          cdep->getNumOperands() != C->getNumOperands()) {
+        valueNumbering.insert(std::make_pair(V, nextValueNumber));
+        return nextValueNumber++;
+      } else if (!C->getCalledFunction()) {
+        valueNumbering.insert(std::make_pair(V, nextValueNumber));
+        return nextValueNumber++;
+      } else {
+        for (unsigned i = 1; i < C->getNumOperands(); ++i) {
+          uint32_t c_vn = lookup_or_add(C->getOperand(i));
+          uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
+          if (c_vn != cd_vn) {
+            valueNumbering.insert(std::make_pair(V, nextValueNumber));
+            return nextValueNumber++;
+          }
+        }
+        
+        uint32_t v = lookup_or_add(cdep);
+        valueNumbering.insert(std::make_pair(V, v));
+        return v;
+      }
+      
     } else {
       valueNumbering.insert(std::make_pair(V, nextValueNumber));
       return nextValueNumber++;
     }
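
// A minimal standalone sketch (not part of this patch) of the rule the
// onlyReadsMemory path above implements: a read-only call may reuse the
// number of an identical earlier call only when no may-write instruction
// sits between them. The model below is illustrative only, not LLVM's API.
#include <iostream>
#include <string>
#include <vector>

struct Inst {
  std::string text;  // rendering of the instruction
  bool writesMemory; // conservative may-write flag
};

// Walk backwards from 'pos'; an identical call found before any may-write
// instruction can share a value number, otherwise a fresh number is needed.
static int findReusableCall(const std::vector<Inst> &block, size_t pos) {
  for (size_t i = pos; i-- > 0;) {
    if (block[i].text == block[pos].text)
      return static_cast<int>(i);
    if (block[i].writesMemory)
      return -1; // the store may have changed what the call reads
  }
  return -1;
}

int main() {
  std::vector<Inst> block = {
      {"call strlen(%p)", false},
      {"store 0, %q", true},
      {"call strlen(%p)", false},
  };
  // The intervening store blocks reuse: prints -1.
  std::cout << findReusableCall(block, 2) << "\n";
}
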
@@ -584,65 +677,29 @@ void ValueTable::erase(Value* V) {
 }
 
 //===----------------------------------------------------------------------===//
-//                       ValueNumberedSet Class
+//                                GVN Pass
 //===----------------------------------------------------------------------===//
-namespace {
-class VISIBILITY_HIDDEN ValueNumberedSet {
-  private:
-    SmallPtrSet<Value*, 8> contents;
-    BitVector numbers;
-  public:
-    ValueNumberedSet() { numbers.resize(1); }
-    ValueNumberedSet(const ValueNumberedSet& other) {
-      numbers = other.numbers;
-      contents = other.contents;
-    }
-    
-    typedef SmallPtrSet<Value*, 8>::iterator iterator;
-    
-    iterator begin() { return contents.begin(); }
-    iterator end() { return contents.end(); }
-    
-    bool insert(Value* v) { return contents.insert(v); }
-    void insert(iterator I, iterator E) { contents.insert(I, E); }
-    void erase(Value* v) { contents.erase(v); }
-    unsigned count(Value* v) { return contents.count(v); }
-    size_t size() { return contents.size(); }
-    
-    void set(unsigned i) {
-      if (i >= numbers.size())
-        numbers.resize(i+1);
-      
-      numbers.set(i);
-    }
-    
-    void operator=(const ValueNumberedSet& other) {
-      contents = other.contents;
-      numbers = other.numbers;
-    }
-    
-    void reset(unsigned i) {
-      if (i < numbers.size())
-        numbers.reset(i);
-    }
-    
-    bool test(unsigned i) {
-      if (i >= numbers.size())
-        return false;
-      
-      return numbers.test(i);
-    }
-    
-    void clear() {
-      contents.clear();
-      numbers.clear();
+
+namespace llvm {
+  template<> struct DenseMapInfo<uint32_t> {
+    static inline uint32_t getEmptyKey() { return ~0; }
+    static inline uint32_t getTombstoneKey() { return ~0 - 1; }
+    static unsigned getHashValue(const uint32_t& Val) { return Val * 37; }
+    static bool isPod() { return true; }
+    static bool isEqual(const uint32_t& LHS, const uint32_t& RHS) {
+      return LHS == RHS;
     }
-};
+  };
 }
 
-//===----------------------------------------------------------------------===//
-//                                GVN Pass
-//===----------------------------------------------------------------------===//
+namespace {
+  struct VISIBILITY_HIDDEN ValueNumberScope {
+    ValueNumberScope* parent;
+    DenseMap<uint32_t, Value*> table;
+    
+    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
+  };
+}
 
 namespace {
 
@@ -654,8 +711,7 @@ namespace {
 
   private:
     ValueTable VN;
-    
-    DenseMap<BasicBlock*, ValueNumberedSet> availableOut;
+    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;
     
     typedef DenseMap<Value*, SmallPtrSet<Instruction*, 4> > PhiMapType;
     PhiMapType phiMap;
@@ -663,41 +719,35 @@ namespace {
 
     // This transformation requires dominator and postdominator info
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
-      AU.setPreservesCFG();
       AU.addRequired<DominatorTree>();
       AU.addRequired<MemoryDependenceAnalysis>();
       AU.addRequired<AliasAnalysis>();
-      AU.addRequired<TargetData>();
+      
+      AU.addPreserved<DominatorTree>();
       AU.addPreserved<AliasAnalysis>();
       AU.addPreserved<MemoryDependenceAnalysis>();
-      AU.addPreserved<TargetData>();
     }
 
     // Helper functions
     // FIXME: eliminate or document these better
-    Value* find_leader(ValueNumberedSet& vals, uint32_t v);
-    void val_insert(ValueNumberedSet& s, Value* v);
     bool processLoad(LoadInst* L,
                      DenseMap<Value*, LoadInst*> &lastLoad,
                      SmallVectorImpl<Instruction*> &toErase);
-    bool processStore(StoreInst *SI, SmallVectorImpl<Instruction*> &toErase);
     bool processInstruction(Instruction* I,
-                            ValueNumberedSet& currAvail,
                             DenseMap<Value*, LoadInst*>& lastSeenLoad,
                             SmallVectorImpl<Instruction*> &toErase);
     bool processNonLocalLoad(LoadInst* L,
                              SmallVectorImpl<Instruction*> &toErase);
-    bool processMemCpy(MemCpyInst* M, MemCpyInst* MDep,
-                       SmallVectorImpl<Instruction*> &toErase);
-    bool performCallSlotOptzn(MemCpyInst* cpy, CallInst* C,
-                              SmallVectorImpl<Instruction*> &toErase);
+    bool processBlock(DomTreeNode* DTN);
     Value *GetValueForBlock(BasicBlock *BB, LoadInst* orig,
                             DenseMap<BasicBlock*, Value*> &Phis,
                             bool top_level = false);
-    void dump(DenseMap<BasicBlock*, Value*>& d);
+    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
     Value* CollapsePhi(PHINode* p);
     bool isSafeReplacement(PHINode* p, Instruction* inst);
+    bool performPRE(Function& F);
+    Value* lookupNumber(BasicBlock* BB, uint32_t num);
  };
 
  char GVN::ID = 0;
@@ -709,37 +759,11 @@ namespace {
 
 FunctionPass *llvm::createGVNPass() { return new GVN(); }
 
 static RegisterPass<GVN> X("gvn",
                            "Global Value Numbering");
 
-/// find_leader - Given a set and a value number, return the first
-/// element of the set with that value number, or 0 if no such element
-/// is present
-Value* GVN::find_leader(ValueNumberedSet& vals, uint32_t v) {
-  if (!vals.test(v))
-    return 0;
-  
-  for (ValueNumberedSet::iterator I = vals.begin(), E = vals.end();
-       I != E; ++I)
-    if (v == VN.lookup(*I))
-      return *I;
-  
-  assert(0 && "No leader found, but present bit is set?");
-  return 0;
-}
-
-/// val_insert - Insert a value into a set only if there is not a value
-/// with the same value number already in the set
-void GVN::val_insert(ValueNumberedSet& s, Value* v) {
-  uint32_t num = VN.lookup(v);
-  if (!s.test(num))
-    s.insert(v);
-}
-
-void GVN::dump(DenseMap<BasicBlock*, Value*>& d) {
+void GVN::dump(DenseMap<uint32_t, Value*>& d) {
   printf("{\n");
-  for (DenseMap<BasicBlock*, Value*>::iterator I = d.begin(),
+  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
        E = d.end(); I != E; ++I) {
-    if (I->second == MemoryDependenceAnalysis::None)
-      printf("None\n");
-    else
+      printf("%d\n", I->first);
       I->second->dump();
   }
   printf("}\n");
 }
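
// A minimal standalone sketch (not part of this patch) of the scoped table
// built from ValueNumberScope above: each dominator-tree node gets a map that
// chains to its immediate dominator, so a lookup walks toward the root --
// the same shape as the GVN::lookupNumber helper added later in this patch.
// Illustrative names only.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct Scope {
  Scope *parent;
  std::map<uint32_t, std::string> table;
  explicit Scope(Scope *p) : parent(p) {}
};

static const std::string *lookup(Scope *s, uint32_t num) {
  for (; s; s = s->parent) {
    std::map<uint32_t, std::string>::iterator it = s->table.find(num);
    if (it != s->table.end())
      return &it->second;
  }
  return 0; // not available in this block or any dominator
}

int main() {
  Scope entry(0), body(&entry);
  entry.table[7] = "%x";     // %x numbered 7 in the entry block
  if (const std::string *v = lookup(&body, 7))
    std::cout << *v << "\n"; // found via the dominator chain: prints %x
}
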
@@ -995,623 +1019,49 @@ bool GVN::processLoad(LoadInst *L, DenseMap<Value*, LoadInst*> &lastLoad,
   return deletedLoad;
 }
 
-/// isBytewiseValue - If the specified value can be set by repeating the same
-/// byte in memory, return the i8 value that it is represented with.  This is
-/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
-/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
-/// byte store (e.g. i16 0x1234), return null.
-static Value *isBytewiseValue(Value *V) {
-  // All byte-wide stores are splatable, even of arbitrary variables.
-  if (V->getType() == Type::Int8Ty) return V;
-  
-  // Constant float and double values can be handled as integer values if the
-  // corresponding integer value is "byteable".  An important case is 0.0.
-  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
-    if (CFP->getType() == Type::FloatTy)
-      V = ConstantExpr::getBitCast(CFP, Type::Int32Ty);
-    if (CFP->getType() == Type::DoubleTy)
-      V = ConstantExpr::getBitCast(CFP, Type::Int64Ty);
-    // Don't handle long double formats, which have strange constraints.
-  }
-  
-  // We can handle constant integers that are a power of two in size and a
-  // multiple of 8 bits.
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
-    unsigned Width = CI->getBitWidth();
-    if (isPowerOf2_32(Width) && Width > 8) {
-      // We can handle this value if the recursive binary decomposition is the
-      // same at all levels.
-      APInt Val = CI->getValue();
-      APInt Val2;
-      while (Val.getBitWidth() != 8) {
-        unsigned NextWidth = Val.getBitWidth()/2;
-        Val2 = Val.lshr(NextWidth);
-        Val2.trunc(Val.getBitWidth()/2);
-        Val.trunc(Val.getBitWidth()/2);
-
-        // If the top/bottom halves aren't the same, reject it.
-        if (Val != Val2)
-          return 0;
-      }
-      return ConstantInt::get(Val);
-    }
-  }
-  
-  // Conceptually, we could handle things like:
-  //   %a = zext i8 %X to i16
-  //   %b = shl i16 %a, 8
-  //   %c = or i16 %a, %b
-  // but until there is an example that actually needs this, it doesn't seem
-  // worth worrying about.
-  return 0;
-}
-
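
// A minimal standalone sketch (not part of this patch) of the recursive
// halving test the removed isBytewiseValue helper performs on integer
// constants: keep splitting the value in half; if the two halves ever
// differ, it is not a splat of a single byte.
#include <cstdint>
#include <iostream>

static bool isByteSplat(uint64_t val, unsigned bits, uint8_t &byte) {
  while (bits > 8) {
    unsigned half = bits / 2;
    uint64_t lo = val & ((1ULL << half) - 1);
    uint64_t hi = val >> half;
    if (lo != hi)
      return false; // halves differ, e.g. i16 0x1234
    val = lo;
    bits = half;
  }
  byte = static_cast<uint8_t>(val);
  return true;
}

int main() {
  uint8_t b;
  // 0xA0A0A0A0 -> halves 0xA0A0/0xA0A0 -> 0xA0/0xA0 -> splat of 0xA0.
  std::cout << isByteSplat(0xA0A0A0A0ULL, 32, b) << "\n"; // prints 1
  std::cout << isByteSplat(0x1234ULL, 16, b) << "\n";     // prints 0
}
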
-static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
-                                  bool &VariableIdxFound, TargetData &TD) {
-  // Skip over the first indices.
-  gep_type_iterator GTI = gep_type_begin(GEP);
-  for (unsigned i = 1; i != Idx; ++i, ++GTI)
-    /*skip along*/;
-  
-  // Compute the offset implied by the rest of the indices.
-  int64_t Offset = 0;
-  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
-    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
-    if (OpC == 0)
-      return VariableIdxFound = true;
-    if (OpC->isZero()) continue;  // No offset.
-
-    // Handle struct indices, which add their field offset to the pointer.
-    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
-      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
-      continue;
-    }
-    
-    // Otherwise, we have a sequential type like an array or vector.  Multiply
-    // the index by the ElementSize.
-    uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
-    Offset += Size*OpC->getSExtValue();
-  }
-
-  return Offset;
-}
-
-/// IsPointerOffset - Return true if Ptr1 is provably equal to Ptr2 plus a
-/// constant offset, and return that constant offset.  For example, Ptr1 might
-/// be &A[42], and Ptr2 might be &A[40].  In this case offset would be -8.
-static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
-                            TargetData &TD) {
-  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
-  // base.  After that base, they may have some number of common (and
-  // potentially variable) indices.  After that they handle some constant
-  // offset, which determines their offset from each other.  At this point, we
-  // handle no other case.
-  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
-  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
-  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
-    return false;
-  
-  // Skip any common indices and track the GEP types.
-  unsigned Idx = 1;
-  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
-    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
-      break;
-
-  bool VariableIdxFound = false;
-  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
-  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
-  if (VariableIdxFound) return false;
-  
-  Offset = Offset2-Offset1;
-  return true;
-}
-
-
-/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
-/// This allows us to analyze stores like:
-///   store 0 -> P+1
-///   store 0 -> P+0
-///   store 0 -> P+3
-///   store 0 -> P+2
-/// which sometimes happens with stores to arrays of structs etc.  When we see
-/// the first store, we make a range [1, 2).  The second store extends the range
-/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
-/// two ranges into [0, 3) which is memset'able.
-namespace {
-struct MemsetRange {
-  // Start/End - A semi range that describes the span that this range covers.
-  // The range is closed at the start and open at the end: [Start, End).
-  int64_t Start, End;
-
-  /// StartPtr - The getelementptr instruction that points to the start of the
-  /// range.
-  Value *StartPtr;
-  
-  /// Alignment - The known alignment of the first store.
-  unsigned Alignment;
-  
-  /// TheStores - The actual stores that make up this range.
-  SmallVector<StoreInst*, 16> TheStores;
-  
-  bool isProfitableToUseMemset(const TargetData &TD) const;
-
-};
-} // end anon namespace
-
-bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
-  // If we found 8 or more stores to merge, or 64 or more bytes, use memset.
-  if (TheStores.size() >= 8 || End-Start >= 64) return true;
-  
-  // Assume that the code generator is capable of merging pairs of stores
-  // together if it wants to.
-  if (TheStores.size() <= 2) return false;
-  
-  // If we have fewer than 8 stores, it can still be worthwhile to do this.
-  // For example, merging 4 i8 stores into an i32 store is useful almost always.
-  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
-  // memset will be split into 2 32-bit stores anyway) and doing so can
-  // pessimize the llvm optimizer.
-  //
-  // Since we don't have perfect knowledge here, make some assumptions: assume
-  // the maximum GPR width is the same size as the pointer size and assume that
-  // this width can be stored.  If so, check to see whether we will end up
-  // actually reducing the number of stores used.
-  unsigned Bytes = unsigned(End-Start);
-  unsigned NumPointerStores = Bytes/TD.getPointerSize();
-  
-  // Assume the remaining bytes if any are done a byte at a time.
-  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();
-  
-  // If we will reduce the # stores (according to this heuristic), do the
-  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
-  // etc.
-  return TheStores.size() > NumPointerStores+NumByteStores;
-}
-
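
// A minimal standalone sketch (not part of this patch) of the store-count
// arithmetic in the heuristic above: with a 4-byte pointer width, four
// adjacent i8 stores cover 4 bytes, which a single pointer-sized store could
// also cover, so 4 > 1 + 0 and merging is considered profitable.
#include <iostream>

static bool profitable(unsigned numStores, unsigned bytes, unsigned ptrSize) {
  if (numStores >= 8 || bytes >= 64) return true; // big enough: always merge
  if (numStores <= 2) return false;               // codegen merges pairs anyway
  unsigned pointerStores = bytes / ptrSize;
  unsigned byteStores = bytes - pointerStores * ptrSize;
  return numStores > pointerStores + byteStores;
}

int main() {
  std::cout << profitable(4, 4, 4) << "\n"; // 4 x i8 -> i32: 4 > 1, prints 1
  std::cout << profitable(2, 8, 4) << "\n"; // 2 x i32 on 32-bit: prints 0
}
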
-namespace {
-class MemsetRanges {
-  /// Ranges - A sorted list of the memset ranges.  We use std::list here
-  /// because each element is relatively large and expensive to copy.
-  std::list<MemsetRange> Ranges;
-  typedef std::list<MemsetRange>::iterator range_iterator;
-  TargetData &TD;
-public:
-  MemsetRanges(TargetData &td) : TD(td) {}
-  
-  typedef std::list<MemsetRange>::const_iterator const_iterator;
-  const_iterator begin() const { return Ranges.begin(); }
-  const_iterator end() const { return Ranges.end(); }
-  bool empty() const { return Ranges.empty(); }
-  
-  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
-};
-
-} // end anon namespace
-
-
-/// addStore - Add a new store to the MemsetRanges data structure.  This adds a
-/// new range for the specified store at the specified offset, merging into
-/// existing ranges as appropriate.
-void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
-  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());
-  
-  // Do a linear search of the ranges to see if this can be joined and/or to
-  // find the insertion point in the list.  We keep the ranges sorted for
-  // simplicity here.  This is a linear search of a linked list, which is ugly,
-  // however the number of ranges is limited, so this won't get crazy slow.
-  range_iterator I = Ranges.begin(), E = Ranges.end();
-  
-  while (I != E && Start > I->End)
-    ++I;
-  
-  // We now know that I == E, in which case we didn't find anything to merge
-  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
-  // to insert a new range.  Handle this now.
-  if (I == E || End < I->Start) {
-    MemsetRange &R = *Ranges.insert(I, MemsetRange());
-    R.Start        = Start;
-    R.End          = End;
-    R.StartPtr     = SI->getPointerOperand();
-    R.Alignment    = SI->getAlignment();
-    R.TheStores.push_back(SI);
-    return;
-  }
-
-  // This store overlaps with I, add it.
-  I->TheStores.push_back(SI);
-  
-  // At this point, we may have an interval that completely contains our store.
-  // If so, just add it to the interval and return.
-  if (I->Start <= Start && I->End >= End)
-    return;
-  
-  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
-  // but is not entirely contained within the range.
-  
-  // See if this store extends the start of the range.  In this case, it couldn't
-  // possibly cause it to join the prior range, because otherwise we would have
-  // stopped on *it*.
-  if (Start < I->Start) {
-    I->Start = Start;
-    I->StartPtr = SI->getPointerOperand();
-  }
-    
-  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
-  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
-  // End.
-  if (End > I->End) {
-    I->End = End;
-    range_iterator NextI = I;
-    while (++NextI != E && End >= NextI->Start) {
-      // Merge the range in.
-      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
-      if (NextI->End > I->End)
-        I->End = NextI->End;
-      Ranges.erase(NextI);
-      NextI = I;
-    }
-  }
-}
-
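
// A minimal standalone sketch (not part of this patch) of the interval
// merging addStore performs, on the example from the MemsetRange comment:
// one-byte stores at offsets 1, 0, 3, 2 coalesce into the single range [0, 4).
#include <algorithm>
#include <iostream>
#include <list>

struct Range { long start, end; }; // half-open [start, end)

static void addByte(std::list<Range> &ranges, long off) {
  long start = off, end = off + 1;
  std::list<Range>::iterator it = ranges.begin();
  while (it != ranges.end() && start > it->end)
    ++it;
  if (it == ranges.end() || end < it->start) {
    ranges.insert(it, Range{start, end});   // disjoint: start a new range
    return;
  }
  it->start = std::min(it->start, start);   // overlaps/abuts: grow the range
  it->end = std::max(it->end, end);
  std::list<Range>::iterator next = it;
  ++next;                                   // fold in any newly-bridged ranges
  while (next != ranges.end() && it->end >= next->start) {
    it->end = std::max(it->end, next->end);
    next = ranges.erase(next);
  }
}

int main() {
  std::list<Range> ranges;
  for (long off : {1L, 0L, 3L, 2L})
    addByte(ranges, off);
  std::cout << "[" << ranges.front().start << ", "
            << ranges.front().end << ")\n"; // prints [0, 4)
}
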
-
-
-/// processStore - When GVN is scanning forward over instructions, we look for
-/// some other patterns to fold away.  In particular, this looks for stores to
-/// neighboring locations of memory.  If it sees enough consecutive ones
-/// (currently 4) it attempts to merge them together into a memcpy/memset.
-bool GVN::processStore(StoreInst *SI, SmallVectorImpl<Instruction*> &toErase) {
-  if (!FormMemSet) return false;
-  if (SI->isVolatile()) return false;
-  
-  // There are two cases that are interesting for this code to handle: memcpy
-  // and memset.  Right now we only handle memset.
-  
-  // Ensure that the value being stored is something that can be memset'd a
-  // byte at a time like "0" or "-1" of any width, as well as things like
-  // 0xA0A0A0A0 and 0.0.
-  Value *ByteVal = isBytewiseValue(SI->getOperand(0));
-  if (!ByteVal)
-    return false;
-  
-  TargetData &TD = getAnalysis<TargetData>();
-  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
-
-  // Okay, so we now have a single store that can be splatable.  Scan to find
-  // all subsequent stores of the same value at offsets from the same pointer.
-  // Join these together into ranges, so we can decide whether contiguous blocks
-  // are stored.
-  MemsetRanges Ranges(TD);
-  
-  Value *StartPtr = SI->getPointerOperand();
-  
-  BasicBlock::iterator BI = SI;
-  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
-    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
-      // If the call is readnone, ignore it, otherwise bail out.  We don't even
-      // allow readonly here because we don't want something like:
-      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
-      if (AA.getModRefBehavior(CallSite::get(BI)) ==
-            AliasAnalysis::DoesNotAccessMemory)
-        continue;
-      
-      // TODO: If this is a memset, try to join it in.
-      
-      break;
-    } else if (isa<LoadInst>(BI) || isa<VAArgInst>(BI))
-      break;
-
-    // If this is a non-store instruction it is fine, ignore it.
-    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
-    if (NextStore == 0) continue;
-    
-    // If this is a store, see if we can merge it in.
-    if (NextStore->isVolatile()) break;
-    
-    // Check to see if this stored value is of the same byte-splattable value.
-    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
-      break;
-
-    // Check to see if this store is to a constant offset from the start ptr.
-    int64_t Offset;
-    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, TD))
-      break;
-
-    Ranges.addStore(Offset, NextStore);
-  }
-  
-  // If we have no ranges, then we just had a single store with nothing that
-  // could be merged in.  This is a very common case of course.
-  if (Ranges.empty())
-    return false;
-  
-  // If we had at least one store that could be merged in, add the starting
-  // store as well.  We try to avoid this unless there is at least something
-  // interesting as a small compile-time optimization.
-  Ranges.addStore(0, SI);
-
-  
-  Function *MemSetF = 0;
-  
-  // Now that we have full information about ranges, loop over the ranges and
-  // emit memset's for anything big enough to be worthwhile.
-  bool MadeChange = false;
-  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
-       I != E; ++I) {
-    const MemsetRange &Range = *I;
-
-    if (Range.TheStores.size() == 1) continue;
-    
-    // If it is profitable to lower this range to memset, do so now.
-    if (!Range.isProfitableToUseMemset(TD))
-      continue;
-    
-    // Otherwise, we do want to transform this!  Create a new memset.  We put
-    // the memset right before the first instruction that isn't part of this
-    // memset block.  This ensures that the memset is dominated by any addressing
-    // instruction needed by the start of the block.
-    BasicBlock::iterator InsertPt = BI;
-  
-    if (MemSetF == 0)
-      MemSetF = Intrinsic::getDeclaration(SI->getParent()->getParent()
-                                          ->getParent(), Intrinsic::memset_i64);
-    
-    // Get the starting pointer of the block.
-    StartPtr = Range.StartPtr;
-  
-    // Cast the start ptr to be i8* as memset requires.
-    const Type *i8Ptr = PointerType::getUnqual(Type::Int8Ty);
-    if (StartPtr->getType() != i8Ptr)
-      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getNameStart(),
-                                 InsertPt);
-  
-    Value *Ops[] = {
-      StartPtr, ByteVal,                                        // Start, value
-      ConstantInt::get(Type::Int64Ty, Range.End-Range.Start),   // size
-      ConstantInt::get(Type::Int32Ty, Range.Alignment)          // align
-    };
-    Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
-    DEBUG(cerr << "Replace stores:\n";
-          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
-            cerr << *Range.TheStores[i];
-          cerr << "With: " << *C); C=C;
-  
-    // Zap all the stores.
-    toErase.append(Range.TheStores.begin(), Range.TheStores.end());
-    ++NumMemSetInfer;
-    MadeChange = true;
-  }
-  
-  return MadeChange;
-}
-
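
// A minimal standalone sketch (not part of this patch) of the rewrite the
// removed processStore performs, expressed at the source level: adjacent byte
// stores of the same splat value become one memset (emitted as llvm.memset).
#include <cstring>

void before(unsigned char *p) {
  p[0] = 0; p[1] = 0; p[2] = 0; p[3] = 0; // four neighboring stores
}

void after(unsigned char *p) {
  std::memset(p, 0, 4); // what the pass emits instead
}
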
-
-/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
-/// and checks for the possibility of a call slot optimization by having
-/// the call write its result directly into the destination of the memcpy.
-bool GVN::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C,
-                               SmallVectorImpl<Instruction*> &toErase) {
-  // The general transformation to keep in mind is
-  //
-  //   call @func(..., src, ...)
-  //   memcpy(dest, src, ...)
-  //
-  // ->
-  //
-  //   memcpy(dest, src, ...)
-  //   call @func(..., dest, ...)
-  //
-  // Since moving the memcpy is technically awkward, we additionally check that
-  // src only holds uninitialized values at the moment of the call, meaning that
-  // the memcpy can be discarded rather than moved.
-
-  // Deliberately get the source and destination with bitcasts stripped away,
-  // because we'll need to do type comparisons based on the underlying type.
-  Value* cpyDest = cpy->getDest();
-  Value* cpySrc = cpy->getSource();
-  CallSite CS = CallSite::get(C);
-
-  // We need to be able to reason about the size of the memcpy, so we require
-  // that it be a constant.
-  ConstantInt* cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
-  if (!cpyLength)
-    return false;
-
-  // Require that src be an alloca.  This simplifies the reasoning considerably.
-  AllocaInst* srcAlloca = dyn_cast<AllocaInst>(cpySrc);
-  if (!srcAlloca)
-    return false;
-
-  // Check that all of src is copied to dest.
-  TargetData& TD = getAnalysis<TargetData>();
-
-  ConstantInt* srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
-  if (!srcArraySize)
-    return false;
-
-  uint64_t srcSize = TD.getABITypeSize(srcAlloca->getAllocatedType()) *
-    srcArraySize->getZExtValue();
-
-  if (cpyLength->getZExtValue() < srcSize)
-    return false;
-
-  // Check that accessing the first srcSize bytes of dest will not cause a
-  // trap.  Otherwise the transform is invalid since it might cause a trap
-  // to occur earlier than it otherwise would.
-  if (AllocaInst* A = dyn_cast<AllocaInst>(cpyDest)) {
-    // The destination is an alloca.  Check it is larger than srcSize.
-    ConstantInt* destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
-    if (!destArraySize)
-      return false;
-
-    uint64_t destSize = TD.getABITypeSize(A->getAllocatedType()) *
-      destArraySize->getZExtValue();
-
-    if (destSize < srcSize)
-      return false;
-  } else if (Argument* A = dyn_cast<Argument>(cpyDest)) {
-    // If the destination is an sret parameter then only accesses that are
-    // outside of the returned struct type can trap.
-    if (!A->hasStructRetAttr())
-      return false;
-
-    const Type* StructTy = cast<PointerType>(A->getType())->getElementType();
-    uint64_t destSize = TD.getABITypeSize(StructTy);
-
-    if (destSize < srcSize)
-      return false;
-  } else {
-    return false;
-  }
-
-  // Check that src is not accessed except via the call and the memcpy.  This
-  // guarantees that it holds only undefined values when passed in (so the final
-  // memcpy can be dropped), that it is not read or written between the call and
-  // the memcpy, and that writing beyond the end of it is undefined.
-  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
-                                   srcAlloca->use_end());
-  while (!srcUseList.empty()) {
-    User* UI = srcUseList.back();
-    srcUseList.pop_back();
-
-    if (isa<BitCastInst>(UI) || isa<GetElementPtrInst>(UI)) {
-      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
-           I != E; ++I)
-        srcUseList.push_back(*I);
-    } else if (UI != C && UI != cpy) {
-      return false;
-    }
-  }
-
-  // Since we're changing the parameter to the callsite, we need to make sure
-  // that what would be the new parameter dominates the callsite.
-  DominatorTree& DT = getAnalysis<DominatorTree>();
-  if (Instruction* cpyDestInst = dyn_cast<Instruction>(cpyDest))
-    if (!DT.dominates(cpyDestInst, C))
-      return false;
-
-  // In addition to knowing that the call does not access src in some
-  // unexpected manner, for example via a global, which we deduce from
-  // the use analysis, we also need to know that it does not sneakily
-  // access dest.  We rely on AA to figure this out for us.
-  AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
-  if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
-      AliasAnalysis::NoModRef)
-    return false;
-
-  // All the checks have passed, so do the transformation.
-  for (unsigned i = 0; i < CS.arg_size(); ++i)
-    if (CS.getArgument(i) == cpySrc) {
-      if (cpySrc->getType() != cpyDest->getType())
-        cpyDest = CastInst::createPointerCast(cpyDest, cpySrc->getType(),
-                                              cpyDest->getName(), C);
-      CS.setArgument(i, cpyDest);
-    }
-
-  // Drop any cached information about the call, because we may have changed
-  // its dependence information by changing its parameter.
-  MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
-  MD.dropInstruction(C);
-
-  // Remove the memcpy
-  MD.removeInstruction(cpy);
-  toErase.push_back(cpy);
-
-  return true;
-}
-
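
// A minimal standalone sketch (not part of this patch) of the call slot
// transformation described above, at the source level: when the temporary is
// dead apart from the call and the copy, the call may write straight into
// the destination and the memcpy disappears. Names here are illustrative.
#include <cstring>

struct Big { char bytes[64]; };
void producer(Big *out) { std::memset(out, 7, sizeof(Big)); }

void before(Big *dest) {
  Big tmp;                              // the srcAlloca in the code above
  producer(&tmp);                       // call writes its result into tmp
  std::memcpy(dest, &tmp, sizeof(Big)); // then the result is copied out
}

void after(Big *dest) {
  producer(dest); // the call writes its result directly into dest
}
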
-/// processMemCpy - perform simplification of memcpy's.  If we have memcpy A
-/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
-/// B to be a memcpy from X to Z (or potentially a memmove, depending on
-/// circumstances).  This allows later passes to remove the first memcpy
-/// altogether.
-bool GVN::processMemCpy(MemCpyInst* M, MemCpyInst* MDep,
-                        SmallVectorImpl<Instruction*> &toErase) {
-  // We can only transform memcpy's where the dest of one is the source of the
-  // other
-  if (M->getSource() != MDep->getDest())
-    return false;
-
-  // Second, the length of the memcpy's must be the same, or the preceding one
-  // must be larger than the following one.
-  ConstantInt* C1 = dyn_cast<ConstantInt>(MDep->getLength());
-  ConstantInt* C2 = dyn_cast<ConstantInt>(M->getLength());
-  if (!C1 || !C2)
-    return false;
-
-  uint64_t DepSize = C1->getValue().getZExtValue();
-  uint64_t CpySize = C2->getValue().getZExtValue();
-
-  if (DepSize < CpySize)
-    return false;
-
-  // Finally, we have to make sure that the dest of the second does not
-  // alias the source of the first
-  AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
-  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
-      AliasAnalysis::NoAlias)
-    return false;
-  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
-           AliasAnalysis::NoAlias)
-    return false;
-  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
-           != AliasAnalysis::NoAlias)
-    return false;
-
-  // If all checks passed, then we can transform these memcpy's
-  Function* MemCpyFun = Intrinsic::getDeclaration(
-                                 M->getParent()->getParent()->getParent(),
-                                 M->getIntrinsicID());
-
-  std::vector<Value*> args;
-  args.push_back(M->getRawDest());
-  args.push_back(MDep->getRawSource());
-  args.push_back(M->getLength());
-  args.push_back(M->getAlignment());
+Value* GVN::lookupNumber(BasicBlock* BB, uint32_t num) {
+  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
+  if (I == localAvail.end())
+    return 0;
 
-  CallInst* C = CallInst::Create(MemCpyFun, args.begin(), args.end(), "", M);
+  ValueNumberScope* locals = I->second;
 
-  MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
-  if (MD.getDependency(C) == MDep) {
-    MD.dropInstruction(M);
-    toErase.push_back(M);
-    return true;
+  while (locals) {
+    DenseMap<uint32_t, Value*>::iterator I = locals->table.find(num);
+    if (I != locals->table.end())
+      return I->second;
+    else
+      locals = locals->parent;
   }
 
-  MD.removeInstruction(C);
-  toErase.push_back(C);
-  return false;
+  return 0;
 }
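
// A minimal standalone sketch (not part of this patch) of the memcpy-memcpy
// rewrite the removed processMemCpy describes: the second copy is redirected
// to read from the original source, which makes the intermediate buffer dead
// when nothing else reads it (assuming the buffers do not alias).
#include <cstring>

void before(char *a, char *b, char *c, std::size_t n) {
  std::memcpy(b, a, n); // A -> B
  std::memcpy(c, b, n); // B -> C reads the bytes just written
}

void after(char *a, char *b, char *c, std::size_t n) {
  std::memcpy(b, a, n); // A -> B (now possibly dead, for DSE to remove)
  std::memcpy(c, a, n); // C copies straight from A
}
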
 
 /// processInstruction - When calculating availability, handle an instruction
 /// by inserting it into the appropriate sets
-bool GVN::processInstruction(Instruction *I, ValueNumberedSet &currAvail,
+bool GVN::processInstruction(Instruction *I,
                              DenseMap<Value*, LoadInst*> &lastSeenLoad,
                              SmallVectorImpl<Instruction*> &toErase) {
-  if (LoadInst* L = dyn_cast<LoadInst>(I))
-    return processLoad(L, lastSeenLoad, toErase);
+  if (LoadInst* L = dyn_cast<LoadInst>(I)) {
+    bool changed = processLoad(L, lastSeenLoad, toErase);
+    
+    if (!changed) {
+      unsigned num = VN.lookup_or_add(L);
+      localAvail[I->getParent()]->table.insert(std::make_pair(num, L));
+    }
+    
+    return changed;
+  }
 
-  if (StoreInst *SI = dyn_cast<StoreInst>(I))
-    return processStore(SI, toErase);
+  unsigned num = VN.lookup_or_add(I);
 
-  if (MemCpyInst* M = dyn_cast<MemCpyInst>(I)) {
-    MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
-    
-    // There are two possible optimizations we can do for memcpy:
-    //   a) memcpy-memcpy xform which exposes redundancy for DSE
-    //   b) call-memcpy xform for return slot optimization
-    Instruction* dep = MD.getDependency(M);
-    if (dep == MemoryDependenceAnalysis::None ||
-        dep == MemoryDependenceAnalysis::NonLocal)
-      return false;
-    if (MemCpyInst *MemCpy = dyn_cast<MemCpyInst>(dep))
-      return processMemCpy(M, MemCpy, toErase);
-    if (CallInst* C = dyn_cast<CallInst>(dep))
-      return performCallSlotOptzn(M, C, toErase);
+  // Allocations are always uniquely numbered, so we can save time and memory
+  // by fast-failing them.
+  if (isa<AllocationInst>(I)) {
+    localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
     return false;
   }
 
-  unsigned num = VN.lookup_or_add(I);
-
   // Collapse PHI nodes
   if (PHINode* p = dyn_cast<PHINode>(I)) {
     Value* constVal = CollapsePhi(p);
@@ -1624,27 +1074,11 @@ bool GVN::processInstruction(Instruction *I, ValueNumberedSet &currAvail,
 
       p->replaceAllUsesWith(constVal);
       toErase.push_back(p);
+    } else {
+      localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
     }
 
   // Perform value-number based elimination
-  } else if (currAvail.test(num)) {
-    Value* repl = find_leader(currAvail, num);
-    
-    if (CallInst* CI = dyn_cast<CallInst>(I)) {
-      AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
-      if (!AA.doesNotAccessMemory(CI)) {
-        MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
-        if (cast<Instruction>(repl)->getParent() != CI->getParent() ||
-            MD.getDependency(CI) != MD.getDependency(cast<CallInst>(repl))) {
-          // There must be an intervening may-alias store, so nothing from
-          // this point on will be able to be replaced with the preceding call
-          currAvail.erase(repl);
-          currAvail.insert(I);
-          
-          return false;
-        }
-      }
-    }
-    
+  } else if (Value* repl = lookupNumber(I->getParent(), num)) {
     // Remove it!
     MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
     MD.removeInstruction(I);
@@ -1654,8 +1088,7 @@ bool GVN::processInstruction(Instruction *I, ValueNumberedSet &currAvail,
     toErase.push_back(I);
     return true;
   } else if (!I->isTerminator()) {
-    currAvail.set(num);
-    currAvail.insert(I);
+    localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
   }
 
   return false;
@@ -1666,6 +1099,8 @@ bool GVN::processInstruction(Instruction *I, ValueNumberedSet &currAvail,
 //
 bool GVN::runOnFunction(Function& F) {
   VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
+  VN.setMemDep(&getAnalysis<MemoryDependenceAnalysis>());
+  VN.setDomTree(&getAnalysis<DominatorTree>());
 
   bool changed = false;
   bool shouldContinue = true;
@@ -1679,63 +1114,220 @@ bool GVN::runOnFunction(Function& F) {
 }
 
 
-// GVN::iterateOnFunction - Executes one iteration of GVN
-bool GVN::iterateOnFunction(Function &F) {
-  // Clean out global sets from any previous functions
-  VN.clear();
-  availableOut.clear();
-  phiMap.clear();
-  
-  bool changed_function = false;
-  
-  DominatorTree &DT = getAnalysis<DominatorTree>();
-  
-  SmallVector<Instruction*, 4> toErase;
-  DenseMap<Value*, LoadInst*> lastSeenLoad;
-  
-  // Top-down walk of the dominator tree
-  for (df_iterator<DomTreeNode*> DI = df_begin(DT.getRootNode()),
-       E = df_end(DT.getRootNode()); DI != E; ++DI) {
-    
-    // Get the set to update for this block
-    ValueNumberedSet& currAvail = availableOut[DI->getBlock()];
-    lastSeenLoad.clear();
-    
-    BasicBlock* BB = DI->getBlock();
-    
-    // A block inherits AVAIL_OUT from its dominator
-    if (DI->getIDom() != 0)
-      currAvail = availableOut[DI->getIDom()->getBlock()];
-    
-    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
-         BI != BE;) {
-      changed_function |= processInstruction(BI, currAvail,
-                                             lastSeenLoad, toErase);
-      if (toErase.empty()) {
-        ++BI;
-        continue;
-      }
-      
-      // If we need some instructions deleted, do it now.
-      NumGVNInstr += toErase.size();
-      
-      // Avoid iterator invalidation.
-      bool AtStart = BI == BB->begin();
-      if (!AtStart)
-        --BI;
-      
-      for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
-           E = toErase.end(); I != E; ++I)
-        (*I)->eraseFromParent();
-      
-      if (AtStart)
-        BI = BB->begin();
-      else
-        ++BI;
-      
-      toErase.clear();
-    }
-  }
-  
-  return changed_function;
-}
+bool GVN::processBlock(DomTreeNode* DTN) {
+  BasicBlock* BB = DTN->getBlock();
+  SmallVector<Instruction*, 4> toErase;
+  DenseMap<Value*, LoadInst*> lastSeenLoad;
+  bool changed_function = false;
+  
+  if (DTN->getIDom())
+    localAvail[BB] =
+      new ValueNumberScope(localAvail[DTN->getIDom()->getBlock()]);
+  else
+    localAvail[BB] = new ValueNumberScope(0);
+  
+  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
+       BI != BE;) {
+    changed_function |= processInstruction(BI, lastSeenLoad, toErase);
+    if (toErase.empty()) {
+      ++BI;
+      continue;
+    }
+    
+    // If we need some instructions deleted, do it now.
+    NumGVNInstr += toErase.size();
+    
+    // Avoid iterator invalidation.
+    bool AtStart = BI == BB->begin();
+    if (!AtStart)
+      --BI;
+    
+    for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
+         E = toErase.end(); I != E; ++I)
+      (*I)->eraseFromParent();
+    
+    if (AtStart)
+      BI = BB->begin();
+    else
+      ++BI;
+    
+    toErase.clear();
+  }
+  
+  return changed_function;
+}
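
// A minimal standalone sketch (not part of this patch) of the iterator dance
// used in processBlock above: before erasing, step the iterator back (or
// remember it was at the start), erase, then step forward again so iteration
// resumes on a live element.
#include <iostream>
#include <iterator>
#include <list>

int main() {
  std::list<int> block = {1, 2, 3, 4};
  for (std::list<int>::iterator it = block.begin(); it != block.end();) {
    bool eraseThis = (*it % 2 == 0); // pretend even elements are dead
    if (!eraseThis) { ++it; continue; }

    bool atStart = it == block.begin();
    if (!atStart)
      --it;                          // park the iterator on a surviving neighbor
    block.erase(atStart ? block.begin() : std::next(it));
    it = atStart ? block.begin() : std::next(it);
  }
  for (int v : block)
    std::cout << v << " ";           // prints: 1 3
}
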
 
+/// performPRE - Perform a purely local form of PRE that looks for diamond
+/// control flow patterns and attempts to perform simple PRE at the join point.
+bool GVN::performPRE(Function& F) {
+  bool changed = false;
+  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
+  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
+       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
+    BasicBlock* CurrentBlock = *DI;
+    
+    // Nothing to PRE in the entry block.
+    if (CurrentBlock == &F.getEntryBlock()) continue;
+    
+    for (BasicBlock::iterator BI = CurrentBlock->begin(),
+         BE = CurrentBlock->end(); BI != BE; ) {
+      if (isa<AllocationInst>(BI) || isa<TerminatorInst>(BI) ||
+          isa<PHINode>(BI) || BI->mayReadFromMemory() ||
+          BI->mayWriteToMemory()) {
+        BI++;
+        continue;
+      }
+      
+      uint32_t valno = VN.lookup(BI);
+      
+      // Look for the predecessors for PRE opportunities.  We're
+      // only trying to solve the basic diamond case, where
+      // a value is computed in the successor and one predecessor,
+      // but not the other.  We also explicitly disallow cases
+      // where the successor is its own predecessor, because they're
+      // more complicated to get right.
+      unsigned numWith = 0;
+      unsigned numWithout = 0;
+      BasicBlock* PREPred = 0;
+      DenseMap<BasicBlock*, Value*> predMap;
+      for (pred_iterator PI = pred_begin(CurrentBlock),
+           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
+        // We're not interested in PRE where the block is its
+        // own predecessor, or in blocks with predecessors
+        // that are not reachable.
+        if (*PI == CurrentBlock) {
+          numWithout = 2;
+          break;
+        } else if (!localAvail.count(*PI)) {
+          numWithout = 2;
+          break;
+        }
+        
+        DenseMap<uint32_t, Value*>::iterator predV =
+                                            localAvail[*PI]->table.find(valno);
+        if (predV == localAvail[*PI]->table.end()) {
+          PREPred = *PI;
+          numWithout++;
+        } else if (predV->second == BI) {
+          numWithout = 2;
+        } else {
+          predMap[*PI] = predV->second;
+          numWith++;
+        }
+      }
+      
+      // Don't do PRE when it might increase code size, i.e. when
+      // we would need to insert instructions in more than one pred.
+      if (numWithout != 1 || numWith == 0) {
+        BI++;
+        continue;
+      }
+      
+      // We can't do PRE safely on a critical edge, so instead we schedule
+      // the edge to be split and perform the PRE the next time we iterate
+      // on the function.
+      unsigned succNum = 0;
+      for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
+           i != e; ++i)
+        if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
+          succNum = i;
+          break;
+        }
+      
+      if (isCriticalEdge(PREPred->getTerminator(), succNum)) {
+        toSplit.push_back(std::make_pair(PREPred->getTerminator(), succNum));
+        changed = true;
+        BI++;
+        continue;
+      }
+      
+      // Instantiate the expression in the predecessor that lacked it.
+      // Because we are going top-down through the block, all value numbers
+      // will be available in the predecessor by the time we need them.  Any
+      // that weren't originally present will have been instantiated earlier
+      // in this loop.
+      Instruction* PREInstr = BI->clone();
+      bool success = true;
+      for (unsigned i = 0; i < BI->getNumOperands(); ++i) {
+        Value* op = BI->getOperand(i);
+        if (isa<Argument>(op) || isa<Constant>(op) || isa<GlobalValue>(op))
+          PREInstr->setOperand(i, op);
+        else if (!lookupNumber(PREPred, VN.lookup(op))) {
+          success = false;
+          break;
+        } else
+          PREInstr->setOperand(i, lookupNumber(PREPred, VN.lookup(op)));
+      }
+      
+      // Fail out if we encounter an operand that is not available in
+      // the PRE predecessor.  This is typically because of loads which
+      // are not value numbered precisely.
+      if (!success) {
+        delete PREInstr;
+        BI++;
+        continue;
+      }
+      
+      PREInstr->insertBefore(PREPred->getTerminator());
+      PREInstr->setName(BI->getName() + ".pre");
+      predMap[PREPred] = PREInstr;
+      VN.add(PREInstr, valno);
+      NumGVNPRE++;
+      
+      // Update the availability map to include the new instruction.
+      localAvail[PREPred]->table.insert(std::make_pair(valno, PREInstr));
+      
+      // Create a PHI to make the value available in this block.
+      PHINode* Phi = PHINode::Create(BI->getType(),
+                                     BI->getName() + ".pre-phi",
+                                     CurrentBlock->begin());
+      for (pred_iterator PI = pred_begin(CurrentBlock),
+           PE = pred_end(CurrentBlock); PI != PE; ++PI)
+        Phi->addIncoming(predMap[*PI], *PI);
+      
+      VN.add(Phi, valno);
+      localAvail[CurrentBlock]->table[valno] = Phi;
+      
+      BI->replaceAllUsesWith(Phi);
+      VN.erase(BI);
+      
+      Instruction* erase = BI;
+      BI++;
+      erase->eraseFromParent();
+      
+      changed = true;
+    }
+  }
+  
+  for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
+       I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
+    SplitCriticalEdge(I->first, I->second, this);
+  
+  return changed;
+}
+
+// GVN::iterateOnFunction - Executes one iteration of GVN
+bool GVN::iterateOnFunction(Function &F) {
+  // Clean out global sets from any previous functions
+  VN.clear();
+  phiMap.clear();
+  
+  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
+       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
+    delete I->second;
+  localAvail.clear();
+  
+  DominatorTree &DT = getAnalysis<DominatorTree>();
+  
+  // Top-down walk of the dominator tree
+  bool changed = false;
+  for (df_iterator<DomTreeNode*> DI = df_begin(DT.getRootNode()),
+       DE = df_end(DT.getRootNode()); DI != DE; ++DI)
+    changed |= processBlock(*DI);
+  
+  if (EnablePRE)
+    changed |= performPRE(F);
+  
+  return changed;
+}
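
// A minimal standalone sketch (not part of this patch) of the diamond that
// performPRE targets, at the source level: 'a + b' is available on one path
// into the join block but not the other, so a copy is hoisted into the bare
// predecessor and the join uses a phi of the two.
int before(bool t, int a, int b) {
  int x = 0;
  if (t)
    x = a + b;        // available on the then-path only
  return (a + b) + x; // partially redundant at the join
}

int after(bool t, int a, int b) {
  int x = 0, pre;
  if (t) {
    x = a + b;
    pre = x;          // already available here
  } else {
    pre = a + b;      // inserted copy in the predecessor that lacked it
  }
  return pre + x;     // the phi of the two predecessors
}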