diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index f3aa7960f66..a9445dd8530 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -2,8 +2,8 @@
 //
 //                     The LLVM Compiler Infrastructure
 //
-// This file was developed by the Owen Anderson and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
@@ -13,24 +13,42 @@
 //===----------------------------------------------------------------------===//
 
 #define DEBUG_TYPE "gvn"
-#include "llvm/Value.h"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/BasicBlock.h"
-#include "llvm/Instructions.h"
-#include "llvm/Function.h"
+#include "llvm/Constants.h"
 #include "llvm/DerivedTypes.h"
-#include "llvm/Analysis/Dominators.h"
+#include "llvm/Function.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Instructions.h"
+#include "llvm/ParameterAttributes.h"
+#include "llvm/Value.h"
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DepthFirstIterator.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
 #include "llvm/Support/CFG.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Target/TargetData.h"
 using namespace llvm;
 
+STATISTIC(NumGVNInstr,    "Number of instructions deleted");
+STATISTIC(NumGVNLoad,     "Number of loads deleted");
+STATISTIC(NumMemSetInfer, "Number of memsets inferred");
+
+namespace {
+  cl::opt<bool>
+  FormMemSet("form-memset-from-stores",
+             cl::desc("Transform straight-line stores to memsets"),
+             cl::init(false), cl::Hidden);
+}
+
 //===----------------------------------------------------------------------===//
 //                         ValueTable Class
 //===----------------------------------------------------------------------===//
@@ -49,7 +67,7 @@ namespace {
                             FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
                             SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
                             FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
-                            PTRTOINT, INTTOPTR, BITCAST, GEP, EMPTY,
+                            PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, EMPTY,
                             TOMBSTONE };
 
     ExpressionOpcode opcode;
@@ -58,6 +76,7 @@ namespace {
     uint32_t secondVN;
     uint32_t thirdVN;
     SmallVector<uint32_t, 4> varargs;
+    Value* function;
 
     Expression() { }
     Expression(ExpressionOpcode o) : opcode(o) { }
@@ -69,6 +88,8 @@ namespace {
         return true;
       else if (type != other.type)
         return false;
+      else if (function != other.function)
+        return false;
       else if (firstVN != other.firstVN)
         return false;
       else if (secondVN != other.secondVN)
@@ -94,6 +115,8 @@ namespace {
         return false;
       else if (type != other.type)
        return true;
+      else if (function != other.function)
+        return true;
      else if (firstVN != other.firstVN)
        return true;
      else if (secondVN != other.secondVN)
@@ -117,6 +140,7 @@ namespace {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
+      AliasAnalysis* AA;
 
      uint32_t nextValueNumber;
@@ -131,21 +155,29 @@ namespace {
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
+      Expression create_expression(CallInst* C);
    public:
-      ValueTable() { nextValueNumber = 1; }
+      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value* V);
      uint32_t lookup(Value* V) const;
      void add(Value* V, uint32_t num);
      void clear();
      void erase(Value* v);
      unsigned size();
+      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
+      uint32_t hash_operand(Value* v);
  };
 }
 
 namespace llvm {
-template <> struct DenseMapKeyInfo<Expression> {
-  static inline Expression getEmptyKey() { return Expression(Expression::EMPTY); }
-  static inline Expression getTombstoneKey() { return Expression(Expression::TOMBSTONE); }
+template <> struct DenseMapInfo<Expression> {
+  static inline Expression getEmptyKey() {
+    return Expression(Expression::EMPTY);
+  }
+  
+  static inline Expression getTombstoneKey() {
+    return Expression(Expression::TOMBSTONE);
+  }
   
   static unsigned getHashValue(const Expression e) {
     unsigned hash = e.opcode;
@@ -154,16 +186,23 @@ template <> struct DenseMapKeyInfo<Expression> {
     hash = e.secondVN + hash * 37;
     hash = e.thirdVN + hash * 37;
     
-    hash = (unsigned)((uintptr_t)e.type >> 4) ^
-            (unsigned)((uintptr_t)e.type >> 9) +
-            hash * 37;
+    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
+            (unsigned)((uintptr_t)e.type >> 9)) +
+           hash * 37;
     
-    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(), E = e.varargs.end();
-         I != E; ++I)
+    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
+         E = e.varargs.end(); I != E; ++I)
       hash = *I + hash * 37;
     
+    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
+            (unsigned)((uintptr_t)e.function >> 9)) +
+           hash * 37;
+    
     return hash;
   }
+  static bool isEqual(const Expression &LHS, const Expression &RHS) {
+    return LHS == RHS;
+  }
   static bool isPod() { return true; }
 };
 }
@@ -171,156 +210,117 @@ template <> struct DenseMapKeyInfo<Expression> {
 //===----------------------------------------------------------------------===//
 //                     ValueTable Internal Functions
 //===----------------------------------------------------------------------===//
-Expression::ExpressionOpcode 
-    ValueTable::getOpcode(BinaryOperator* BO) {
+Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) {
   switch(BO->getOpcode()) {
-    case Instruction::Add:
-      return Expression::ADD;
-    case Instruction::Sub:
-      return Expression::SUB;
-    case Instruction::Mul:
-      return Expression::MUL;
-    case Instruction::UDiv:
-      return Expression::UDIV;
-    case Instruction::SDiv:
-      return Expression::SDIV;
-    case Instruction::FDiv:
-      return Expression::FDIV;
-    case Instruction::URem:
-      return Expression::UREM;
-    case Instruction::SRem:
-      return Expression::SREM;
-    case Instruction::FRem:
-      return Expression::FREM;
-    case Instruction::Shl:
-      return Expression::SHL;
-    case Instruction::LShr:
-      return Expression::LSHR;
-    case Instruction::AShr:
-      return Expression::ASHR;
-    case Instruction::And:
-      return Expression::AND;
-    case Instruction::Or:
-      return Expression::OR;
-    case Instruction::Xor:
-      return Expression::XOR;
-    
-    // THIS SHOULD NEVER HAPPEN
-    default:
-      assert(0 && "Binary operator with unknown opcode?");
-      return Expression::ADD;
+    default: // THIS SHOULD NEVER HAPPEN
+      assert(0 && "Binary operator with unknown opcode?");
+    case Instruction::Add:  return Expression::ADD;
+    case Instruction::Sub:  return Expression::SUB;
+    case Instruction::Mul:  return Expression::MUL;
+    case Instruction::UDiv: return Expression::UDIV;
+    case Instruction::SDiv: return Expression::SDIV;
+    case Instruction::FDiv: return Expression::FDIV;
+    case Instruction::URem: return Expression::UREM;
+    case Instruction::SRem: return Expression::SREM;
+    case Instruction::FRem: return Expression::FREM;
+    case Instruction::Shl:  return Expression::SHL;
+    case Instruction::LShr: return Expression::LSHR;
+    case Instruction::AShr: return Expression::ASHR;
+    case Instruction::And:  return Expression::AND;
+    case Instruction::Or:   return Expression::OR;
+    case Instruction::Xor:  return Expression::XOR;
   }
 }
 
 Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
-  if (C->getOpcode() == Instruction::ICmp) {
+  if (isa<ICmpInst>(C)) {
     switch (C->getPredicate()) {
-      case ICmpInst::ICMP_EQ:
-        return Expression::ICMPEQ;
-      case ICmpInst::ICMP_NE:
-        return Expression::ICMPNE;
-      case ICmpInst::ICMP_UGT:
-        return Expression::ICMPUGT;
-      case ICmpInst::ICMP_UGE:
-        return Expression::ICMPUGE;
-      case ICmpInst::ICMP_ULT:
-        return Expression::ICMPULT;
-      case ICmpInst::ICMP_ULE:
-        return Expression::ICMPULE;
-      case ICmpInst::ICMP_SGT:
-        return Expression::ICMPSGT;
-      case ICmpInst::ICMP_SGE:
-        return Expression::ICMPSGE;
-      case ICmpInst::ICMP_SLT:
-        return Expression::ICMPSLT;
-      case ICmpInst::ICMP_SLE:
-        return Expression::ICMPSLE;
-      
-      // THIS SHOULD NEVER HAPPEN
-      default:
-        assert(0 && "Comparison with unknown predicate?");
-        return Expression::ICMPEQ;
-    }
-  } else {
-    switch (C->getPredicate()) {
-      case FCmpInst::FCMP_OEQ:
-        return Expression::FCMPOEQ;
-      case FCmpInst::FCMP_OGT:
-        return Expression::FCMPOGT;
-      case FCmpInst::FCMP_OGE:
-        return Expression::FCMPOGE;
-      case FCmpInst::FCMP_OLT:
-        return Expression::FCMPOLT;
-      case FCmpInst::FCMP_OLE:
-        return Expression::FCMPOLE;
-      case FCmpInst::FCMP_ONE:
-        return Expression::FCMPONE;
-      case FCmpInst::FCMP_ORD:
-        return Expression::FCMPORD;
-      case FCmpInst::FCMP_UNO:
-        return Expression::FCMPUNO;
-      case FCmpInst::FCMP_UEQ:
-        return Expression::FCMPUEQ;
-      case FCmpInst::FCMP_UGT:
-        return Expression::FCMPUGT;
-      case FCmpInst::FCMP_UGE:
-        return Expression::FCMPUGE;
-      case FCmpInst::FCMP_ULT:
-        return Expression::FCMPULT;
-      case FCmpInst::FCMP_ULE:
-        return Expression::FCMPULE;
-      case FCmpInst::FCMP_UNE:
-        return Expression::FCMPUNE;
-      
-      // THIS SHOULD NEVER HAPPEN
-      default:
-        assert(0 && "Comparison with unknown predicate?");
-        return Expression::FCMPOEQ;
+      default:  // THIS SHOULD NEVER HAPPEN
+        assert(0 && "Comparison with unknown predicate?");
+      case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
+      case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
+      case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
+      case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
+      case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
+      case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
+      case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
+      case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
+      case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
+      case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
     }
   }
+  assert(isa<FCmpInst>(C) && "Unknown compare");
+  switch (C->getPredicate()) {
+    default: // THIS SHOULD NEVER HAPPEN
+      assert(0 && "Comparison with unknown predicate?");
+    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
+    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
+    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
+    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
+    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
+    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
+    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
+    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
+    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
+    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
+    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
+    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
+    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
+    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
+  }
 }
 
-Expression::ExpressionOpcode 
-    ValueTable::getOpcode(CastInst* C) {
+Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) {
   switch(C->getOpcode()) {
-    case Instruction::Trunc:
-      return Expression::TRUNC;
-    case Instruction::ZExt:
-      return Expression::ZEXT;
-    case Instruction::SExt:
-      return Expression::SEXT;
-    case Instruction::FPToUI:
-      return Expression::FPTOUI;
-    case Instruction::FPToSI:
-      return Expression::FPTOSI;
-    case Instruction::UIToFP:
-      return Expression::UITOFP;
-    case Instruction::SIToFP:
-      return Expression::SITOFP;
-    case Instruction::FPTrunc:
-      return Expression::FPTRUNC;
-    case Instruction::FPExt:
-      return Expression::FPEXT;
-    case Instruction::PtrToInt:
-      return Expression::PTRTOINT;
-    case Instruction::IntToPtr:
-      return Expression::INTTOPTR;
-    case Instruction::BitCast:
-      return Expression::BITCAST;
-    
-    // THIS SHOULD NEVER HAPPEN
-    default:
-      assert(0 && "Cast operator with unknown opcode?");
-      return Expression::BITCAST;
+    default: // THIS SHOULD NEVER HAPPEN
+      assert(0 && "Cast operator with unknown opcode?");
+    case Instruction::Trunc:    return Expression::TRUNC;
+    case Instruction::ZExt:     return Expression::ZEXT;
+    case Instruction::SExt:     return Expression::SEXT;
+    case Instruction::FPToUI:   return Expression::FPTOUI;
+    case Instruction::FPToSI:   return Expression::FPTOSI;
+    case Instruction::UIToFP:   return Expression::UITOFP;
+    case Instruction::SIToFP:   return Expression::SITOFP;
+    case Instruction::FPTrunc:  return Expression::FPTRUNC;
+    case Instruction::FPExt:    return Expression::FPEXT;
+    case Instruction::PtrToInt: return Expression::PTRTOINT;
+    case Instruction::IntToPtr: return Expression::INTTOPTR;
+    case Instruction::BitCast:  return Expression::BITCAST;
   }
 }
 
+uint32_t ValueTable::hash_operand(Value* v) {
+  if (CallInst* CI = dyn_cast<CallInst>(v))
+    if (!AA->doesNotAccessMemory(CI))
+      return nextValueNumber++;
+  
+  return lookup_or_add(v);
+}
+
+Expression ValueTable::create_expression(CallInst* C) {
+  Expression e;
+  
+  e.type = C->getType();
+  e.firstVN = 0;
+  e.secondVN = 0;
+  e.thirdVN = 0;
+  e.function = C->getCalledFunction();
+  e.opcode = Expression::CALL;
+  
+  for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
+       I != E; ++I)
+    e.varargs.push_back(hash_operand(*I));
+  
+  return e;
+}
+
 Expression ValueTable::create_expression(BinaryOperator* BO) {
   Expression e;
     
-  e.firstVN = lookup_or_add(BO->getOperand(0));
-  e.secondVN = lookup_or_add(BO->getOperand(1));
+  e.firstVN = hash_operand(BO->getOperand(0));
+  e.secondVN = hash_operand(BO->getOperand(1));
   e.thirdVN = 0;
+  e.function = 0;
   e.type = BO->getType();
   e.opcode = getOpcode(BO);
   
@@ -330,9 +330,10 @@ Expression ValueTable::create_expression(BinaryOperator* BO) {
 Expression ValueTable::create_expression(CmpInst* C) {
   Expression e;
     
-  e.firstVN = lookup_or_add(C->getOperand(0));
-  e.secondVN = lookup_or_add(C->getOperand(1));
+  e.firstVN = hash_operand(C->getOperand(0));
+  e.secondVN = hash_operand(C->getOperand(1));
   e.thirdVN = 0;
+  e.function = 0;
   e.type = C->getType();
   e.opcode = getOpcode(C);
   
@@ -342,9 +343,10 @@ Expression ValueTable::create_expression(CmpInst* C) {
 Expression ValueTable::create_expression(CastInst* C) {
   Expression e;
   
-  e.firstVN = lookup_or_add(C->getOperand(0));
+  e.firstVN = hash_operand(C->getOperand(0));
   e.secondVN = 0;
   e.thirdVN = 0;
+  e.function = 0;
   e.type = C->getType();
   e.opcode = getOpcode(C);
   
@@ -354,9 +356,10 @@ Expression ValueTable::create_expression(CastInst* C) {
 Expression ValueTable::create_expression(ShuffleVectorInst* S) {
   Expression e;
   
-  e.firstVN = lookup_or_add(S->getOperand(0));
-  e.secondVN = lookup_or_add(S->getOperand(1));
-  e.thirdVN = lookup_or_add(S->getOperand(2));
+  e.firstVN = hash_operand(S->getOperand(0));
+  e.secondVN = hash_operand(S->getOperand(1));
+  e.thirdVN = hash_operand(S->getOperand(2));
+  e.function = 0;
   e.type = S->getType();
   e.opcode = Expression::SHUFFLE;
   
@@ -366,9 +369,10 @@ Expression ValueTable::create_expression(ShuffleVectorInst* S) {
 Expression ValueTable::create_expression(ExtractElementInst* E) {
   Expression e;
   
-  e.firstVN = lookup_or_add(E->getOperand(0));
-  e.secondVN = lookup_or_add(E->getOperand(1));
+  e.firstVN = hash_operand(E->getOperand(0));
+  e.secondVN = hash_operand(E->getOperand(1));
   e.thirdVN = 0;
+  e.function = 0;
   e.type = E->getType();
   e.opcode = Expression::EXTRACT;
   
@@ -378,9 +382,10 @@ Expression ValueTable::create_expression(ExtractElementInst* E) {
 Expression ValueTable::create_expression(InsertElementInst* I) {
   Expression e;
   
-  e.firstVN = lookup_or_add(I->getOperand(0));
-  e.secondVN = lookup_or_add(I->getOperand(1));
-  e.thirdVN = lookup_or_add(I->getOperand(2));
+  e.firstVN = hash_operand(I->getOperand(0));
+  e.secondVN = hash_operand(I->getOperand(1));
+  e.thirdVN = hash_operand(I->getOperand(2));
+  e.function = 0;
   e.type = I->getType();
   e.opcode = Expression::INSERT;
   
@@ -390,9 +395,10 @@ Expression ValueTable::create_expression(InsertElementInst* I) {
 Expression ValueTable::create_expression(SelectInst* I) {
   Expression e;
   
-  e.firstVN = lookup_or_add(I->getCondition());
-  e.secondVN = lookup_or_add(I->getTrueValue());
-  e.thirdVN = lookup_or_add(I->getFalseValue());
+  e.firstVN = hash_operand(I->getCondition());
+  e.secondVN = hash_operand(I->getTrueValue());
+  e.thirdVN = hash_operand(I->getFalseValue());
+  e.function = 0;
   e.type = I->getType();
   e.opcode = Expression::SELECT;
   
@@ -402,15 +408,16 @@ Expression ValueTable::create_expression(SelectInst* I) {
 Expression ValueTable::create_expression(GetElementPtrInst* G) {
   Expression e;
   
-  e.firstVN = lookup_or_add(G->getPointerOperand());
+  e.firstVN = hash_operand(G->getPointerOperand());
   e.secondVN = 0;
   e.thirdVN = 0;
+  e.function = 0;
   e.type = G->getType();
   e.opcode = Expression::GEP;
   
   for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
        I != E; ++I)
-    e.varargs.push_back(lookup_or_add(*I));
+    e.varargs.push_back(hash_operand(*I));
   
   return e;
 }
@@ -426,8 +433,25 @@ uint32_t ValueTable::lookup_or_add(Value* V) {
   if (VI != valueNumbering.end())
     return VI->second;
   
-  
-  if (BinaryOperator* BO = dyn_cast<BinaryOperator>(V)) {
+  if (CallInst* C = dyn_cast<CallInst>(V)) {
+    if (AA->onlyReadsMemory(C)) { // includes doesNotAccessMemory
+      Expression e = create_expression(C);
+      
+      DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
+      if (EI != expressionNumbering.end()) {
+        valueNumbering.insert(std::make_pair(V, EI->second));
+        return EI->second;
+      } else {
+        expressionNumbering.insert(std::make_pair(e, nextValueNumber));
+        valueNumbering.insert(std::make_pair(V, nextValueNumber));
+        
+        return nextValueNumber++;
+      }
+    } else {
+      valueNumbering.insert(std::make_pair(V, nextValueNumber));
+      return nextValueNumber++;
+    }
+  } else if (BinaryOperator* BO = dyn_cast<BinaryOperator>(V)) {
     Expression e = create_expression(BO);
     
     DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
@@ -541,12 +565,8 @@ uint32_t ValueTable::lookup_or_add(Value* V) {
 /// the value has not yet been numbered.
 uint32_t ValueTable::lookup(Value* V) const {
   DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
-  if (VI != valueNumbering.end())
-    return VI->second;
-  else
-    assert(0 && "Value not numbered?");
-  
-  return 0;
+  assert(VI != valueNumbering.end() && "Value not numbered?");
+  return VI->second;
 }
 
 /// clear - Remove all entries from the ValueTable
@@ -556,11 +576,16 @@ void ValueTable::clear() {
   nextValueNumber = 1;
 }
 
+/// erase - Remove a value from the value numbering
+void ValueTable::erase(Value* V) {
+  valueNumbering.erase(V);
+}
+
 //===----------------------------------------------------------------------===//
 //                       ValueNumberedSet Class
 //===----------------------------------------------------------------------===//
 namespace {
-class ValueNumberedSet {
+class VISIBILITY_HIDDEN ValueNumberedSet {
   private:
     SmallPtrSet<Value*, 8> contents;
    BitVector numbers;
@@ -630,12 +655,20 @@ namespace {
    
    DenseMap<BasicBlock*, ValueNumberedSet> availableOut;
    
+    typedef DenseMap<Value*, SmallPtrSet<PHINode*, 4> > PhiMapType;
+    PhiMapType phiMap;
+    
+    
    // This transformation requires dominator info
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
+      AU.addRequired<AliasAnalysis>();
+      AU.addRequired<TargetData>();
+      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
+      AU.addPreserved<TargetData>();
    }
    
    // Helper functions
    Value* find_leader(ValueNumberedSet& vals, uint32_t v);
    void val_insert(ValueNumberedSet& s, Value* v);
    bool processLoad(LoadInst* L,
-                     DenseMap<Value*, LoadInst*>& lastLoad,
-                     SmallVector<Instruction*, 4>& toErase);
+                     DenseMap<Value*, LoadInst*> &lastLoad,
+                     SmallVectorImpl<Instruction*> &toErase);
+    bool processStore(StoreInst *SI, SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction* I,
                            ValueNumberedSet& currAvail,
                            DenseMap<Value*, LoadInst*>& lastSeenLoad,
-                            SmallVector<Instruction*, 4>& toErase);
-    bool processNonLocalLoad(LoadInst* L, SmallVector<Instruction*, 4>& toErase);
-    Value *performPHIConstruction(BasicBlock *BB, LoadInst* orig,
-                                  DenseMap<BasicBlock*, Value*> &Phis,
-                                  SmallPtrSet<BasicBlock*, 4>& visited);
+                            SmallVectorImpl<Instruction*> &toErase);
+    bool processNonLocalLoad(LoadInst* L,
+                             SmallVectorImpl<Instruction*> &toErase);
+    bool processMemCpy(MemCpyInst* M, MemCpyInst* MDep,
+                       SmallVectorImpl<Instruction*> &toErase);
+    bool performCallSlotOptzn(MemCpyInst* cpy, CallInst* C,
+                              SmallVectorImpl<Instruction*> &toErase);
+    Value *GetValueForBlock(BasicBlock *BB, LoadInst* orig,
+                            DenseMap<BasicBlock*, Value*> &Phis,
+                            bool top_level = false);
    void dump(DenseMap<uint32_t, Value*>& d);
+    bool iterateOnFunction(Function &F);
+    Value* CollapsePhi(PHINode* p);
+    bool isSafeReplacement(PHINode* p, Instruction* inst);
  };
  
  char GVN::ID = 0;
-  
 }
 
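The hunks above are the heart of the change to the value table: a call that only reads memory becomes an ordinary hashable expression (the new CALL opcode, the function field, and hash_operand), so two identical read-only calls receive the same value number. A minimal standalone sketch of that idea follows; it is independent of this patch, and every name in it is hypothetical rather than an LLVM API:

// Sketch only: numbering pure calls by (callee, operand numbers).
#include <cstdint>
#include <map>
#include <vector>
#include <iostream>

struct CallKey {
  const void* callee;            // stands in for the patch's Value* function
  std::vector<uint32_t> argVNs;  // value numbers of the call operands
  bool operator<(const CallKey& o) const {
    if (callee != o.callee) return callee < o.callee;
    return argVNs < o.argVNs;
  }
};

class SimpleValueTable {
  std::map<CallKey, uint32_t> exprNumbering;
  uint32_t nextValueNumber = 1;
public:
  // A call that only reads memory can share a number with an identical
  // earlier call; anything that may write memory gets a fresh number.
  uint32_t numberCall(const CallKey& k, bool onlyReadsMemory) {
    if (!onlyReadsMemory) return nextValueNumber++;
    std::map<CallKey, uint32_t>::iterator it = exprNumbering.find(k);
    if (it != exprNumbering.end()) return it->second;
    return exprNumbering[k] = nextValueNumber++;
  }
};

int main() {
  SimpleValueTable VT;
  const char* f = "f";
  CallKey k1{f, {1, 2}}, k2{f, {1, 2}};
  // Same callee, same operand numbers: the second call is redundant.
  std::cout << VT.numberCall(k1, true) << " == "
            << VT.numberCall(k2, true) << "\n";
}

The sketch omits one refinement that hash_operand supplies in the patch itself: an operand that is a memory-writing call is renumbered on every use, so no expression is ever keyed on stale memory state.
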
 // createGVNPass - The public interface to this file...
@@ -666,9 +707,6 @@
 FunctionPass *llvm::createGVNPass() { return new GVN(); }
 
 static RegisterPass<GVN> X("gvn",
                            "Global Value Numbering");
 
-STATISTIC(NumGVNInstr, "Number of instructions deleted");
-STATISTIC(NumGVNLoad, "Number of loads deleted");
-
 /// find_leader - Given a set and a value number, return the first
 /// element of the set with that value number, or 0 if no such element
 /// is present
@@ -705,98 +743,159 @@ void GVN::dump(DenseMap<uint32_t, Value*>& d) {
   printf("}\n");
 }
 
+Value* GVN::CollapsePhi(PHINode* p) {
+  DominatorTree &DT = getAnalysis<DominatorTree>();
+  Value* constVal = p->hasConstantValue();
+  
+  if (!constVal) return 0;
+  
+  Instruction* inst = dyn_cast<Instruction>(constVal);
+  if (!inst)
+    return constVal;
+    
+  if (DT.dominates(inst, p))
+    if (isSafeReplacement(p, inst))
+      return inst;
+  
+  return 0;
+}
 
-Value *GVN::performPHIConstruction(BasicBlock *BB, LoadInst* orig,
-                                   DenseMap<BasicBlock*, Value*> &Phis,
-                                   SmallPtrSet<BasicBlock*, 4>& visited) {
-  DenseMap<BasicBlock*, Value*>::iterator DI = Phis.find(BB);
-  if (DI != Phis.end())
-    return DI->second;
+bool GVN::isSafeReplacement(PHINode* p, Instruction* inst) {
+  if (!isa<PHINode>(inst))
+    return true;
   
-  unsigned numPreds = std::distance(pred_begin(BB), pred_end(BB));
+  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
+       UI != E; ++UI)
+    if (PHINode* use_phi = dyn_cast<PHINode>(UI))
+      if (use_phi->getParent() == inst->getParent())
+        return false;
   
-  if (numPreds == 1) {
-    DenseMap<BasicBlock*, Value*>::iterator DI = Phis.find(BB);
-    if (DI != Phis.end()) {
-      Phis.insert(std::make_pair(BB, DI->second));
-      return DI->second;
-    } else {
-      visited.insert(BB);
-      Value* domV = performPHIConstruction(*pred_begin(BB), orig, Phis, visited);
-      visited.erase(BB);
-      
-      Phis.insert(std::make_pair(BB, domV));
-      return domV;
-    }
-  } else {
-    PHINode *PN = new PHINode(orig->getType(), orig->getName()+".rle", BB->begin());
-    PN->reserveOperandSpace(numPreds);
-    Phis[BB] = PN;
-    
-    visited.insert(BB);
-    // Fill in the incoming values for the block.
-    for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
-      if (!visited.count(*PI))
-        PN->addIncoming(performPHIConstruction(*PI, orig, Phis, visited), *PI);
-      else
-        PN->addIncoming(Phis[*PI], *PI);
-    visited.erase(BB);
-    
-    bool all_same = PN->getNumIncomingValues() != 1;
-    Value* first = PN->getIncomingValue(0);
-    for (unsigned i = 1; i < PN->getNumIncomingValues(); ++i)
-      all_same &= (PN->getIncomingValue(i) == first);
-    
-    if (all_same) {
-      PN->eraseFromParent();
-      Phis[BB] = first;
-      return first;
-    } else {
-      return PN;
-    }
+  return true;
+}
+
+/// GetValueForBlock - Get the value to use within the specified basic block.
+/// Available values are in Phis.
+Value *GVN::GetValueForBlock(BasicBlock *BB, LoadInst* orig,
+                             DenseMap<BasicBlock*, Value*> &Phis,
+                             bool top_level) { 
+  
+  // If we have already computed this value, return the previously computed val.
+  DenseMap<BasicBlock*, Value*>::iterator V = Phis.find(BB);
+  if (V != Phis.end() && !top_level) return V->second;
+  
+  BasicBlock* singlePred = BB->getSinglePredecessor();
+  if (singlePred) {
+    Value *ret = GetValueForBlock(singlePred, orig, Phis);
+    Phis[BB] = ret;
+    return ret;
   }
+  
+  // Otherwise, the idom is the loop, so we need to insert a PHI node.  Do so
+  // now, then get values to fill in the incoming values for the PHI.
+  PHINode *PN = new PHINode(orig->getType(), orig->getName()+".rle",
+                            BB->begin());
+  PN->reserveOperandSpace(std::distance(pred_begin(BB), pred_end(BB)));
+  
+  if (Phis.count(BB) == 0)
+    Phis.insert(std::make_pair(BB, PN));
+  
+  // Fill in the incoming values for the block.
+  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
+    Value* val = GetValueForBlock(*PI, orig, Phis);
+    PN->addIncoming(val, *PI);
   }
+  
+  AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
+  AA.copyValue(orig, PN);
+  
+  // Attempt to collapse PHI nodes that are trivially redundant
+  Value* v = CollapsePhi(PN);
+  if (!v) {
+    // Cache our phi construction results
+    phiMap[orig->getPointerOperand()].insert(PN);
+    return PN;
+  }
+  
+  MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
+
+  MD.removeInstruction(PN);
+  PN->replaceAllUsesWith(v);
+
+  for (DenseMap<BasicBlock*, Value*>::iterator I = Phis.begin(),
+       E = Phis.end(); I != E; ++I)
+    if (I->second == PN)
+      I->second = v;
+
+  PN->eraseFromParent();
+
+  Phis[BB] = v;
+  return v;
 }
 
-bool GVN::processNonLocalLoad(LoadInst* L, SmallVector<Instruction*, 4>& toErase) {
+/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
+/// non-local by performing PHI construction.
+bool GVN::processNonLocalLoad(LoadInst* L,
+                              SmallVectorImpl<Instruction*> &toErase) {
   MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
   
+  // Find the non-local dependencies of the load
   DenseMap<BasicBlock*, Value*> deps;
-  bool ret = MD.getNonLocalDependency(L, deps);
-  if (!ret)
-    return false;
+  MD.getNonLocalDependency(L, deps);
   
   DenseMap<BasicBlock*, Value*> repl;
+  
+  // Filter out useless results (non-locals, etc)
   for (DenseMap<BasicBlock*, Value*>::iterator I = deps.begin(), E = deps.end();
-       I != E; ++I)
-    if (I->second == MemoryDependenceAnalysis::None) {
+       I != E; ++I) {
+    if (I->second == MemoryDependenceAnalysis::None)
       return false;
-    } else if (StoreInst* S = dyn_cast<StoreInst>(I->second)) {
-      if (S->getPointerOperand() == L->getPointerOperand())
-        repl.insert(std::make_pair(I->first, S->getOperand(0)));
-      else
+    
+    if (I->second == MemoryDependenceAnalysis::NonLocal)
+      continue;
+  
+    if (StoreInst* S = dyn_cast<StoreInst>(I->second)) {
+      if (S->getPointerOperand() != L->getPointerOperand())
        return false;
+      repl[I->first] = S->getOperand(0);
    } else if (LoadInst* LD = dyn_cast<LoadInst>(I->second)) {
-      if (LD->getPointerOperand() == L->getPointerOperand())
-        repl.insert(std::make_pair(I->first, LD));
-      else
+      if (LD->getPointerOperand() != L->getPointerOperand())
        return false;
+      repl[I->first] = LD;
    } else {
      return false;
    }
+  }
   
+  // Use cached PHI construction information from previous runs
+  SmallPtrSet<PHINode*, 4>& p = phiMap[L->getPointerOperand()];
+  for (SmallPtrSet<PHINode*, 4>::iterator I = p.begin(), E = p.end();
+       I != E; ++I) {
+    if ((*I)->getParent() == L->getParent()) {
+      MD.removeInstruction(L);
+      L->replaceAllUsesWith(*I);
+      toErase.push_back(L);
+      NumGVNLoad++;
+      return true;
+    }
+    
+    repl.insert(std::make_pair((*I)->getParent(), *I));
+  }
+  
+  // Perform PHI construction
   SmallPtrSet<BasicBlock*, 4> visited;
-  Value* v = performPHIConstruction(L->getParent(), L, repl, visited);
+  Value* v = GetValueForBlock(L->getParent(), L, repl, true);
   
   MD.removeInstruction(L);
   L->replaceAllUsesWith(v);
   toErase.push_back(L);
+  NumGVNLoad++;
 
   return true;
 }
 
-bool GVN::processLoad(LoadInst* L,
-                      DenseMap<Value*, LoadInst*>& lastLoad,
-                      SmallVector<Instruction*, 4>& toErase) {
+/// processLoad - Attempt to eliminate a load, first by eliminating it
+/// locally, and then attempting non-local elimination if that fails.
+bool GVN::processLoad(LoadInst *L, DenseMap<Value*, LoadInst*> &lastLoad,
+                      SmallVectorImpl<Instruction*> &toErase) {
   if (L->isVolatile()) {
     lastLoad[L->getPointerOperand()] = L;
     return false;
@@ -807,12 +906,23 @@ bool GVN::processLoad(LoadInst* L,
   
   // ... to a pointer that has been loaded from before...
   MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
+  bool removedNonLocal = false;
   Instruction* dep = MD.getDependency(L);
   if (dep == MemoryDependenceAnalysis::NonLocal &&
-      L->getParent() != &L->getParent()->getParent()->getEntryBlock())
-    processNonLocalLoad(L, toErase);
+      L->getParent() != &L->getParent()->getParent()->getEntryBlock()) {
+    removedNonLocal = processNonLocalLoad(L, toErase);
+    
+    if (!removedNonLocal)
+      last = L;
+    
+    return removedNonLocal;
+  }
+  
   
   bool deletedLoad = false;
   
+  // Walk up the dependency chain until we either find
+  // a dependency we can use, or we can't walk any further
   while (dep != MemoryDependenceAnalysis::None &&
          dep != MemoryDependenceAnalysis::NonLocal &&
          (isa<StoreInst>(dep) || isa<LoadInst>(dep))) {
@@ -849,28 +959,549 @@ bool GVN::processLoad(LoadInst* L,
       dep = MD.getDependency(L, dep);
     }
   }
-  
+  
+  if (dep != MemoryDependenceAnalysis::None &&
+      dep != MemoryDependenceAnalysis::NonLocal &&
+      isa<AllocationInst>(dep)) {
+    // Check that this load is actually from the
+    // allocation we found
+    Value* v = L->getOperand(0);
+    while (true) {
+      if (BitCastInst *BC = dyn_cast<BitCastInst>(v))
+        v = BC->getOperand(0);
+      else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(v))
+        v = GEP->getOperand(0);
+      else
+        break;
+    }
+    if (v == dep) {
+      // If this load depends directly on an allocation, there isn't
+      // anything stored there; therefore, we can optimize this load
+      // to undef.
+      MD.removeInstruction(L);
+      
+      L->replaceAllUsesWith(UndefValue::get(L->getType()));
+      toErase.push_back(L);
+      deletedLoad = true;
+      NumGVNLoad++;
+    }
+  }
+  
   if (!deletedLoad)
     last = L;
   
   return deletedLoad;
 }
 
+/// isBytewiseValue - If the specified value can be set by repeating the same
+/// byte in memory, return the i8 value that it is represented with.  This is
+/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
+/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
+/// byte store (e.g. i16 0x1234), return null.
+static Value *isBytewiseValue(Value *V) {
+  // All byte-wide stores are splatable, even of arbitrary variables.
+  if (V->getType() == Type::Int8Ty) return V;
+  
+  // Constant float and double values can be handled as integer values if the
+  // corresponding integer value is "byteable".  An important case is 0.0.
+  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
+    if (CFP->getType() == Type::FloatTy)
+      V = ConstantExpr::getBitCast(CFP, Type::Int32Ty);
+    if (CFP->getType() == Type::DoubleTy)
+      V = ConstantExpr::getBitCast(CFP, Type::Int64Ty);
+    // Don't handle long double formats, which have strange constraints.
+  }
+  
+  // We can handle constant integers that are power of two in size and a
+  // multiple of 8 bits.
+  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+    unsigned Width = CI->getBitWidth();
+    if (isPowerOf2_32(Width) && Width > 8) {
+      // We can handle this value if the recursive binary decomposition is the
+      // same at all levels.
+      APInt Val = CI->getValue();
+      APInt Val2;
+      while (Val.getBitWidth() != 8) {
+        unsigned NextWidth = Val.getBitWidth()/2;
+        Val2 = Val.lshr(NextWidth);
+        Val2.trunc(Val.getBitWidth()/2);
+        Val.trunc(Val.getBitWidth()/2);
+        
+        // If the top/bottom halves aren't the same, reject it.
+        if (Val != Val2)
+          return 0;
+      }
+      return ConstantInt::get(Val);
+    }
+  }
+  
+  // Conceptually, we could handle things like:
+  //   %a = zext i8 %X to i16
+  //   %b = shl i16 %a, 8
+  //   %c = or i16 %a, %b
+  // but until there is an example that actually needs this, it doesn't seem
+  // worth worrying about.
+  return 0;
+}
+
+static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
+                                  bool &VariableIdxFound, TargetData &TD) {
+  // Skip over the first indices.
+  gep_type_iterator GTI = gep_type_begin(GEP);
+  for (unsigned i = 1; i != Idx; ++i, ++GTI)
+    /*skip along*/;
+  
+  // Compute the offset implied by the rest of the indices.
+  int64_t Offset = 0;
+  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
+    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
+    if (OpC == 0)
+      return VariableIdxFound = true;
+    if (OpC->isZero()) continue;  // No offset.
+
+    // Handle struct indices, which add their field offset to the pointer.
+    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
+      continue;
+    }
+    
+    // Otherwise, we have a sequential type like an array or vector.  Multiply
+    // the index by the ElementSize.
+    uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
+    Offset += Size*OpC->getSExtValue();
+  }
+
+  return Offset;
+}
+
+/// IsPointerAtOffset - Return true if Ptr1 is exactly provably equal to Ptr2
+/// plus the specified constant offset.  For example, Ptr1 might be &A[42], and
+/// Ptr2 might be &A[40] and Offset might be 8.
+static bool IsPointerAtOffset(Value *Ptr1, Value *Ptr2, uint64_t Offset,
+                              TargetData &TD) {
+  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
+  // base.  After that base, they may have some number of common (and
+  // potentially variable) indices.  After that they handle some constant
+  // offset, which determines their offset from each other.  At this point, we
+  // handle no other case.
+  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
+  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
+  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
+    return false;
+  
+  // Skip any common indices and track the GEP types.
+  unsigned Idx = 1;
+  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
+    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
+      break;
+
+  bool VariableIdxFound = false;
+  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
+  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
+  if (VariableIdxFound) return false;
+  
+  return Offset1 == Offset2+(int64_t)Offset;
+}
+
+
+/// processStore - When GVN is scanning forward over instructions, we look for
+/// some other patterns to fold away.  In particular, this looks for stores to
+/// neighboring locations of memory.  If it sees enough consecutive ones
+/// (currently 4) it attempts to merge them together into a memcpy/memset.
+bool GVN::processStore(StoreInst *SI, SmallVectorImpl<Instruction*> &toErase) {
+  if (!FormMemSet) return false;
+  if (SI->isVolatile()) return false;
+  
+  // There are two cases that are interesting for this code to handle: memcpy
+  // and memset.  Right now we only handle memset.
+  
+  // Ensure that the value being stored is something that can be memset a byte
+  // at a time, like "0" or "-1" at any width, as well as things like
+  // 0xA0A0A0A0 and 0.0.
+  Value *ByteVal = isBytewiseValue(SI->getOperand(0));
+  if (!ByteVal)
+    return false;
+
+  TargetData &TD = getAnalysis<TargetData>();
+  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
+
+  // Okay, so we now have a single splatable store.  Try to 'grow' this store
+  // by looking for neighboring stores to the immediate left or
+  // right of the store we have so far.  While we could in theory handle
+  // stores in this order: A[0], A[2], A[1], in practice right now we only
+  // worry about cases where stores are consecutive in increasing or
+  // decreasing address order.
+  uint64_t BytesSoFar = TD.getTypeStoreSize(SI->getOperand(0)->getType());
+  uint64_t BytesFromSI = 0;
+  unsigned StartAlign = SI->getAlignment();
+  Value *StartPtr = SI->getPointerOperand();
+  SmallVector<StoreInst*, 16> Stores;
+  Stores.push_back(SI);
+  
+  BasicBlock::iterator BI = SI;
+  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
+    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) { 
+      // If the call is readnone, ignore it, otherwise bail out.  We don't even
+      // allow readonly here because we don't want something like:
+      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
+      if (AA.getModRefBehavior(CallSite::get(BI)) ==
+            AliasAnalysis::DoesNotAccessMemory)
+        continue;
+      
+      // TODO: If this is a memset, try to join it in.
+      
+      break;
+    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
+      break;
+
+    // If this is a non-store instruction it is fine, ignore it.
+    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
+    if (NextStore == 0) continue;
+    
+    // If this is a store, see if we can merge it in.
+    if (NextStore->isVolatile()) break;
+    
+    // Check to see if this stored value is of the same byte-splattable value.
+    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
+      break;
+
+    Value *ThisPointer = NextStore->getPointerOperand();
+    unsigned AccessSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());
+    
+    // Check to see if the store is before or after the current range; in
+    // either case, extend the range, otherwise reject it.
+    if (IsPointerAtOffset(ThisPointer, StartPtr, BytesSoFar, TD)) {
+      // Okay, this extends the stored area on the end, just add to the bytes
+      // so far and remember this store.
+      BytesSoFar += AccessSize;
+      Stores.push_back(NextStore);
+      continue;
+    }
+    
+    if (IsPointerAtOffset(StartPtr, ThisPointer, AccessSize, TD)) {
+      // Okay, the store is before the current range.  Reset our start pointer
+      // and get new alignment info etc.
+      BytesSoFar  += AccessSize;
+      BytesFromSI += AccessSize;
+      Stores.push_back(NextStore);
+      StartPtr = ThisPointer;
+      StartAlign = NextStore->getAlignment();
+      continue;
+    }
+
+    // Otherwise, this store wasn't contiguous with our current range, bail out.
+    break;
+  }
+  
+  // If we found fewer than 4 stores to merge, bail out; it isn't worth losing
+  // type information in LLVM IR to do the transformation.
+  if (Stores.size() < 4) 
+    return false;
+  
+  // Otherwise, we do want to transform this!  Create a new memset.  We put the
+  // memset right after the first store that we found in this block.  This
+  // ensures that the caller will increment the iterator to the memset before
+  // it deletes all the stores.
+  BasicBlock::iterator InsertPt = SI; ++InsertPt;
+  
+  Function *F = Intrinsic::getDeclaration(SI->getParent()->getParent()
+                                          ->getParent(), Intrinsic::memset_i64);
+  
+  // StartPtr may not dominate the starting point.  Instead of using it, base
+  // the destination pointer off the input to the first store in the block.
+  StartPtr = SI->getPointerOperand();
+  
+  // Cast the start ptr to be i8* as memset requires.
+  const Type *i8Ptr = PointerType::getUnqual(Type::Int8Ty);
+  if (StartPtr->getType() != i8Ptr)
+    StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getNameStart(),
+                               InsertPt);
+  
+  // Offset the pointer if needed.
+  if (BytesFromSI)
+    StartPtr = new GetElementPtrInst(StartPtr, ConstantInt::get(Type::Int64Ty,
                                                                -BytesFromSI),
+                                     "ptroffset", InsertPt);
+  
+  Value *Ops[] = {
+    StartPtr, ByteVal,                                 // Start, value
+    ConstantInt::get(Type::Int64Ty, BytesSoFar),       // size
+    ConstantInt::get(Type::Int32Ty, StartAlign)        // align
+  };
+  new CallInst(F, Ops, Ops+4, "", InsertPt);
+  
+  // Zap all the stores.
+  toErase.append(Stores.begin(), Stores.end());
+  
+  ++NumMemSetInfer;
+  return true;
+}
+
+
+/// performCallSlotOptzn - Takes a memcpy and a call that it depends on,
+/// and checks for the possibility of a call slot optimization by having
+/// the call write its result directly into the destination of the memcpy.
+bool GVN::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C,
+                               SmallVectorImpl<Instruction*> &toErase) {
+  // The general transformation to keep in mind is
+  //
+  //   call @func(..., src, ...)
+  //   memcpy(dest, src, ...)
+  //
+  // ->
+  //
+  //   memcpy(dest, src, ...)
+  //   call @func(..., dest, ...)
+  //
+  // Since moving the memcpy is technically awkward, we additionally check that
+  // src only holds uninitialized values at the moment of the call, meaning that
+  // the memcpy can be discarded rather than moved.
+  
+  // Deliberately get the source and destination with bitcasts stripped away,
+  // because we'll need to do type comparisons based on the underlying type.
+  Value* cpyDest = cpy->getDest();
+  Value* cpySrc = cpy->getSource();
+  CallSite CS = CallSite::get(C);
+  
+  // We need to be able to reason about the size of the memcpy, so we require
+  // that it be a constant.
+  ConstantInt* cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
+  if (!cpyLength)
+    return false;
+  
+  // Require that src be an alloca.  This simplifies the reasoning considerably.
+  AllocaInst* srcAlloca = dyn_cast<AllocaInst>(cpySrc);
+  if (!srcAlloca)
+    return false;
+  
+  // Check that all of src is copied to dest.
+  TargetData& TD = getAnalysis<TargetData>();
+  
+  ConstantInt* srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
+  if (!srcArraySize)
+    return false;
+  
+  uint64_t srcSize = TD.getABITypeSize(srcAlloca->getAllocatedType()) *
+    srcArraySize->getZExtValue();
+  
+  if (cpyLength->getZExtValue() < srcSize)
+    return false;
+  
+  // Check that accessing the first srcSize bytes of dest will not cause a
+  // trap.  Otherwise the transform is invalid since it might cause a trap
+  // to occur earlier than it otherwise would.
+  if (AllocaInst* A = dyn_cast<AllocaInst>(cpyDest)) {
+    // The destination is an alloca.  Check it is larger than srcSize.
+    ConstantInt* destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
+    if (!destArraySize)
+      return false;
+    
+    uint64_t destSize = TD.getABITypeSize(A->getAllocatedType()) *
+      destArraySize->getZExtValue();
+    
+    if (destSize < srcSize)
+      return false;
+  } else if (Argument* A = dyn_cast<Argument>(cpyDest)) {
+    // If the destination is an sret parameter then only accesses that are
+    // outside of the returned struct type can trap.
+    if (!A->hasStructRetAttr())
+      return false;
+    
+    const Type* StructTy = cast<PointerType>(A->getType())->getElementType();
+    uint64_t destSize = TD.getABITypeSize(StructTy);
+    
+    if (destSize < srcSize)
+      return false;
+  } else {
+    return false;
+  }
+  
+  // Check that src is not accessed except via the call and the memcpy.  This
+  // guarantees that it holds only undefined values when passed in (so the final
+  // memcpy can be dropped), that it is not read or written between the call and
+  // the memcpy, and that writing beyond the end of it is undefined.
+  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
+                                   srcAlloca->use_end());
+  while (!srcUseList.empty()) {
+    User* UI = srcUseList.back();
+    srcUseList.pop_back();
+    
+    if (isa<BitCastInst>(UI) || isa<GetElementPtrInst>(UI)) {
+      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
+           I != E; ++I)
+        srcUseList.push_back(*I);
+    } else if (UI != C && UI != cpy) {
+      return false;
+    }
+  }
+  
+  // Since we're changing the parameter to the callsite, we need to make sure
+  // that what would be the new parameter dominates the callsite.
+  DominatorTree& DT = getAnalysis<DominatorTree>();
+  if (Instruction* cpyDestInst = dyn_cast<Instruction>(cpyDest))
+    if (!DT.dominates(cpyDestInst, C))
+      return false;
+  
+  // In addition to knowing that the call does not access src in some
+  // unexpected manner, for example via a global, which we deduce from
+  // the use analysis, we also need to know that it does not sneakily
+  // access dest.  We rely on AA to figure this out for us.
+  AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
+  if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
+      AliasAnalysis::NoModRef)
+    return false;
+  
+  // All the checks have passed, so do the transformation.
+  for (unsigned i = 0; i < CS.arg_size(); ++i)
+    if (CS.getArgument(i) == cpySrc) {
+      if (cpySrc->getType() != cpyDest->getType())
+        cpyDest = CastInst::createPointerCast(cpyDest, cpySrc->getType(),
+                                              cpyDest->getName(), C);
+      CS.setArgument(i, cpyDest);
+    }
+  
+  // Drop any cached information about the call, because we may have changed
+  // its dependence information by changing its parameter.
+  MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
+  MD.dropInstruction(C);
+  
+  // Remove the memcpy
+  MD.removeInstruction(cpy);
+  toErase.push_back(cpy);
+  
+  return true;
+}
+
+/// processMemCpy - Perform simplification of memcpys.  If we have memcpy A
+/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
+/// B to be a memcpy from X to Z (or potentially a memmove, depending on
+/// circumstances). This allows later passes to remove the first memcpy
+/// altogether.
+bool GVN::processMemCpy(MemCpyInst* M, MemCpyInst* MDep,
+                        SmallVectorImpl<Instruction*> &toErase) {
+  // We can only transform memcpys where the dest of one is the source of the
+  // other
+  if (M->getSource() != MDep->getDest())
+    return false;
+  
+  // Second, the lengths of the memcpys must be the same, or the preceding one
+  // must be larger than the following one.
+  ConstantInt* C1 = dyn_cast<ConstantInt>(MDep->getLength());
+  ConstantInt* C2 = dyn_cast<ConstantInt>(M->getLength());
+  if (!C1 || !C2)
+    return false;
+  
+  uint64_t DepSize = C1->getValue().getZExtValue();
+  uint64_t CpySize = C2->getValue().getZExtValue();
+  
+  if (DepSize < CpySize)
+    return false;
+  
+  // Finally, we have to make sure that the dest of the second does not
+  // alias the source of the first
+  AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
+  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
+      AliasAnalysis::NoAlias)
+    return false;
+  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
+           AliasAnalysis::NoAlias)
+    return false;
+  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
+           != AliasAnalysis::NoAlias)
+    return false;
+  
+  // If all checks passed, then we can transform these memcpys
+  Function* MemCpyFun = Intrinsic::getDeclaration(
+                                 M->getParent()->getParent()->getParent(),
+                                 M->getIntrinsicID());
+    
+  std::vector<Value*> args;
+  args.push_back(M->getRawDest());
+  args.push_back(MDep->getRawSource());
+  args.push_back(M->getLength());
+  args.push_back(M->getAlignment());
+  
+  CallInst* C = new CallInst(MemCpyFun, args.begin(), args.end(), "", M);
+  
+  MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
+  if (MD.getDependency(C) == MDep) {
+    MD.dropInstruction(M);
+    toErase.push_back(M);
+    return true;
+  }
+  
+  MD.removeInstruction(C);
+  toErase.push_back(C);
+  return false;
+}
+
+/// processInstruction - When calculating availability, handle an instruction
 /// by inserting it into the appropriate sets
-bool GVN::processInstruction(Instruction* I,
-                             ValueNumberedSet& currAvail,
-                             DenseMap<Value*, LoadInst*>& lastSeenLoad,
-                             SmallVector<Instruction*, 4>& toErase) {
-  if (LoadInst* L = dyn_cast<LoadInst>(I)) {
+bool GVN::processInstruction(Instruction *I, ValueNumberedSet &currAvail,
+                             DenseMap<Value*, LoadInst*> &lastSeenLoad,
+                             SmallVectorImpl<Instruction*> &toErase) {
+  if (LoadInst* L = dyn_cast<LoadInst>(I))
     return processLoad(L, lastSeenLoad, toErase);
+  
+  if (StoreInst *SI = dyn_cast<StoreInst>(I))
+    return processStore(SI, toErase);
+  
+  if (MemCpyInst* M = dyn_cast<MemCpyInst>(I)) {
+    MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
+
+    // There are two possible optimizations we can do for memcpy:
+    //   a) memcpy-memcpy xform which exposes redundancy for DSE
+    //   b) call-memcpy xform for return slot optimization
+    Instruction* dep = MD.getDependency(M);
+    if (dep == MemoryDependenceAnalysis::None ||
+        dep == MemoryDependenceAnalysis::NonLocal)
+      return false;
+    if (MemCpyInst *MemCpy = dyn_cast<MemCpyInst>(dep))
+      return processMemCpy(M, MemCpy, toErase);
+    if (CallInst* C = dyn_cast<CallInst>(dep))
+      return performCallSlotOptzn(M, C, toErase);
+    return false;
   }
   
   unsigned num = VN.lookup_or_add(I);
   
-  if (currAvail.test(num)) {
+  // Collapse PHI nodes
+  if (PHINode* p = dyn_cast<PHINode>(I)) {
+    Value* constVal = CollapsePhi(p);
+    
+    if (constVal) {
+      for (PhiMapType::iterator PI = phiMap.begin(), PE = phiMap.end();
+           PI != PE; ++PI)
+        if (PI->second.count(p))
+          PI->second.erase(p);
+        
+      p->replaceAllUsesWith(constVal);
+      toErase.push_back(p);
+    }
+  // Perform value-number based elimination
+  } else if (currAvail.test(num)) {
     Value* repl = find_leader(currAvail, num);
     
+    if (CallInst* CI = dyn_cast<CallInst>(I)) {
+      AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
+      if (!AA.doesNotAccessMemory(CI)) {
+        MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
+        if (cast<Instruction>(repl)->getParent() != CI->getParent() ||
+            MD.getDependency(CI) != MD.getDependency(cast<CallInst>(repl))) {
+          // There must be an intervening may-alias store, so nothing from
+          // this point on will be able to be replaced with the preceding call
+          currAvail.erase(repl);
+          currAvail.insert(I);
+          
+          return false;
+        }
+      }
+    }
+    
+    // Remove it!
+    MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
+    MD.removeInstruction(I);
+    
+    VN.erase(I);
     I->replaceAllUsesWith(repl);
     toErase.push_back(I);
     return true;
@@ -885,25 +1516,43 @@ bool GVN::processInstruction(Instruction* I,
 
 // GVN::runOnFunction - This is the main transformation entry point for a
 // function.
 //
-bool GVN::runOnFunction(Function &F) {
+bool GVN::runOnFunction(Function& F) {
+  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
+  
+  bool changed = false;
+  bool shouldContinue = true;
+  
+  while (shouldContinue) {
+    shouldContinue = iterateOnFunction(F);
+    changed |= shouldContinue;
+  }
+  
+  return changed;
+}
+
+
+// GVN::iterateOnFunction - Executes one iteration of GVN
+bool GVN::iterateOnFunction(Function &F) {
   // Clean out global sets from any previous functions
   VN.clear();
   availableOut.clear();
+  phiMap.clear();
  
   bool changed_function = false;
  
   DominatorTree &DT = getAnalysis<DominatorTree>();
  
   SmallVector<Instruction*, 4> toErase;
-  
+  DenseMap<Value*, LoadInst*> lastSeenLoad;
+  
   // Top-down walk of the dominator tree
   for (df_iterator<DomTreeNode*> DI = df_begin(DT.getRootNode()),
        E = df_end(DT.getRootNode()); DI != E; ++DI) {
    
    // Get the set to update for this block
    ValueNumberedSet& currAvail = availableOut[DI->getBlock()];
-    DenseMap<Value*, LoadInst*> lastSeenLoad;
-    
+    lastSeenLoad.clear();
+    
    BasicBlock* BB = DI->getBlock();
  
    // A block inherits AVAIL_OUT from its dominator
@@ -911,15 +1560,19 @@ bool GVN::runOnFunction(Function &F) {
      currAvail = availableOut[DI->getIDom()->getBlock()];
    
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
-         BI != BE; ++BI) {
-      changed_function |= processInstruction(BI, currAvail, lastSeenLoad, toErase);
+         BI != BE; ) {
+      changed_function |= processInstruction(BI, currAvail,
+                                             lastSeenLoad, toErase);
      
      NumGVNInstr += toErase.size();
      
+      // Avoid iterator invalidation
+      ++BI;
+      
      for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
           E = toErase.end(); I != E; ++I)
        (*I)->eraseFromParent();
-      
+      
      toErase.clear();
    }
  }
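
A closing note on isBytewiseValue above: the APInt loop repeatedly splits the constant in half and rejects it as soon as the two halves differ, leaving the repeated byte once the width reaches 8. The same check, as a self-contained sketch with plain 64-bit integers standing in for APInt (assumed widths: powers of two from 8 to 64; this is an illustration, not the patch's code):

// Sketch only: the recursive-halving byte-splat test.
#include <cstdint>
#include <cassert>

// Returns true and sets Byte if Val (of Width bits) is one byte repeated.
static bool isByteSplat(uint64_t Val, unsigned Width, uint8_t &Byte) {
  assert(Width >= 8 && Width <= 64 && (Width & (Width - 1)) == 0);
  while (Width != 8) {
    unsigned Next = Width / 2;
    uint64_t Mask = (1ULL << Next) - 1;
    uint64_t Lo = Val & Mask;
    uint64_t Hi = (Val >> Next) & Mask;
    if (Lo != Hi)
      return false;   // halves differ, e.g. 0x1234: not splatable
    Val = Lo;
    Width = Next;
  }
  Byte = static_cast<uint8_t>(Val);
  return true;
}

int main() {
  uint8_t B;
  assert(isByteSplat(0xF0F0, 16, B) && B == 0xF0);  // i16 0xF0F0: splatable
  assert(!isByteSplat(0x1234, 16, B));              // rejected, as in the comment
  assert(isByteSplat(0, 32, B) && B == 0);          // i32 0: memset of 0
  return 0;
}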