X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FAnalysis%2FScalarEvolutionExpander.cpp;h=3ec6fe42d472e52b2cfd88c332f61d435b1e348d;hb=789457847002f5289dbbc5cfce9d68c72e00bed1;hp=e249421a1f31d9e41bf8978b84658644bdc9ceb5;hpb=7cbd8a3e92221437048b484d5ef9c0a22d0f8c58;p=oota-llvm.git

diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index e249421a1f3..3ec6fe42d47 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -15,33 +15,75 @@
 #include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/LoopInfo.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/STLExtras.h"
 using namespace llvm;
 
-/// InsertCastOfTo - Insert a cast of V to the specified type, doing what
-/// we can to share the casts.
-Value *SCEVExpander::InsertCastOfTo(Instruction::CastOps opcode, Value *V,
-                                    const Type *Ty) {
+/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
+/// which must be possible with a noop cast, doing what we can to share
+/// the casts.
+Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {
+  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
+  assert((Op == Instruction::BitCast ||
+          Op == Instruction::PtrToInt ||
+          Op == Instruction::IntToPtr) &&
+         "InsertNoopCastOfTo cannot perform non-noop casts!");
+  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
+         "InsertNoopCastOfTo cannot change sizes!");
+
+  // Short-circuit unnecessary bitcasts.
+  if (Op == Instruction::BitCast && V->getType() == Ty)
+    return V;
+
+  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
+  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
+      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
+    if (CastInst *CI = dyn_cast<CastInst>(V))
+      if ((CI->getOpcode() == Instruction::PtrToInt ||
+           CI->getOpcode() == Instruction::IntToPtr) &&
+          SE.getTypeSizeInBits(CI->getType()) ==
+          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
+        return CI->getOperand(0);
+    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+      if ((CE->getOpcode() == Instruction::PtrToInt ||
+           CE->getOpcode() == Instruction::IntToPtr) &&
+          SE.getTypeSizeInBits(CE->getType()) ==
+          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
+        return CE->getOperand(0);
+  }
+
   // FIXME: keep track of the cast instruction.
   if (Constant *C = dyn_cast<Constant>(V))
-    return ConstantExpr::getCast(opcode, C, Ty);
+    return ConstantExpr::getCast(Op, C, Ty);
 
   if (Argument *A = dyn_cast<Argument>(V)) {
     // Check to see if there is already a cast!
     for (Value::use_iterator UI = A->use_begin(), E = A->use_end();
-         UI != E; ++UI) {
+         UI != E; ++UI)
       if ((*UI)->getType() == Ty)
         if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
-          if (CI->getOpcode() == opcode) {
+          if (CI->getOpcode() == Op) {
             // If the cast isn't the first instruction of the function, move it.
-            if (BasicBlock::iterator(CI) != 
+            if (BasicBlock::iterator(CI) !=
                 A->getParent()->getEntryBlock().begin()) {
-              CI->moveBefore(A->getParent()->getEntryBlock().begin());
+              // Recreate the cast at the beginning of the entry block.
+              // The old cast is left in place in case it is being used
+              // as an insert point.
+              Instruction *NewCI =
+                CastInst::Create(Op, V, Ty, "",
+                                 A->getParent()->getEntryBlock().begin());
+              NewCI->takeName(CI);
+              CI->replaceAllUsesWith(NewCI);
+              return NewCI;
             }
             return CI;
           }
-    }
-    return CastInst::Create(opcode, V, Ty, V->getName(),
-                            A->getParent()->getEntryBlock().begin());
+
+    Instruction *I = CastInst::Create(Op, V, Ty, V->getName(),
+                                      A->getParent()->getEntryBlock().begin());
+    InsertedValues.insert(I);
+    return I;
   }
 
   Instruction *I = cast<Instruction>(V);
@@ -51,14 +93,19 @@ Value *SCEVExpander::InsertCastOfTo(Instruction::CastOps opcode, Value *V,
        UI != E; ++UI) {
     if ((*UI)->getType() == Ty)
       if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
-        if (CI->getOpcode() == opcode) {
+        if (CI->getOpcode() == Op) {
           BasicBlock::iterator It = I; ++It;
           if (isa<InvokeInst>(I))
             It = cast<InvokeInst>(I)->getNormalDest()->begin();
           while (isa<PHINode>(It)) ++It;
           if (It != BasicBlock::iterator(CI)) {
-            // Splice the cast immediately after the operand in question.
-            CI->moveBefore(It);
+            // Recreate the cast at the beginning of the entry block.
+            // The old cast is left in place in case it is being used
+            // as an insert point.
+            Instruction *NewCI = CastInst::Create(Op, V, Ty, "", It);
+            NewCI->takeName(CI);
+            CI->replaceAllUsesWith(NewCI);
+            return NewCI;
           }
           return CI;
         }
@@ -67,13 +114,15 @@ Value *SCEVExpander::InsertCastOfTo(Instruction::CastOps opcode, Value *V,
   if (InvokeInst *II = dyn_cast<InvokeInst>(I))
     IP = II->getNormalDest()->begin();
   while (isa<PHINode>(IP)) ++IP;
-  return CastInst::Create(opcode, V, Ty, V->getName(), IP);
+  Instruction *CI = CastInst::Create(Op, V, Ty, V->getName(), IP);
+  InsertedValues.insert(CI);
+  return CI;
 }
 
 /// InsertBinop - Insert the specified binary operator, doing a small amount
 /// of work to avoid inserting an obviously redundant operation.
-Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, Value *LHS,
-                                 Value *RHS, Instruction *&InsertPt) {
+Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
+                                 Value *LHS, Value *RHS) {
   // Fold a binop with constant operands.
   if (Constant *CLHS = dyn_cast<Constant>(LHS))
     if (Constant *CRHS = dyn_cast<Constant>(RHS))
@@ -81,70 +130,401 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, Value *LHS,
 
   // Do a quick scan to see if we have this binop nearby. If so, reuse it.
   unsigned ScanLimit = 6;
-  for (BasicBlock::iterator IP = InsertPt, E = InsertPt->getParent()->begin();
-       ScanLimit; --IP, --ScanLimit) {
-    if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(IP))
-      if (BinOp->getOpcode() == Opcode && BinOp->getOperand(0) == LHS &&
-          BinOp->getOperand(1) == RHS) {
-        // If we found the instruction *at* the insert point, insert later
-        // instructions after it.
-        if (BinOp == InsertPt)
-          InsertPt = ++IP;
-        return BinOp;
+  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
+  // Scanning starts from the last instruction before the insertion point.
+  BasicBlock::iterator IP = Builder.GetInsertPoint();
+  if (IP != BlockBegin) {
+    --IP;
+    for (; ScanLimit; --IP, --ScanLimit) {
+      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
+          IP->getOperand(1) == RHS)
+        return IP;
+      if (IP == BlockBegin) break;
+    }
+  }
+
+  // If we haven't found this binop, insert it.
+  Value *BO = Builder.CreateBinOp(Opcode, LHS, RHS, "tmp");
+  InsertedValues.insert(BO);
+  return BO;
+}
+
+/// FactorOutConstant - Test if S is divisible by Factor, using signed
+/// division. If so, update S with Factor divided out and return true.
+/// S need not be evenly divisible if a reasonable remainder can be
+/// computed.
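+/// For example, with Factor = 4, the constant 10 factors into 2 with a
+/// remainder of 2, and the recurrence {8,+,12} factors into {2,+,3} with
+/// a remainder of 0.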
+/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
+/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
+/// check to see if the divide was folded.
+static bool FactorOutConstant(const SCEV *&S,
+                              const SCEV *&Remainder,
+                              const APInt &Factor,
+                              ScalarEvolution &SE) {
+  // Everything is divisible by one.
+  if (Factor == 1)
+    return true;
+
+  // For a Constant, check for a multiple of the given factor.
+  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
+    ConstantInt *CI =
+      ConstantInt::get(SE.getContext(), C->getValue()->getValue().sdiv(Factor));
+    // If the quotient is zero and the remainder is non-zero, reject
+    // the value at this scale. It will be considered for subsequent
+    // smaller scales.
+    if (C->isZero() || !CI->isZero()) {
+      const SCEV *Div = SE.getConstant(CI);
+      S = Div;
+      Remainder =
+        SE.getAddExpr(Remainder,
+                      SE.getConstant(C->getValue()->getValue().srem(Factor)));
+      return true;
+    }
+  }
+
+  // In a Mul, check if there is a constant operand which is a multiple
+  // of the given factor.
+  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S))
+    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
+      if (!C->getValue()->getValue().srem(Factor)) {
+        const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
+        SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
+                                               MOperands.end());
+        NewMulOps[0] =
+          SE.getConstant(C->getValue()->getValue().sdiv(Factor));
+        S = SE.getMulExpr(NewMulOps);
+        return true;
+      }
+
+  // In an AddRec, check if both start and step are divisible.
+  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
+    const SCEV *Step = A->getStepRecurrence(SE);
+    const SCEV *StepRem = SE.getIntegerSCEV(0, Step->getType());
+    if (!FactorOutConstant(Step, StepRem, Factor, SE))
+      return false;
+    if (!StepRem->isZero())
+      return false;
+    const SCEV *Start = A->getStart();
+    if (!FactorOutConstant(Start, Remainder, Factor, SE))
+      return false;
+    S = SE.getAddRecExpr(Start, Step, A->getLoop());
+    return true;
+  }
+
+  return false;
+}
+
+/// expandAddToGEP - Expand a SCEVAddExpr with a pointer type into a GEP
+/// instead of using ptrtoint+arithmetic+inttoptr. This helps
+/// BasicAliasAnalysis analyze the result.
+///
+/// Design note: This depends on ScalarEvolution not recognizing inttoptr
+/// and ptrtoint operators, as they may introduce pointer arithmetic
+/// which may not be safely converted into getelementptr.
+///
+/// Design note: It might seem desirable for this function to be more
+/// loop-aware. If some of the indices are loop-invariant while others
+/// aren't, it might seem desirable to emit multiple GEPs, keeping the
+/// loop-invariant portions of the overall computation outside the loop.
+/// However, there are a few reasons this is not done here. Hoisting simple
+/// arithmetic is a low-level optimization that often isn't very
+/// important until late in the optimization process. In fact, passes
+/// like InstructionCombining will combine GEPs, even if it means
+/// pushing loop-invariant computation down into loops, so even if the
+/// GEPs were split here, the work would quickly be undone. The
+/// LoopStrengthReduction pass, which is usually run quite late (and
+/// after the last InstructionCombining pass), takes care of hoisting
+/// loop-invariant portions of expressions, after considering what
+/// can be folded using target addressing modes.
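+///
+/// For example, on a target where double is 8 bytes, an address computed
+/// as %p + 8 * %i with %p of type double* can be emitted as
+/// "getelementptr double* %p, i64 %i" rather than as ptrtoint, add, and
+/// inttoptr instructions.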
+///
+Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
+                                    const SCEV *const *op_end,
+                                    const PointerType *PTy,
+                                    const Type *Ty,
+                                    Value *V) {
+  const Type *ElTy = PTy->getElementType();
+  SmallVector<Value *, 4> GepIndices;
+  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
+  bool AnyNonZeroIndices = false;
+
+  // Descend down the pointer's type and attempt to convert the other
+  // operands into GEP indices, at each level. The first index in a GEP
+  // indexes into the array implied by the pointer operand; the rest of
+  // the indices index into the element or field type selected by the
+  // preceding index.
+  for (;;) {
+    APInt ElSize = APInt(SE.getTypeSizeInBits(Ty),
+                         ElTy->isSized() ? SE.TD->getTypeAllocSize(ElTy) : 0);
+    SmallVector<const SCEV *, 8> NewOps;
+    SmallVector<const SCEV *, 8> ScaledOps;
+    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+      // Split AddRecs up into parts as either of the parts may be usable
+      // without the other.
+      if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i]))
+        if (!A->getStart()->isZero()) {
+          const SCEV *Start = A->getStart();
+          Ops.push_back(SE.getAddRecExpr(SE.getIntegerSCEV(0, A->getType()),
+                                         A->getStepRecurrence(SE),
+                                         A->getLoop()));
+          Ops[i] = Start;
+          ++e;
+        }
+      // If the scale size is not 0, attempt to factor out a scale.
+      if (ElSize != 0) {
+        const SCEV *Op = Ops[i];
+        const SCEV *Remainder = SE.getIntegerSCEV(0, Op->getType());
+        if (FactorOutConstant(Op, Remainder, ElSize, SE)) {
+          ScaledOps.push_back(Op); // Op now has ElSize factored out.
+          NewOps.push_back(Remainder);
+          continue;
+        }
+      }
+      // If the operand was not divisible, add it to the list of operands
+      // we'll scan next iteration.
+      NewOps.push_back(Ops[i]);
+    }
+    Ops = NewOps;
+    AnyNonZeroIndices |= !ScaledOps.empty();
+    Value *Scaled = ScaledOps.empty() ?
+                    Constant::getNullValue(Ty) :
+                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
+    GepIndices.push_back(Scaled);
+
+    // Collect struct field index operands.
+    if (!Ops.empty())
+      while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
+          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
+            const StructLayout &SL = *SE.TD->getStructLayout(STy);
+            uint64_t FullOffset = C->getValue()->getZExtValue();
+            if (FullOffset < SL.getSizeInBytes()) {
+              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
+              GepIndices.push_back(
+                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
+              ElTy = STy->getTypeAtIndex(ElIdx);
+              Ops[0] =
+                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
+              AnyNonZeroIndices = true;
+              continue;
+            }
+          }
+        break;
       }
-    if (IP == E) break;
+
+    if (const ArrayType *ATy = dyn_cast<ArrayType>(ElTy)) {
+      ElTy = ATy->getElementType();
+      continue;
+    }
+    break;
   }
 
-  // If we don't have 
-  return BinaryOperator::Create(Opcode, LHS, RHS, "tmp", InsertPt);
+  // If none of the operands were convertible to proper GEP indices, cast
+  // the base to i8* and do an ugly getelementptr with that. It's still
+  // better than ptrtoint+arithmetic+inttoptr at least.
+  if (!AnyNonZeroIndices) {
+    V = InsertNoopCastOfTo(V,
+       Type::getInt8Ty(Ty->getContext())->getPointerTo(PTy->getAddressSpace()));
+    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
+
+    // Fold a GEP with constant operands.
+    if (Constant *CLHS = dyn_cast<Constant>(V))
+      if (Constant *CRHS = dyn_cast<Constant>(Idx))
+        return ConstantExpr::getGetElementPtr(CLHS, &CRHS, 1);
+
+    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
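+    // (This is the same bounded backwards scan used in InsertBinop above.)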
+    unsigned ScanLimit = 6;
+    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
+    // Scanning starts from the last instruction before the insertion point.
+    BasicBlock::iterator IP = Builder.GetInsertPoint();
+    if (IP != BlockBegin) {
+      --IP;
+      for (; ScanLimit; --IP, --ScanLimit) {
+        if (IP->getOpcode() == Instruction::GetElementPtr &&
+            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
+          return IP;
+        if (IP == BlockBegin) break;
+      }
+    }
+
+    Value *GEP = Builder.CreateGEP(V, Idx, "scevgep");
+    InsertedValues.insert(GEP);
+    return GEP;
+  }
+
+  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
+  // because ScalarEvolution may have changed the address arithmetic to
+  // compute a value which is beyond the end of the allocated object.
+  Value *GEP = Builder.CreateGEP(V,
+                                 GepIndices.begin(),
+                                 GepIndices.end(),
+                                 "scevgep");
+  Ops.push_back(SE.getUnknown(GEP));
+  InsertedValues.insert(GEP);
+  return expand(SE.getAddExpr(Ops));
+}
+
+Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+  Value *V = expand(S->getOperand(S->getNumOperands()-1));
+
+  // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
+  // comments on expandAddToGEP for details.
+  if (SE.TD)
+    if (const PointerType *PTy = dyn_cast<PointerType>(V->getType())) {
+      const SmallVectorImpl<const SCEV *> &Ops = S->getOperands();
+      return expandAddToGEP(&Ops[0], &Ops[Ops.size() - 1], PTy, Ty, V);
+    }
+
+  V = InsertNoopCastOfTo(V, Ty);
+
+  // Emit a bunch of add instructions
+  for (int i = S->getNumOperands()-2; i >= 0; --i) {
+    Value *W = expandCodeFor(S->getOperand(i), Ty);
+    V = InsertBinop(Instruction::Add, V, W);
+  }
+  return V;
 }
 
-Value *SCEVExpander::visitMulExpr(SCEVMulExpr *S) {
+Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
   int FirstOp = 0;  // Set if we should emit a subtract.
-  if (SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getOperand(0)))
+  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getOperand(0)))
     if (SC->getValue()->isAllOnesValue())
       FirstOp = 1;
 
   int i = S->getNumOperands()-2;
-  Value *V = expand(S->getOperand(i+1));
+  Value *V = expandCodeFor(S->getOperand(i+1), Ty);
 
   // Emit a bunch of multiply instructions
-  for (; i >= FirstOp; --i)
-    V = InsertBinop(Instruction::Mul, V, expand(S->getOperand(i)),
-                    InsertPt);
+  for (; i >= FirstOp; --i) {
+    Value *W = expandCodeFor(S->getOperand(i), Ty);
+    V = InsertBinop(Instruction::Mul, V, W);
+  }
+
   // -1 * ...  --->  0 - ...
   if (FirstOp == 1)
-    V = InsertBinop(Instruction::Sub, Constant::getNullValue(V->getType()), V,
-                    InsertPt);
+    V = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), V);
   return V;
 }
 
-Value *SCEVExpander::visitAddRecExpr(SCEVAddRecExpr *S) {
-  const Type *Ty = S->getType();
+Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+
+  Value *LHS = expandCodeFor(S->getLHS(), Ty);
+  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
+    const APInt &RHS = SC->getValue()->getValue();
+    if (RHS.isPowerOf2())
+      return InsertBinop(Instruction::LShr, LHS,
+                         ConstantInt::get(Ty, RHS.logBase2()));
+  }
+
+  Value *RHS = expandCodeFor(S->getRHS(), Ty);
+  return InsertBinop(Instruction::UDiv, LHS, RHS);
+}
+
+/// Move parts of Base into Rest to leave Base with the minimal
+/// expression that provides a pointer operand suitable for a
+/// GEP expansion.
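+/// For example, Base = {%p,+,4} with Rest = 0 becomes Base = %p and
+/// Rest = {0,+,4}, exposing %p as a candidate GEP pointer operand.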
+static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
+                              ScalarEvolution &SE) {
+  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
+    Base = A->getStart();
+    Rest = SE.getAddExpr(Rest,
+                         SE.getAddRecExpr(SE.getIntegerSCEV(0, A->getType()),
+                                          A->getStepRecurrence(SE),
+                                          A->getLoop()));
+  }
+  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
+    Base = A->getOperand(A->getNumOperands()-1);
+    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
+    NewAddOps.back() = Rest;
+    Rest = SE.getAddExpr(NewAddOps);
+    ExposePointerBase(Base, Rest, SE);
+  }
+}
+
+Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
   const Loop *L = S->getLoop();
-  // We cannot yet do fp recurrences, e.g. the xform of {X,+,F} --> X+{0,+,F}
-  assert(Ty->isInteger() && "Cannot expand fp recurrences yet!");
+
+  // First check for an existing canonical IV in a suitable type.
+  PHINode *CanonicalIV = 0;
+  if (PHINode *PN = L->getCanonicalInductionVariable())
+    if (SE.isSCEVable(PN->getType()) &&
+        isa<IntegerType>(SE.getEffectiveSCEVType(PN->getType())) &&
+        SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
+      CanonicalIV = PN;
+
+  // Rewrite an AddRec in terms of the canonical induction variable, if
+  // its type is more narrow.
+  if (CanonicalIV &&
+      SE.getTypeSizeInBits(CanonicalIV->getType()) >
+      SE.getTypeSizeInBits(Ty)) {
+    const SCEV *Start = SE.getAnyExtendExpr(S->getStart(),
+                                            CanonicalIV->getType());
+    const SCEV *Step = SE.getAnyExtendExpr(S->getStepRecurrence(SE),
+                                           CanonicalIV->getType());
+    Value *V = expand(SE.getAddRecExpr(Start, Step, S->getLoop()));
+    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
+    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
+    BasicBlock::iterator NewInsertPt =
+      next(BasicBlock::iterator(cast<Instruction>(V)));
+    while (isa<PHINode>(NewInsertPt)) ++NewInsertPt;
+    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
+                      NewInsertPt);
+    Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
+    return V;
+  }
 
   // {X,+,F} --> X + {0,+,F}
-  if (!isa<SCEVConstant>(S->getStart()) ||
-      !cast<SCEVConstant>(S->getStart())->getValue()->isZero()) {
-    Value *Start = expand(S->getStart());
-    std::vector<SCEVHandle> NewOps(S->op_begin(), S->op_end());
+  if (!S->getStart()->isZero()) {
+    const SmallVectorImpl<const SCEV *> &SOperands = S->getOperands();
+    SmallVector<const SCEV *, 4> NewOps(SOperands.begin(), SOperands.end());
     NewOps[0] = SE.getIntegerSCEV(0, Ty);
-    Value *Rest = expand(SE.getAddRecExpr(NewOps, L));
+    const SCEV *Rest = SE.getAddRecExpr(NewOps, L);
+
+    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
+    // comments on expandAddToGEP for details.
+    if (SE.TD) {
+      const SCEV *Base = S->getStart();
+      const SCEV *RestArray[1] = { Rest };
+      // Dig into the expression to find the pointer base for a GEP.
+      ExposePointerBase(Base, RestArray[0], SE);
+      // If we found a pointer, expand the AddRec with a GEP.
+      if (const PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
+        // Make sure the Base isn't something exotic, such as a multiplied
+        // or divided pointer value. In those cases, the result type isn't
+        // actually a pointer type.
+        if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
+          Value *StartV = expand(Base);
+          assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
+          return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
+        }
+      }
+    }
 
-    // FIXME: look for an existing add to use.
-    return InsertBinop(Instruction::Add, Rest, Start, InsertPt);
+    // Just do a normal add. Pre-expand the operands to suppress folding.
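+    // Wrapping the pre-expanded values in SCEVUnknown keeps getAddExpr
+    // from recombining them into the original AddRec.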
+    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
+                                SE.getUnknown(expand(Rest))));
   }
 
   // {0,+,1} --> Insert a canonical induction variable into the loop!
-  if (S->getNumOperands() == 2 &&
+  if (S->isAffine() &&
       S->getOperand(1) == SE.getIntegerSCEV(1, Ty)) {
+    // If there's a canonical IV, just use it.
+    if (CanonicalIV) {
+      assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
+             "IVs with types different from the canonical IV should "
+             "already have been handled!");
+      return CanonicalIV;
+    }
+
     // Create and insert the PHI node for the induction variable in the
     // specified loop.
     BasicBlock *Header = L->getHeader();
+    BasicBlock *Preheader = L->getLoopPreheader();
     PHINode *PN = PHINode::Create(Ty, "indvar", Header->begin());
-    PN->addIncoming(Constant::getNullValue(Ty), L->getLoopPreheader());
+    InsertedValues.insert(PN);
+    PN->addIncoming(Constant::getNullValue(Ty), Preheader);
 
     pred_iterator HPI = pred_begin(Header);
     assert(HPI != pred_end(Header) && "Loop with zero preds???");
@@ -157,86 +537,192 @@ Value *SCEVExpander::visitAddRecExpr(SCEVAddRecExpr *S) {
     Constant *One = ConstantInt::get(Ty, 1);
     Instruction *Add = BinaryOperator::CreateAdd(PN, One, "indvar.next",
                                                  (*HPI)->getTerminator());
+    InsertedValues.insert(Add);
 
     pred_iterator PI = pred_begin(Header);
-    if (*PI == L->getLoopPreheader())
+    if (*PI == Preheader)
       ++PI;
     PN->addIncoming(Add, *PI);
     return PN;
   }
 
+  // {0,+,F} --> {0,+,1} * F
   // Get the canonical induction variable I for this loop.
-  Value *I = getOrInsertCanonicalInductionVariable(L, Ty);
+  Value *I = CanonicalIV ?
+             CanonicalIV :
+             getOrInsertCanonicalInductionVariable(L, Ty);
 
   // If this is a simple linear addrec, emit it now as a special case.
-  if (S->getNumOperands() == 2) {   // {0,+,F} --> i*F
-    Value *F = expand(S->getOperand(1));
-
-    // IF the step is by one, just return the inserted IV.
-    if (ConstantInt *CI = dyn_cast<ConstantInt>(F))
-      if (CI->getValue() == 1)
-        return I;
-
-    // If the insert point is directly inside of the loop, emit the multiply at
-    // the insert point.  Otherwise, L is a loop that is a parent of the insert
-    // point loop.  If we can, move the multiply to the outer most loop that it
-    // is safe to be in.
-    Instruction *MulInsertPt = InsertPt;
-    Loop *InsertPtLoop = LI.getLoopFor(MulInsertPt->getParent());
-    if (InsertPtLoop != L && InsertPtLoop &&
-        L->contains(InsertPtLoop->getHeader())) {
-      while (InsertPtLoop != L) {
-        // If we cannot hoist the multiply out of this loop, don't.
-        if (!InsertPtLoop->isLoopInvariant(F)) break;
-
-        // Otherwise, move the insert point to the preheader of the loop.
-        MulInsertPt = InsertPtLoop->getLoopPreheader()->getTerminator();
-        InsertPtLoop = InsertPtLoop->getParentLoop();
-      }
-    }
-
-    return InsertBinop(Instruction::Mul, I, F, MulInsertPt);
-  }
+  if (S->isAffine())    // {0,+,F} --> i*F
+    return
+      expand(SE.getTruncateOrNoop(
+        SE.getMulExpr(SE.getUnknown(I),
+                      SE.getNoopOrAnyExtend(S->getOperand(1),
+                                            I->getType())),
+        Ty));
 
   // If this is a chain of recurrences, turn it into a closed form, using the
   // folders, then expandCodeFor the closed form. This allows the folders to
   // simplify the expression without having to build a bunch of special code
   // into this folder.
-  SCEVHandle IH = SE.getUnknown(I);   // Get I as a "symbolic" SCEV.
+  const SCEV *IH = SE.getUnknown(I);   // Get I as a "symbolic" SCEV.
 
-  SCEVHandle V = S->evaluateAtIteration(IH, SE);
+  // Promote S up to the canonical IV type, if the cast is foldable.
+  const SCEV *NewS = S;
+  const SCEV *Ext = SE.getNoopOrAnyExtend(S, I->getType());
+  if (isa<SCEVAddRecExpr>(Ext))
+    NewS = Ext;
+
+  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
   //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";
 
-  return expand(V);
+  // Truncate the result down to the original type, if needed.
+  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
+  return expand(T);
+}
+
+Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+  Value *V = expandCodeFor(S->getOperand(),
+                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
+  Value *I = Builder.CreateTrunc(V, Ty, "tmp");
+  InsertedValues.insert(I);
+  return I;
+}
+
+Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+  Value *V = expandCodeFor(S->getOperand(),
+                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
+  Value *I = Builder.CreateZExt(V, Ty, "tmp");
+  InsertedValues.insert(I);
+  return I;
+}
+
+Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
+  const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+  Value *V = expandCodeFor(S->getOperand(),
+                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
+  Value *I = Builder.CreateSExt(V, Ty, "tmp");
+  InsertedValues.insert(I);
+  return I;
 }
 
-Value *SCEVExpander::visitSMaxExpr(SCEVSMaxExpr *S) {
-  Value *LHS = expand(S->getOperand(0));
-  for (unsigned i = 1; i < S->getNumOperands(); ++i) {
-    Value *RHS = expand(S->getOperand(i));
-    Value *ICmp = new ICmpInst(ICmpInst::ICMP_SGT, LHS, RHS, "tmp", InsertPt);
-    LHS = SelectInst::Create(ICmp, LHS, RHS, "smax", InsertPt);
+Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
+  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
+  const Type *Ty = LHS->getType();
+  for (int i = S->getNumOperands()-2; i >= 0; --i) {
+    // In the case of mixed integer and pointer types, do the
+    // rest of the comparisons as integer.
+    if (S->getOperand(i)->getType() != Ty) {
+      Ty = SE.getEffectiveSCEVType(Ty);
+      LHS = InsertNoopCastOfTo(LHS, Ty);
+    }
+    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
+    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS, "tmp");
+    InsertedValues.insert(ICmp);
+    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
+    InsertedValues.insert(Sel);
+    LHS = Sel;
   }
+  // In the case of mixed integer and pointer types, cast the
+  // final result back to the pointer type.
+  if (LHS->getType() != S->getType())
+    LHS = InsertNoopCastOfTo(LHS, S->getType());
   return LHS;
 }
 
-Value *SCEVExpander::visitUMaxExpr(SCEVUMaxExpr *S) {
-  Value *LHS = expand(S->getOperand(0));
-  for (unsigned i = 1; i < S->getNumOperands(); ++i) {
-    Value *RHS = expand(S->getOperand(i));
-    Value *ICmp = new ICmpInst(ICmpInst::ICMP_UGT, LHS, RHS, "tmp", InsertPt);
-    LHS = SelectInst::Create(ICmp, LHS, RHS, "umax", InsertPt);
+Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
+  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
+  const Type *Ty = LHS->getType();
+  for (int i = S->getNumOperands()-2; i >= 0; --i) {
+    // In the case of mixed integer and pointer types, do the
+    // rest of the comparisons as integer.
+    if (S->getOperand(i)->getType() != Ty) {
+      Ty = SE.getEffectiveSCEVType(Ty);
+      LHS = InsertNoopCastOfTo(LHS, Ty);
+    }
+    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
+    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS, "tmp");
+    InsertedValues.insert(ICmp);
+    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
+    InsertedValues.insert(Sel);
+    LHS = Sel;
   }
+  // In the case of mixed integer and pointer types, cast the
+  // final result back to the pointer type.
+  if (LHS->getType() != S->getType())
+    LHS = InsertNoopCastOfTo(LHS, S->getType());
   return LHS;
 }
 
-Value *SCEVExpander::expand(SCEV *S) {
-  // Check to see if we already expanded this.
-  std::map<SCEVHandle, Value*>::iterator I = InsertedExpressions.find(S);
+Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty) {
+  // Expand the code for this SCEV.
+  Value *V = expand(SH);
+  if (Ty) {
+    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
+           "non-trivial casts should be done with the SCEVs directly!");
+    V = InsertNoopCastOfTo(V, Ty);
+  }
+  return V;
+}
+
+Value *SCEVExpander::expand(const SCEV *S) {
+  // Compute an insertion point for this SCEV object. Hoist the instructions
+  // as far out in the loop nest as possible.
+  Instruction *InsertPt = Builder.GetInsertPoint();
+  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
+       L = L->getParentLoop())
+    if (S->isLoopInvariant(L)) {
+      if (!L) break;
+      if (BasicBlock *Preheader = L->getLoopPreheader())
+        InsertPt = Preheader->getTerminator();
+    } else {
+      // If the SCEV is computable at this level, insert it into the header
+      // after the PHIs (and after any other instructions that we've inserted
+      // there) so that it is guaranteed to dominate any user inside the loop.
+      if (L && S->hasComputableLoopEvolution(L))
+        InsertPt = L->getHeader()->getFirstNonPHI();
+      while (isInsertedInstruction(InsertPt))
+        InsertPt = next(BasicBlock::iterator(InsertPt));
+      break;
+    }
+
+  // Check to see if we already expanded this here.
+  std::map<std::pair<const SCEV *, Instruction *>,
+           AssertingVH<Value> >::iterator I =
+    InsertedExpressions.find(std::make_pair(S, InsertPt));
   if (I != InsertedExpressions.end())
     return I->second;
-  
+
+  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
+  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
+  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
+
+  // Expand the expression into instructions.
   Value *V = visit(S);
-  InsertedExpressions[S] = V;
+
+  // Remember the expanded value for this SCEV at this location.
+  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
+
+  Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
+  return V;
+}
+
+/// getOrInsertCanonicalInductionVariable - This method returns the
+/// canonical induction variable of the specified type for the specified
+/// loop (inserting one if there is none). A canonical induction variable
+/// starts at zero and steps by one on each iteration.
+Value *
+SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
+                                                    const Type *Ty) {
+  assert(Ty->isInteger() && "Can only insert integer induction variables!");
+  const SCEV *H = SE.getAddRecExpr(SE.getIntegerSCEV(0, Ty),
+                                   SE.getIntegerSCEV(1, Ty), L);
+  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
+  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
+  Value *V = expandCodeFor(H, 0, L->getHeader()->begin());
+  if (SaveInsertBB)
+    Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
   return V;
 }