+ BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
+ // Scanning starts from the last instruction before the insertion point.
+ BasicBlock::iterator IP = Builder.GetInsertPoint();
+ if (IP != BlockBegin) {
+ --IP;
+ for (; ScanLimit; --IP, --ScanLimit) {
+ if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
+ IP->getOperand(1) == RHS)
+ return IP;
+ if (IP == BlockBegin) break;
+ }
+ }
+
+ // If we haven't found this binop, insert it.
+ Value *BO = Builder.CreateBinOp(Opcode, LHS, RHS, "tmp");
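+ // Record the new instruction so the expander can later distinguish
+ // values it inserted from pre-existing IR.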
+ InsertedValues.insert(BO);
+ return BO;
+}
+
+/// FactorOutConstant - Test if S is divisible by Factor, using signed
+/// division. If so, update S with Factor divided out and return true.
+/// S need not be evenly divisible if a reasonable remainder can be
+/// computed.
+/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
+/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
+/// check to see if the divide was folded.
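+///
+/// For example (a sketch of the intended behavior): with a Factor of 4,
+/// the AddRec {6,+,8} would factor into {1,+,2} with a Remainder of 2,
+/// since {6,+,8} == 4 * {1,+,2} + 2.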
+static bool FactorOutConstant(const SCEV *&S,
+ const SCEV *&Remainder,
+ const APInt &Factor,
+ ScalarEvolution &SE) {
+ // Everything is divisible by one.
+ if (Factor == 1)
+ return true;
+
+ // For a Constant, check for a multiple of the given factor.
+ if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
+ ConstantInt *CI =
+ ConstantInt::get(SE.getContext(), C->getValue()->getValue().sdiv(Factor));
+ // If the quotient is zero and the remainder is non-zero, reject
+ // the value at this scale. It will be considered for subsequent
+ // smaller scales.
+ if (C->isZero() || !CI->isZero()) {
+ const SCEV *Div = SE.getConstant(CI);
+ S = Div;
+ Remainder =
+ SE.getAddExpr(Remainder,
+ SE.getConstant(C->getValue()->getValue().srem(Factor)));
+ return true;
+ }
+ }
+
+ // In a Mul, check if there is a constant operand which is a multiple
+ // of the given factor.
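+ // For example, with a Factor of 4, (8 * %x) would become (2 * %x).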
+ if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S))
+ if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
+ if (!C->getValue()->getValue().srem(Factor)) {
+ const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
+ SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
+ MOperands.end());
+ NewMulOps[0] =
+ SE.getConstant(C->getValue()->getValue().sdiv(Factor));
+ S = SE.getMulExpr(NewMulOps);
+ return true;
+ }
+
+ // In an AddRec, check if both start and step are divisible.
+ if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
+ const SCEV *Step = A->getStepRecurrence(SE);
+ const SCEV *StepRem = SE.getIntegerSCEV(0, Step->getType());
+ if (!FactorOutConstant(Step, StepRem, Factor, SE))
+ return false;
+ if (!StepRem->isZero())
+ return false;
+ const SCEV *Start = A->getStart();
+ if (!FactorOutConstant(Start, Remainder, Factor, SE))
+ return false;
+ S = SE.getAddRecExpr(Start, Step, A->getLoop());
+ return true;
+ }
+
+ return false;
+}
+
+/// expandAddToGEP - Expand a SCEVAddExpr with a pointer type into a GEP
+/// instead of using ptrtoint+arithmetic+inttoptr. This helps
+/// BasicAliasAnalysis analyze the result.
+///
+/// Design note: This depends on ScalarEvolution not recognizing inttoptr
+/// and ptrtoint operators, as they may introduce pointer arithmetic
+/// which may not be safely converted into getelementptr.
+///
+/// Design note: It might seem desirable for this function to be more
+/// loop-aware: if some of the indices are loop-invariant while others
+/// aren't, it could emit multiple GEPs, keeping the loop-invariant
+/// portions of the overall computation outside the loop.
+/// However, there are a few reasons this is not done here. Hoisting simple
+/// arithmetic is a low-level optimization that often isn't very
+/// important until late in the optimization process. In fact, passes
+/// like InstructionCombining will combine GEPs, even if it means
+/// pushing loop-invariant computation down into loops, so even if the
+/// GEPs were split here, the work would quickly be undone. The
+/// LoopStrengthReduction pass, which is usually run quite late (and
+/// after the last InstructionCombining pass), takes care of hoisting
+/// loop-invariant portions of expressions, after considering what
+/// can be folded using target addressing modes.
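+///
+/// As a sketch of the intended effect, assuming an i32 element type
+/// (element size 4): expanding (%p + 4 * %i + 8) would produce
+/// "getelementptr %p, (%i + 2)" rather than ptrtoint/add/inttoptr.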
+///
+Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
+ const SCEV *const *op_end,
+ const PointerType *PTy,
+ const Type *Ty,
+ Value *V) {
+ const Type *ElTy = PTy->getElementType();
+ SmallVector<Value *, 4> GepIndices;
+ SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
+ bool AnyNonZeroIndices = false;
+
+ // Descend through the pointer's type and attempt to convert the other
+ // operands into GEP indices, at each level. The first index in a GEP
+ // indexes into the array implied by the pointer operand; the rest of
+ // the indices index into the element or field type selected by the
+ // preceding index.
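+ // For example, in "getelementptr %MyStruct* %p, i64 1, i32 2" (a
+ // hypothetical type), the 1 steps over whole %MyStruct objects and the
+ // 2 selects field 2 within one.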
+ for (;;) {
+ APInt ElSize = APInt(SE.getTypeSizeInBits(Ty),
+ ElTy->isSized() ? SE.TD->getTypeAllocSize(ElTy) : 0);
+ SmallVector<const SCEV *, 8> NewOps;
+ SmallVector<const SCEV *, 8> ScaledOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ // Split AddRecs up into parts, as either part may be usable
+ // without the other.
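+ // For example, {6,+,8} would be split into 6 and {0,+,8}.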
+ if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i]))
+ if (!A->getStart()->isZero()) {
+ const SCEV *Start = A->getStart();
+ Ops.push_back(SE.getAddRecExpr(SE.getIntegerSCEV(0, A->getType()),
+ A->getStepRecurrence(SE),
+ A->getLoop()));
+ Ops[i] = Start;
+ ++e;
+ }
+ // If the element size is nonzero, attempt to factor it out as a scale.
+ if (ElSize != 0) {
+ const SCEV *Op = Ops[i];
+ const SCEV *Remainder = SE.getIntegerSCEV(0, Op->getType());
+ if (FactorOutConstant(Op, Remainder, ElSize, SE)) {
+ ScaledOps.push_back(Op); // Op now has ElSize factored out.
+ NewOps.push_back(Remainder);
+ continue;
+ }
+ }
+ // If the operand was not divisible, add it to the list of operands
+ // we'll scan on the next iteration.
+ NewOps.push_back(Ops[i]);
+ }
+ Ops = NewOps;
+ AnyNonZeroIndices |= !ScaledOps.empty();
+ Value *Scaled = ScaledOps.empty() ?
+ Constant::getNullValue(Ty) :
+ expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
+ GepIndices.push_back(Scaled);
+
+ // Collect struct field index operands.
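+ // For example, for a struct { i32, i32 } and a remaining constant
+ // offset of 4, this would emit a field index of 1 and leave a residual
+ // offset of 0.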
+ if (!Ops.empty())
+ while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+ if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
+ if (SE.getTypeSizeInBits(C->getType()) <= 64) {
+ const StructLayout &SL = *SE.TD->getStructLayout(STy);
+ uint64_t FullOffset = C->getValue()->getZExtValue();
+ if (FullOffset < SL.getSizeInBytes()) {
+ unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
+ GepIndices.push_back(
+ ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
+ ElTy = STy->getTypeAtIndex(ElIdx);
+ Ops[0] =
+ SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
+ AnyNonZeroIndices = true;
+ continue;
+ }
+ }
+ break;