//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;
/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
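///
/// For example (illustrative), if V already has a cast user with the
/// requested type and opcode, that cast is reused (or replaced by a new
/// cast at IP) rather than emitting a second, redundant cast.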
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = NULL;

  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }
  }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT->dominates(Ret, BIP));

  rememberInstruction(Ret);
  return Ret;
}
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}
/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
  rememberInstruction(BO);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return BO;
}
/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
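///
/// For example (illustrative), with Factor = 4: the constant 9 becomes 2
/// with a remainder of 1, the multiply (4 * %x) becomes %x, and the addrec
/// {8,+,4} becomes {2,+,1}.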
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const DataLayout *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                                                   FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                                                  FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With DataLayout, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                                                   FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without DataLayout, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    // FIXME: can use A->getNoWrapFlags(FlagNW)
    S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
    return true;
  }

  return false;
}
/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}
/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         // FIXME: A->getNoWrapFlags(FlagNW)
                                         SCEV::FlagAnyWrap));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}
/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
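///
/// For example (illustrative), expanding the address (%p + 8 * %i) where
/// %p has type double* yields
///   %scevgep = getelementptr double* %p, i64 %i
/// rather than an equivalent ptrtoint/add/inttoptr sequence.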
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);
  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      Type *IntPtrTy = SE.TD ? SE.TD->getIntPtrType(PTy) :
                               IntegerType::getInt64Ty(PTy->getContext());
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy, IntPtrTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);
    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With DataLayout, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without DataLayout, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }
  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    // Restore the original insert point.
    if (SaveInsertBB)
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);

    return GEP;
  }
  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
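///
/// For example (illustrative), if loop B is nested inside loop A, B is
/// picked; if A and B are sibling loops and A's header dominates B's
/// header, B (the later one) is picked.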
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}
/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}
namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not instructions, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
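    // For example (illustrative), a udiv by the constant 8 is emitted as a
    // right shift by 3 ("lshr %lhs, 3") rather than an actual udiv.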
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}
/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          // FIXME: A->getNoWrapFlags(FlagNW)
                                          SCEV::FlagAnyWrap));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}
/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}
/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP. If the pattern isn't recognized, return NULL.
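///
/// For example (illustrative), for the increment
///   %iv.next = add i64 %iv, %step
/// this returns the instruction defining %iv, provided %step dominates
/// InsertPos.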
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return NULL;

  switch (IncV->getOpcode()) {
  default:
    return NULL;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT->dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return NULL;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT->dominates(OInst, InsertPos))
          return NULL;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return NULL;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return NULL;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}
/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT->dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos)
      || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT->dominates(IncV, InsertPos))
      break;
  }
  for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
         E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}
/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}
/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
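///
/// For example (illustrative), an integer IV is incremented as
///   %x.iv.next = add i64 %x.iv, %step
/// while a pointer-typed IV is incremented with a getelementptr of the step.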
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
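///
/// For example (illustrative), for {0,+,4}<%loop> this produces (or reuses)
///   %x.iv = phi i64 [ 0, %preheader ], [ %x.iv.next, %latch ]
/// with a matching %x.iv.next increment in the latch block.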
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()) ||
          (SE.getEffectiveSCEVType(PN->getType()) !=
           SE.getEffectiveSCEVType(Normalized->getType())) ||
          SE.getSCEV(PN) != Normalized)
        continue;

      Instruction *IncV =
        cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(IncV, IVIncInsertPos))
          continue;
      }
      else {
        if (!isNormalAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop)
          do {
            if (SE.DT->dominates(IncV, IVIncInsertPos))
              break;
            // Make sure the increment is where we want it. But don't move it
            // down past a potential existing post-inc user.
            IncV->moveBefore(IVIncInsertPos);
            IVIncInsertPos = IncV;
            IncV = cast<Instruction>(IncV->getOperand(0));
          } while (IncV != PN);
      }
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(PN);
      // Remember the increment.
      rememberInstruction(IncV);
      return PN;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
                       SCEV::FlagAnyWrap));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop(),
                                            // FIXME: Normalized
                                            // ->getNoWrapFlags(FlagNW)
                                            SCEV::FlagAnyWrap));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result)
        && !SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside whose operand is replaced during
      // expansion with the value of the postinc user. Without fundamentally
      // changing the way postinc users are tracked, the only remedy is
      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
      // but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      // Expand the step somewhere that dominates the loop header.
      BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
      BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
      Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
      // Restore the insertion point to the place where the caller has
      // determined dominates all uses.
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       // FIXME: S->getNoWrapFlags(FlagNW)
                                       SCEV::FlagAnyWrap));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }
  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    // FIXME: can use S->getNoWrapFlags()
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (InsertPt != Builder.GetInsertPoint()
             && (isInsertedInstruction(InsertPt)
                 || isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      }
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  restoreInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}
void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}

void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
  Builder.SetInsertPoint(BB, I);
}
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return V;
}
/// Sort values by integer width for replaceCongruentIVs.
static bool width_descending(Value *lhs, Value *rhs) {
  // Put pointers at the back and make sure pointer < pointer = false.
  if (!lhs->getType()->isIntegerTy() || !rhs->getType()->isIntegerTy())
    return rhs->getType()->isIntegerTy() && !lhs->getType()->isIntegerTy();
  return rhs->getType()->getPrimitiveSizeInBits()
       < lhs->getType()->getPrimitiveSizeInBits();
}
/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
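///
/// For example (illustrative), if a loop header has two i64 phis that
/// ScalarEvolution proves are both {0,+,1}<%loop>, all uses of one are
/// rewritten to use the other, and the redundant phi and its increment are
/// queued in DeadInsts.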
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetLowering *TLI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
    Phis.push_back(Phi);
  }
  if (TLI)
    std::sort(Phis.begin(), Phis.end(), width_descending);

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
         PEnd = Phis.end(); PIter != PEnd; ++PIter) {
    PHINode *Phi = *PIter;

    // Fold constant phis. They may be congruent to other constant phis and
    // would confuse the logic below that expects proper IVs.
    if (Value *V = Phi->hasConstantValue()) {
      Phi->replaceAllUsesWith(V);
      DeadInsts.push_back(Phi);
      ++NumElim;
      DEBUG_WITH_TYPE(DebugType, dbgs()
                      << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
      continue;
    }

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TLI
          && TLI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi has the same width but is more canonical, replace the
      // original with it. As part of the "more canonical" determination,
      // respect a prior decision to use an IV chain.
      if (OrigPhiRef->getType() == Phi->getType()
          && !(ChainedPhis.count(Phi)
               || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
          && (ChainedPhis.count(Phi)
              || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. It's worth eagerly cleaning up the
      // common case of a single IV increment so that DeleteDeadPHIs can remove
      // cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc
          && TruncExpr == SE.getSCEV(IsomorphicInc)
          && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
              || hoistIVInc(OrigInc, IsomorphicInc))) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          Instruction *IP = isa<PHINode>(OrigInc)
            ? (Instruction*)L->getHeader()->getFirstInsertionPt()
            : OrigInc->getNextNode();
          IRBuilder<> Builder(IP);
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.
            CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}
namespace {
// Search for a SCEV subexpression that is not safe to expand. Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check is
// only needed when the expression includes some subexpression that is not IV
// derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
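//
// For example (illustrative), an expression like ((%n /u %d) + 1) with a
// non-constant %d is rejected, while ((%n /u 4) + 1) is considered safe to
// expand because the denominator is a nonzero constant.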
struct SCEVFindUnsafe {
  bool IsUnsafe;

  SCEVFindUnsafe(): IsUnsafe(false) {}

  bool follow(const SCEV *S) {
    const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S);
    if (!D)
      return true;
    const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
    if (SC && !SC->getValue()->isZero())
      return true;
    IsUnsafe = true;
    return false;
  }
  bool isDone() const { return IsUnsafe; }
};
}
namespace llvm {
bool isSafeToExpand(const SCEV *S) {
  SCEVFindUnsafe Search;
  visitAll(S, Search);
  return !Search.IsUnsafe;
}
}