//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;
/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, fix it.
          if (BasicBlock::iterator(CI) != IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
            NewCI->takeName(CI);
            CI->replaceAllUsesWith(NewCI);
            CI->setOperand(0, UndefValue::get(V->getType()));
            rememberInstruction(NewCI);
            return NewCI;
          }
          rememberInstruction(CI);
          return CI;
        }
  }

  // Create a new cast.
  Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
  rememberInstruction(I);
  return I;
}
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP) ||
         isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}
/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
  rememberInstruction(BO);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return BO;
}
/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
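///
/// Illustrative examples (not from the original comments): with Factor = 4,
/// a SCEV of the form (4 * %i) factors to %i with a zero remainder, while
/// the constant 6 factors to 1 with 2 added to Remainder. expandAddToGEP
/// below uses this to turn byte offsets into scaled array indices.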
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const TargetData *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                                                 FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                                                 FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With TargetData, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                                                 FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without TargetData, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    // FIXME: can use A->getNoWrapFlags(FlagNW)
    S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
    return true;
  }

  return false;
}
/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}
/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         // FIXME: A->getNoWrapFlags(FlagNW)
                                         SCEV::FlagAnyWrap));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}
/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
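///
/// Illustrative example (not from the original comments): given a base %p of
/// type i32* and TargetData, an offset expression like (4 * %i) becomes the
/// pretty "getelementptr i32* %p, i32 %i" ("scevgep"); if nothing factors by
/// the element size, the base is cast to i8* and a byte-offset "uglygep" is
/// emitted instead.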
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With TargetData, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without TargetData, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    // Restore the original insert point.
    if (SaveInsertBB)
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}
/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }

  llvm_unreachable("Unexpected SCEV type!");
}

namespace {
/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}
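// Illustrative example of the resulting order (not from the original
// comments): the operands of (%p + %i + 1), with %p a pointer, come out as
// (%p, %i, 1), so visitAddExpr below sees the pointer first and can use it
// as a GEP base, with the constant emitted last.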
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not instructions, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    // A udiv by a power of two can be emitted as a logical shift right.
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}
/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
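///
/// For example (illustrative): given Base = {(4 + %p),+,8}<L>, this leaves
/// Base = %p and folds the remainder into Rest as 4 + {0,+,8}<L>.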
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          // FIXME: A->getNoWrapFlags(FlagNW)
                                          SCEV::FlagAnyWrap));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}
/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
         OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}
/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  switch (IncV->getOpcode()) {
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub:
    return IncV->getOperand(0) == PN
      && L->isLoopInvariant(IncV->getOperand(1));
  case Instruction::BitCast:
    IncV = dyn_cast<GetElementPtrInst>(IncV->getOperand(0));
    if (!IncV)
      return false;
    // fall-thru to GEP handling
  case Instruction::GetElementPtr: {
    // This must be a pointer addition of constants (pretty) or some number of
    // address-size elements (ugly).
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      // ugly geps have 2 operands.
      // i1* is used by the expander to represent an address-size element.
      if (IncV->getNumOperands() != 2)
        return false;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return false;
      // Ensure the operands dominate the insertion point. I don't know of a
      // case when this would not be true, so this is somewhat untested.
      if (L == IVIncInsertLoop) {
        for (User::op_iterator OI = IncV->op_begin()+1,
             OE = IncV->op_end(); OI != OE; ++OI)
          if (Instruction *OInst = dyn_cast<Instruction>(OI))
            if (!SE.DT->dominates(OInst, IVIncInsertPos))
              return false;
      }
      break;
    }
    IncV = dyn_cast<Instruction>(IncV->getOperand(0));
    if (IncV && IncV->getOpcode() == Instruction::BitCast)
      IncV = dyn_cast<Instruction>(IncV->getOperand(0));
    return IncV == PN;
  }
  default:
    return false;
  }
}
/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
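///
/// For example (illustrative): for an integer IV this emits
///   %x.iv.next = add i64 %x.iv, %step
/// while for a pointer IV it emits a GEP, falling back to an i1*
/// (address-size element) GEP when the step is not a constant.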
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()) ||
          (SE.getEffectiveSCEVType(PN->getType()) !=
           SE.getEffectiveSCEVType(Normalized->getType())) ||
          SE.getSCEV(PN) != Normalized)
        continue;

      Instruction *IncV =
        cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, IncV, L))
          continue;
      }
      else {
        if (!isNormalAddRecExprPHI(PN, IncV, L))
          continue;
      }
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(PN);
      // Remember the increment.
      rememberInstruction(IncV);
      if (L == IVIncInsertLoop)
        do {
          if (SE.DT->dominates(IncV, IVIncInsertPos))
            break;
          // Make sure the increment is where we want it. But don't move it
          // down past a potential existing post-inc user.
          IncV->moveBefore(IVIncInsertPos);
          IVIncInsertPos = IncV;
          IncV = cast<Instruction>(IncV->getOperand(0));
        } while (IncV != PN);
      return PN;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
                       SCEV::FlagAnyWrap));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop(),
                                            // FIXME: Normalized
                                            // ->getNoWrapFlags(FlagNW)
                                            SCEV::FlagAnyWrap));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result)
        && !SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside the loop whose operand is replaced
      // during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      // Expand the step somewhere that dominates the loop header.
      BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
      BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
      Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
      // Restore the insertion point to the place where the caller has
      // determined dominates all uses.
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       // FIXME: S->getNoWrapFlags(FlagNW)
                                       SCEV::FlagAnyWrap));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    // FIXME: can use S->getNoWrapFlags()
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}
Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}
Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}
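// The max expansions below lower an N-ary max to a chain of icmp/select
// pairs; e.g. (illustrative) smax(%a, %b) becomes:
//   %cond = icmp sgt i64 %a, %b
//   %smax = select i1 %cond, i64 %a, i64 %b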
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *I) {
  BasicBlock::iterator IP = I;
  while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
    ++IP;
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (isInsertedInstruction(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt))
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  restoreInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}
void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);

  // If we just claimed an existing instruction and that instruction had
  // been the insert point, adjust the insert point forward so that
  // subsequently inserted code will be dominated.
  if (Builder.GetInsertPoint() == I) {
    BasicBlock::iterator It = cast<Instruction>(I);
    do { ++It; } while (isInsertedInstruction(It) ||
                        isa<DbgInfoIntrinsic>(It));
    Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
  }
}
void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
  // If we acquired more instructions since the old insert point was saved,
  // advance past them.
  while (isInsertedInstruction(I) || isa<DbgInfoIntrinsic>(I)) ++I;

  Builder.SetInsertPoint(BB, I);
}
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
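///
/// For example (illustrative), in a loop with preheader %ph and latch %latch,
/// the {0,+,1} expansion takes the form:
///   %indvar = phi i64 [ 0, %ph ], [ %indvar.next, %latch ]
///   %indvar.next = add i64 %indvar, 1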
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return V;
}
/// hoistStep - Attempt to hoist an IV increment above a potential use.
///
/// To successfully hoist, two criteria must be met:
/// - IncV operands dominate InsertPos and
/// - InsertPos dominates IncV
///
/// Meeting the second condition means that we don't need to check all of IncV's
/// existing uses (it's moving up in the domtree).
///
/// This does not yet recursively hoist the operands, although that would
/// not be difficult.
///
/// This does not require a SCEVExpander instance and could be replaced by a
/// general code-insertion helper.
bool SCEVExpander::hoistStep(Instruction *IncV, Instruction *InsertPos,
                             const DominatorTree *DT) {
  if (DT->dominates(IncV, InsertPos))
    return true;

  if (!DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  // Attempt to hoist IncV
  for (User::op_iterator OI = IncV->op_begin(), OE = IncV->op_end();
       OI != OE; ++OI) {
    Instruction *OInst = dyn_cast<Instruction>(OI);
    if (OInst && (OInst == InsertPos || !DT->dominates(OInst, InsertPos)))
      return false;
  }
  IncV->moveBefore(InsertPos);
  return true;
}
/// Sort values by integer width for replaceCongruentIVs.
static bool width_descending(Value *lhs, Value *rhs) {
  // Put pointers at the back and make sure pointer < pointer = false.
  if (!lhs->getType()->isIntegerTy() || !rhs->getType()->isIntegerTy())
    return rhs->getType()->isIntegerTy() && !lhs->getType()->isIntegerTy();
  return rhs->getType()->getPrimitiveSizeInBits()
    < lhs->getType()->getPrimitiveSizeInBits();
}
/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetLowering *TLI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
    Phis.push_back(Phi);
  }
  if (TLI)
    std::sort(Phis.begin(), Phis.end(), width_descending);

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow, mapping wide phis to their truncation
  // so narrow phis can reuse them.
  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
       PEnd = Phis.end(); PIter != PEnd; ++PIter) {
    PHINode *Phi = *PIter;

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TLI
          && TLI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }

    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi has the same width but is more canonical, replace the
      // original with it.
      if (OrigPhiRef->getType() == Phi->getType()
          && !isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)
          && isExpandedAddRecExprPHI(Phi, IsomorphicInc, L)) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. It's worth eagerly cleaning up the
      // common case of a single IV increment so that DeleteDeadPHIs can remove
      // cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc
          && TruncExpr == SE.getSCEV(IsomorphicInc)
          && hoistStep(OrigInc, IsomorphicInc, DT)) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          IRBuilder<> Builder(OrigInc->getNextNode());
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.
            CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}