//
// The LLVM Compiler Infrastructure
//
-// This file was developed by Nate Begeman and is distributed under the
-// University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
STATISTIC(NumVariable, "Number of PHIs with variable strides");
STATISTIC(NumEliminated, "Number of strides eliminated");
+namespace {
+ // Hidden options to help with debugging.
+ cl::opt<bool> AllowPHIIVReuse("lsr-allow-phi-iv-reuse",
+ cl::init(true), cl::Hidden);
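+ // Usage sketch (hypothetical invocation; the exact driver incantation is
+ // an assumption, not taken from this patch):
+ //   opt -loop-reduce -lsr-allow-phi-iv-reuse=false in.bc -o out.bc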
+}
+
namespace {
struct BasedUser;
/// IVStrideUse - Keep track of one use of a strided induction variable, where
/// the stride is stored externally. The Offset member keeps track of the
- /// offset from the IV, User is the actual user of the operand, and 'Operand'
- /// is the operand # of the User that is the use.
+ /// offset from the IV, User is the actual user of the operand, and
+ /// 'OperandValToReplace' is the operand of the User that is the use.
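+ /// For example (illustrative, not from this file): for a use of "A[i+3]"
+ /// in the loop, Offset is the SCEV for 3, User is the load or store, and
+ /// OperandValToReplace is the IV-derived address operand.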
struct VISIBILITY_HIDDEN IVStrideUse {
SCEVHandle Offset;
Instruction *User;
/// StrideOrder - An ordering of the keys in IVUsesByStride that is stable:
/// We use this to iterate over the IVUsesByStride collection without being
/// dependent on random ordering of pointers in the process.
- std::vector<SCEVHandle> StrideOrder;
+ SmallVector<SCEVHandle, 16> StrideOrder;
/// CastedValues - As we need to cast values to uintptr_t, this keeps track
/// of the casted version of each value. This is accessed by
/// getCastedVersionOf.
- std::map<Value*, Value*> CastedPointers;
+ DenseMap<Value*, Value*> CastedPointers;
/// DeadInsts - Keep track of instructions we may have made dead, so that
/// we can remove them after we are done working.
- std::set<Instruction*> DeadInsts;
+ SmallPtrSet<Instruction*,16> DeadInsts;
/// TLI - Keep a pointer to a TargetLowering to consult for determining
/// transformation profitability.
Value *getCastedVersionOf(Instruction::CastOps opcode, Value *V);
private:
bool AddUsersIfInteresting(Instruction *I, Loop *L,
- std::set<Instruction*> &Processed);
- SCEVHandle GetExpressionSCEV(Instruction *E, Loop *L);
+ SmallPtrSet<Instruction*,16> &Processed);
+ SCEVHandle GetExpressionSCEV(Instruction *E);
ICmpInst *ChangeCompareStride(Loop *L, ICmpInst *Cond,
IVStrideUse* &CondUse,
const SCEVHandle* &CondStride);
void StrengthReduceStridedIVUsers(const SCEVHandle &Stride,
IVUsersOfOneStride &Uses,
Loop *L, bool isOnlyStride);
- void DeleteTriviallyDeadInstructions(std::set<Instruction*> &Insts);
+ void DeleteTriviallyDeadInstructions(SmallPtrSet<Instruction*,16> &Insts);
};
char LoopStrengthReduce::ID = 0;
RegisterPass<LoopStrengthReduce> X("loop-reduce", "Loop Strength Reduction");
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
void LoopStrengthReduce::
-DeleteTriviallyDeadInstructions(std::set<Instruction*> &Insts) {
+DeleteTriviallyDeadInstructions(SmallPtrSet<Instruction*,16> &Insts) {
while (!Insts.empty()) {
Instruction *I = *Insts.begin();
- Insts.erase(Insts.begin());
+ Insts.erase(I);
+
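+ // Illustrative case (hypothetical IR): after a critical edge is split, a
+ // node such as "%p = phi i32 [ %v, %a ], [ %v, %b ]" carries the single
+ // value %v; hasConstantValue() then returns %v and the PHI folds away.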
+ if (PHINode *PN = dyn_cast<PHINode>(I)) {
+ // If all incoming values to the Phi are the same, we can replace the Phi
+ // with that value.
+ if (Value *PNV = PN->hasConstantValue()) {
+ if (Instruction *U = dyn_cast<Instruction>(PNV))
+ Insts.insert(U);
+ PN->replaceAllUsesWith(PNV);
+ SE->deleteValueFromRecords(PN);
+ PN->eraseFromParent();
+ Changed = true;
+ continue;
+ }
+ }
+
if (isInstructionTriviallyDead(I)) {
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (Instruction *U = dyn_cast<Instruction>(I->getOperand(i)))
/// GetExpressionSCEV - Compute and return the SCEV for the specified
/// instruction.
-SCEVHandle LoopStrengthReduce::GetExpressionSCEV(Instruction *Exp, Loop *L) {
+SCEVHandle LoopStrengthReduce::GetExpressionSCEV(Instruction *Exp) {
// Pointer to pointer bitcast instructions return the same value as their
// operand.
if (BitCastInst *BCI = dyn_cast<BitCastInst>(Exp)) {
if (SE->hasSCEV(BCI) || !isa<Instruction>(BCI->getOperand(0)))
return SE->getSCEV(BCI);
- SCEVHandle R = GetExpressionSCEV(cast<Instruction>(BCI->getOperand(0)), L);
+ SCEVHandle R = GetExpressionSCEV(cast<Instruction>(BCI->getOperand(0)));
SE->setSCEV(BCI, R);
return R;
}
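+ // Illustrative IR (hypothetical): for "%q = bitcast i32* %p to i8*", the
+ // SCEV cached for %q above is simply the SCEV computed for %p.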
return SE->getSCEV(Exp);
// Analyze all of the subscripts of this getelementptr instruction, looking
- // for uses that are determined by the trip count of L. First, skip all
- // operands the are not dependent on the IV.
+ // for uses that are determined by the trip count of the loop. First, skip
+ // all operands that are not dependent on the IV.
// Build up the base expression. Insert an LLVM cast of the pointer to
// uintptr_t first.
/// the loop, resulting in reg-reg copies (if we use the pre-inc value when we
/// should use the post-inc value).
static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV,
- Loop *L, DominatorTree *DT, Pass *P) {
+ Loop *L, DominatorTree *DT, Pass *P,
+ SmallPtrSet<Instruction*,16> &DeadInsts){
// If the user is in the loop, use the preinc value.
if (L->contains(User->getParent())) return false;
// post-incremented value.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
if (PN->getIncomingValue(i) == IV) {
- SplitCriticalEdge(PN->getIncomingBlock(i), PN->getParent(), P,
- true);
+ SplitCriticalEdge(PN->getIncomingBlock(i), PN->getParent(), P, false);
// Splitting the critical edge can reduce the number of entries in this
// PHI.
e = PN->getNumIncomingValues();
if (--NumUses == 0) break;
}
+
+ // The PHI node might have become a constant value after SplitCriticalEdge.
+ DeadInsts.insert(User);
return true;
}
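+// Illustrative scenario (an assumption for exposition): when %iv is
+// incremented in the loop latch and a user outside the loop reads it,
+// rewriting that user to the post-incremented value avoids keeping both the
+// pre- and post-increment values live across the exit, saving a copy.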
/// reducible SCEV, recursively add its users to the IVUsesByStride set and
/// return true. Otherwise, return false.
bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L,
- std::set<Instruction*> &Processed) {
+ SmallPtrSet<Instruction*,16> &Processed) {
if (!I->getType()->isInteger() && !isa<PointerType>(I->getType()))
return false; // Void and FP expressions cannot be reduced.
- if (!Processed.insert(I).second)
+ if (!Processed.insert(I))
return true; // Instruction already handled.
// Get the symbolic expression for this instruction.
- SCEVHandle ISE = GetExpressionSCEV(I, L);
+ SCEVHandle ISE = GetExpressionSCEV(I);
if (isa<SCEVCouldNotCompute>(ISE)) return false;
// Get the start and stride for this expression.
// Okay, we found a user that we cannot reduce. Analyze the instruction
// and decide what to do with it. If we are a use inside of the loop, use
// the value before incrementation, otherwise use it after incrementation.
- if (IVUseShouldUsePostIncValue(User, I, L, DT, this)) {
+ if (IVUseShouldUsePostIncValue(User, I, L, DT, this, DeadInsts)) {
// The value used will be incremented by the stride more than we are
// expecting, so subtract this off.
SCEVHandle NewStart = SE->getMinusSCEV(Start, Stride);
// operands of Inst to use the new expression 'NewBase', with 'Imm' added
// to it.
void RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
- SCEVExpander &Rewriter, Loop *L,
- Pass *P);
+ SCEVExpander &Rewriter, Loop *L, Pass *P,
+ SmallPtrSet<Instruction*,16> &DeadInsts);
Value *InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
SCEVExpander &Rewriter,
// operands of Inst to use the new expression 'NewBase', with 'Imm' added
// to it.
void BasedUser::RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
- SCEVExpander &Rewriter,
- Loop *L, Pass *P) {
+ SCEVExpander &Rewriter, Loop *L, Pass *P,
+ SmallPtrSet<Instruction*,16> &DeadInsts) {
if (!isa<PHINode>(Inst)) {
// By default, insert code at the user instruction.
BasicBlock::iterator InsertPt = Inst;
// have multiple entries for the same predecessor. We use a map to make sure
// that a PHI node only has a single Value* for each predecessor (which also
// prevents us from inserting duplicate code in some blocks).
- std::map<BasicBlock*, Value*> InsertedCode;
+ DenseMap<BasicBlock*, Value*> InsertedCode;
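+ // Illustrative case (hypothetical): a PHI reached twice from one
+ // predecessor, e.g. two switch cases branching to the same block, must see
+ // the same rewritten Value* for both entries, which this map guarantees.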
PHINode *PN = cast<PHINode>(Inst);
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
if (PN->getIncomingValue(i) == OperandValToReplace) {
(PN->getParent() != L->getHeader() || !L->contains(PHIPred))) {
// First step, split the critical edge.
- SplitCriticalEdge(PHIPred, PN->getParent(), P, true);
+ SplitCriticalEdge(PHIPred, PN->getParent(), P, false);
// Next step: move the basic block. In particular, if the PHI node
// is outside of the loop, and PredTI is in the loop, we want to
Rewriter.clear();
}
}
+
+ // The PHI node might have become a constant value after SplitCriticalEdge.
+ DeadInsts.insert(Inst);
+
DOUT << " CHANGED: IMM =" << *Imm << " Inst = " << *Inst;
}
bool LoopStrengthReduce::ValidStride(bool HasBaseReg,
int64_t Scale,
const std::vector<BasedUser>& UsersToProcess) {
+ if (!TLI)
+ return true;
+
for (unsigned i=0, e = UsersToProcess.size(); i!=e; ++i) {
// If this is a load or other access, pass the type of the access in.
const Type *AccessTy = Type::VoidTy;
AccessTy = SI->getOperand(0)->getType();
else if (LoadInst *LI = dyn_cast<LoadInst>(UsersToProcess[i].Inst))
AccessTy = LI->getType();
+ else if (isa<PHINode>(UsersToProcess[i].Inst)) {
+ if (AllowPHIIVReuse)
+ continue;
+ return false;
+ }
TargetLowering::AddrMode AM;
if (SCEVConstant *SC = dyn_cast<SCEVConstant>(UsersToProcess[i].Imm))
const std::vector<BasedUser>& UsersToProcess) {
if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) {
int64_t SInt = SC->getValue()->getSExtValue();
- for (std::map<SCEVHandle, IVsOfOneStride>::iterator SI= IVsByStride.begin(),
- SE = IVsByStride.end(); SI != SE; ++SI) {
+ for (unsigned NewStride = 0, e = StrideOrder.size(); NewStride != e;
+ ++NewStride) {
+ std::map<SCEVHandle, IVsOfOneStride>::iterator SI =
+ IVsByStride.find(StrideOrder[NewStride]);
+ if (SI == IVsByStride.end())
+ continue;
int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
if (SI->first != Stride &&
(unsigned(abs(SInt)) < SSInt || (SInt % SSInt) != 0))
continue;
int64_t Scale = SInt / SSInt;
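+ // Worked example (illustrative): reusing an existing stride-4 IV for uses
+ // of stride 8 gives Scale = 8 / 4 = 2; the reuse only pays off if a scale
+ // of 2 can be folded into the addressing mode of every use.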
- // When scale is 1, we don't need to worry about whether the
- // multiplication can be folded into the addressing mode.
- if (!AllUsesAreAddresses && Scale != 1)
- continue;
// Check that this stride is valid for all the types used for loads and
// stores; if it can be used for some and not others, we might as well use
// the original stride everywhere, since we have to create the IV for it
- // anyway.
- if (ValidStride(HasBaseReg, Scale, UsersToProcess))
+ // anyway. If the scale is 1, then we don't need to worry about folding
+ // multiplications.
+ if (Scale == 1 ||
+ (AllUsesAreAddresses &&
+ ValidStride(HasBaseReg, Scale, UsersToProcess)))
for (std::vector<IVExpr>::iterator II = SI->second.IVs.begin(),
IE = SI->second.IVs.end(); II != IE; ++II)
// FIXME: Only handle base == 0 for now.
return SC->getValue()->getValue().isNegative();
}
+/// isAddressUse - Returns true if the specified instruction is using the
+/// specified value as an address.
+static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
+ bool isAddress = isa<LoadInst>(Inst);
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ if (SI->getOperand(1) == OperandVal)
+ isAddress = true;
+ } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+ // Addressing modes can also be folded into prefetches and a variety
+ // of intrinsics.
+ switch (II->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::prefetch:
+ case Intrinsic::x86_sse2_loadu_dq:
+ case Intrinsic::x86_sse2_loadu_pd:
+ case Intrinsic::x86_sse_loadu_ps:
+ case Intrinsic::x86_sse_storeu_ps:
+ case Intrinsic::x86_sse2_storeu_pd:
+ case Intrinsic::x86_sse2_storeu_dq:
+ case Intrinsic::x86_sse2_storel_dq:
+ if (II->getOperand(1) == OperandVal)
+ isAddress = true;
+ break;
+ case Intrinsic::x86_sse2_loadh_pd:
+ case Intrinsic::x86_sse2_loadl_pd:
+ if (II->getOperand(2) == OperandVal)
+ isAddress = true;
+ break;
+ }
+ }
+ return isAddress;
+}
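+// Example (illustrative): for "call void @llvm.prefetch(i8* %p, ...)",
+// isAddressUse(Call, %p) returns true, since an addressing mode can be
+// folded into the prefetch's pointer operand.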
+
// CollectIVUsers - Transform our list of users and offsets to a bit more
// complex table. In this new vector, each 'BasedUser' contains 'Base' the base
// of the strided access as well as the old information from Uses. We
// instructions. If we can represent anything there, move it to the imm
// fields of the BasedUsers. We do this so that it increases the commonality
// of the remaining uses.
+ unsigned NumPHI = 0;
for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
// If the user is not in the current loop, this means it is using the exit
// value of the IV. Do not put anything in the base, make sure it's all in
// Addressing modes can be folded into loads and stores. Be careful that
// the store is through the expression, not of the expression though.
- bool isAddress = isa<LoadInst>(UsersToProcess[i].Inst);
- if (StoreInst *SI = dyn_cast<StoreInst>(UsersToProcess[i].Inst)) {
- if (SI->getOperand(1) == UsersToProcess[i].OperandValToReplace)
- isAddress = true;
- } else if (IntrinsicInst *II =
- dyn_cast<IntrinsicInst>(UsersToProcess[i].Inst)) {
- // Addressing modes can also be folded into prefetches and a variety
- // of intrinsics.
- switch (II->getIntrinsicID()) {
- default: break;
- case Intrinsic::prefetch:
- case Intrinsic::x86_sse2_loadu_dq:
- case Intrinsic::x86_sse2_loadu_pd:
- case Intrinsic::x86_sse_loadu_ps:
- case Intrinsic::x86_sse_storeu_ps:
- case Intrinsic::x86_sse2_storeu_pd:
- case Intrinsic::x86_sse2_storeu_dq:
- case Intrinsic::x86_sse2_storel_dq:
- if (II->getOperand(1) == UsersToProcess[i].OperandValToReplace)
- isAddress = true;
- break;
- case Intrinsic::x86_sse2_loadh_pd:
- case Intrinsic::x86_sse2_loadl_pd:
- if (II->getOperand(2) == UsersToProcess[i].OperandValToReplace)
- isAddress = true;
- break;
- }
+ bool isPHI = false;
+ bool isAddress = isAddressUse(UsersToProcess[i].Inst,
+ UsersToProcess[i].OperandValToReplace);
+ if (isa<PHINode>(UsersToProcess[i].Inst)) {
+ isPHI = true;
+ ++NumPHI;
}
// If this use isn't an address, then not all uses are addresses.
- if (!isAddress)
+ if (!isAddress && !(AllowPHIIVReuse && isPHI))
AllUsesAreAddresses = false;
MoveImmediateValues(TLI, UsersToProcess[i].Inst, UsersToProcess[i].Base,
}
}
+ // If one of the uses is a PHI node and all other uses are addresses, still
+ // allow iv reuse. Essentially we are trading one constant multiplication
+ // for one fewer iv.
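+ // E.g. (illustrative): with one PHI use and several address uses, reuse
+ // costs one multiply to feed the PHI but saves a whole IV; with two or
+ // more PHI uses the extra multiplies would outweigh that saving.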
+ if (NumPHI > 1)
+ AllUsesAreAddresses = false;
+
return CommonExprs;
}
Loop *L,
bool isOnlyStride) {
// If all the users are moved to another stride, then there is nothing to do.
- if (Uses.Users.size() == 0)
+ if (Uses.Users.empty())
return;
// Keep track if every use in UsersToProcess is an address. If they all are,
// Get a base value.
SCEVHandle Base = UsersToProcess[i].Base;
- // Compact everything with this base to be consequetive with this one.
+ // Compact everything with this base to be consecutive with this one.
for (unsigned j = i+1; j != e; ++j) {
if (UsersToProcess[j].Base == Base) {
std::swap(UsersToProcess[i+1], UsersToProcess[j]);
// If we are reusing the iv, then it must be multiplied by a constant
// factor to take advantage of the addressing mode scale component.
if (RewriteFactor != 0) {
- RewriteExpr =
- SE->getMulExpr(SE->getIntegerSCEV(RewriteFactor,
- RewriteExpr->getType()),
- RewriteExpr);
+ RewriteExpr = SE->getMulExpr(SE->getIntegerSCEV(RewriteFactor,
+ RewriteExpr->getType()),
+ RewriteExpr);
// The common base is emitted in the loop preheader. But since we
// are reusing an IV, it has not been used to initialize the PHI node.
// Add BaseV to the PHI value if needed.
RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));
- User.RewriteInstructionToUseNewBase(RewriteExpr, Rewriter, L, this);
+ User.RewriteInstructionToUseNewBase(RewriteExpr, Rewriter, L, this,
+ DeadInsts);
// Mark the old value we replaced as possibly dead, so that it is eliminated
// if we just replaced the last use of that value.
/// v1 = v1 + 3
/// if (v1 < 30) goto loop
ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
- IVStrideUse* &CondUse,
+ IVStrideUse* &CondUse,
const SCEVHandle* &CondStride) {
if (StrideOrder.size() < 2 ||
IVUsesByStride[*CondStride].Users.size() != 1)
if (!C) return Cond;
ICmpInst::Predicate Predicate = Cond->getPredicate();
- bool isSigned = ICmpInst::isSignedPredicate(Predicate);
int64_t CmpSSInt = SC->getValue()->getSExtValue();
int64_t CmpVal = C->getValue().getSExtValue();
- uint64_t SignBit = 1ULL << (C->getValue().getBitWidth()-1);
+ unsigned BitWidth = C->getValue().getBitWidth();
+ uint64_t SignBit = 1ULL << (BitWidth-1);
+ const Type *CmpTy = C->getType();
+ const Type *NewCmpTy = NULL;
+ unsigned TyBits = CmpTy->getPrimitiveSizeInBits();
+ unsigned NewTyBits = 0;
int64_t NewCmpVal = CmpVal;
SCEVHandle *NewStride = NULL;
Value *NewIncV = NULL;
int64_t Scale = 1;
- const Type *CmpTy = C->getType();
- const Type *NewCmpTy = NULL;
// Look for a suitable stride / iv as replacement.
std::stable_sort(StrideOrder.begin(), StrideOrder.end(), StrideCompare());
if (!isa<SCEVConstant>(SI->first))
continue;
int64_t SSInt = cast<SCEVConstant>(SI->first)->getValue()->getSExtValue();
- if (abs(SSInt) < abs(CmpSSInt) && (CmpSSInt % SSInt) == 0) {
- Scale = CmpSSInt / SSInt;
- NewCmpVal = CmpVal / Scale;
- } else if (abs(SSInt) > abs(CmpSSInt) && (SSInt % CmpSSInt) == 0) {
- Scale = SSInt / CmpSSInt;
- NewCmpVal = CmpVal * Scale;
- } else
+ if (abs(SSInt) <= abs(CmpSSInt) || (SSInt % CmpSSInt) != 0)
continue;
+ Scale = SSInt / CmpSSInt;
+ NewCmpVal = CmpVal * Scale;
+ APInt Mul = APInt(BitWidth, NewCmpVal);
+ // Check for overflow.
+ if (Mul.getSExtValue() != NewCmpVal) {
+ NewCmpVal = CmpVal;
+ continue;
+ }
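+ // Worked example (illustrative): with BitWidth = 32, CmpVal = 1 << 30 and
+ // Scale = 8, NewCmpVal = 1 << 33 truncates to 0 inside the APInt, so the
+ // sign-extended round-trip no longer matches and the stride is skipped.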
+
// For signed predicates, also bail out if scaling changed the sign bit.
- if (isSigned && (CmpVal & SignBit) != (NewCmpVal & SignBit))
+ if (ICmpInst::isSignedPredicate(Predicate) &&
+ (CmpVal & SignBit) != (NewCmpVal & SignBit))
NewCmpVal = CmpVal;
+
if (NewCmpVal != CmpVal) {
// Pick the best iv to use, trying to avoid a cast.
NewIncV = NULL;
}
NewCmpTy = NewIncV->getType();
- if (RequiresTypeConversion(CmpTy, NewCmpTy)) {
- // FIXME: allow reuse of iv of a smaller type?
+ NewTyBits = isa<PointerType>(NewCmpTy)
+ ? UIntPtrTy->getPrimitiveSizeInBits()
+ : NewCmpTy->getPrimitiveSizeInBits();
+ if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
+ // Check if it is possible to rewrite it using an iv / stride of a smaller
+ // integer type.
+ bool TruncOk = false;
+ if (NewCmpTy->isInteger()) {
+ unsigned Bits = NewTyBits;
+ if (ICmpInst::isSignedPredicate(Predicate))
+ --Bits;
+ uint64_t Mask = (1ULL << Bits) - 1;
+ if (((uint64_t)NewCmpVal & Mask) == (uint64_t)NewCmpVal)
+ TruncOk = true;
+ }
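+ // Worked example (illustrative): narrowing a signed 32-bit compare against
+ // 100 to an i8 iv gives Bits = 7 and Mask = 127; (100 & 127) == 100, so
+ // the constant survives truncation and TruncOk is set. A constant of 300
+ // would fail the mask test and the candidate would be skipped.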
+ if (!TruncOk) {
+ NewCmpVal = CmpVal;
+ continue;
+ }
+ }
+
+ // Don't rewrite if the use offset is non-constant and the new type has
+ // a different size.
+ // FIXME: too conservative?
+ if (NewTyBits != TyBits && !isa<SCEVConstant>(CondUse->Offset)) {
NewCmpVal = CmpVal;
continue;
}
if (NewCmpVal != CmpVal) {
// Create a new compare instruction using new stride / iv.
ICmpInst *OldCond = Cond;
- Value *RHS = ConstantInt::get(C->getType(), NewCmpVal);
- // Both sides of a ICmpInst must be of the same type.
- if (NewCmpTy != CmpTy) {
- if (isa<PointerType>(NewCmpTy) && !isa<PointerType>(CmpTy))
- RHS= SCEVExpander::InsertCastOfTo(Instruction::IntToPtr, RHS, NewCmpTy);
- else
- RHS = SCEVExpander::InsertCastOfTo(Instruction::BitCast, RHS, NewCmpTy);
+ Value *RHS;
+ if (!isa<PointerType>(NewCmpTy))
+ RHS = ConstantInt::get(NewCmpTy, NewCmpVal);
+ else {
+ RHS = ConstantInt::get(UIntPtrTy, NewCmpVal);
+ RHS = SCEVExpander::InsertCastOfTo(Instruction::IntToPtr, RHS, NewCmpTy);
}
+ // Insert new compare instruction.
Cond = new ICmpInst(Predicate, NewIncV, RHS);
Cond->setName(L->getHeader()->getName() + ".termcond");
OldCond->getParent()->getInstList().insert(OldCond, Cond);
+
+ // Remove the old compare instruction. The old indvar is probably dead too.
+ DeadInsts.insert(cast<Instruction>(CondUse->OperandValToReplace));
OldCond->replaceAllUsesWith(Cond);
+ SE->deleteValueFromRecords(OldCond);
OldCond->eraseFromParent();
+
IVUsesByStride[*CondStride].Users.pop_back();
- SCEVHandle NewOffset = SE->getMulExpr(CondUse->Offset,
- SE->getConstant(ConstantInt::get(CondUse->Offset->getType(), Scale)));
+ SCEVHandle NewOffset = TyBits == NewTyBits
+ ? SE->getMulExpr(CondUse->Offset,
+ SE->getConstant(ConstantInt::get(CmpTy, Scale)))
+ : SE->getConstant(ConstantInt::get(NewCmpTy,
+ cast<SCEVConstant>(CondUse->Offset)->getValue()->getSExtValue()*Scale));
IVUsesByStride[*NewStride].addUser(NewOffset, Cond, NewIncV);
CondUse = &IVUsesByStride[*NewStride].Users.back();
CondStride = NewStride;
// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
// this loop. If they are induction variables, inspect their uses.
- std::set<Instruction*> Processed; // Don't reprocess instructions.
+ SmallPtrSet<Instruction*,16> Processed; // Don't reprocess instructions.
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
AddUsersIfInteresting(I, L, Processed);
// Note: this processes each stride/type pair individually. All users passed
// into StrengthReduceStridedIVUsers have the same type AND stride. Also,
- // node that we iterate over IVUsesByStride indirectly by using StrideOrder.
+ // note that we iterate over IVUsesByStride indirectly by using StrideOrder.
// This extra layer of indirection makes the ordering of strides deterministic
// - not dependent on map order.
for (unsigned Stride = 0, e = StrideOrder.size(); Stride != e; ++Stride) {
PHINode *PN;
while ((PN = dyn_cast<PHINode>(I))) {
++I; // Preincrement iterator to avoid invalidating it when deleting PN.
-
+
// At this point, we know that we have killed one or more GEP
// instructions. It is worth checking to see if the canonical indvar is also
// dead, so that we can remove it as well. The requirements for the canonical