#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
+#include "llvm/Metadata.h"
#include "llvm/Pass.h"
#include "llvm/Type.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Vectorize.h"
#include <algorithm>
#include <map>
MaxIter("bb-vectorize-max-iter", cl::init(0), cl::Hidden,
cl::desc("The maximum number of pairing iterations"));
+static cl::opt<bool>
+Pow2LenOnly("bb-vectorize-pow2-len-only", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to form non-2^n-length vectors"));
+
+static cl::opt<unsigned>
+MaxInsts("bb-vectorize-max-instr-per-group", cl::init(500), cl::Hidden,
+ cl::desc("The maximum number of pairable instructions per group"));
+
static cl::opt<unsigned>
MaxCandPairsForCycleCheck("bb-vectorize-max-cycle-check-pairs", cl::init(200),
cl::Hidden, cl::desc("The maximum number of candidate pairs with which to use"
" a full cycle check"));
+static cl::opt<bool>
+NoBools("bb-vectorize-no-bools", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize boolean (i1) values"));
+
static cl::opt<bool>
NoInts("bb-vectorize-no-ints", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize integer values"));
NoFloats("bb-vectorize-no-floats", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize floating-point values"));
+static cl::opt<bool>
+NoPointers("bb-vectorize-no-pointers", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize pointer values"));
+
static cl::opt<bool>
NoCasts("bb-vectorize-no-casts", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize casting (conversion) operations"));
NoFMA("bb-vectorize-no-fma", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize the fused-multiply-add intrinsic"));
+static cl::opt<bool>
+NoSelect("bb-vectorize-no-select", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize select instructions"));
+
+static cl::opt<bool>
+NoCmp("bb-vectorize-no-cmp", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize comparison instructions"));
+
+static cl::opt<bool>
+NoGEP("bb-vectorize-no-gep", cl::init(false), cl::Hidden,
+ cl::desc("Don't try to vectorize getelementptr instructions"));
+
static cl::opt<bool>
NoMemOps("bb-vectorize-no-mem-ops", cl::init(false), cl::Hidden,
cl::desc("Don't try to vectorize loads and stores"));
AlignedOnly("bb-vectorize-aligned-only", cl::init(false), cl::Hidden,
cl::desc("Only generate aligned loads and stores"));
+static cl::opt<bool>
+NoMemOpBoost("bb-vectorize-no-mem-op-boost",
+ cl::init(false), cl::Hidden,
+ cl::desc("Don't boost the chain-depth contribution of loads and stores"));
+
static cl::opt<bool>
FastDep("bb-vectorize-fast-dep", cl::init(false), cl::Hidden,
cl::desc("Use a fast instruction dependency analysis"));
namespace {
struct BBVectorize : public BasicBlockPass {
static char ID; // Pass identification, replacement for typeid
- BBVectorize() : BasicBlockPass(ID) {
+
+ const VectorizeConfig Config;
+
+ BBVectorize(const VectorizeConfig &C = VectorizeConfig())
+ : BasicBlockPass(ID), Config(C) {
initializeBBVectorizePass(*PassRegistry::getPassRegistry());
}
+ // Constructor for use when BBVectorize is driven from another pass:
+ // the required analyses are taken from the caller P rather than from
+ // this pass's own analysis resolver.
+ BBVectorize(Pass *P, const VectorizeConfig &C)
+ : BasicBlockPass(ID), Config(C) {
+ AA = &P->getAnalysis<AliasAnalysis>();
+ SE = &P->getAnalysis<ScalarEvolution>();
+ TD = P->getAnalysisIfAvailable<TargetData>();
+ }
+
typedef std::pair<Value *, Value *> ValuePair;
typedef std::pair<ValuePair, size_t> ValuePairWithDepth;
typedef std::pair<ValuePair, ValuePair> VPPair; // A ValuePair pair
// FIXME: const correct?
- bool vectorizePairs(BasicBlock &BB);
+ bool vectorizePairs(BasicBlock &BB, bool NonPow2Len = false);
- void getCandidatePairs(BasicBlock &BB,
+ bool getCandidatePairs(BasicBlock &BB,
+ BasicBlock::iterator &Start,
std::multimap<Value *, Value *> &CandidatePairs,
- std::vector<Value *> &PairableInsts);
+ std::vector<Value *> &PairableInsts, bool NonPow2Len);
void computeConnectedPairs(std::multimap<Value *, Value *> &CandidatePairs,
std::vector<Value *> &PairableInsts,
bool isInstVectorizable(Instruction *I, bool &IsSimpleLoadStore);
bool areInstsCompatible(Instruction *I, Instruction *J,
- bool IsSimpleLoadStore);
+ bool IsSimpleLoadStore, bool NonPow2Len);
bool trackUsesOfI(DenseSet<Value *> &Users,
AliasSetTracker &WriteSet, Instruction *I,
Instruction *J, bool UpdateUsers = true,
std::multimap<Value *, Value *> *LoadMoveSet = 0);
-
+
void computePairsConnectedTo(
std::multimap<Value *, Value *> &CandidatePairs,
std::vector<Value *> &PairableInsts,
bool UseCycleCheck);
Value *getReplacementPointerInput(LLVMContext& Context, Instruction *I,
- Instruction *J, unsigned o, bool &FlipMemInputs);
+ Instruction *J, unsigned o, bool FlipMemInputs);
void fillNewShuffleMask(LLVMContext& Context, Instruction *J,
- unsigned NumElem, unsigned MaskOffset, unsigned NumInElem,
- unsigned IdxOffset, std::vector<Constant*> &Mask);
+ unsigned MaskOffset, unsigned NumInElem,
+ unsigned NumInElem1, unsigned IdxOffset,
+ std::vector<Constant*> &Mask);
Value *getReplacementShuffleMask(LLVMContext& Context, Instruction *I,
Instruction *J);
+ bool expandIEChain(LLVMContext& Context, Instruction *I, Instruction *J,
+ unsigned o, Value *&LOp, unsigned numElemL,
+ Type *ArgTypeL, Type *ArgTypeR,
+ unsigned IdxOff = 0);
+
Value *getReplacementInput(LLVMContext& Context, Instruction *I,
Instruction *J, unsigned o, bool FlipMemInputs);
void getReplacementInputsForPair(LLVMContext& Context, Instruction *I,
Instruction *J, SmallVector<Value *, 3> &ReplacedOperands,
- bool &FlipMemInputs);
+ bool FlipMemInputs);
void replaceOutputsOfPair(LLVMContext& Context, Instruction *I,
Instruction *J, Instruction *K,
Instruction *&InsertionPt, Instruction *&K1,
- Instruction *&K2, bool &FlipMemInputs);
+ Instruction *&K2, bool FlipMemInputs);
void collectPairLoadMoveSet(BasicBlock &BB,
DenseMap<Value *, Value *> &ChosenPairs,
DenseMap<Value *, Value *> &ChosenPairs,
std::multimap<Value *, Value *> &LoadMoveSet);
+ void collectPtrInfo(std::vector<Value *> &PairableInsts,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ DenseSet<Value *> &LowPtrInsts);
+
bool canMoveUsesOfIAfterJ(BasicBlock &BB,
std::multimap<Value *, Value *> &LoadMoveSet,
Instruction *I, Instruction *J);
Instruction *&InsertionPt,
Instruction *I, Instruction *J);
- virtual bool runOnBasicBlock(BasicBlock &BB) {
- AA = &getAnalysis<AliasAnalysis>();
- SE = &getAnalysis<ScalarEvolution>();
- TD = getAnalysisIfAvailable<TargetData>();
+ void combineMetadata(Instruction *K, const Instruction *J);
+ // Vectorization driver: iterate pairing passes, first for power-of-2
+ // vector lengths, then (unless -bb-vectorize-pow2-len-only) for
+ // non-power-of-2 lengths. Returns true if the block was modified.
+ bool vectorizeBB(BasicBlock &BB) {
bool changed = false;
// Iterate a sufficient number of times to merge types of size 1 bit,
// then 2 bits, then 4, etc. up to half of the target vector width of the
// target vector register.
- for (unsigned v = 2, n = 1; v <= VectorBits && (!MaxIter || n <= MaxIter);
+ unsigned n = 1;
+ for (unsigned v = 2;
+ v <= Config.VectorBits && (!Config.MaxIter || n <= Config.MaxIter);
v *= 2, ++n) {
- DEBUG(dbgs() << "BBV: fusing loop #" << n <<
+ DEBUG(dbgs() << "BBV: fusing loop #" << n <<
" for " << BB.getName() << " in " <<
BB.getParent()->getName() << "...\n");
if (vectorizePairs(BB))
break;
}
+ if (changed && !Pow2LenOnly) {
+ ++n;
+ for (; !Config.MaxIter || n <= Config.MaxIter; ++n) {
+ DEBUG(dbgs() << "BBV: fusing for non-2^n-length vectors loop #: " <<
+ n << " for " << BB.getName() << " in " <<
+ BB.getParent()->getName() << "...\n");
+ if (!vectorizePairs(BB, true)) break;
+ }
+ }
+
+ // NOTE(review): as shown in this fragment, 'changed' is never assigned
+ // true after the pairing loops, yet it gates the non-2^n loop and is the
+ // return value; the 'changed = true; else' context lines appear to have
+ // been lost from this hunk -- verify against the complete file.
DEBUG(dbgs() << "BBV: done!\n");
return changed;
}
+ // Pass entry point: cache the analyses this pass requires, then run the
+ // vectorization driver on the block.
+ virtual bool runOnBasicBlock(BasicBlock &BB) {
+ AA = &getAnalysis<AliasAnalysis>();
+ SE = &getAnalysis<ScalarEvolution>();
+ TD = getAnalysisIfAvailable<TargetData>();
+
+ return vectorizeBB(BB);
+ }
+
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
BasicBlockPass::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
AU.addRequired<ScalarEvolution>();
AU.addPreserved<AliasAnalysis>();
AU.addPreserved<ScalarEvolution>();
+ // The pass only replaces instructions within a block; it never adds or
+ // removes blocks or edges, so the CFG is preserved.
+ AU.setPreservesCFG();
}
- // This returns the vector type that holds a pair of the provided type.
- // If the provided type is already a vector, then its length is doubled.
- static inline VectorType *getVecTypeForPair(Type *ElemTy) {
+ // Returns the vector type that holds the concatenation of the two given
+ // types: the element counts (1 for a scalar) are summed. Both types must
+ // share the same scalar element type (asserted below).
+ static inline VectorType *getVecTypeForPair(Type *ElemTy, Type *Elem2Ty) {
+ assert(ElemTy->getScalarType() == Elem2Ty->getScalarType() &&
+ "Cannot form vector from incompatible scalar types");
+ Type *STy = ElemTy->getScalarType();
+
+ unsigned numElem;
if (VectorType *VTy = dyn_cast<VectorType>(ElemTy)) {
- unsigned numElem = VTy->getNumElements();
- return VectorType::get(ElemTy->getScalarType(), numElem*2);
+ numElem = VTy->getNumElements();
+ } else {
+ numElem = 1;
+ }
+
+ if (VectorType *VTy = dyn_cast<VectorType>(Elem2Ty)) {
+ numElem += VTy->getNumElements();
+ } else {
+ numElem += 1;
+ }
+
+ return VectorType::get(STy, numElem);
+ }
+
+ // Computes the two types relevant to vectorizing I: T1 is the value type
+ // (the stored value's type for stores, otherwise the result type), and T2
+ // is the source type for casts (equal to T1 for everything else).
+ static inline void getInstructionTypes(Instruction *I,
+ Type *&T1, Type *&T2) {
+ if (isa<StoreInst>(I)) {
+ // For stores, it is the value type, not the pointer type that matters
+ // because the value is what will come from a vector register.
+
+ Value *IVal = cast<StoreInst>(I)->getValueOperand();
+ T1 = IVal->getType();
+ // NOTE(review): the removed 'return VectorType::get(ElemTy, 2);' line
+ // below belongs to the previous function's hunk; the hunks of this
+ // patch fragment appear to overlap here -- verify against the full diff.
} else {
- return VectorType::get(ElemTy, 2);
+ T1 = I->getType();
}
+
+ if (I->isCast())
+ T2 = cast<CastInst>(I)->getSrcTy();
+ else
+ T2 = T1;
}
// Returns the weight associated with the provided value. A chain of
// candidate chains where longer chains are considered to be better.
// Note: when this function returns 0, the resulting instructions are
// not actually fused.
+ // (No longer static: the function now reads the per-instance Config.)
- static inline size_t getDepthFactor(Value *V) {
+ inline size_t getDepthFactor(Value *V) {
// InsertElement and ExtractElement have a depth factor of zero. This is
// for two reasons: First, they cannot be usefully fused. Second, because
// the pass generates a lot of these, they can confuse the simple metric
if (isa<InsertElementInst>(V) || isa<ExtractElementInst>(V))
return 0;
+ // Give a load or store half of the required depth so that load/store
+ // pairs will vectorize.
+ if (!Config.NoMemOpBoost && (isa<LoadInst>(V) || isa<StoreInst>(V)))
+ return Config.ReqChainDepth/2;
+
return 1;
}
// true if the offset could be determined to be some constant value.
// For example, if OffsetInElmts == 1, then J accesses the memory directly
// after I; if OffsetInElmts == -1 then I accesses the memory
- // directly after J. This function assumes that both instructions
- // have the same type.
+ // directly after J.
bool getPairPtrInfo(Instruction *I, Instruction *J,
Value *&IPtr, Value *&JPtr, unsigned &IAlignment, unsigned &JAlignment,
int64_t &OffsetInElmts) {
Type *VTy = cast<PointerType>(IPtr->getType())->getElementType();
int64_t VTyTSS = (int64_t) TD->getTypeStoreSize(VTy);
- assert(VTy == cast<PointerType>(JPtr->getType())->getElementType());
+ Type *VTy2 = cast<PointerType>(JPtr->getType())->getElementType();
+ if (VTy != VTy2 && Offset < 0) {
+ int64_t VTy2TSS = (int64_t) TD->getTypeStoreSize(VTy2);
+ OffsetInElmts = Offset/VTy2TSS;
+ return (abs64(Offset) % VTy2TSS) == 0;
+ }
OffsetInElmts = Offset/VTyTSS;
return (abs64(Offset) % VTyTSS) == 0;
case Intrinsic::exp:
case Intrinsic::exp2:
case Intrinsic::pow:
- return !NoMath;
+ return Config.VectorizeMath;
case Intrinsic::fma:
- return !NoFMA;
+ return Config.VectorizeFMA;
}
}
// This function implements one vectorization iteration on the provided
// basic block. It returns true if the block is changed.
- bool BBVectorize::vectorizePairs(BasicBlock &BB) {
- std::vector<Value *> PairableInsts;
- std::multimap<Value *, Value *> CandidatePairs;
- getCandidatePairs(BB, CandidatePairs, PairableInsts);
- if (PairableInsts.size() == 0) return false;
-
- // Now we have a map of all of the pairable instructions and we need to
- // select the best possible pairing. A good pairing is one such that the
- // users of the pair are also paired. This defines a (directed) forest
- // over the pairs such that two pairs are connected iff the second pair
- // uses the first.
-
- // Note that it only matters that both members of the second pair use some
- // element of the first pair (to allow for splatting).
-
- std::multimap<ValuePair, ValuePair> ConnectedPairs;
- computeConnectedPairs(CandidatePairs, PairableInsts, ConnectedPairs);
- if (ConnectedPairs.size() == 0) return false;
-
- // Build the pairable-instruction dependency map
- DenseSet<ValuePair> PairableInstUsers;
- buildDepMap(BB, CandidatePairs, PairableInsts, PairableInstUsers);
-
- // There is now a graph of the connected pairs. For each variable, pick the
- // pairing with the largest tree meeting the depth requirement on at least
- // one branch. Then select all pairings that are part of that tree and
- // remove them from the list of available pairings and pairable variables.
-
- DenseMap<Value *, Value *> ChosenPairs;
- choosePairs(CandidatePairs, PairableInsts, ConnectedPairs,
- PairableInstUsers, ChosenPairs);
-
- if (ChosenPairs.size() == 0) return false;
- NumFusedOps += ChosenPairs.size();
+ bool BBVectorize::vectorizePairs(BasicBlock &BB, bool NonPow2Len) {
+ bool ShouldContinue;
+ BasicBlock::iterator Start = BB.getFirstInsertionPt();
+
+ std::vector<Value *> AllPairableInsts;
+ DenseMap<Value *, Value *> AllChosenPairs;
+
+ // Pairing is done in groups: getCandidatePairs stops after
+ // Config.MaxInsts pairable instructions and reports via its return value
+ // (ShouldContinue) that another group remains, resuming from Start.
+ do {
+ std::vector<Value *> PairableInsts;
+ std::multimap<Value *, Value *> CandidatePairs;
+ ShouldContinue = getCandidatePairs(BB, Start, CandidatePairs,
+ PairableInsts, NonPow2Len);
+ if (PairableInsts.empty()) continue;
+
+ // Now we have a map of all of the pairable instructions and we need to
+ // select the best possible pairing. A good pairing is one such that the
+ // users of the pair are also paired. This defines a (directed) forest
+ // over the pairs such that two pairs are connected if the second pair
+ // uses the first.
+
+ // Note that it only matters that both members of the second pair use some
+ // element of the first pair (to allow for splatting).
+
+ std::multimap<ValuePair, ValuePair> ConnectedPairs;
+ computeConnectedPairs(CandidatePairs, PairableInsts, ConnectedPairs);
+ if (ConnectedPairs.empty()) continue;
+
+ // Build the pairable-instruction dependency map
+ DenseSet<ValuePair> PairableInstUsers;
+ buildDepMap(BB, CandidatePairs, PairableInsts, PairableInstUsers);
+
+ // There is now a graph of the connected pairs. For each variable, pick
+ // the pairing with the largest tree meeting the depth requirement on at
+ // least one branch. Then select all pairings that are part of that tree
+ // and remove them from the list of available pairings and pairable
+ // variables.
+
+ DenseMap<Value *, Value *> ChosenPairs;
+ choosePairs(CandidatePairs, PairableInsts, ConnectedPairs,
+ PairableInstUsers, ChosenPairs);
+
+ if (ChosenPairs.empty()) continue;
+ AllPairableInsts.insert(AllPairableInsts.end(), PairableInsts.begin(),
+ PairableInsts.end());
+ AllChosenPairs.insert(ChosenPairs.begin(), ChosenPairs.end());
+ } while (ShouldContinue);
+
+ if (AllChosenPairs.empty()) return false;
+ NumFusedOps += AllChosenPairs.size();
// A set of pairs has now been selected. It is now necessary to replace the
// paired instructions with vector instructions. For this procedure each
- // operand much be replaced with a vector operand. This vector is formed
+ // operand must be replaced with a vector operand. This vector is formed
// by using build_vector on the old operands. The replaced values are then
// replaced with a vector_extract on the result. Subsequent optimization
// passes should coalesce the build/extract combinations.
- fuseChosenPairs(BB, PairableInsts, ChosenPairs);
+ fuseChosenPairs(BB, AllPairableInsts, AllChosenPairs);
+ // It is important to cleanup here so that future iterations of this
+ // function have less work to do.
+ (void) SimplifyInstructionsInBlock(&BB, TD, AA->getTargetLibraryInfo());
return true;
}
} else if (LoadInst *L = dyn_cast<LoadInst>(I)) {
// Vectorize simple loads if possible:
IsSimpleLoadStore = L->isSimple();
- if (!IsSimpleLoadStore || NoMemOps)
+ if (!IsSimpleLoadStore || !Config.VectorizeMemOps)
return false;
} else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
// Vectorize simple stores if possible:
IsSimpleLoadStore = S->isSimple();
- if (!IsSimpleLoadStore || NoMemOps)
+ if (!IsSimpleLoadStore || !Config.VectorizeMemOps)
return false;
} else if (CastInst *C = dyn_cast<CastInst>(I)) {
// We can vectorize casts, but not casts of pointer types, etc.
- if (NoCasts)
+ if (!Config.VectorizeCasts)
return false;
Type *SrcTy = C->getSrcTy();
- if (!SrcTy->isSingleValueType() || SrcTy->isPointerTy())
+ if (!SrcTy->isSingleValueType())
return false;
Type *DestTy = C->getDestTy();
- if (!DestTy->isSingleValueType() || DestTy->isPointerTy())
+ if (!DestTy->isSingleValueType())
+ return false;
+ } else if (isa<SelectInst>(I)) {
+ if (!Config.VectorizeSelect)
+ return false;
+ } else if (isa<CmpInst>(I)) {
+ if (!Config.VectorizeCmp)
+ return false;
+ } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(I)) {
+ if (!Config.VectorizeGEP)
+ return false;
+
+ // Currently, vector GEPs exist only with one index.
+ if (G->getNumIndices() != 1)
return false;
} else if (!(I->isBinaryOp() || isa<ShuffleVectorInst>(I) ||
isa<ExtractElementInst>(I) || isa<InsertElementInst>(I))) {
return false;
Type *T1, *T2;
- if (isa<StoreInst>(I)) {
- // For stores, it is the value type, not the pointer type that matters
- // because the value is what will come from a vector register.
-
- Value *IVal = cast<StoreInst>(I)->getValueOperand();
- T1 = IVal->getType();
- } else {
- T1 = I->getType();
- }
-
- if (I->isCast())
- T2 = cast<CastInst>(I)->getSrcTy();
- else
- T2 = T1;
+ getInstructionTypes(I, T1, T2);
// Not every type can be vectorized...
if (!(VectorType::isValidElementType(T1) || T1->isVectorTy()) ||
!(VectorType::isValidElementType(T2) || T2->isVectorTy()))
return false;
- if (NoInts && (T1->isIntOrIntVectorTy() || T2->isIntOrIntVectorTy()))
+ if (T1->getScalarSizeInBits() == 1 && T2->getScalarSizeInBits() == 1) {
+ if (!Config.VectorizeBools)
+ return false;
+ } else {
+ if (!Config.VectorizeInts
+ && (T1->isIntOrIntVectorTy() || T2->isIntOrIntVectorTy()))
+ return false;
+ }
+
+ if (!Config.VectorizeFloats
+ && (T1->isFPOrFPVectorTy() || T2->isFPOrFPVectorTy()))
return false;
- if (NoFloats && (T1->isFPOrFPVectorTy() || T2->isFPOrFPVectorTy()))
+ // Don't vectorize target-specific types.
+ if (T1->isX86_FP80Ty() || T1->isPPC_FP128Ty() || T1->isX86_MMXTy())
+ return false;
+ if (T2->isX86_FP80Ty() || T2->isPPC_FP128Ty() || T2->isX86_MMXTy())
return false;
- if (T1->getPrimitiveSizeInBits() > VectorBits/2 ||
- T2->getPrimitiveSizeInBits() > VectorBits/2)
+ if ((!Config.VectorizePointers || TD == 0) &&
+ (T1->getScalarType()->isPointerTy() ||
+ T2->getScalarType()->isPointerTy()))
+ return false;
+
+ if (T1->getPrimitiveSizeInBits() >= Config.VectorBits ||
+ T2->getPrimitiveSizeInBits() >= Config.VectorBits)
return false;
return true;
// that I has already been determined to be vectorizable and that J is not
// in the use tree of I.
bool BBVectorize::areInstsCompatible(Instruction *I, Instruction *J,
- bool IsSimpleLoadStore) {
+ bool IsSimpleLoadStore, bool NonPow2Len) {
DEBUG(if (DebugInstructionExamination) dbgs() << "BBV: looking at " << *I <<
" <-> " << *J << "\n");
// Loads and stores can be merged if they have different alignments,
// but are otherwise the same.
- LoadInst *LI, *LJ;
- StoreInst *SI, *SJ;
- if ((LI = dyn_cast<LoadInst>(I)) && (LJ = dyn_cast<LoadInst>(J))) {
- if (I->getType() != J->getType())
- return false;
+ if (!J->isSameOperationAs(I, Instruction::CompareIgnoringAlignment |
+ (NonPow2Len ? Instruction::CompareUsingScalarTypes : 0)))
+ return false;
- if (LI->getPointerOperand()->getType() !=
- LJ->getPointerOperand()->getType() ||
- LI->isVolatile() != LJ->isVolatile() ||
- LI->getOrdering() != LJ->getOrdering() ||
- LI->getSynchScope() != LJ->getSynchScope())
- return false;
- } else if ((SI = dyn_cast<StoreInst>(I)) && (SJ = dyn_cast<StoreInst>(J))) {
- if (SI->getValueOperand()->getType() !=
- SJ->getValueOperand()->getType() ||
- SI->getPointerOperand()->getType() !=
- SJ->getPointerOperand()->getType() ||
- SI->isVolatile() != SJ->isVolatile() ||
- SI->getOrdering() != SJ->getOrdering() ||
- SI->getSynchScope() != SJ->getSynchScope())
- return false;
- } else if (!J->isSameOperationAs(I)) {
+ Type *IT1, *IT2, *JT1, *JT2;
+ getInstructionTypes(I, IT1, IT2);
+ getInstructionTypes(J, JT1, JT2);
+ unsigned MaxTypeBits = std::max(
+ IT1->getPrimitiveSizeInBits() + JT1->getPrimitiveSizeInBits(),
+ IT2->getPrimitiveSizeInBits() + JT2->getPrimitiveSizeInBits());
+ if (MaxTypeBits > Config.VectorBits)
return false;
- }
+
// FIXME: handle addsub-type operations!
if (IsSimpleLoadStore) {
int64_t OffsetInElmts = 0;
if (getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment,
OffsetInElmts) && abs64(OffsetInElmts) == 1) {
- if (AlignedOnly) {
- Type *aType = isa<StoreInst>(I) ?
+ if (Config.AlignedOnly) {
+ Type *aTypeI = isa<StoreInst>(I) ?
cast<StoreInst>(I)->getValueOperand()->getType() : I->getType();
+ Type *aTypeJ = isa<StoreInst>(J) ?
+ cast<StoreInst>(J)->getValueOperand()->getType() : J->getType();
+
// An aligned load or store is possible only if the instruction
// with the lower offset has an alignment suitable for the
// vector type.
-
+
unsigned BottomAlignment = IAlignment;
if (OffsetInElmts < 0) BottomAlignment = JAlignment;
-
- Type *VType = getVecTypeForPair(aType);
+
+ Type *VType = getVecTypeForPair(aTypeI, aTypeJ);
unsigned VecAlignment = TD->getPrefTypeAlignment(VType);
if (BottomAlignment < VecAlignment)
return false;
} else {
return false;
}
- } else if (isa<ShuffleVectorInst>(I)) {
- // Only merge two shuffles if they're both constant
- return isa<Constant>(I->getOperand(2)) &&
- isa<Constant>(J->getOperand(2));
- // FIXME: We may want to vectorize non-constant shuffles also.
+ }
+
+ // The powi intrinsic is special because only the first argument is
+ // vectorized, the second arguments must be equal.
+ CallInst *CI = dyn_cast<CallInst>(I);
+ Function *FI;
+ if (CI && (FI = CI->getCalledFunction()) &&
+ FI->getIntrinsicID() == Intrinsic::powi) {
+
+ Value *A1I = CI->getArgOperand(1),
+ *A1J = cast<CallInst>(J)->getArgOperand(1);
+ const SCEV *A1ISCEV = SE->getSCEV(A1I),
+ *A1JSCEV = SE->getSCEV(A1J);
+ return (A1ISCEV == A1JSCEV);
}
return true;
UsesI = true;
if (!UsesI)
- for (User::op_iterator JU = J->op_begin(), e = J->op_end();
- JU != e; ++JU) {
+ for (User::op_iterator JU = J->op_begin(), JE = J->op_end();
+ JU != JE; ++JU) {
Value *V = *JU;
if (I == V || Users.count(V)) {
UsesI = true;
} else {
for (AliasSetTracker::iterator W = WriteSet.begin(),
WE = WriteSet.end(); W != WE; ++W) {
- for (AliasSet::iterator A = W->begin(), AE = W->end();
- A != AE; ++A) {
- AliasAnalysis::Location ptrLoc(A->getValue(), A->getSize(),
- A->getTBAAInfo());
- if (AA->getModRefInfo(J, ptrLoc) != AliasAnalysis::NoModRef) {
- UsesI = true;
- break;
- }
+ if (W->aliasesUnknownInst(J, *AA)) {
+ UsesI = true;
+ break;
}
- if (UsesI) break;
}
}
}
// This function iterates over all instruction pairs in the provided
// basic block and collects all candidate pairs for vectorization.
- void BBVectorize::getCandidatePairs(BasicBlock &BB,
+ bool BBVectorize::getCandidatePairs(BasicBlock &BB,
+ BasicBlock::iterator &Start,
std::multimap<Value *, Value *> &CandidatePairs,
- std::vector<Value *> &PairableInsts) {
+ std::vector<Value *> &PairableInsts, bool NonPow2Len) {
BasicBlock::iterator E = BB.end();
- for (BasicBlock::iterator I = BB.getFirstInsertionPt(); I != E; ++I) {
+ if (Start == E) return false;
+
+ bool ShouldContinue = false, IAfterStart = false;
+ for (BasicBlock::iterator I = Start++; I != E; ++I) {
+ if (I == Start) IAfterStart = true;
+
bool IsSimpleLoadStore;
if (!isInstVectorizable(I, IsSimpleLoadStore)) continue;
// Look for an instruction with which to pair instruction *I...
DenseSet<Value *> Users;
AliasSetTracker WriteSet(*AA);
- BasicBlock::iterator J = I; ++J;
- for (unsigned ss = 0; J != E && ss <= SearchLimit; ++J, ++ss) {
+ bool JAfterStart = IAfterStart;
+ BasicBlock::iterator J = llvm::next(I);
+ for (unsigned ss = 0; J != E && ss <= Config.SearchLimit; ++J, ++ss) {
+ if (J == Start) JAfterStart = true;
+
// Determine if J uses I, if so, exit the loop.
- bool UsesI = trackUsesOfI(Users, WriteSet, I, J, !FastDep);
- if (FastDep) {
+ bool UsesI = trackUsesOfI(Users, WriteSet, I, J, !Config.FastDep);
+ if (Config.FastDep) {
// Note: For this heuristic to be effective, independent operations
// must tend to be intermixed. This is likely to be true from some
// kinds of grouped loop unrolling (but not the generic LLVM pass),
// J does not use I, and comes before the first use of I, so it can be
// merged with I if the instructions are compatible.
- if (!areInstsCompatible(I, J, IsSimpleLoadStore)) continue;
+ if (!areInstsCompatible(I, J, IsSimpleLoadStore, NonPow2Len)) continue;
// J is a candidate for merging with I.
if (!PairableInsts.size() ||
PairableInsts[PairableInsts.size()-1] != I) {
PairableInsts.push_back(I);
}
+
CandidatePairs.insert(ValuePair(I, J));
+
+ // The next call to this function must start after the last instruction
+ // selected during this invocation.
+ if (JAfterStart) {
+ Start = llvm::next(J);
+ IAfterStart = JAfterStart = false;
+ }
+
DEBUG(if (DebugCandidateSelection) dbgs() << "BBV: candidate pair "
<< *I << " <-> " << *J << "\n");
+
+ // If we have already found too many pairs, break here and this function
+ // will be called again starting after the last instruction selected
+ // during this invocation.
+ if (PairableInsts.size() >= Config.MaxInsts) {
+ ShouldContinue = true;
+ break;
+ }
}
+
+ if (ShouldContinue)
+ break;
}
DEBUG(dbgs() << "BBV: found " << PairableInsts.size()
<< " instructions with candidate pairs\n");
+
+ return ShouldContinue;
}
// Finds candidate pairs connected to the pair P = <PI, PJ>. This means that
std::vector<Value *> &PairableInsts,
std::multimap<ValuePair, ValuePair> &ConnectedPairs,
ValuePair P) {
+ StoreInst *SI, *SJ;
+
// For each possible pairing for this variable, look at the uses of
// the first value...
for (Value::use_iterator I = P.first->use_begin(),
E = P.first->use_end(); I != E; ++I) {
+ if (isa<LoadInst>(*I)) {
+ // A pair cannot be connected to a load because the load only takes one
+ // operand (the address) and it is a scalar even after vectorization.
+ continue;
+ } else if ((SI = dyn_cast<StoreInst>(*I)) &&
+ P.first == SI->getPointerOperand()) {
+ // Similarly, a pair cannot be connected to a store through its
+ // pointer operand.
+ continue;
+ }
+
VPIteratorPair IPairRange = CandidatePairs.equal_range(*I);
// For each use of the first variable, look for uses of the second
// variable...
for (Value::use_iterator J = P.second->use_begin(),
E2 = P.second->use_end(); J != E2; ++J) {
+ if ((SJ = dyn_cast<StoreInst>(*J)) &&
+ P.second == SJ->getPointerOperand())
+ continue;
+
VPIteratorPair JPairRange = CandidatePairs.equal_range(*J);
// Look for <I, J>:
ConnectedPairs.insert(VPPair(P, ValuePair(*J, *I)));
}
- if (SplatBreaksChain) continue;
+ if (Config.SplatBreaksChain) continue;
// Look for cases where just the first value in the pair is used by
// both members of another pair (splatting).
for (Value::use_iterator J = P.first->use_begin(); J != E; ++J) {
+ if ((SJ = dyn_cast<StoreInst>(*J)) &&
+ P.first == SJ->getPointerOperand())
+ continue;
+
if (isSecondInIteratorPair<Value*>(*J, IPairRange))
ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J)));
}
}
- if (SplatBreaksChain) return;
+ if (Config.SplatBreaksChain) return;
// Look for cases where just the second value in the pair is used by
// both members of another pair (splatting).
for (Value::use_iterator I = P.second->use_begin(),
E = P.second->use_end(); I != E; ++I) {
+ if (isa<LoadInst>(*I))
+ continue;
+ else if ((SI = dyn_cast<StoreInst>(*I)) &&
+ P.second == SI->getPointerOperand())
+ continue;
+
VPIteratorPair IPairRange = CandidatePairs.equal_range(*I);
for (Value::use_iterator J = P.second->use_begin(); J != E; ++J) {
+ if ((SJ = dyn_cast<StoreInst>(*J)) &&
+ P.second == SJ->getPointerOperand())
+ continue;
+
if (isSecondInIteratorPair<Value*>(*J, IPairRange))
ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J)));
}
// A lookup table of visited pairs is kept because the PairableInstUserMap
// contains non-direct associations.
DenseSet<ValuePair> Visited;
- std::vector<ValuePair> Q;
+ SmallVector<ValuePair, 32> Q;
// General depth-first post-order traversal:
Q.push_back(P);
- while (!Q.empty()) {
- ValuePair QTop = Q.back();
-
+ do {
+ ValuePair QTop = Q.pop_back_val();
Visited.insert(QTop);
- Q.pop_back();
DEBUG(if (DebugCycleCheck)
dbgs() << "BBV: cycle check visiting: " << *QTop.first << " <-> "
return true;
}
- if (CurrentPairs.count(C->second) > 0 &&
- Visited.count(C->second) == 0)
+ if (CurrentPairs.count(C->second) && !Visited.count(C->second))
Q.push_back(C->second);
}
- }
+ } while (!Q.empty());
return false;
}
// Each of these pairs is viewed as the root node of a Tree. The Tree
// is then walked (depth-first). As this happens, we keep track of
// the pairs that compose the Tree and the maximum depth of the Tree.
- std::vector<ValuePairWithDepth> Q;
+ SmallVector<ValuePairWithDepth, 32> Q;
// General depth-first post-order traversal:
Q.push_back(ValuePairWithDepth(J, getDepthFactor(J.first)));
- while (!Q.empty()) {
+ do {
ValuePairWithDepth QTop = Q.back();
// Push each child onto the queue:
bool MoreChildren = false;
size_t MaxChildDepth = QTop.second;
VPPIteratorPair qtRange = ConnectedPairs.equal_range(QTop.first);
- for (std::map<ValuePair, ValuePair>::iterator k = qtRange.first;
+ for (std::multimap<ValuePair, ValuePair>::iterator k = qtRange.first;
k != qtRange.second; ++k) {
// Make sure that this child pair is still a candidate:
bool IsStillCand = false;
Tree.insert(ValuePairWithDepth(QTop.first, MaxChildDepth));
Q.pop_back();
}
- }
+ } while (!Q.empty());
}
// Given some initial tree, prune it by removing conflicting pairs (pairs
DenseMap<ValuePair, size_t> &Tree,
DenseSet<ValuePair> &PrunedTree, ValuePair J,
bool UseCycleCheck) {
- std::vector<ValuePairWithDepth> Q;
+ SmallVector<ValuePairWithDepth, 32> Q;
// General depth-first post-order traversal:
Q.push_back(ValuePairWithDepth(J, getDepthFactor(J.first)));
- while (!Q.empty()) {
- ValuePairWithDepth QTop = Q.back();
+ do {
+ ValuePairWithDepth QTop = Q.pop_back_val();
PrunedTree.insert(QTop.first);
- Q.pop_back();
// Visit each child, pruning as necessary...
- DenseMap<ValuePair, size_t> BestChilden;
+ DenseMap<ValuePair, size_t> BestChildren;
VPPIteratorPair QTopRange = ConnectedPairs.equal_range(QTop.first);
- for (std::map<ValuePair, ValuePair>::iterator K = QTopRange.first;
+ for (std::multimap<ValuePair, ValuePair>::iterator K = QTopRange.first;
K != QTopRange.second; ++K) {
DenseMap<ValuePair, size_t>::iterator C = Tree.find(K->second);
if (C == Tree.end()) continue;
bool CanAdd = true;
for (DenseMap<ValuePair, size_t>::iterator C2
- = BestChilden.begin(), E2 = BestChilden.end();
+ = BestChildren.begin(), E2 = BestChildren.end();
C2 != E2; ++C2) {
if (C2->first.first == C->first.first ||
C2->first.first == C->first.second ||
if (!CanAdd) continue;
// And check the queue too...
- for (std::vector<ValuePairWithDepth>::iterator C2 = Q.begin(),
+ for (SmallVector<ValuePairWithDepth, 32>::iterator C2 = Q.begin(),
E2 = Q.end(); C2 != E2; ++C2) {
if (C2->first.first == C->first.first ||
C2->first.first == C->first.second ||
}
if (!CanAdd) continue;
- // To check for non-trivial cycles formed by the addition of the
- // current pair we've formed a list of all relevant pairs, now use a
- // graph walk to check for a cycle. We start from the current pair and
- // walk the use tree to see if we again reach the current pair. If we
- // do, then the current pair is rejected.
+ // To check for non-trivial cycles formed by the addition of the
+ // current pair we've formed a list of all relevant pairs, now use a
+ // graph walk to check for a cycle. We start from the current pair and
+ // walk the use tree to see if we again reach the current pair. If we
+ // do, then the current pair is rejected.
// FIXME: It may be more efficient to use a topological-ordering
// algorithm to improve the cycle check. This should be investigated.
// conflict is found, then remove the previously-selected child
// before adding this one in its place.
for (DenseMap<ValuePair, size_t>::iterator C2
- = BestChilden.begin(); C2 != BestChilden.end();) {
+ = BestChildren.begin(); C2 != BestChildren.end();) {
if (C2->first.first == C->first.first ||
C2->first.first == C->first.second ||
C2->first.second == C->first.first ||
C2->first.second == C->first.second ||
pairsConflict(C2->first, C->first, PairableInstUsers))
- BestChilden.erase(C2++);
+ BestChildren.erase(C2++);
else
++C2;
}
- BestChilden.insert(ValuePairWithDepth(C->first, C->second));
+ BestChildren.insert(ValuePairWithDepth(C->first, C->second));
}
for (DenseMap<ValuePair, size_t>::iterator C
- = BestChilden.begin(), E2 = BestChilden.end();
+ = BestChildren.begin(), E2 = BestChildren.end();
C != E2; ++C) {
size_t DepthF = getDepthFactor(C->first.first);
Q.push_back(ValuePairWithDepth(C->first, QTop.second+DepthF));
}
- }
+ } while (!Q.empty());
}
// This function finds the best tree of mututally-compatible connected
<< *J->first << " <-> " << *J->second << "} of depth " <<
MaxDepth << " and size " << PrunedTree.size() <<
" (effective size: " << EffSize << ")\n");
- if (MaxDepth >= ReqChainDepth && EffSize > BestEffSize) {
+ if (MaxDepth >= Config.ReqChainDepth && EffSize > BestEffSize) {
BestMaxDepth = MaxDepth;
BestEffSize = EffSize;
BestTree = PrunedTree;
std::multimap<ValuePair, ValuePair> &ConnectedPairs,
DenseSet<ValuePair> &PairableInstUsers,
DenseMap<Value *, Value *>& ChosenPairs) {
- bool UseCycleCheck = CandidatePairs.size() <= MaxCandPairsForCycleCheck;
+ bool UseCycleCheck =
+ CandidatePairs.size() <= Config.MaxCandPairsForCycleCheck;
std::multimap<ValuePair, ValuePair> PairableInstUserMap;
for (std::vector<Value *>::iterator I = PairableInsts.begin(),
E = PairableInsts.end(); I != E; ++I) {
// instruction that fuses I with J.
Value *BBVectorize::getReplacementPointerInput(LLVMContext& Context,
Instruction *I, Instruction *J, unsigned o,
- bool &FlipMemInputs) {
+ bool FlipMemInputs) {
Value *IPtr, *JPtr;
unsigned IAlignment, JAlignment;
int64_t OffsetInElmts;
+
+ // Note: the analysis might fail here, which is why FlipMemInputs has
+ // been precomputed (OffsetInElmts must not be used here).
(void) getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment,
OffsetInElmts);
// The pointer value is taken to be the one with the lowest offset.
Value *VPtr;
- if (OffsetInElmts > 0) {
+ if (!FlipMemInputs) {
VPtr = IPtr;
} else {
- FlipMemInputs = true;
VPtr = JPtr;
}
- Type *ArgType = cast<PointerType>(IPtr->getType())->getElementType();
- Type *VArgType = getVecTypeForPair(ArgType);
+ Type *ArgTypeI = cast<PointerType>(IPtr->getType())->getElementType();
+ Type *ArgTypeJ = cast<PointerType>(JPtr->getType())->getElementType();
+ Type *VArgType = getVecTypeForPair(ArgTypeI, ArgTypeJ);
Type *VArgPtrType = PointerType::get(VArgType,
cast<PointerType>(IPtr->getType())->getAddressSpace());
return new BitCastInst(VPtr, VArgPtrType, getReplacementName(I, true, o),
}
void BBVectorize::fillNewShuffleMask(LLVMContext& Context, Instruction *J,
- unsigned NumElem, unsigned MaskOffset, unsigned NumInElem,
- unsigned IdxOffset, std::vector<Constant*> &Mask) {
- for (unsigned v = 0; v < NumElem/2; ++v) {
+ unsigned MaskOffset, unsigned NumInElem,
+ unsigned NumInElem1, unsigned IdxOffset,
+ std::vector<Constant*> &Mask) {
+ unsigned NumElem1 = cast<VectorType>(J->getType())->getNumElements();
+ for (unsigned v = 0; v < NumElem1; ++v) {
int m = cast<ShuffleVectorInst>(J)->getMaskValue(v);
if (m < 0) {
Mask[v+MaskOffset] = UndefValue::get(Type::getInt32Ty(Context));
} else {
unsigned mm = m + (int) IdxOffset;
- if (m >= (int) NumInElem)
+ if (m >= (int) NumInElem1)
mm += (int) NumInElem;
Mask[v+MaskOffset] =
// This is the shuffle mask. We need to append the second
// mask to the first, and the numbers need to be adjusted.
- Type *ArgType = I->getType();
- Type *VArgType = getVecTypeForPair(ArgType);
+ Type *ArgTypeI = I->getType();
+ Type *ArgTypeJ = J->getType();
+ Type *VArgType = getVecTypeForPair(ArgTypeI, ArgTypeJ);
+
+ unsigned NumElemI = cast<VectorType>(ArgTypeI)->getNumElements();
// Get the total number of elements in the fused vector type.
// By definition, this must equal the number of elements in
unsigned NumElem = cast<VectorType>(VArgType)->getNumElements();
std::vector<Constant*> Mask(NumElem);
- Type *OpType = I->getOperand(0)->getType();
- unsigned NumInElem = cast<VectorType>(OpType)->getNumElements();
+ Type *OpTypeI = I->getOperand(0)->getType();
+ unsigned NumInElemI = cast<VectorType>(OpTypeI)->getNumElements();
+ Type *OpTypeJ = J->getOperand(0)->getType();
+ unsigned NumInElemJ = cast<VectorType>(OpTypeJ)->getNumElements();
+
+ // The fused vector will be:
+ // -----------------------------------------------------
+ // | NumInElemI | NumInElemJ | NumInElemI | NumInElemJ |
+ // -----------------------------------------------------
+ // from which we'll extract NumElem total elements (where the first NumElemI
+ // of them come from the mask in I and the remainder come from the mask
+ // in J).
// For the mask from the first pair...
- fillNewShuffleMask(Context, I, NumElem, 0, NumInElem, 0, Mask);
+ fillNewShuffleMask(Context, I, 0, NumInElemJ, NumInElemI,
+ 0, Mask);
// For the mask from the second pair...
- fillNewShuffleMask(Context, J, NumElem, NumElem/2, NumInElem, NumInElem,
- Mask);
+ fillNewShuffleMask(Context, J, NumElemI, NumInElemI, NumInElemJ,
+ NumInElemI, Mask);
return ConstantVector::get(Mask);
}
+ // If LOp is produced by a "pure" chain of insertelement instructions (each
+ // insert feeding the next, bottoming out in an undef value), rebuild it as
+ // an equivalent chain that inserts directly into the wider type ArgTypeH,
+ // offsetting every element index by IdxOff. On success, LOp is updated in
+ // place to the new chain (or to undef if the chain inserted no defined
+ // elements) and true is returned; otherwise LOp is untouched and false is
+ // returned.
+ bool BBVectorize::expandIEChain(LLVMContext& Context, Instruction *I,
+ Instruction *J, unsigned o, Value *&LOp,
+ unsigned numElemL,
+ Type *ArgTypeL, Type *ArgTypeH,
+ unsigned IdxOff) {
+ bool ExpandedIEChain = false;
+ if (InsertElementInst *LIE = dyn_cast<InsertElementInst>(LOp)) {
+ // If we have a pure insertelement chain, then this can be rewritten
+ // into a chain that directly builds the larger type.
+ bool PureChain = true;
+ InsertElementInst *LIENext = LIE;
+ do {
+ if (!isa<UndefValue>(LIENext->getOperand(0)) &&
+ !isa<InsertElementInst>(LIENext->getOperand(0))) {
+ PureChain = false;
+ break;
+ }
+ } while ((LIENext =
+ dyn_cast<InsertElementInst>(LIENext->getOperand(0))));
+
+ if (PureChain) {
+ // Collect the scalar element for each vector position, walking from
+ // the outermost insert inward.
+ // NOTE(review): because the walk goes outermost-to-innermost, an
+ // inner insert to a duplicate index would overwrite the outer (live)
+ // value here -- confirm duplicate indices cannot occur in practice.
+ SmallVector<Value *, 8> VectElemts(numElemL,
+ UndefValue::get(ArgTypeL->getScalarType()));
+ InsertElementInst *LIENext = LIE;
+ do {
+ unsigned Idx =
+ cast<ConstantInt>(LIENext->getOperand(2))->getSExtValue();
+ VectElemts[Idx] = LIENext->getOperand(1);
+ } while ((LIENext =
+ dyn_cast<InsertElementInst>(LIENext->getOperand(0))));
+
+ // Rebuild the chain into the wider type, skipping undef elements;
+ // LIENext tracks the last insert actually created.
+ LIENext = 0;
+ Value *LIEPrev = UndefValue::get(ArgTypeH);
+ for (unsigned i = 0; i < numElemL; ++i) {
+ if (isa<UndefValue>(VectElemts[i])) continue;
+ LIENext = InsertElementInst::Create(LIEPrev, VectElemts[i],
+ ConstantInt::get(Type::getInt32Ty(Context),
+ i + IdxOff),
+ getReplacementName(I, true, o, i+1));
+ LIENext->insertBefore(J);
+ LIEPrev = LIENext;
+ }
+
+ LOp = LIENext ? (Value*) LIENext : UndefValue::get(ArgTypeH);
+ ExpandedIEChain = true;
+ }
+ }
+
+ return ExpandedIEChain;
+ }
+
// Returns the value to be used as the specified operand of the vector
// instruction that fuses I with J.
Value *BBVectorize::getReplacementInput(LLVMContext& Context, Instruction *I,
Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), 1);
- // Compute the fused vector type for this operand
- Type *ArgType = I->getOperand(o)->getType();
- VectorType *VArgType = getVecTypeForPair(ArgType);
+ // Compute the fused vector type for this operand
+ Type *ArgTypeI = I->getOperand(o)->getType();
+ Type *ArgTypeJ = J->getOperand(o)->getType();
+ VectorType *VArgType = getVecTypeForPair(ArgTypeI, ArgTypeJ);
Instruction *L = I, *H = J;
+ Type *ArgTypeL = ArgTypeI, *ArgTypeH = ArgTypeJ;
if (FlipMemInputs) {
L = J;
H = I;
+ ArgTypeL = ArgTypeJ;
+ ArgTypeH = ArgTypeI;
}
- if (ArgType->isVectorTy()) {
- unsigned numElem = cast<VectorType>(VArgType)->getNumElements();
- std::vector<Constant*> Mask(numElem);
- for (unsigned v = 0; v < numElem; ++v)
- Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ unsigned numElemL;
+ if (ArgTypeL->isVectorTy())
+ numElemL = cast<VectorType>(ArgTypeL)->getNumElements();
+ else
+ numElemL = 1;
- Instruction *BV = new ShuffleVectorInst(L->getOperand(o),
- H->getOperand(o),
- ConstantVector::get(Mask),
- getReplacementName(I, true, o));
- BV->insertBefore(J);
- return BV;
+ unsigned numElemH;
+ if (ArgTypeH->isVectorTy())
+ numElemH = cast<VectorType>(ArgTypeH)->getNumElements();
+ else
+ numElemH = 1;
+
+ Value *LOp = L->getOperand(o);
+ Value *HOp = H->getOperand(o);
+ unsigned numElem = VArgType->getNumElements();
+
+ // First, we check if we can reuse the "original" vector outputs (if these
+ // exist). We might need a shuffle.
+ ExtractElementInst *LEE = dyn_cast<ExtractElementInst>(LOp);
+ ExtractElementInst *HEE = dyn_cast<ExtractElementInst>(HOp);
+ ShuffleVectorInst *LSV = dyn_cast<ShuffleVectorInst>(LOp);
+ ShuffleVectorInst *HSV = dyn_cast<ShuffleVectorInst>(HOp);
+
+ // FIXME: If we're fusing shuffle instructions, then we can't apply this
+ // optimization. The input vectors to the shuffle might be a different
+ // length from the shuffle outputs. Unfortunately, the replacement
+ // shuffle mask has already been formed, and the mask entries are sensitive
+ // to the sizes of the inputs.
+ bool IsSizeChangeShuffle =
+ isa<ShuffleVectorInst>(L) &&
+ (LOp->getType() != L->getType() || HOp->getType() != H->getType());
+
+ if ((LEE || LSV) && (HEE || HSV) && !IsSizeChangeShuffle) {
+ // We can have at most two unique vector inputs.
+ bool CanUseInputs = true;
+ Value *I1, *I2 = 0;
+ if (LEE) {
+ I1 = LEE->getOperand(0);
+ } else {
+ I1 = LSV->getOperand(0);
+ I2 = LSV->getOperand(1);
+ if (I2 == I1 || isa<UndefValue>(I2))
+ I2 = 0;
+ }
+
+ if (HEE) {
+ Value *I3 = HEE->getOperand(0);
+ if (!I2 && I3 != I1)
+ I2 = I3;
+ else if (I3 != I1 && I3 != I2)
+ CanUseInputs = false;
+ } else {
+ Value *I3 = HSV->getOperand(0);
+ if (!I2 && I3 != I1)
+ I2 = I3;
+ else if (I3 != I1 && I3 != I2)
+ CanUseInputs = false;
+
+ if (CanUseInputs) {
+ Value *I4 = HSV->getOperand(1);
+ if (!isa<UndefValue>(I4)) {
+ if (!I2 && I4 != I1)
+ I2 = I4;
+ else if (I4 != I1 && I4 != I2)
+ CanUseInputs = false;
+ }
+ }
+ }
+
+ if (CanUseInputs) {
+ unsigned LOpElem =
+ cast<VectorType>(cast<Instruction>(LOp)->getOperand(0)->getType())
+ ->getNumElements();
+ unsigned HOpElem =
+ cast<VectorType>(cast<Instruction>(HOp)->getOperand(0)->getType())
+ ->getNumElements();
+
+ // We have one or two input vectors. We need to map each index of the
+ // operands to the index of the original vector.
+ SmallVector<std::pair<int, int>, 8> II(numElem);
+ for (unsigned i = 0; i < numElemL; ++i) {
+ int Idx, INum;
+ if (LEE) {
+ Idx =
+ cast<ConstantInt>(LEE->getOperand(1))->getSExtValue();
+ INum = LEE->getOperand(0) == I1 ? 0 : 1;
+ } else {
+ Idx = LSV->getMaskValue(i);
+ if (Idx < (int) LOpElem) {
+ INum = LSV->getOperand(0) == I1 ? 0 : 1;
+ } else {
+ Idx -= LOpElem;
+ INum = LSV->getOperand(1) == I1 ? 0 : 1;
+ }
+ }
+
+ II[i] = std::pair<int, int>(Idx, INum);
+ }
+ for (unsigned i = 0; i < numElemH; ++i) {
+ int Idx, INum;
+ if (HEE) {
+ Idx =
+ cast<ConstantInt>(HEE->getOperand(1))->getSExtValue();
+ INum = HEE->getOperand(0) == I1 ? 0 : 1;
+ } else {
+ Idx = HSV->getMaskValue(i);
+ if (Idx < (int) HOpElem) {
+ INum = HSV->getOperand(0) == I1 ? 0 : 1;
+ } else {
+ Idx -= HOpElem;
+ INum = HSV->getOperand(1) == I1 ? 0 : 1;
+ }
+ }
+
+ II[i + numElemL] = std::pair<int, int>(Idx, INum);
+ }
+
+ // We now have an array which tells us from which index of which
+ // input vector each element of the operand comes.
+ VectorType *I1T = cast<VectorType>(I1->getType());
+ unsigned I1Elem = I1T->getNumElements();
+
+ if (!I2) {
+ // In this case there is only one underlying vector input. Check for
+ // the trivial case where we can use the input directly.
+ if (I1Elem == numElem) {
+ bool ElemInOrder = true;
+ for (unsigned i = 0; i < numElem; ++i) {
+ if (II[i].first != (int) i && II[i].first != -1) {
+ ElemInOrder = false;
+ break;
+ }
+ }
+
+ if (ElemInOrder)
+ return I1;
+ }
+
+ // A shuffle is needed.
+ std::vector<Constant *> Mask(numElem);
+ for (unsigned i = 0; i < numElem; ++i) {
+ int Idx = II[i].first;
+ if (Idx == -1)
+ Mask[i] = UndefValue::get(Type::getInt32Ty(Context));
+ else
+ Mask[i] = ConstantInt::get(Type::getInt32Ty(Context), Idx);
+ }
+
+ Instruction *S =
+ new ShuffleVectorInst(I1, UndefValue::get(I1T),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o));
+ S->insertBefore(J);
+ return S;
+ }
+
+ VectorType *I2T = cast<VectorType>(I2->getType());
+ unsigned I2Elem = I2T->getNumElements();
+
+ // This input comes from two distinct vectors. The first step is to
+ // make sure that both vectors are the same length. If not, the
+ // smaller one will need to grow before they can be shuffled together.
+ if (I1Elem < I2Elem) {
+ std::vector<Constant *> Mask(I2Elem);
+ unsigned v = 0;
+ for (; v < I1Elem; ++v)
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ for (; v < I2Elem; ++v)
+ Mask[v] = UndefValue::get(Type::getInt32Ty(Context));
+
+ Instruction *NewI1 =
+ new ShuffleVectorInst(I1, UndefValue::get(I1T),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o, 1));
+ NewI1->insertBefore(J);
+ I1 = NewI1;
+ I1T = I2T;
+ I1Elem = I2Elem;
+ } else if (I1Elem > I2Elem) {
+ std::vector<Constant *> Mask(I1Elem);
+ unsigned v = 0;
+ for (; v < I2Elem; ++v)
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ for (; v < I1Elem; ++v)
+ Mask[v] = UndefValue::get(Type::getInt32Ty(Context));
+
+ Instruction *NewI2 =
+ new ShuffleVectorInst(I2, UndefValue::get(I2T),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o, 1));
+ NewI2->insertBefore(J);
+ I2 = NewI2;
+ I2T = I1T;
+ I2Elem = I1Elem;
+ }
+
+ // Now that both I1 and I2 are the same length we can shuffle them
+ // together (and use the result).
+ std::vector<Constant *> Mask(numElem);
+ for (unsigned v = 0; v < numElem; ++v) {
+ if (II[v].first == -1) {
+ Mask[v] = UndefValue::get(Type::getInt32Ty(Context));
+ } else {
+ int Idx = II[v].first + II[v].second * I1Elem;
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), Idx);
+ }
+ }
+
+ Instruction *NewOp =
+ new ShuffleVectorInst(I1, I2, ConstantVector::get(Mask),
+ getReplacementName(I, true, o));
+ NewOp->insertBefore(J);
+ return NewOp;
+ }
}
- // If these two inputs are the output of another vector instruction,
- // then we should use that output directly. It might be necessary to
- // permute it first. [When pairings are fused recursively, you can
- // end up with cases where a large vector is decomposed into scalars
- // using extractelement instructions, then built into size-2
- // vectors using insertelement and the into larger vectors using
- // shuffles. InstCombine does not simplify all of these cases well,
- // and so we make sure that shuffles are generated here when possible.
- ExtractElementInst *LEE
- = dyn_cast<ExtractElementInst>(L->getOperand(o));
- ExtractElementInst *HEE
- = dyn_cast<ExtractElementInst>(H->getOperand(o));
-
- if (LEE && HEE &&
- LEE->getOperand(0)->getType() == HEE->getOperand(0)->getType()) {
- VectorType *EEType = cast<VectorType>(LEE->getOperand(0)->getType());
- unsigned LowIndx = cast<ConstantInt>(LEE->getOperand(1))->getZExtValue();
- unsigned HighIndx = cast<ConstantInt>(HEE->getOperand(1))->getZExtValue();
- if (LEE->getOperand(0) == HEE->getOperand(0)) {
- if (LowIndx == 0 && HighIndx == 1)
- return LEE->getOperand(0);
-
- std::vector<Constant*> Mask(2);
- Mask[0] = ConstantInt::get(Type::getInt32Ty(Context), LowIndx);
- Mask[1] = ConstantInt::get(Type::getInt32Ty(Context), HighIndx);
-
- Instruction *BV = new ShuffleVectorInst(LEE->getOperand(0),
- UndefValue::get(EEType),
- ConstantVector::get(Mask),
- getReplacementName(I, true, o));
- BV->insertBefore(J);
- return BV;
+ Type *ArgType = ArgTypeL;
+ if (numElemL < numElemH) {
+ if (numElemL == 1 && expandIEChain(Context, I, J, o, HOp, numElemH,
+ ArgTypeL, VArgType, 1)) {
+ // This is another short-circuit case: we're combining a scalar into
+ // a vector that is formed by an IE chain. We've just expanded the IE
+ // chain, now insert the scalar and we're done.
+
+ Instruction *S = InsertElementInst::Create(HOp, LOp, CV0,
+ getReplacementName(I, true, o));
+ S->insertBefore(J);
+ return S;
+ } else if (!expandIEChain(Context, I, J, o, LOp, numElemL, ArgTypeL,
+ ArgTypeH)) {
+ // The two vector inputs to the shuffle must be the same length,
+ // so extend the smaller vector to be the same length as the larger one.
+ Instruction *NLOp;
+ if (numElemL > 1) {
+
+ std::vector<Constant *> Mask(numElemH);
+ unsigned v = 0;
+ for (; v < numElemL; ++v)
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ for (; v < numElemH; ++v)
+ Mask[v] = UndefValue::get(Type::getInt32Ty(Context));
+
+ NLOp = new ShuffleVectorInst(LOp, UndefValue::get(ArgTypeL),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o, 1));
+ } else {
+ NLOp = InsertElementInst::Create(UndefValue::get(ArgTypeH), LOp, CV0,
+ getReplacementName(I, true, o, 1));
+ }
+
+ NLOp->insertBefore(J);
+ LOp = NLOp;
+ }
+
+ ArgType = ArgTypeH;
+ } else if (numElemL > numElemH) {
+ if (numElemH == 1 && expandIEChain(Context, I, J, o, LOp, numElemL,
+ ArgTypeH, VArgType)) {
+ Instruction *S =
+ InsertElementInst::Create(LOp, HOp,
+ ConstantInt::get(Type::getInt32Ty(Context),
+ numElemL),
+ getReplacementName(I, true, o));
+ S->insertBefore(J);
+ return S;
+ } else if (!expandIEChain(Context, I, J, o, HOp, numElemH, ArgTypeH,
+ ArgTypeL)) {
+ Instruction *NHOp;
+ if (numElemH > 1) {
+ std::vector<Constant *> Mask(numElemL);
+ unsigned v = 0;
+ for (; v < numElemH; ++v)
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ for (; v < numElemL; ++v)
+ Mask[v] = UndefValue::get(Type::getInt32Ty(Context));
+
+ NHOp = new ShuffleVectorInst(HOp, UndefValue::get(ArgTypeH),
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o, 1));
+ } else {
+ NHOp = InsertElementInst::Create(UndefValue::get(ArgTypeL), HOp, CV0,
+ getReplacementName(I, true, o, 1));
+ }
+
+ NHOp->insertBefore(J);
+ HOp = NHOp;
}
+ }
- std::vector<Constant*> Mask(2);
- HighIndx += EEType->getNumElements();
- Mask[0] = ConstantInt::get(Type::getInt32Ty(Context), LowIndx);
- Mask[1] = ConstantInt::get(Type::getInt32Ty(Context), HighIndx);
+ if (ArgType->isVectorTy()) {
+ unsigned numElem = cast<VectorType>(VArgType)->getNumElements();
+ std::vector<Constant*> Mask(numElem);
+ for (unsigned v = 0; v < numElem; ++v) {
+ unsigned Idx = v;
+ // If the low vector was expanded, we need to skip the extra
+ // undefined entries.
+ if (v >= numElemL && numElemH > numElemL)
+ Idx += (numElemH - numElemL);
+ Mask[v] = ConstantInt::get(Type::getInt32Ty(Context), Idx);
+ }
- Instruction *BV = new ShuffleVectorInst(LEE->getOperand(0),
- HEE->getOperand(0),
- ConstantVector::get(Mask),
- getReplacementName(I, true, o));
+ Instruction *BV = new ShuffleVectorInst(LOp, HOp,
+ ConstantVector::get(Mask),
+ getReplacementName(I, true, o));
BV->insertBefore(J);
return BV;
}
Instruction *BV1 = InsertElementInst::Create(
- UndefValue::get(VArgType),
- L->getOperand(o), CV0,
+ UndefValue::get(VArgType), LOp, CV0,
getReplacementName(I, true, o, 1));
BV1->insertBefore(I);
- Instruction *BV2 = InsertElementInst::Create(BV1, H->getOperand(o),
- CV1,
+ Instruction *BV2 = InsertElementInst::Create(BV1, HOp, CV1,
getReplacementName(I, true, o, 2));
BV2->insertBefore(J);
return BV2;
void BBVectorize::getReplacementInputsForPair(LLVMContext& Context,
Instruction *I, Instruction *J,
SmallVector<Value *, 3> &ReplacedOperands,
- bool &FlipMemInputs) {
- FlipMemInputs = false;
+ bool FlipMemInputs) {
unsigned NumOperands = I->getNumOperands();
for (unsigned p = 0, o = NumOperands-1; p < NumOperands; ++p, --o) {
ReplacedOperands[o] = getReplacementPointerInput(Context, I, J, o,
FlipMemInputs);
continue;
- } else if (isa<CallInst>(I) && o == NumOperands-1) {
+ } else if (isa<CallInst>(I)) {
Function *F = cast<CallInst>(I)->getCalledFunction();
unsigned IID = F->getIntrinsicID();
- BasicBlock &BB = *I->getParent();
+ if (o == NumOperands-1) {
+ BasicBlock &BB = *I->getParent();
- Module *M = BB.getParent()->getParent();
- Type *ArgType = I->getType();
- Type *VArgType = getVecTypeForPair(ArgType);
+ Module *M = BB.getParent()->getParent();
+ Type *ArgTypeI = I->getType();
+ Type *ArgTypeJ = J->getType();
+ Type *VArgType = getVecTypeForPair(ArgTypeI, ArgTypeJ);
- // FIXME: is it safe to do this here?
- ReplacedOperands[o] = Intrinsic::getDeclaration(M,
- (Intrinsic::ID) IID, VArgType);
- continue;
+ ReplacedOperands[o] = Intrinsic::getDeclaration(M,
+ (Intrinsic::ID) IID, VArgType);
+ continue;
+ } else if (IID == Intrinsic::powi && o == 1) {
+ // The second argument of powi is a single integer and we've already
+ // checked that both arguments are equal. As a result, we just keep
+ // I's second argument.
+ ReplacedOperands[o] = I->getOperand(o);
+ continue;
+ }
} else if (isa<ShuffleVectorInst>(I) && o == NumOperands-1) {
ReplacedOperands[o] = getReplacementShuffleMask(Context, I, J);
continue;
Instruction *J, Instruction *K,
Instruction *&InsertionPt,
Instruction *&K1, Instruction *&K2,
- bool &FlipMemInputs) {
- Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
- Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), 1);
-
+ bool FlipMemInputs) {
if (isa<StoreInst>(I)) {
AA->replaceWithNewValue(I, K);
AA->replaceWithNewValue(J, K);
} else {
Type *IType = I->getType();
- Type *VType = getVecTypeForPair(IType);
+ Type *JType = J->getType();
+
+ VectorType *VType = getVecTypeForPair(IType, JType);
+ unsigned numElem = VType->getNumElements();
+
+ unsigned numElemI, numElemJ;
+ if (IType->isVectorTy())
+ numElemI = cast<VectorType>(IType)->getNumElements();
+ else
+ numElemI = 1;
+
+ if (JType->isVectorTy())
+ numElemJ = cast<VectorType>(JType)->getNumElements();
+ else
+ numElemJ = 1;
if (IType->isVectorTy()) {
- unsigned numElem = cast<VectorType>(IType)->getNumElements();
- std::vector<Constant*> Mask1(numElem), Mask2(numElem);
- for (unsigned v = 0; v < numElem; ++v) {
- Mask1[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
- Mask2[v] = ConstantInt::get(Type::getInt32Ty(Context), numElem+v);
- }
+ std::vector<Constant*> Mask1(numElemI), Mask2(numElemI);
+ for (unsigned v = 0; v < numElemI; ++v) {
+ Mask1[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ Mask2[v] = ConstantInt::get(Type::getInt32Ty(Context), numElemJ+v);
+ }
- K1 = new ShuffleVectorInst(K, UndefValue::get(VType),
- ConstantVector::get(
- FlipMemInputs ? Mask2 : Mask1),
- getReplacementName(K, false, 1));
- K2 = new ShuffleVectorInst(K, UndefValue::get(VType),
- ConstantVector::get(
- FlipMemInputs ? Mask1 : Mask2),
- getReplacementName(K, false, 2));
+ K1 = new ShuffleVectorInst(K, UndefValue::get(VType),
+ ConstantVector::get(
+ FlipMemInputs ? Mask2 : Mask1),
+ getReplacementName(K, false, 1));
} else {
+ Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), numElem-1);
K1 = ExtractElementInst::Create(K, FlipMemInputs ? CV1 : CV0,
getReplacementName(K, false, 1));
+ }
+
+ if (JType->isVectorTy()) {
+ std::vector<Constant*> Mask1(numElemJ), Mask2(numElemJ);
+ for (unsigned v = 0; v < numElemJ; ++v) {
+ Mask1[v] = ConstantInt::get(Type::getInt32Ty(Context), v);
+ Mask2[v] = ConstantInt::get(Type::getInt32Ty(Context), numElemI+v);
+ }
+
+ K2 = new ShuffleVectorInst(K, UndefValue::get(VType),
+ ConstantVector::get(
+ FlipMemInputs ? Mask1 : Mask2),
+ getReplacementName(K, false, 2));
+ } else {
+ Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), numElem-1);
K2 = ExtractElementInst::Create(K, FlipMemInputs ? CV0 : CV1,
getReplacementName(K, false, 2));
}
std::multimap<Value *, Value *> &LoadMoveSet,
Instruction *I, Instruction *J) {
// Skip to the first instruction past I.
- BasicBlock::iterator L = BB.begin();
- for (; cast<Instruction>(L) != I; ++L);
- ++L;
+ BasicBlock::iterator L = llvm::next(BasicBlock::iterator(I));
DenseSet<Value *> Users;
AliasSetTracker WriteSet(*AA);
Instruction *&InsertionPt,
Instruction *I, Instruction *J) {
// Skip to the first instruction past I.
- BasicBlock::iterator L = BB.begin();
- for (; cast<Instruction>(L) != I; ++L);
- ++L;
+ BasicBlock::iterator L = llvm::next(BasicBlock::iterator(I));
DenseSet<Value *> Users;
AliasSetTracker WriteSet(*AA);
std::multimap<Value *, Value *> &LoadMoveSet,
Instruction *I) {
// Skip to the first instruction past I.
- BasicBlock::iterator L = BB.begin();
- for (; cast<Instruction>(L) != I; ++L);
- ++L;
+ BasicBlock::iterator L = llvm::next(BasicBlock::iterator(I));
DenseSet<Value *> Users;
AliasSetTracker WriteSet(*AA);
}
}
+ // As with the aliasing information, SCEV can also change because of
+ // vectorization. This information is used to compute relative pointer
+ // offsets; the necessary information will be cached here prior to
+ // fusion.
+ // For each chosen load/store pair, the member with the lower pointer
+ // offset is recorded in LowPtrInsts; fusion later uses this set to decide
+ // whether a pair's memory inputs must be flipped.
+ void BBVectorize::collectPtrInfo(std::vector<Value *> &PairableInsts,
+ DenseMap<Value *, Value *> &ChosenPairs,
+ DenseSet<Value *> &LowPtrInsts) {
+ for (std::vector<Value *>::iterator PI = PairableInsts.begin(),
+ PIE = PairableInsts.end(); PI != PIE; ++PI) {
+ DenseMap<Value *, Value *>::iterator P = ChosenPairs.find(*PI);
+ if (P == ChosenPairs.end()) continue;
+
+ Instruction *I = cast<Instruction>(P->first);
+ Instruction *J = cast<Instruction>(P->second);
+
+ // Only memory operations carry pointer information.
+ if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
+ continue;
+
+ Value *IPtr, *JPtr;
+ unsigned IAlignment, JAlignment;
+ int64_t OffsetInElmts;
+ // Chosen memory pairs are presumably adjacent accesses, so the offset
+ // analysis must succeed here with a relative offset of exactly one
+ // element -- anything else indicates a bug upstream.
+ if (!getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment,
+ OffsetInElmts) || abs64(OffsetInElmts) != 1)
+ llvm_unreachable("Pre-fusion pointer analysis failed");
+
+ Value *LowPI = (OffsetInElmts > 0) ? I : J;
+ LowPtrInsts.insert(LowPI);
+ }
+ }
+
+ // When the first instruction in each pair is cloned, it will inherit its
+ // parent's metadata. This metadata must be combined with that of the other
+ // instruction in a safe way.
+ void BBVectorize::combineMetadata(Instruction *K, const Instruction *J) {
+ SmallVector<std::pair<unsigned, MDNode*>, 4> Metadata;
+ K->getAllMetadataOtherThanDebugLoc(Metadata);
+ for (unsigned i = 0, n = Metadata.size(); i < n; ++i) {
+ unsigned Kind = Metadata[i].first;
+ MDNode *JMD = J->getMetadata(Kind);
+ MDNode *KMD = Metadata[i].second;
+
+ switch (Kind) {
+ default:
+ // Conservatively drop metadata kinds we do not know how to merge.
+ K->setMetadata(Kind, 0); // Remove unknown metadata
+ break;
+ case LLVMContext::MD_tbaa:
+ // Keep the most generic TBAA tag valid for both instructions.
+ K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
+ break;
+ case LLVMContext::MD_fpmath:
+ // Keep the most generic (least precise) fpmath requirement.
+ K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
+ break;
+ }
+ }
+ }
+
// This function fuses the chosen instruction pairs into vector instructions,
// taking care preserve any needed scalar outputs and, then, it reorders the
// remaining instructions as needed (users of the first member of the pair
std::multimap<Value *, Value *> LoadMoveSet;
collectLoadMoveSet(BB, PairableInsts, ChosenPairs, LoadMoveSet);
+ DenseSet<Value *> LowPtrInsts;
+ collectPtrInfo(PairableInsts, ChosenPairs, LowPtrInsts);
+
DEBUG(dbgs() << "BBV: initial: \n" << BB << "\n");
for (BasicBlock::iterator PI = BB.getFirstInsertionPt(); PI != BB.end();) {
continue;
}
- bool FlipMemInputs;
+ bool FlipMemInputs = false;
+ if (isa<LoadInst>(I) || isa<StoreInst>(I))
+ FlipMemInputs = (LowPtrInsts.find(I) == LowPtrInsts.end());
+
unsigned NumOperands = I->getNumOperands();
SmallVector<Value *, 3> ReplacedOperands(NumOperands);
getReplacementInputsForPair(Context, I, J, ReplacedOperands,
if (I->hasName()) K->takeName(I);
if (!isa<StoreInst>(K))
- K->mutateType(getVecTypeForPair(I->getType()));
+ K->mutateType(getVecTypeForPair(I->getType(), J->getType()));
+
+ combineMetadata(K, J);
for (unsigned o = 0; o < NumOperands; ++o)
K->setOperand(o, ReplacedOperands[o]);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(BBVectorize, BBV_NAME, bb_vectorize_name, false, false)
-BasicBlockPass *llvm::createBBVectorizePass() {
- return new BBVectorize();
+// Create a new basic-block vectorization pass using configuration C.
+BasicBlockPass *llvm::createBBVectorizePass(const VectorizeConfig &C) {
+ return new BBVectorize(C);
}
+// Convenience entry point: vectorize the single basic block BB using the
+// analyses available from pass P and the configuration C. Returns the
+// result of BBVectorize::vectorizeBB (presumably whether BB was changed --
+// confirm against vectorizeBB's contract, which is outside this patch).
+bool
+llvm::vectorizeBasicBlock(Pass *P, BasicBlock &BB, const VectorizeConfig &C) {
+ BBVectorize BBVectorizer(P, C);
+ return BBVectorizer.vectorizeBB(BB);
+}
+
+//===----------------------------------------------------------------------===//
+// Default-construct a VectorizeConfig from the command-line options; each
+// '::Name' refers to the file-level cl::opt global of the same name
+// declared near the top of this file.
+VectorizeConfig::VectorizeConfig() {
+ VectorBits = ::VectorBits;
+ VectorizeBools = !::NoBools;
+ VectorizeInts = !::NoInts;
+ VectorizeFloats = !::NoFloats;
+ VectorizePointers = !::NoPointers;
+ VectorizeCasts = !::NoCasts;
+ VectorizeMath = !::NoMath;
+ VectorizeFMA = !::NoFMA;
+ VectorizeSelect = !::NoSelect;
+ VectorizeCmp = !::NoCmp;
+ VectorizeGEP = !::NoGEP;
+ VectorizeMemOps = !::NoMemOps;
+ AlignedOnly = ::AlignedOnly;
+ ReqChainDepth= ::ReqChainDepth;
+ SearchLimit = ::SearchLimit;
+ MaxCandPairsForCycleCheck = ::MaxCandPairsForCycleCheck;
+ SplatBreaksChain = ::SplatBreaksChain;
+ MaxInsts = ::MaxInsts;
+ MaxIter = ::MaxIter;
+ Pow2LenOnly = ::Pow2LenOnly;
+ NoMemOpBoost = ::NoMemOpBoost;
+ FastDep = ::FastDep;
+}