1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive
10 // stores that can be put together into vector-stores. Next, it attempts to
11 // construct vectorizable tree using the use-def chains. If a profitable tree
12 // was found, the SLP vectorizer performs vectorization on the tree.
14 // The pass is inspired by the work described in the paper:
15 // "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
17 //===----------------------------------------------------------------------===//
18 #include "llvm/Transforms/Vectorize.h"
19 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/PostOrderIterator.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumptionCache.h"
26 #include "llvm/Analysis/CodeMetrics.h"
27 #include "llvm/Analysis/LoopInfo.h"
28 #include "llvm/Analysis/ScalarEvolution.h"
29 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
30 #include "llvm/Analysis/TargetTransformInfo.h"
31 #include "llvm/Analysis/ValueTracking.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/Dominators.h"
34 #include "llvm/IR/IRBuilder.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/Module.h"
38 #include "llvm/IR/NoFolder.h"
39 #include "llvm/IR/Type.h"
40 #include "llvm/IR/Value.h"
41 #include "llvm/IR/Verifier.h"
42 #include "llvm/Pass.h"
43 #include "llvm/Support/CommandLine.h"
44 #include "llvm/Support/Debug.h"
45 #include "llvm/Support/raw_ostream.h"
46 #include "llvm/Analysis/VectorUtils.h"
53 #define SV_NAME "slp-vectorizer"
54 #define DEBUG_TYPE "SLP"
56 STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
59 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
60 cl::desc("Only vectorize if you gain more than this "
64 ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
65 cl::desc("Attempt to vectorize horizontal reductions"));
67 static cl::opt<bool> ShouldStartVectorizeHorAtStore(
68 "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
70 "Attempt to vectorize horizontal reductions feeding into a store"));
73 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
74 cl::desc("Attempt to vectorize for this register size in bits"));
76 /// Limits the size of scheduling regions in a block.
77 /// It avoid long compile times for _very_ large blocks where vector
78 /// instructions are spread over a wide range.
79 /// This limit is way higher than needed by real-world functions.
81 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
82 cl::desc("Limit the size of the SLP scheduling region per block"));
86 // FIXME: Set this via cl::opt to allow overriding.
87 static const unsigned MinVecRegSize = 128;
89 static const unsigned RecursionMaxDepth = 12;
91 // Limit the number of alias checks. The limit is chosen so that
92 // it has no negative effect on the llvm benchmarks.
93 static const unsigned AliasedCheckLimit = 10;
95 // Another limit for the alias checks: The maximum distance between load/store
96 // instructions where alias checks are done.
97 // This limit is useful for very large basic blocks.
98 static const unsigned MaxMemDepDistance = 160;
100 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
101 /// regions to be handled.
102 static const int MinScheduleRegionSize = 16;
104 /// \brief Predicate for the element types that the SLP vectorizer supports.
106 /// The most important thing to filter here are types which are invalid in LLVM
107 /// vectors. We also filter target specific types which have absolutely no
108 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
109 /// avoids spending time checking the cost model and realizing that they will
110 /// be inevitably scalarized.
111 static bool isValidElementType(Type *Ty) {
112 return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
113 !Ty->isPPC_FP128Ty();
116 /// \returns the parent basic block if all of the instructions in \p VL
117 /// are in the same block or null otherwise.
118 static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
119 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
122 BasicBlock *BB = I0->getParent();
123 for (int i = 1, e = VL.size(); i < e; i++) {
124 Instruction *I = dyn_cast<Instruction>(VL[i]);
128 if (BB != I->getParent())
134 /// \returns True if all of the values in \p VL are constants.
135 static bool allConstant(ArrayRef<Value *> VL) {
136 for (unsigned i = 0, e = VL.size(); i < e; ++i)
137 if (!isa<Constant>(VL[i]))
142 /// \returns True if all of the values in \p VL are identical.
143 static bool isSplat(ArrayRef<Value *> VL) {
144 for (unsigned i = 1, e = VL.size(); i < e; ++i)
150 ///\returns Opcode that can be clubbed with \p Op to create an alternate
151 /// sequence which can later be merged as a ShuffleVector instruction.
152 static unsigned getAltOpcode(unsigned Op) {
154 case Instruction::FAdd:
155 return Instruction::FSub;
156 case Instruction::FSub:
157 return Instruction::FAdd;
158 case Instruction::Add:
159 return Instruction::Sub;
160 case Instruction::Sub:
161 return Instruction::Add;
167 ///\returns bool representing if Opcode \p Op can be part
168 /// of an alternate sequence which can later be merged as
169 /// a ShuffleVector instruction.
170 static bool canCombineAsAltInst(unsigned Op) {
171 return Op == Instruction::FAdd || Op == Instruction::FSub ||
172 Op == Instruction::Sub || Op == Instruction::Add;
175 /// \returns ShuffleVector instruction if instructions in \p VL have
176 /// alternate fadd,fsub / fsub,fadd/add,sub/sub,add sequence.
177 /// (i.e. e.g. opcodes of fadd,fsub,fadd,fsub...)
178 static unsigned isAltInst(ArrayRef<Value *> VL) {
179 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
180 unsigned Opcode = I0->getOpcode();
181 unsigned AltOpcode = getAltOpcode(Opcode);
182 for (int i = 1, e = VL.size(); i < e; i++) {
183 Instruction *I = dyn_cast<Instruction>(VL[i]);
184 if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
187 return Instruction::ShuffleVector;
190 /// \returns The opcode if all of the Instructions in \p VL have the same
192 static unsigned getSameOpcode(ArrayRef<Value *> VL) {
193 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
196 unsigned Opcode = I0->getOpcode();
197 for (int i = 1, e = VL.size(); i < e; i++) {
198 Instruction *I = dyn_cast<Instruction>(VL[i]);
199 if (!I || Opcode != I->getOpcode()) {
200 if (canCombineAsAltInst(Opcode) && i == 1)
201 return isAltInst(VL);
208 /// Get the intersection (logical and) of all of the potential IR flags
209 /// of each scalar operation (VL) that will be converted into a vector (I).
210 /// Flag set: NSW, NUW, exact, and all of fast-math.
211 static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) {
212 if (auto *VecOp = dyn_cast<BinaryOperator>(I)) {
213 if (auto *Intersection = dyn_cast<BinaryOperator>(VL[0])) {
214 // Intersection is initialized to the 0th scalar,
215 // so start counting from index '1'.
216 for (int i = 1, e = VL.size(); i < e; ++i) {
217 if (auto *Scalar = dyn_cast<BinaryOperator>(VL[i]))
218 Intersection->andIRFlags(Scalar);
220 VecOp->copyIRFlags(Intersection);
225 /// \returns \p I after propagating metadata from \p VL.
226 static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
227 Instruction *I0 = cast<Instruction>(VL[0]);
228 SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
229 I0->getAllMetadataOtherThanDebugLoc(Metadata);
231 for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
232 unsigned Kind = Metadata[i].first;
233 MDNode *MD = Metadata[i].second;
235 for (int i = 1, e = VL.size(); MD && i != e; i++) {
236 Instruction *I = cast<Instruction>(VL[i]);
237 MDNode *IMD = I->getMetadata(Kind);
241 MD = nullptr; // Remove unknown metadata
243 case LLVMContext::MD_tbaa:
244 MD = MDNode::getMostGenericTBAA(MD, IMD);
246 case LLVMContext::MD_alias_scope:
247 MD = MDNode::getMostGenericAliasScope(MD, IMD);
249 case LLVMContext::MD_noalias:
250 MD = MDNode::intersect(MD, IMD);
252 case LLVMContext::MD_fpmath:
253 MD = MDNode::getMostGenericFPMath(MD, IMD);
255 case LLVMContext::MD_nontemporal:
256 MD = MDNode::intersect(MD, IMD);
260 I->setMetadata(Kind, MD);
265 /// \returns The type that all of the values in \p VL have or null if there
266 /// are different types.
267 static Type* getSameType(ArrayRef<Value *> VL) {
268 Type *Ty = VL[0]->getType();
269 for (int i = 1, e = VL.size(); i < e; i++)
270 if (VL[i]->getType() != Ty)
276 /// \returns True if the ExtractElement instructions in VL can be vectorized
277 /// to use the original vector.
278 static bool CanReuseExtract(ArrayRef<Value *> VL) {
279 assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
280 // Check if all of the extracts come from the same vector and from the
283 ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
284 Value *Vec = E0->getOperand(0);
286 // We have to extract from the same vector type.
287 unsigned NElts = Vec->getType()->getVectorNumElements();
289 if (NElts != VL.size())
292 // Check that all of the indices extract from the correct offset.
293 ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
294 if (!CI || CI->getZExtValue())
297 for (unsigned i = 1, e = VL.size(); i < e; ++i) {
298 ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
299 ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));
301 if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
308 /// \returns True if in-tree use also needs extract. This refers to
309 /// possible scalar operand in vectorized instruction.
310 static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
311 TargetLibraryInfo *TLI) {
313 unsigned Opcode = UserInst->getOpcode();
315 case Instruction::Load: {
316 LoadInst *LI = cast<LoadInst>(UserInst);
317 return (LI->getPointerOperand() == Scalar);
319 case Instruction::Store: {
320 StoreInst *SI = cast<StoreInst>(UserInst);
321 return (SI->getPointerOperand() == Scalar);
323 case Instruction::Call: {
324 CallInst *CI = cast<CallInst>(UserInst);
325 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
326 if (hasVectorInstrinsicScalarOpd(ID, 1)) {
327 return (CI->getArgOperand(1) == Scalar);
335 /// \returns the AA location that is being access by the instruction.
336 static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
337 if (StoreInst *SI = dyn_cast<StoreInst>(I))
338 return MemoryLocation::get(SI);
339 if (LoadInst *LI = dyn_cast<LoadInst>(I))
340 return MemoryLocation::get(LI);
341 return MemoryLocation();
344 /// \returns True if the instruction is not a volatile or atomic load/store.
345 static bool isSimple(Instruction *I) {
346 if (LoadInst *LI = dyn_cast<LoadInst>(I))
347 return LI->isSimple();
348 if (StoreInst *SI = dyn_cast<StoreInst>(I))
349 return SI->isSimple();
350 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
351 return !MI->isVolatile();
355 /// Bottom Up SLP Vectorizer.
358 typedef SmallVector<Value *, 8> ValueList;
359 typedef SmallVector<Instruction *, 16> InstrList;
360 typedef SmallPtrSet<Value *, 16> ValueSet;
361 typedef SmallVector<StoreInst *, 8> StoreList;
363 BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
364 TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
365 DominatorTree *Dt, AssumptionCache *AC)
366 : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
367 SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt),
368 Builder(Se->getContext()) {
369 CodeMetrics::collectEphemeralValues(F, AC, EphValues);
372 /// \brief Vectorize the tree that starts with the elements in \p VL.
373 /// Returns the vectorized root.
374 Value *vectorizeTree();
376 /// \returns the cost incurred by unwanted spills and fills, caused by
377 /// holding live values over call sites.
380 /// \returns the vectorization cost of the subtree that starts at \p VL.
381 /// A negative number means that this is profitable.
384 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
385 /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
386 void buildTree(ArrayRef<Value *> Roots,
387 ArrayRef<Value *> UserIgnoreLst = None);
389 /// Clear the internal data structures that are created by 'buildTree'.
391 VectorizableTree.clear();
392 ScalarToTreeEntry.clear();
394 ExternalUses.clear();
395 NumLoadsWantToKeepOrder = 0;
396 NumLoadsWantToChangeOrder = 0;
397 for (auto &Iter : BlocksSchedules) {
398 BlockScheduling *BS = Iter.second.get();
403 /// \returns true if the memory operations A and B are consecutive.
404 bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL);
406 /// \brief Perform LICM and CSE on the newly generated gather sequences.
407 void optimizeGatherSequence();
409 /// \returns true if it is beneficial to reverse the vector order.
410 bool shouldReorder() const {
411 return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
417 /// \returns the cost of the vectorizable entry.
418 int getEntryCost(TreeEntry *E);
420 /// This is the recursive part of buildTree.
421 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);
423 /// Vectorize a single entry in the tree.
424 Value *vectorizeTree(TreeEntry *E);
426 /// Vectorize a single entry in the tree, starting in \p VL.
427 Value *vectorizeTree(ArrayRef<Value *> VL);
429 /// \returns the pointer to the vectorized value if \p VL is already
430 /// vectorized, or NULL. They may happen in cycles.
431 Value *alreadyVectorized(ArrayRef<Value *> VL) const;
433 /// \brief Take the pointer operand from the Load/Store instruction.
434 /// \returns NULL if this is not a valid Load/Store instruction.
435 static Value *getPointerOperand(Value *I);
437 /// \brief Take the address space operand from the Load/Store instruction.
438 /// \returns -1 if this is not a valid Load/Store instruction.
439 static unsigned getAddressSpaceOperand(Value *I);
441 /// \returns the scalarization cost for this type. Scalarization in this
442 /// context means the creation of vectors from a group of scalars.
443 int getGatherCost(Type *Ty);
445 /// \returns the scalarization cost for this list of values. Assuming that
446 /// this subtree gets vectorized, we may need to extract the values from the
447 /// roots. This method calculates the cost of extracting the values.
448 int getGatherCost(ArrayRef<Value *> VL);
450 /// \brief Set the Builder insert point to one after the last instruction in
452 void setInsertPointAfterBundle(ArrayRef<Value *> VL);
454 /// \returns a vector from a collection of scalars in \p VL.
455 Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);
457 /// \returns whether the VectorizableTree is fully vectorizable and will
458 /// be beneficial even the tree height is tiny.
459 bool isFullyVectorizableTinyTree();
461 /// \reorder commutative operands in alt shuffle if they result in
463 void reorderAltShuffleOperands(ArrayRef<Value *> VL,
464 SmallVectorImpl<Value *> &Left,
465 SmallVectorImpl<Value *> &Right);
466 /// \reorder commutative operands to get better probability of
467 /// generating vectorized code.
468 void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
469 SmallVectorImpl<Value *> &Left,
470 SmallVectorImpl<Value *> &Right);
472 TreeEntry() : Scalars(), VectorizedValue(nullptr),
475 /// \returns true if the scalars in VL are equal to this entry.
476 bool isSame(ArrayRef<Value *> VL) const {
477 assert(VL.size() == Scalars.size() && "Invalid size");
478 return std::equal(VL.begin(), VL.end(), Scalars.begin());
481 /// A vector of scalars.
484 /// The Scalars are vectorized into this value. It is initialized to Null.
485 Value *VectorizedValue;
487 /// Do we need to gather this sequence ?
491 /// Create a new VectorizableTree entry.
492 TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
493 VectorizableTree.emplace_back();
494 int idx = VectorizableTree.size() - 1;
495 TreeEntry *Last = &VectorizableTree[idx];
496 Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
497 Last->NeedToGather = !Vectorized;
499 for (int i = 0, e = VL.size(); i != e; ++i) {
500 assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
501 ScalarToTreeEntry[VL[i]] = idx;
504 MustGather.insert(VL.begin(), VL.end());
509 /// -- Vectorization State --
510 /// Holds all of the tree entries.
511 std::vector<TreeEntry> VectorizableTree;
513 /// Maps a specific scalar to its tree entry.
514 SmallDenseMap<Value*, int> ScalarToTreeEntry;
516 /// A list of scalars that we found that we need to keep as scalars.
519 /// This POD struct describes one external user in the vectorized tree.
520 struct ExternalUser {
521 ExternalUser (Value *S, llvm::User *U, int L) :
522 Scalar(S), User(U), Lane(L){}
523 // Which scalar in our function.
525 // Which user that uses the scalar.
527 // Which lane does the scalar belong to.
530 typedef SmallVector<ExternalUser, 16> UserList;
532 /// Checks if two instructions may access the same memory.
534 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
535 /// is invariant in the calling loop.
536 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
537 Instruction *Inst2) {
539 // First check if the result is already in the cache.
540 AliasCacheKey key = std::make_pair(Inst1, Inst2);
541 Optional<bool> &result = AliasCache[key];
542 if (result.hasValue()) {
543 return result.getValue();
545 MemoryLocation Loc2 = getLocation(Inst2, AA);
547 if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
548 // Do the alias check.
549 aliased = AA->alias(Loc1, Loc2);
551 // Store the result in the cache.
556 typedef std::pair<Instruction *, Instruction *> AliasCacheKey;
558 /// Cache for alias results.
559 /// TODO: consider moving this to the AliasAnalysis itself.
560 DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
562 /// Removes an instruction from its block and eventually deletes it.
563 /// It's like Instruction::eraseFromParent() except that the actual deletion
564 /// is delayed until BoUpSLP is destructed.
565 /// This is required to ensure that there are no incorrect collisions in the
566 /// AliasCache, which can happen if a new instruction is allocated at the
567 /// same address as a previously deleted instruction.
568 void eraseInstruction(Instruction *I) {
569 I->removeFromParent();
570 I->dropAllReferences();
571 DeletedInstructions.push_back(std::unique_ptr<Instruction>(I));
574 /// Temporary store for deleted instructions. Instructions will be deleted
575 /// eventually when the BoUpSLP is destructed.
576 SmallVector<std::unique_ptr<Instruction>, 8> DeletedInstructions;
578 /// A list of values that need to extracted out of the tree.
579 /// This list holds pairs of (Internal Scalar : External User).
580 UserList ExternalUses;
582 /// Values used only by @llvm.assume calls.
583 SmallPtrSet<const Value *, 32> EphValues;
585 /// Holds all of the instructions that we gathered.
586 SetVector<Instruction *> GatherSeq;
587 /// A list of blocks that we are going to CSE.
588 SetVector<BasicBlock *> CSEBlocks;
590 /// Contains all scheduling relevant data for an instruction.
591 /// A ScheduleData either represents a single instruction or a member of an
592 /// instruction bundle (= a group of instructions which is combined into a
593 /// vector instruction).
594 struct ScheduleData {
596 // The initial value for the dependency counters. It means that the
597 // dependencies are not calculated yet.
598 enum { InvalidDeps = -1 };
601 : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
602 NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
603 Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
604 UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}
606 void init(int BlockSchedulingRegionID) {
607 FirstInBundle = this;
608 NextInBundle = nullptr;
609 NextLoadStore = nullptr;
611 SchedulingRegionID = BlockSchedulingRegionID;
612 UnscheduledDepsInBundle = UnscheduledDeps;
616 /// Returns true if the dependency information has been calculated.
617 bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
619 /// Returns true for single instructions and for bundle representatives
620 /// (= the head of a bundle).
621 bool isSchedulingEntity() const { return FirstInBundle == this; }
623 /// Returns true if it represents an instruction bundle and not only a
624 /// single instruction.
625 bool isPartOfBundle() const {
626 return NextInBundle != nullptr || FirstInBundle != this;
629 /// Returns true if it is ready for scheduling, i.e. it has no more
630 /// unscheduled depending instructions/bundles.
631 bool isReady() const {
632 assert(isSchedulingEntity() &&
633 "can't consider non-scheduling entity for ready list");
634 return UnscheduledDepsInBundle == 0 && !IsScheduled;
637 /// Modifies the number of unscheduled dependencies, also updating it for
638 /// the whole bundle.
639 int incrementUnscheduledDeps(int Incr) {
640 UnscheduledDeps += Incr;
641 return FirstInBundle->UnscheduledDepsInBundle += Incr;
644 /// Sets the number of unscheduled dependencies to the number of
646 void resetUnscheduledDeps() {
647 incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
650 /// Clears all dependency information.
651 void clearDependencies() {
652 Dependencies = InvalidDeps;
653 resetUnscheduledDeps();
654 MemoryDependencies.clear();
657 void dump(raw_ostream &os) const {
658 if (!isSchedulingEntity()) {
660 } else if (NextInBundle) {
662 ScheduleData *SD = NextInBundle;
664 os << ';' << *SD->Inst;
665 SD = SD->NextInBundle;
675 /// Points to the head in an instruction bundle (and always to this for
676 /// single instructions).
677 ScheduleData *FirstInBundle;
679 /// Single linked list of all instructions in a bundle. Null if it is a
680 /// single instruction.
681 ScheduleData *NextInBundle;
683 /// Single linked list of all memory instructions (e.g. load, store, call)
684 /// in the block - until the end of the scheduling region.
685 ScheduleData *NextLoadStore;
687 /// The dependent memory instructions.
688 /// This list is derived on demand in calculateDependencies().
689 SmallVector<ScheduleData *, 4> MemoryDependencies;
691 /// This ScheduleData is in the current scheduling region if this matches
692 /// the current SchedulingRegionID of BlockScheduling.
693 int SchedulingRegionID;
695 /// Used for getting a "good" final ordering of instructions.
696 int SchedulingPriority;
698 /// The number of dependencies. Constitutes of the number of users of the
699 /// instruction plus the number of dependent memory instructions (if any).
700 /// This value is calculated on demand.
701 /// If InvalidDeps, the number of dependencies is not calculated yet.
705 /// The number of dependencies minus the number of dependencies of scheduled
706 /// instructions. As soon as this is zero, the instruction/bundle gets ready
708 /// Note that this is negative as long as Dependencies is not calculated.
711 /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
712 /// single instructions.
713 int UnscheduledDepsInBundle;
715 /// True if this instruction is scheduled (or considered as scheduled in the
721 friend raw_ostream &operator<<(raw_ostream &os,
722 const BoUpSLP::ScheduleData &SD);
725 /// Contains all scheduling data for a basic block.
727 struct BlockScheduling {
729 BlockScheduling(BasicBlock *BB)
730 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
731 ScheduleStart(nullptr), ScheduleEnd(nullptr),
732 FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
733 ScheduleRegionSize(0),
734 ScheduleRegionSizeLimit(ScheduleRegionSizeBudget),
735 // Make sure that the initial SchedulingRegionID is greater than the
736 // initial SchedulingRegionID in ScheduleData (which is 0).
737 SchedulingRegionID(1) {}
741 ScheduleStart = nullptr;
742 ScheduleEnd = nullptr;
743 FirstLoadStoreInRegion = nullptr;
744 LastLoadStoreInRegion = nullptr;
746 // Reduce the maximum schedule region size by the size of the
747 // previous scheduling run.
748 ScheduleRegionSizeLimit -= ScheduleRegionSize;
749 if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
750 ScheduleRegionSizeLimit = MinScheduleRegionSize;
751 ScheduleRegionSize = 0;
753 // Make a new scheduling region, i.e. all existing ScheduleData is not
754 // in the new region yet.
755 ++SchedulingRegionID;
758 ScheduleData *getScheduleData(Value *V) {
759 ScheduleData *SD = ScheduleDataMap[V];
760 if (SD && SD->SchedulingRegionID == SchedulingRegionID)
765 bool isInSchedulingRegion(ScheduleData *SD) {
766 return SD->SchedulingRegionID == SchedulingRegionID;
769 /// Marks an instruction as scheduled and puts all dependent ready
770 /// instructions into the ready-list.
771 template <typename ReadyListType>
772 void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
773 SD->IsScheduled = true;
774 DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
776 ScheduleData *BundleMember = SD;
777 while (BundleMember) {
778 // Handle the def-use chain dependencies.
779 for (Use &U : BundleMember->Inst->operands()) {
780 ScheduleData *OpDef = getScheduleData(U.get());
781 if (OpDef && OpDef->hasValidDependencies() &&
782 OpDef->incrementUnscheduledDeps(-1) == 0) {
783 // There are no more unscheduled dependencies after decrementing,
784 // so we can put the dependent instruction into the ready list.
785 ScheduleData *DepBundle = OpDef->FirstInBundle;
786 assert(!DepBundle->IsScheduled &&
787 "already scheduled bundle gets ready");
788 ReadyList.insert(DepBundle);
789 DEBUG(dbgs() << "SLP: gets ready (def): " << *DepBundle << "\n");
792 // Handle the memory dependencies.
793 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
794 if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
795 // There are no more unscheduled dependencies after decrementing,
796 // so we can put the dependent instruction into the ready list.
797 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
798 assert(!DepBundle->IsScheduled &&
799 "already scheduled bundle gets ready");
800 ReadyList.insert(DepBundle);
801 DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle << "\n");
804 BundleMember = BundleMember->NextInBundle;
808 /// Put all instructions into the ReadyList which are ready for scheduling.
809 template <typename ReadyListType>
810 void initialFillReadyList(ReadyListType &ReadyList) {
811 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
812 ScheduleData *SD = getScheduleData(I);
813 if (SD->isSchedulingEntity() && SD->isReady()) {
814 ReadyList.insert(SD);
815 DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
820 /// Checks if a bundle of instructions can be scheduled, i.e. has no
821 /// cyclic dependencies. This is only a dry-run, no instructions are
822 /// actually moved at this stage.
823 bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP);
825 /// Un-bundles a group of instructions.
826 void cancelScheduling(ArrayRef<Value *> VL);
828 /// Extends the scheduling region so that V is inside the region.
829 /// \returns true if the region size is within the limit.
830 bool extendSchedulingRegion(Value *V);
832 /// Initialize the ScheduleData structures for new instructions in the
833 /// scheduling region.
834 void initScheduleData(Instruction *FromI, Instruction *ToI,
835 ScheduleData *PrevLoadStore,
836 ScheduleData *NextLoadStore);
838 /// Updates the dependency information of a bundle and of all instructions/
839 /// bundles which depend on the original bundle.
840 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
843 /// Sets all instruction in the scheduling region to un-scheduled.
844 void resetSchedule();
848 /// Simple memory allocation for ScheduleData.
849 std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
851 /// The size of a ScheduleData array in ScheduleDataChunks.
854 /// The allocator position in the current chunk, which is the last entry
855 /// of ScheduleDataChunks.
858 /// Attaches ScheduleData to Instruction.
859 /// Note that the mapping survives during all vectorization iterations, i.e.
860 /// ScheduleData structures are recycled.
861 DenseMap<Value *, ScheduleData *> ScheduleDataMap;
863 struct ReadyList : SmallVector<ScheduleData *, 8> {
864 void insert(ScheduleData *SD) { push_back(SD); }
867 /// The ready-list for scheduling (only used for the dry-run).
868 ReadyList ReadyInsts;
870 /// The first instruction of the scheduling region.
871 Instruction *ScheduleStart;
873 /// The first instruction _after_ the scheduling region.
874 Instruction *ScheduleEnd;
876 /// The first memory accessing instruction in the scheduling region
878 ScheduleData *FirstLoadStoreInRegion;
880 /// The last memory accessing instruction in the scheduling region
882 ScheduleData *LastLoadStoreInRegion;
884 /// The current size of the scheduling region.
885 int ScheduleRegionSize;
887 /// The maximum size allowed for the scheduling region.
888 int ScheduleRegionSizeLimit;
890 /// The ID of the scheduling region. For a new vectorization iteration this
891 /// is incremented which "removes" all ScheduleData from the region.
892 int SchedulingRegionID;
895 /// Attaches the BlockScheduling structures to basic blocks.
896 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
898 /// Performs the "real" scheduling. Done before vectorization is actually
899 /// performed in a basic block.
900 void scheduleBlock(BlockScheduling *BS);
902 /// List of users to ignore during scheduling and that don't need extracting.
903 ArrayRef<Value *> UserIgnoreList;
905 // Number of load-bundles, which contain consecutive loads.
906 int NumLoadsWantToKeepOrder;
908 // Number of load-bundles of size 2, which are consecutive loads if reversed.
909 int NumLoadsWantToChangeOrder;
911 // Analysis and block reference.
914 TargetTransformInfo *TTI;
915 TargetLibraryInfo *TLI;
919 /// Instruction builder to construct the vectorized tree.
924 raw_ostream &operator<<(raw_ostream &os, const BoUpSLP::ScheduleData &SD) {
930 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
931 ArrayRef<Value *> UserIgnoreLst) {
933 UserIgnoreList = UserIgnoreLst;
934 if (!getSameType(Roots))
936 buildTree_rec(Roots, 0);
938 // Collect the values that we need to extract from the tree.
939 for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
940 TreeEntry *Entry = &VectorizableTree[EIdx];
943 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
944 Value *Scalar = Entry->Scalars[Lane];
946 // No need to handle users of gathered values.
947 if (Entry->NeedToGather)
950 for (User *U : Scalar->users()) {
951 DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
953 Instruction *UserInst = dyn_cast<Instruction>(U);
957 // Skip in-tree scalars that become vectors
958 if (ScalarToTreeEntry.count(U)) {
959 int Idx = ScalarToTreeEntry[U];
960 TreeEntry *UseEntry = &VectorizableTree[Idx];
961 Value *UseScalar = UseEntry->Scalars[0];
962 // Some in-tree scalars will remain as scalar in vectorized
963 // instructions. If that is the case, the one in Lane 0 will
965 if (UseScalar != U ||
966 !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
967 DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
969 assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
974 // Ignore users in the user ignore list.
975 if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UserInst) !=
976 UserIgnoreList.end())
979 DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
980 Lane << " from " << *Scalar << ".\n");
981 ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
988 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
989 bool SameTy = getSameType(VL); (void)SameTy;
990 bool isAltShuffle = false;
991 assert(SameTy && "Invalid types!");
993 if (Depth == RecursionMaxDepth) {
994 DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
995 newTreeEntry(VL, false);
999 // Don't handle vectors.
1000 if (VL[0]->getType()->isVectorTy()) {
1001 DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
1002 newTreeEntry(VL, false);
1006 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
1007 if (SI->getValueOperand()->getType()->isVectorTy()) {
1008 DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
1009 newTreeEntry(VL, false);
1012 unsigned Opcode = getSameOpcode(VL);
1014 // Check that this shuffle vector refers to the alternate
1015 // sequence of opcodes.
1016 if (Opcode == Instruction::ShuffleVector) {
1017 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
1018 unsigned Op = I0->getOpcode();
1019 if (Op != Instruction::ShuffleVector)
1020 isAltShuffle = true;
1023 // If all of the operands are identical or constant we have a simple solution.
1024 if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) || !Opcode) {
1025 DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
1026 newTreeEntry(VL, false);
1030 // We now know that this is a vector of instructions of the same type from
1033 // Don't vectorize ephemeral values.
1034 for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1035 if (EphValues.count(VL[i])) {
1036 DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
1037 ") is ephemeral.\n");
1038 newTreeEntry(VL, false);
1043 // Check if this is a duplicate of another entry.
1044 if (ScalarToTreeEntry.count(VL[0])) {
1045 int Idx = ScalarToTreeEntry[VL[0]];
1046 TreeEntry *E = &VectorizableTree[Idx];
1047 for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1048 DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
1049 if (E->Scalars[i] != VL[i]) {
1050 DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
1051 newTreeEntry(VL, false);
1055 DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
1059 // Check that none of the instructions in the bundle are already in the tree.
1060 for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1061 if (ScalarToTreeEntry.count(VL[i])) {
1062 DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
1063 ") is already in tree.\n");
1064 newTreeEntry(VL, false);
1069 // If any of the scalars is marked as a value that needs to stay scalar then
1070 // we need to gather the scalars.
1071 for (unsigned i = 0, e = VL.size(); i != e; ++i) {
1072 if (MustGather.count(VL[i])) {
1073 DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
1074 newTreeEntry(VL, false);
1079 // Check that all of the users of the scalars that we want to vectorize are
1081 Instruction *VL0 = cast<Instruction>(VL[0]);
1082 BasicBlock *BB = cast<Instruction>(VL0)->getParent();
1084 if (!DT->isReachableFromEntry(BB)) {
1085 // Don't go into unreachable blocks. They may contain instructions with
1086 // dependency cycles which confuse the final scheduling.
1087 DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
1088 newTreeEntry(VL, false);
1092 // Check that every instructions appears once in this bundle.
1093 for (unsigned i = 0, e = VL.size(); i < e; ++i)
1094 for (unsigned j = i+1; j < e; ++j)
1095 if (VL[i] == VL[j]) {
1096 DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
1097 newTreeEntry(VL, false);
1101 auto &BSRef = BlocksSchedules[BB];
1103 BSRef = llvm::make_unique<BlockScheduling>(BB);
1105 BlockScheduling &BS = *BSRef.get();
1107 if (!BS.tryScheduleBundle(VL, this)) {
1108 DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
1109 assert((!BS.getScheduleData(VL[0]) ||
1110 !BS.getScheduleData(VL[0])->isPartOfBundle()) &&
1111 "tryScheduleBundle should cancelScheduling on failure");
1112 newTreeEntry(VL, false);
1115 DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
1118 case Instruction::PHI: {
1119 PHINode *PH = dyn_cast<PHINode>(VL0);
1121 // Check for terminator values (e.g. invoke).
1122 for (unsigned j = 0; j < VL.size(); ++j)
1123 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
1124 TerminatorInst *Term = dyn_cast<TerminatorInst>(
1125 cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i)));
1127 DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
1128 BS.cancelScheduling(VL);
1129 newTreeEntry(VL, false);
1134 newTreeEntry(VL, true);
1135 DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
1137 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
1139 // Prepare the operand vector.
1140 for (unsigned j = 0; j < VL.size(); ++j)
1141 Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock(
1142 PH->getIncomingBlock(i)));
1144 buildTree_rec(Operands, Depth + 1);
1148 case Instruction::ExtractElement: {
1149 bool Reuse = CanReuseExtract(VL);
1151 DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
1153 BS.cancelScheduling(VL);
1155 newTreeEntry(VL, Reuse);
1158 case Instruction::Load: {
1159 // Check that a vectorized load would load the same memory as a scalar
1161 // For example we don't want vectorize loads that are smaller than 8 bit.
1162 // Even though we have a packed struct {<i2, i2, i2, i2>} LLVM treats
1163 // loading/storing it as an i8 struct. If we vectorize loads/stores from
1164 // such a struct we read/write packed bits disagreeing with the
1165 // unvectorized version.
1166 const DataLayout &DL = F->getParent()->getDataLayout();
1167 Type *ScalarTy = VL[0]->getType();
1169 if (DL.getTypeSizeInBits(ScalarTy) !=
1170 DL.getTypeAllocSizeInBits(ScalarTy)) {
1171 BS.cancelScheduling(VL);
1172 newTreeEntry(VL, false);
1173 DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
1176 // Check if the loads are consecutive or of we need to swizzle them.
1177 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
1178 LoadInst *L = cast<LoadInst>(VL[i]);
1179 if (!L->isSimple()) {
1180 BS.cancelScheduling(VL);
1181 newTreeEntry(VL, false);
1182 DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
1186 if (!isConsecutiveAccess(VL[i], VL[i + 1], DL)) {
1187 if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], DL)) {
1188 ++NumLoadsWantToChangeOrder;
1190 BS.cancelScheduling(VL);
1191 newTreeEntry(VL, false);
1192 DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
1196 ++NumLoadsWantToKeepOrder;
1197 newTreeEntry(VL, true);
1198 DEBUG(dbgs() << "SLP: added a vector of loads.\n");
1201 case Instruction::ZExt:
1202 case Instruction::SExt:
1203 case Instruction::FPToUI:
1204 case Instruction::FPToSI:
1205 case Instruction::FPExt:
1206 case Instruction::PtrToInt:
1207 case Instruction::IntToPtr:
1208 case Instruction::SIToFP:
1209 case Instruction::UIToFP:
1210 case Instruction::Trunc:
1211 case Instruction::FPTrunc:
1212 case Instruction::BitCast: {
1213 Type *SrcTy = VL0->getOperand(0)->getType();
1214 for (unsigned i = 0; i < VL.size(); ++i) {
1215 Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
1216 if (Ty != SrcTy || !isValidElementType(Ty)) {
1217 BS.cancelScheduling(VL);
1218 newTreeEntry(VL, false);
1219 DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
1223 newTreeEntry(VL, true);
1224 DEBUG(dbgs() << "SLP: added a vector of casts.\n");
1226 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1228 // Prepare the operand vector.
1229 for (unsigned j = 0; j < VL.size(); ++j)
1230 Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
1232 buildTree_rec(Operands, Depth+1);
1236 case Instruction::ICmp:
1237 case Instruction::FCmp: {
1238 // Check that all of the compares have the same predicate.
1239 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
1240 Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
1241 for (unsigned i = 1, e = VL.size(); i < e; ++i) {
1242 CmpInst *Cmp = cast<CmpInst>(VL[i]);
1243 if (Cmp->getPredicate() != P0 ||
1244 Cmp->getOperand(0)->getType() != ComparedTy) {
1245 BS.cancelScheduling(VL);
1246 newTreeEntry(VL, false);
1247 DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
1252 newTreeEntry(VL, true);
1253 DEBUG(dbgs() << "SLP: added a vector of compares.\n");
1255 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1257 // Prepare the operand vector.
1258 for (unsigned j = 0; j < VL.size(); ++j)
1259 Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
1261 buildTree_rec(Operands, Depth+1);
1265 case Instruction::Select:
1266 case Instruction::Add:
1267 case Instruction::FAdd:
1268 case Instruction::Sub:
1269 case Instruction::FSub:
1270 case Instruction::Mul:
1271 case Instruction::FMul:
1272 case Instruction::UDiv:
1273 case Instruction::SDiv:
1274 case Instruction::FDiv:
1275 case Instruction::URem:
1276 case Instruction::SRem:
1277 case Instruction::FRem:
1278 case Instruction::Shl:
1279 case Instruction::LShr:
1280 case Instruction::AShr:
1281 case Instruction::And:
1282 case Instruction::Or:
1283 case Instruction::Xor: {
1284 newTreeEntry(VL, true);
1285 DEBUG(dbgs() << "SLP: added a vector of bin op.\n");
1287 // Sort operands of the instructions so that each side is more likely to
1288 // have the same opcode.
1289 if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
1290 ValueList Left, Right;
1291 reorderInputsAccordingToOpcode(VL, Left, Right);
1292 buildTree_rec(Left, Depth + 1);
1293 buildTree_rec(Right, Depth + 1);
1297 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1299 // Prepare the operand vector.
1300 for (unsigned j = 0; j < VL.size(); ++j)
1301 Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
1303 buildTree_rec(Operands, Depth+1);
1307 case Instruction::GetElementPtr: {
1308 // We don't combine GEPs with complicated (nested) indexing.
1309 for (unsigned j = 0; j < VL.size(); ++j) {
1310 if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
1311 DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
1312 BS.cancelScheduling(VL);
1313 newTreeEntry(VL, false);
1318 // We can't combine several GEPs into one vector if they operate on
1320 Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
1321 for (unsigned j = 0; j < VL.size(); ++j) {
1322 Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
1324 DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
1325 BS.cancelScheduling(VL);
1326 newTreeEntry(VL, false);
1331 // We don't combine GEPs with non-constant indexes.
1332 for (unsigned j = 0; j < VL.size(); ++j) {
1333 auto Op = cast<Instruction>(VL[j])->getOperand(1);
1334 if (!isa<ConstantInt>(Op)) {
1336 dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
1337 BS.cancelScheduling(VL);
1338 newTreeEntry(VL, false);
1343 newTreeEntry(VL, true);
1344 DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
1345 for (unsigned i = 0, e = 2; i < e; ++i) {
1347 // Prepare the operand vector.
1348 for (unsigned j = 0; j < VL.size(); ++j)
1349 Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
1351 buildTree_rec(Operands, Depth + 1);
1355 case Instruction::Store: {
1356 const DataLayout &DL = F->getParent()->getDataLayout();
1357 // Check if the stores are consecutive or of we need to swizzle them.
1358 for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
1359 if (!isConsecutiveAccess(VL[i], VL[i + 1], DL)) {
1360 BS.cancelScheduling(VL);
1361 newTreeEntry(VL, false);
1362 DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
1366 newTreeEntry(VL, true);
1367 DEBUG(dbgs() << "SLP: added a vector of stores.\n");
1370 for (unsigned j = 0; j < VL.size(); ++j)
1371 Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));
1373 buildTree_rec(Operands, Depth + 1);
1376 case Instruction::Call: {
1377 // Check if the calls are all to the same vectorizable intrinsic.
1378 CallInst *CI = cast<CallInst>(VL[0]);
1379 // Check if this is an Intrinsic call or something that can be
1380 // represented by an intrinsic call
1381 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
1382 if (!isTriviallyVectorizable(ID)) {
1383 BS.cancelScheduling(VL);
1384 newTreeEntry(VL, false);
1385 DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
1388 Function *Int = CI->getCalledFunction();
1389 Value *A1I = nullptr;
1390 if (hasVectorInstrinsicScalarOpd(ID, 1))
1391 A1I = CI->getArgOperand(1);
1392 for (unsigned i = 1, e = VL.size(); i != e; ++i) {
1393 CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
1394 if (!CI2 || CI2->getCalledFunction() != Int ||
1395 getIntrinsicIDForCall(CI2, TLI) != ID) {
1396 BS.cancelScheduling(VL);
1397 newTreeEntry(VL, false);
1398 DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
1402 // ctlz,cttz and powi are special intrinsics whose second argument
1403 // should be same in order for them to be vectorized.
1404 if (hasVectorInstrinsicScalarOpd(ID, 1)) {
1405 Value *A1J = CI2->getArgOperand(1);
1407 BS.cancelScheduling(VL);
1408 newTreeEntry(VL, false);
1409 DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
1410 << " argument "<< A1I<<"!=" << A1J
1417 newTreeEntry(VL, true);
1418 for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
1420 // Prepare the operand vector.
1421 for (unsigned j = 0; j < VL.size(); ++j) {
1422 CallInst *CI2 = dyn_cast<CallInst>(VL[j]);
1423 Operands.push_back(CI2->getArgOperand(i));
1425 buildTree_rec(Operands, Depth + 1);
1429 case Instruction::ShuffleVector: {
1430 // If this is not an alternate sequence of opcode like add-sub
1431 // then do not vectorize this instruction.
1432 if (!isAltShuffle) {
1433 BS.cancelScheduling(VL);
1434 newTreeEntry(VL, false);
1435 DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
1438 newTreeEntry(VL, true);
1439 DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
1441 // Reorder operands if reordering would enable vectorization.
1442 if (isa<BinaryOperator>(VL0)) {
1443 ValueList Left, Right;
1444 reorderAltShuffleOperands(VL, Left, Right);
1445 buildTree_rec(Left, Depth + 1);
1446 buildTree_rec(Right, Depth + 1);
1450 for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
1452 // Prepare the operand vector.
1453 for (unsigned j = 0; j < VL.size(); ++j)
1454 Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
1456 buildTree_rec(Operands, Depth + 1);
1461 BS.cancelScheduling(VL);
1462 newTreeEntry(VL, false);
1463 DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
1468 int BoUpSLP::getEntryCost(TreeEntry *E) {
1469 ArrayRef<Value*> VL = E->Scalars;
1471 Type *ScalarTy = VL[0]->getType();
1472 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
1473 ScalarTy = SI->getValueOperand()->getType();
1474 VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
1476 if (E->NeedToGather) {
1477 if (allConstant(VL))
1480 return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
1482 return getGatherCost(E->Scalars);
1484 unsigned Opcode = getSameOpcode(VL);
1485 assert(Opcode && getSameType(VL) && getSameBlock(VL) && "Invalid VL");
1486 Instruction *VL0 = cast<Instruction>(VL[0]);
1488 case Instruction::PHI: {
1491 case Instruction::ExtractElement: {
1492 if (CanReuseExtract(VL)) {
1494 for (unsigned i = 0, e = VL.size(); i < e; ++i) {
1495 ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
1497 // Take credit for instruction that will become dead.
1499 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
1503 return getGatherCost(VecTy);
1505 case Instruction::ZExt:
1506 case Instruction::SExt:
1507 case Instruction::FPToUI:
1508 case Instruction::FPToSI:
1509 case Instruction::FPExt:
1510 case Instruction::PtrToInt:
1511 case Instruction::IntToPtr:
1512 case Instruction::SIToFP:
1513 case Instruction::UIToFP:
1514 case Instruction::Trunc:
1515 case Instruction::FPTrunc:
1516 case Instruction::BitCast: {
1517 Type *SrcTy = VL0->getOperand(0)->getType();
1519 // Calculate the cost of this instruction.
1520 int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
1521 VL0->getType(), SrcTy);
1523 VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
1524 int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
1525 return VecCost - ScalarCost;
1527 case Instruction::FCmp:
1528 case Instruction::ICmp:
1529 case Instruction::Select:
1530 case Instruction::Add:
1531 case Instruction::FAdd:
1532 case Instruction::Sub:
1533 case Instruction::FSub:
1534 case Instruction::Mul:
1535 case Instruction::FMul:
1536 case Instruction::UDiv:
1537 case Instruction::SDiv:
1538 case Instruction::FDiv:
1539 case Instruction::URem:
1540 case Instruction::SRem:
1541 case Instruction::FRem:
1542 case Instruction::Shl:
1543 case Instruction::LShr:
1544 case Instruction::AShr:
1545 case Instruction::And:
1546 case Instruction::Or:
1547 case Instruction::Xor: {
1548 // Calculate the cost of this instruction.
1551 if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
1552 Opcode == Instruction::Select) {
1553 VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
1554 ScalarCost = VecTy->getNumElements() *
1555 TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
1556 VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
1558 // Certain instructions can be cheaper to vectorize if they have a
1559 // constant second vector operand.
1560 TargetTransformInfo::OperandValueKind Op1VK =
1561 TargetTransformInfo::OK_AnyValue;
1562 TargetTransformInfo::OperandValueKind Op2VK =
1563 TargetTransformInfo::OK_UniformConstantValue;
1564 TargetTransformInfo::OperandValueProperties Op1VP =
1565 TargetTransformInfo::OP_None;
1566 TargetTransformInfo::OperandValueProperties Op2VP =
1567 TargetTransformInfo::OP_None;
1569 // If all operands are exactly the same ConstantInt then set the
1570 // operand kind to OK_UniformConstantValue.
1571 // If instead not all operands are constants, then set the operand kind
1572 // to OK_AnyValue. If all operands are constants but not the same,
1573 // then set the operand kind to OK_NonUniformConstantValue.
1574 ConstantInt *CInt = nullptr;
1575 for (unsigned i = 0; i < VL.size(); ++i) {
1576 const Instruction *I = cast<Instruction>(VL[i]);
1577 if (!isa<ConstantInt>(I->getOperand(1))) {
1578 Op2VK = TargetTransformInfo::OK_AnyValue;
1582 CInt = cast<ConstantInt>(I->getOperand(1));
1585 if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
1586 CInt != cast<ConstantInt>(I->getOperand(1)))
1587 Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
1589 // FIXME: Currently cost of model modification for division by
1590 // power of 2 is handled only for X86. Add support for other targets.
1591 if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
1592 CInt->getValue().isPowerOf2())
1593 Op2VP = TargetTransformInfo::OP_PowerOf2;
1595 ScalarCost = VecTy->getNumElements() *
1596 TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK,
1598 VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
1601 return VecCost - ScalarCost;
1603 case Instruction::GetElementPtr: {
1604 TargetTransformInfo::OperandValueKind Op1VK =
1605 TargetTransformInfo::OK_AnyValue;
1606 TargetTransformInfo::OperandValueKind Op2VK =
1607 TargetTransformInfo::OK_UniformConstantValue;
1610 VecTy->getNumElements() *
1611 TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
1613 TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);
1615 return VecCost - ScalarCost;
1617 case Instruction::Load: {
1618 // Cost of wide load - cost of scalar loads.
1619 int ScalarLdCost = VecTy->getNumElements() *
1620 TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
1621 int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
1622 return VecLdCost - ScalarLdCost;
1624 case Instruction::Store: {
1625 // We know that we can merge the stores. Calculate the cost.
1626 int ScalarStCost = VecTy->getNumElements() *
1627 TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
1628 int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
1629 return VecStCost - ScalarStCost;
1631 case Instruction::Call: {
1632 CallInst *CI = cast<CallInst>(VL0);
1633 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
1635 // Calculate the cost of the scalar and vector calls.
1636 SmallVector<Type*, 4> ScalarTys, VecTys;
1637 for (unsigned op = 0, opc = CI->getNumArgOperands(); op!= opc; ++op) {
1638 ScalarTys.push_back(CI->getArgOperand(op)->getType());
1639 VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
1640 VecTy->getNumElements()));
1643 int ScalarCallCost = VecTy->getNumElements() *
1644 TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys);
1646 int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys);
1648 DEBUG(dbgs() << "SLP: Call cost "<< VecCallCost - ScalarCallCost
1649 << " (" << VecCallCost << "-" << ScalarCallCost << ")"
1650 << " for " << *CI << "\n");
1652 return VecCallCost - ScalarCallCost;
1654 case Instruction::ShuffleVector: {
1655 TargetTransformInfo::OperandValueKind Op1VK =
1656 TargetTransformInfo::OK_AnyValue;
1657 TargetTransformInfo::OperandValueKind Op2VK =
1658 TargetTransformInfo::OK_AnyValue;
1661 for (unsigned i = 0; i < VL.size(); ++i) {
1662 Instruction *I = cast<Instruction>(VL[i]);
1666 TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
1668 // VecCost is equal to sum of the cost of creating 2 vectors
1669 // and the cost of creating shuffle.
1670 Instruction *I0 = cast<Instruction>(VL[0]);
1672 TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
1673 Instruction *I1 = cast<Instruction>(VL[1]);
1675 TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
1677 TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
1678 return VecCost - ScalarCost;
1681 llvm_unreachable("Unknown instruction");
1685 bool BoUpSLP::isFullyVectorizableTinyTree() {
1686 DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
1687 VectorizableTree.size() << " is fully vectorizable .\n");
1689 // We only handle trees of height 2.
1690 if (VectorizableTree.size() != 2)
1693 // Handle splat and all-constants stores.
1694 if (!VectorizableTree[0].NeedToGather &&
1695 (allConstant(VectorizableTree[1].Scalars) ||
1696 isSplat(VectorizableTree[1].Scalars)))
1699 // Gathering cost would be too much for tiny trees.
1700 if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
1706 int BoUpSLP::getSpillCost() {
1707 // Walk from the bottom of the tree to the top, tracking which values are
1708 // live. When we see a call instruction that is not part of our tree,
1709 // query TTI to see if there is a cost to keeping values live over it
1710 // (for example, if spills and fills are required).
1711 unsigned BundleWidth = VectorizableTree.front().Scalars.size();
1714 SmallPtrSet<Instruction*, 4> LiveValues;
1715 Instruction *PrevInst = nullptr;
1717 for (unsigned N = 0; N < VectorizableTree.size(); ++N) {
1718 Instruction *Inst = dyn_cast<Instruction>(VectorizableTree[N].Scalars[0]);
1728 dbgs() << "SLP: #LV: " << LiveValues.size();
1729 for (auto *X : LiveValues)
1730 dbgs() << " " << X->getName();
1731 dbgs() << ", Looking at ";
1735 // Update LiveValues.
1736 LiveValues.erase(PrevInst);
1737 for (auto &J : PrevInst->operands()) {
1738 if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J))
1739 LiveValues.insert(cast<Instruction>(&*J));
1742 // Now find the sequence of instructions between PrevInst and Inst.
1743 BasicBlock::reverse_iterator InstIt(Inst->getIterator()),
1744 PrevInstIt(PrevInst->getIterator());
1746 while (InstIt != PrevInstIt) {
1747 if (PrevInstIt == PrevInst->getParent()->rend()) {
1748 PrevInstIt = Inst->getParent()->rbegin();
1752 if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) {
1753 SmallVector<Type*, 4> V;
1754 for (auto *II : LiveValues)
1755 V.push_back(VectorType::get(II->getType(), BundleWidth));
1756 Cost += TTI->getCostOfKeepingLiveOverCall(V);
1765 DEBUG(dbgs() << "SLP: SpillCost=" << Cost << "\n");
1769 int BoUpSLP::getTreeCost() {
1771 DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
1772 VectorizableTree.size() << ".\n");
1774 // We only vectorize tiny trees if it is fully vectorizable.
1775 if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
1776 if (VectorizableTree.empty()) {
1777 assert(!ExternalUses.size() && "We should not have any external users");
1782 unsigned BundleWidth = VectorizableTree[0].Scalars.size();
1784 for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
1785 int C = getEntryCost(&VectorizableTree[i]);
1786 DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
1787 << *VectorizableTree[i].Scalars[0] << " .\n");
1791 SmallSet<Value *, 16> ExtractCostCalculated;
1792 int ExtractCost = 0;
1793 for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
1795 // We only add extract cost once for the same scalar.
1796 if (!ExtractCostCalculated.insert(I->Scalar).second)
1799 // Uses by ephemeral values are free (because the ephemeral value will be
1800 // removed prior to code generation, and so the extraction will be
1801 // removed as well).
1802 if (EphValues.count(I->User))
1805 VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
1806 ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
1810 Cost += getSpillCost();
1812 DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost<< ".\n");
1813 return Cost + ExtractCost;
1816 int BoUpSLP::getGatherCost(Type *Ty) {
1818 for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
1819 Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
1823 int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
1824 // Find the type of the operands in VL.
1825 Type *ScalarTy = VL[0]->getType();
1826 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
1827 ScalarTy = SI->getValueOperand()->getType();
1828 VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
1829 // Find the cost of inserting/extracting values from the vector.
1830 return getGatherCost(VecTy);
1833 Value *BoUpSLP::getPointerOperand(Value *I) {
1834 if (LoadInst *LI = dyn_cast<LoadInst>(I))
1835 return LI->getPointerOperand();
1836 if (StoreInst *SI = dyn_cast<StoreInst>(I))
1837 return SI->getPointerOperand();
1841 unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
1842 if (LoadInst *L = dyn_cast<LoadInst>(I))
1843 return L->getPointerAddressSpace();
1844 if (StoreInst *S = dyn_cast<StoreInst>(I))
1845 return S->getPointerAddressSpace();
1849 bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL) {
1850 Value *PtrA = getPointerOperand(A);
1851 Value *PtrB = getPointerOperand(B);
1852 unsigned ASA = getAddressSpaceOperand(A);
1853 unsigned ASB = getAddressSpaceOperand(B);
1855 // Check that the address spaces match and that the pointers are valid.
1856 if (!PtrA || !PtrB || (ASA != ASB))
1859 // Make sure that A and B are different pointers of the same type.
1860 if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
1863 unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
1864 Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
1865 APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty));
1867 APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
1868 PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1869 PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1871 APInt OffsetDelta = OffsetB - OffsetA;
1873 // Check if they are based on the same pointer. That makes the offsets
1876 return OffsetDelta == Size;
1878 // Compute the necessary base pointer delta to have the necessary final delta
1879 // equal to the size.
1880 APInt BaseDelta = Size - OffsetDelta;
1882 // Otherwise compute the distance with SCEV between the base pointers.
1883 const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
1884 const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
1885 const SCEV *C = SE->getConstant(BaseDelta);
1886 const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
1887 return X == PtrSCEVB;
1890 // Reorder commutative operations in alternate shuffle if the resulting vectors
1891 // are consecutive loads. This would allow us to vectorize the tree.
1892 // If we have something like-
1893 // load a[0] - load b[0]
1894 // load b[1] + load a[1]
1895 // load a[2] - load b[2]
1896 // load a[3] + load b[3]
1897 // Reordering the second load b[1] load a[1] would allow us to vectorize this
1899 void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL,
1900 SmallVectorImpl<Value *> &Left,
1901 SmallVectorImpl<Value *> &Right) {
1902 const DataLayout &DL = F->getParent()->getDataLayout();
1904 // Push left and right operands of binary operation into Left and Right
1905 for (unsigned i = 0, e = VL.size(); i < e; ++i) {
1906 Left.push_back(cast<Instruction>(VL[i])->getOperand(0));
1907 Right.push_back(cast<Instruction>(VL[i])->getOperand(1));
1910 // Reorder if we have a commutative operation and consecutive access
1911 // are on either side of the alternate instructions.
1912 for (unsigned j = 0; j < VL.size() - 1; ++j) {
1913 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
1914 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
1915 Instruction *VL1 = cast<Instruction>(VL[j]);
1916 Instruction *VL2 = cast<Instruction>(VL[j + 1]);
1917 if (isConsecutiveAccess(L, L1, DL) && VL1->isCommutative()) {
1918 std::swap(Left[j], Right[j]);
1920 } else if (isConsecutiveAccess(L, L1, DL) && VL2->isCommutative()) {
1921 std::swap(Left[j + 1], Right[j + 1]);
1927 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
1928 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
1929 Instruction *VL1 = cast<Instruction>(VL[j]);
1930 Instruction *VL2 = cast<Instruction>(VL[j + 1]);
1931 if (isConsecutiveAccess(L, L1, DL) && VL1->isCommutative()) {
1932 std::swap(Left[j], Right[j]);
1934 } else if (isConsecutiveAccess(L, L1, DL) && VL2->isCommutative()) {
1935 std::swap(Left[j + 1], Right[j + 1]);
1945 // Return true if I should be commuted before adding it's left and right
1946 // operands to the arrays Left and Right.
1948 // The vectorizer is trying to either have all elements one side being
1949 // instruction with the same opcode to enable further vectorization, or having
1950 // a splat to lower the vectorizing cost.
1951 static bool shouldReorderOperands(int i, Instruction &I,
1952 SmallVectorImpl<Value *> &Left,
1953 SmallVectorImpl<Value *> &Right) {
1954 Value *VLeft = I.getOperand(0);
1955 Value *VRight = I.getOperand(1);
1956 Instruction *ILeft = dyn_cast<Instruction>(VLeft);
1957 Instruction *IRight = dyn_cast<Instruction>(VRight);
1959 // Sort two opcodes. In the code below we try to preserve the ability to use
1960 // broadcast of values instead of individual inserts.
1967 // If we just sorted according to opcode we would leave the first line in
1968 // tact but we would swap vl2 with vr2 because opcode(phi) > opcode(load).
1971 // Because vr2 and vr1 are from the same load we loose the opportunity of a
1972 // broadcast for the packed right side in the backend: we have [vr1, vl2]
1973 // instead of [vr1, vr2=vr1].
1974 if (ILeft && IRight) {
1975 if (ILeft->getOpcode() > IRight->getOpcode() &&
1976 Right[i - 1] != IRight) {
1977 // Try not to destroy a broad cast for no apparent benefit.
1979 } else if (ILeft->getOpcode() == IRight->getOpcode() &&
1980 Right[i - 1] == ILeft) {
1981 // Try preserve broadcasts.
1983 } else if (ILeft->getOpcode() == IRight->getOpcode() &&
1984 Left[i - 1] == IRight) {
1985 // Try preserve broadcasts.
1990 // One opcode, put the instruction on the right.
1991 return ILeft != nullptr;
1994 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
1995 SmallVectorImpl<Value *> &Left,
1996 SmallVectorImpl<Value *> &Right) {
1998 SmallVector<Value *, 16> OrigLeft, OrigRight;
2001 // Peel the first iteration out of the loop since there's nothing
2002 // interesting to do anyway and it simplifies the checks
2003 auto VLeft = cast<Instruction>(VL[0])->getOperand(0);
2004 auto VRight = cast<Instruction>(VL[0])->getOperand(1);
2005 OrigLeft.push_back(VLeft);
2006 OrigRight.push_back(VRight);
2007 if (!isa<Instruction>(VRight) && isa<Instruction>(VLeft))
2008 // Favor having instruction to the right. FIXME: why?
2009 std::swap(VLeft, VRight);
2010 Left.push_back(VLeft);
2011 Right.push_back(VRight);
2014 // Keep track if we have instructions with all the same opcode on one side.
2015 bool AllSameOpcodeLeft = isa<Instruction>(Left[0]);
2016 bool AllSameOpcodeRight = isa<Instruction>(Right[0]);
2018 for (unsigned i = 1, e = VL.size(); i != e; ++i) {
2019 Instruction *I = cast<Instruction>(VL[i]);
2021 Value *VLeft = I->getOperand(0);
2022 Value *VRight = I->getOperand(1);
2023 OrigLeft.push_back(VLeft);
2024 OrigRight.push_back(VRight);
2025 Instruction *ILeft = dyn_cast<Instruction>(VLeft);
2026 Instruction *IRight = dyn_cast<Instruction>(VRight);
2028 // Check whether all operands on one side have the same opcode. In this case
2029 // we want to preserve the original order and not make things worse by
2031 if (AllSameOpcodeLeft && ILeft) {
2032 if (Instruction *PLeft = dyn_cast<Instruction>(OrigLeft[i - 1])) {
2033 if (PLeft->getOpcode() != ILeft->getOpcode())
2034 AllSameOpcodeLeft = false;
2036 AllSameOpcodeLeft = false;
2038 if (AllSameOpcodeRight && IRight) {
2039 if (Instruction *PRight = dyn_cast<Instruction>(OrigRight[i - 1])) {
2040 if (PRight->getOpcode() != IRight->getOpcode())
2041 AllSameOpcodeRight = false;
2043 AllSameOpcodeRight = false;
2047 // Commute to favor either a splat or maximizing having the same opcodes on
2049 if (shouldReorderOperands(i, *I, Left, Right)) {
2050 Left.push_back(I->getOperand(1));
2051 Right.push_back(I->getOperand(0));
2053 Left.push_back(I->getOperand(0));
2054 Right.push_back(I->getOperand(1));
2058 bool LeftBroadcast = isSplat(Left);
2059 bool RightBroadcast = isSplat(Right);
2061 // If operands end up being broadcast return this operand order.
2062 if (LeftBroadcast || RightBroadcast)
2065 // Don't reorder if the operands where good to begin.
2066 if (AllSameOpcodeRight || AllSameOpcodeLeft) {
2071 const DataLayout &DL = F->getParent()->getDataLayout();
2073 // Finally check if we can get longer vectorizable chain by reordering
2074 // without breaking the good operand order detected above.
2075 // E.g. If we have something like-
2076 // load a[0] load b[0]
2077 // load b[1] load a[1]
2078 // load a[2] load b[2]
2079 // load a[3] load b[3]
2080 // Reordering the second load b[1] load a[1] would allow us to vectorize
2081 // this code and we still retain AllSameOpcode property.
2082 // FIXME: This load reordering might break AllSameOpcode in some rare cases
2084 // add a[0],c[0] load b[0]
2085 // add a[1],c[2] load b[1]
2087 // add a[3],c[3] load b[3]
2088 for (unsigned j = 0; j < VL.size() - 1; ++j) {
2089 if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
2090 if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
2091 if (isConsecutiveAccess(L, L1, DL)) {
2092 std::swap(Left[j + 1], Right[j + 1]);
2097 if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
2098 if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
2099 if (isConsecutiveAccess(L, L1, DL)) {
2100 std::swap(Left[j + 1], Right[j + 1]);
2109 void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
2110 Instruction *VL0 = cast<Instruction>(VL[0]);
2111 BasicBlock::iterator NextInst(VL0);
2113 Builder.SetInsertPoint(VL0->getParent(), NextInst);
2114 Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
2117 Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
2118 Value *Vec = UndefValue::get(Ty);
2119 // Generate the 'InsertElement' instruction.
2120 for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
2121 Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
2122 if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
2123 GatherSeq.insert(Insrt);
2124 CSEBlocks.insert(Insrt->getParent());
2126 // Add to our 'need-to-extract' list.
2127 if (ScalarToTreeEntry.count(VL[i])) {
2128 int Idx = ScalarToTreeEntry[VL[i]];
2129 TreeEntry *E = &VectorizableTree[Idx];
2130 // Find which lane we need to extract.
2132 for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
2133 // Is this the lane of the scalar that we are looking for ?
2134 if (E->Scalars[Lane] == VL[i]) {
2139 assert(FoundLane >= 0 && "Could not find the correct lane");
2140 ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
2148 Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
2149 SmallDenseMap<Value*, int>::const_iterator Entry
2150 = ScalarToTreeEntry.find(VL[0]);
2151 if (Entry != ScalarToTreeEntry.end()) {
2152 int Idx = Entry->second;
2153 const TreeEntry *En = &VectorizableTree[Idx];
2154 if (En->isSame(VL) && En->VectorizedValue)
2155 return En->VectorizedValue;
2160 Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
2161 if (ScalarToTreeEntry.count(VL[0])) {
2162 int Idx = ScalarToTreeEntry[VL[0]];
2163 TreeEntry *E = &VectorizableTree[Idx];
2165 return vectorizeTree(E);
2168 Type *ScalarTy = VL[0]->getType();
2169 if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
2170 ScalarTy = SI->getValueOperand()->getType();
2171 VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
2173 return Gather(VL, VecTy);
2176 Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
2177 IRBuilder<>::InsertPointGuard Guard(Builder);
2179 if (E->VectorizedValue) {
2180 DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
2181 return E->VectorizedValue;
2184 Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
2185 Type *ScalarTy = VL0->getType();
2186 if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
2187 ScalarTy = SI->getValueOperand()->getType();
2188 VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());
2190 if (E->NeedToGather) {
2191 setInsertPointAfterBundle(E->Scalars);
2192 return Gather(E->Scalars, VecTy);
2195 const DataLayout &DL = F->getParent()->getDataLayout();
2196 unsigned Opcode = getSameOpcode(E->Scalars);
2199 case Instruction::PHI: {
2200 PHINode *PH = dyn_cast<PHINode>(VL0);
2201 Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
2202 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
2203 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
2204 E->VectorizedValue = NewPhi;
2206 // PHINodes may have multiple entries from the same block. We want to
2207 // visit every block once.
2208 SmallSet<BasicBlock*, 4> VisitedBBs;
2210 for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
2212 BasicBlock *IBB = PH->getIncomingBlock(i);
2214 if (!VisitedBBs.insert(IBB).second) {
2215 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
2219 // Prepare the operand vector.
2220 for (Value *V : E->Scalars)
2221 Operands.push_back(cast<PHINode>(V)->getIncomingValueForBlock(IBB));
2223 Builder.SetInsertPoint(IBB->getTerminator());
2224 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
2225 Value *Vec = vectorizeTree(Operands);
2226 NewPhi->addIncoming(Vec, IBB);
2229 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
2230 "Invalid number of incoming values");
2234 case Instruction::ExtractElement: {
2235 if (CanReuseExtract(E->Scalars)) {
2236 Value *V = VL0->getOperand(0);
2237 E->VectorizedValue = V;
2240 return Gather(E->Scalars, VecTy);
2242 case Instruction::ZExt:
2243 case Instruction::SExt:
2244 case Instruction::FPToUI:
2245 case Instruction::FPToSI:
2246 case Instruction::FPExt:
2247 case Instruction::PtrToInt:
2248 case Instruction::IntToPtr:
2249 case Instruction::SIToFP:
2250 case Instruction::UIToFP:
2251 case Instruction::Trunc:
2252 case Instruction::FPTrunc:
2253 case Instruction::BitCast: {
2255 for (Value *V : E->Scalars)
2256 INVL.push_back(cast<Instruction>(V)->getOperand(0));
2258 setInsertPointAfterBundle(E->Scalars);
2260 Value *InVec = vectorizeTree(INVL);
2262 if (Value *V = alreadyVectorized(E->Scalars))
2265 CastInst *CI = dyn_cast<CastInst>(VL0);
2266 Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
2267 E->VectorizedValue = V;
2268 ++NumVectorInstructions;
2271 case Instruction::FCmp:
2272 case Instruction::ICmp: {
2273 ValueList LHSV, RHSV;
2274 for (Value *V : E->Scalars) {
2275 LHSV.push_back(cast<Instruction>(V)->getOperand(0));
2276 RHSV.push_back(cast<Instruction>(V)->getOperand(1));
2279 setInsertPointAfterBundle(E->Scalars);
2281 Value *L = vectorizeTree(LHSV);
2282 Value *R = vectorizeTree(RHSV);
2284 if (Value *V = alreadyVectorized(E->Scalars))
2287 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
2289 if (Opcode == Instruction::FCmp)
2290 V = Builder.CreateFCmp(P0, L, R);
2292 V = Builder.CreateICmp(P0, L, R);
2294 E->VectorizedValue = V;
2295 ++NumVectorInstructions;
2298 case Instruction::Select: {
2299 ValueList TrueVec, FalseVec, CondVec;
2300 for (Value *V : E->Scalars) {
2301 CondVec.push_back(cast<Instruction>(V)->getOperand(0));
2302 TrueVec.push_back(cast<Instruction>(V)->getOperand(1));
2303 FalseVec.push_back(cast<Instruction>(V)->getOperand(2));
2306 setInsertPointAfterBundle(E->Scalars);
2308 Value *Cond = vectorizeTree(CondVec);
2309 Value *True = vectorizeTree(TrueVec);
2310 Value *False = vectorizeTree(FalseVec);
2312 if (Value *V = alreadyVectorized(E->Scalars))
2315 Value *V = Builder.CreateSelect(Cond, True, False);
2316 E->VectorizedValue = V;
2317 ++NumVectorInstructions;
2320 case Instruction::Add:
2321 case Instruction::FAdd:
2322 case Instruction::Sub:
2323 case Instruction::FSub:
2324 case Instruction::Mul:
2325 case Instruction::FMul:
2326 case Instruction::UDiv:
2327 case Instruction::SDiv:
2328 case Instruction::FDiv:
2329 case Instruction::URem:
2330 case Instruction::SRem:
2331 case Instruction::FRem:
2332 case Instruction::Shl:
2333 case Instruction::LShr:
2334 case Instruction::AShr:
2335 case Instruction::And:
2336 case Instruction::Or:
2337 case Instruction::Xor: {
2338 ValueList LHSVL, RHSVL;
2339 if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
2340 reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
2342 for (Value *V : E->Scalars) {
2343 LHSVL.push_back(cast<Instruction>(V)->getOperand(0));
2344 RHSVL.push_back(cast<Instruction>(V)->getOperand(1));
2347 setInsertPointAfterBundle(E->Scalars);
2349 Value *LHS = vectorizeTree(LHSVL);
2350 Value *RHS = vectorizeTree(RHSVL);
2352 if (LHS == RHS && isa<Instruction>(LHS)) {
2353 assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order");
2356 if (Value *V = alreadyVectorized(E->Scalars))
2359 BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
2360 Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
2361 E->VectorizedValue = V;
2362 propagateIRFlags(E->VectorizedValue, E->Scalars);
2363 ++NumVectorInstructions;
2365 if (Instruction *I = dyn_cast<Instruction>(V))
2366 return propagateMetadata(I, E->Scalars);
2370 case Instruction::Load: {
2371 // Loads are inserted at the head of the tree because we don't want to
2372 // sink them all the way down past store instructions.
2373 setInsertPointAfterBundle(E->Scalars);
2375 LoadInst *LI = cast<LoadInst>(VL0);
2376 Type *ScalarLoadTy = LI->getType();
2377 unsigned AS = LI->getPointerAddressSpace();
2379 Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
2380 VecTy->getPointerTo(AS));
2382 // The pointer operand uses an in-tree scalar so we add the new BitCast to
2383 // ExternalUses list to make sure that an extract will be generated in the
2385 if (ScalarToTreeEntry.count(LI->getPointerOperand()))
2386 ExternalUses.push_back(
2387 ExternalUser(LI->getPointerOperand(), cast<User>(VecPtr), 0));
2389 unsigned Alignment = LI->getAlignment();
2390 LI = Builder.CreateLoad(VecPtr);
2392 Alignment = DL.getABITypeAlignment(ScalarLoadTy);
2394 LI->setAlignment(Alignment);
2395 E->VectorizedValue = LI;
2396 ++NumVectorInstructions;
2397 return propagateMetadata(LI, E->Scalars);
2399 case Instruction::Store: {
2400 StoreInst *SI = cast<StoreInst>(VL0);
2401 unsigned Alignment = SI->getAlignment();
2402 unsigned AS = SI->getPointerAddressSpace();
2405 for (Value *V : E->Scalars)
2406 ValueOp.push_back(cast<StoreInst>(V)->getValueOperand());
2408 setInsertPointAfterBundle(E->Scalars);
2410 Value *VecValue = vectorizeTree(ValueOp);
2411 Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
2412 VecTy->getPointerTo(AS));
2413 StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
2415 // The pointer operand uses an in-tree scalar so we add the new BitCast to
2416 // ExternalUses list to make sure that an extract will be generated in the
2418 if (ScalarToTreeEntry.count(SI->getPointerOperand()))
2419 ExternalUses.push_back(
2420 ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0));
2423 Alignment = DL.getABITypeAlignment(SI->getValueOperand()->getType());
2425 S->setAlignment(Alignment);
2426 E->VectorizedValue = S;
2427 ++NumVectorInstructions;
2428 return propagateMetadata(S, E->Scalars);
2430 case Instruction::GetElementPtr: {
2431 setInsertPointAfterBundle(E->Scalars);
2434 for (Value *V : E->Scalars)
2435 Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0));
2437 Value *Op0 = vectorizeTree(Op0VL);
2439 std::vector<Value *> OpVecs;
2440 for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
2443 for (Value *V : E->Scalars)
2444 OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j));
2446 Value *OpVec = vectorizeTree(OpVL);
2447 OpVecs.push_back(OpVec);
2450 Value *V = Builder.CreateGEP(
2451 cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
2452 E->VectorizedValue = V;
2453 ++NumVectorInstructions;
2455 if (Instruction *I = dyn_cast<Instruction>(V))
2456 return propagateMetadata(I, E->Scalars);
2460 case Instruction::Call: {
2461 CallInst *CI = cast<CallInst>(VL0);
2462 setInsertPointAfterBundle(E->Scalars);
2464 Intrinsic::ID IID = Intrinsic::not_intrinsic;
2465 Value *ScalarArg = nullptr;
2466 if (CI && (FI = CI->getCalledFunction())) {
2467 IID = FI->getIntrinsicID();
2469 std::vector<Value *> OpVecs;
2470 for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
2472 // ctlz,cttz and powi are special intrinsics whose second argument is
2473 // a scalar. This argument should not be vectorized.
2474 if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
2475 CallInst *CEI = cast<CallInst>(E->Scalars[0]);
2476 ScalarArg = CEI->getArgOperand(j);
2477 OpVecs.push_back(CEI->getArgOperand(j));
2480 for (Value *V : E->Scalars) {
2481 CallInst *CEI = cast<CallInst>(V);
2482 OpVL.push_back(CEI->getArgOperand(j));
2485 Value *OpVec = vectorizeTree(OpVL);
2486 DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
2487 OpVecs.push_back(OpVec);
2490 Module *M = F->getParent();
2491 Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
2492 Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
2493 Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
2494 Value *V = Builder.CreateCall(CF, OpVecs);
2496 // The scalar argument uses an in-tree scalar so we add the new vectorized
2497 // call to ExternalUses list to make sure that an extract will be
2498 // generated in the future.
2499 if (ScalarArg && ScalarToTreeEntry.count(ScalarArg))
2500 ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));
2502 E->VectorizedValue = V;
2503 ++NumVectorInstructions;
2506 case Instruction::ShuffleVector: {
2507 ValueList LHSVL, RHSVL;
2508 assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand");
2509 reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL);
2510 setInsertPointAfterBundle(E->Scalars);
2512 Value *LHS = vectorizeTree(LHSVL);
2513 Value *RHS = vectorizeTree(RHSVL);
2515 if (Value *V = alreadyVectorized(E->Scalars))
2518 // Create a vector of LHS op1 RHS
2519 BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
2520 Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);
2522 // Create a vector of LHS op2 RHS
2523 Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
2524 BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
2525 Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);
2527 // Create shuffle to take alternate operations from the vector.
2528 // Also, gather up odd and even scalar ops to propagate IR flags to
2529 // each vector operation.
2530 ValueList OddScalars, EvenScalars;
2531 unsigned e = E->Scalars.size();
2532 SmallVector<Constant *, 8> Mask(e);
2533 for (unsigned i = 0; i < e; ++i) {
2535 Mask[i] = Builder.getInt32(e + i);
2536 OddScalars.push_back(E->Scalars[i]);
2538 Mask[i] = Builder.getInt32(i);
2539 EvenScalars.push_back(E->Scalars[i]);
2543 Value *ShuffleMask = ConstantVector::get(Mask);
2544 propagateIRFlags(V0, EvenScalars);
2545 propagateIRFlags(V1, OddScalars);
2547 Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
2548 E->VectorizedValue = V;
2549 ++NumVectorInstructions;
2550 if (Instruction *I = dyn_cast<Instruction>(V))
2551 return propagateMetadata(I, E->Scalars);
2556 llvm_unreachable("unknown inst");
2561 Value *BoUpSLP::vectorizeTree() {
2563 // All blocks must be scheduled before any instructions are inserted.
2564 for (auto &BSIter : BlocksSchedules) {
2565 scheduleBlock(BSIter.second.get());
2568 Builder.SetInsertPoint(&F->getEntryBlock().front());
2569 vectorizeTree(&VectorizableTree[0]);
2571 DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n");
2573 // Extract all of the elements with the external uses.
2574 for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end();
2576 Value *Scalar = it->Scalar;
2577 llvm::User *User = it->User;
2579 // Skip users that we already RAUW. This happens when one instruction
2580 // has multiple uses of the same value.
2581 if (std::find(Scalar->user_begin(), Scalar->user_end(), User) ==
2584 assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");
2586 int Idx = ScalarToTreeEntry[Scalar];
2587 TreeEntry *E = &VectorizableTree[Idx];
2588 assert(!E->NeedToGather && "Extracting from a gather list");
2590 Value *Vec = E->VectorizedValue;
2591 assert(Vec && "Can't find vectorizable value");
2593 Value *Lane = Builder.getInt32(it->Lane);
2594 // Generate extracts for out-of-tree users.
2595 // Find the insertion point for the extractelement lane.
2596 if (isa<Instruction>(Vec)){
2597 if (PHINode *PH = dyn_cast<PHINode>(User)) {
2598 for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
2599 if (PH->getIncomingValue(i) == Scalar) {
2600 Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
2601 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2602 CSEBlocks.insert(PH->getIncomingBlock(i));
2603 PH->setOperand(i, Ex);
2607 Builder.SetInsertPoint(cast<Instruction>(User));
2608 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2609 CSEBlocks.insert(cast<Instruction>(User)->getParent());
2610 User->replaceUsesOfWith(Scalar, Ex);
2613 Builder.SetInsertPoint(&F->getEntryBlock().front());
2614 Value *Ex = Builder.CreateExtractElement(Vec, Lane);
2615 CSEBlocks.insert(&F->getEntryBlock());
2616 User->replaceUsesOfWith(Scalar, Ex);
2619 DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
2622 // For each vectorized value:
2623 for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
2624 TreeEntry *Entry = &VectorizableTree[EIdx];
2627 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
2628 Value *Scalar = Entry->Scalars[Lane];
2629 // No need to handle users of gathered values.
2630 if (Entry->NeedToGather)
2633 assert(Entry->VectorizedValue && "Can't find vectorizable value");
2635 Type *Ty = Scalar->getType();
2636 if (!Ty->isVoidTy()) {
2638 for (User *U : Scalar->users()) {
2639 DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
2641 assert((ScalarToTreeEntry.count(U) ||
2642 // It is legal to replace users in the ignorelist by undef.
2643 (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), U) !=
2644 UserIgnoreList.end())) &&
2645 "Replacing out-of-tree value with undef");
2648 Value *Undef = UndefValue::get(Ty);
2649 Scalar->replaceAllUsesWith(Undef);
2651 DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
2652 eraseInstruction(cast<Instruction>(Scalar));
2656 Builder.ClearInsertionPoint();
2658 return VectorizableTree[0].VectorizedValue;
2661 void BoUpSLP::optimizeGatherSequence() {
2662 DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
2663 << " gather sequences instructions.\n");
2664 // LICM InsertElementInst sequences.
2665 for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
2666 e = GatherSeq.end(); it != e; ++it) {
2667 InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);
2672 // Check if this block is inside a loop.
2673 Loop *L = LI->getLoopFor(Insert->getParent());
2677 // Check if it has a preheader.
2678 BasicBlock *PreHeader = L->getLoopPreheader();
2682 // If the vector or the element that we insert into it are
2683 // instructions that are defined in this basic block then we can't
2684 // hoist this instruction.
2685 Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
2686 Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
2687 if (CurrVec && L->contains(CurrVec))
2689 if (NewElem && L->contains(NewElem))
2692 // We can hoist this instruction. Move it to the pre-header.
2693 Insert->moveBefore(PreHeader->getTerminator());
2696 // Make a list of all reachable blocks in our CSE queue.
2697 SmallVector<const DomTreeNode *, 8> CSEWorkList;
2698 CSEWorkList.reserve(CSEBlocks.size());
2699 for (BasicBlock *BB : CSEBlocks)
2700 if (DomTreeNode *N = DT->getNode(BB)) {
2701 assert(DT->isReachableFromEntry(N));
2702 CSEWorkList.push_back(N);
2705 // Sort blocks by domination. This ensures we visit a block after all blocks
2706 // dominating it are visited.
2707 std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
2708 [this](const DomTreeNode *A, const DomTreeNode *B) {
2709 return DT->properlyDominates(A, B);
2712 // Perform O(N^2) search over the gather sequences and merge identical
2713 // instructions. TODO: We can further optimize this scan if we split the
2714 // instructions into different buckets based on the insert lane.
2715 SmallVector<Instruction *, 16> Visited;
2716 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
2717 assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
2718 "Worklist not sorted properly!");
2719 BasicBlock *BB = (*I)->getBlock();
2720 // For all instructions in blocks containing gather sequences:
2721 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
2722 Instruction *In = &*it++;
2723 if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
2726 // Check if we can replace this instruction with any of the
2727 // visited instructions.
2728 for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(),
2731 if (In->isIdenticalTo(*v) &&
2732 DT->dominates((*v)->getParent(), In->getParent())) {
2733 In->replaceAllUsesWith(*v);
2734 eraseInstruction(In);
2740 assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end());
2741 Visited.push_back(In);
2749 // Groups the instructions to a bundle (which is then a single scheduling entity)
2750 // and schedules instructions until the bundle gets ready.
2751 bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
2753 if (isa<PHINode>(VL[0]))
2756 // Initialize the instruction bundle.
2757 Instruction *OldScheduleEnd = ScheduleEnd;
2758 ScheduleData *PrevInBundle = nullptr;
2759 ScheduleData *Bundle = nullptr;
2760 bool ReSchedule = false;
2761 DEBUG(dbgs() << "SLP: bundle: " << *VL[0] << "\n");
2763 // Make sure that the scheduling region contains all
2764 // instructions of the bundle.
2765 for (Value *V : VL) {
2766 if (!extendSchedulingRegion(V))
2770 for (Value *V : VL) {
2771 ScheduleData *BundleMember = getScheduleData(V);
2772 assert(BundleMember &&
2773 "no ScheduleData for bundle member (maybe not in same basic block)");
2774 if (BundleMember->IsScheduled) {
2775 // A bundle member was scheduled as single instruction before and now
2776 // needs to be scheduled as part of the bundle. We just get rid of the
2777 // existing schedule.
2778 DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
2779 << " was already scheduled\n");
2782 assert(BundleMember->isSchedulingEntity() &&
2783 "bundle member already part of other bundle");
2785 PrevInBundle->NextInBundle = BundleMember;
2787 Bundle = BundleMember;
2789 BundleMember->UnscheduledDepsInBundle = 0;
2790 Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
2792 // Group the instructions to a bundle.
2793 BundleMember->FirstInBundle = Bundle;
2794 PrevInBundle = BundleMember;
2796 if (ScheduleEnd != OldScheduleEnd) {
2797 // The scheduling region got new instructions at the lower end (or it is a
2798 // new region for the first bundle). This makes it necessary to
2799 // recalculate all dependencies.
2800 // It is seldom that this needs to be done a second time after adding the
2801 // initial bundle to the region.
2802 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
2803 ScheduleData *SD = getScheduleData(I);
2804 SD->clearDependencies();
2810 initialFillReadyList(ReadyInsts);
2813 DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
2814 << BB->getName() << "\n");
2816 calculateDependencies(Bundle, true, SLP);
2818 // Now try to schedule the new bundle. As soon as the bundle is "ready" it
2819 // means that there are no cyclic dependencies and we can schedule it.
2820 // Note that's important that we don't "schedule" the bundle yet (see
2821 // cancelScheduling).
2822 while (!Bundle->isReady() && !ReadyInsts.empty()) {
2824 ScheduleData *pickedSD = ReadyInsts.back();
2825 ReadyInsts.pop_back();
2827 if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
2828 schedule(pickedSD, ReadyInsts);
2831 if (!Bundle->isReady()) {
2832 cancelScheduling(VL);
2838 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) {
2839 if (isa<PHINode>(VL[0]))
2842 ScheduleData *Bundle = getScheduleData(VL[0]);
2843 DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
2844 assert(!Bundle->IsScheduled &&
2845 "Can't cancel bundle which is already scheduled");
2846 assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
2847 "tried to unbundle something which is not a bundle");
2849 // Un-bundle: make single instructions out of the bundle.
2850 ScheduleData *BundleMember = Bundle;
2851 while (BundleMember) {
2852 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
2853 BundleMember->FirstInBundle = BundleMember;
2854 ScheduleData *Next = BundleMember->NextInBundle;
2855 BundleMember->NextInBundle = nullptr;
2856 BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
2857 if (BundleMember->UnscheduledDepsInBundle == 0) {
2858 ReadyInsts.insert(BundleMember);
2860 BundleMember = Next;
2864 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) {
2865 if (getScheduleData(V))
2867 Instruction *I = dyn_cast<Instruction>(V);
2868 assert(I && "bundle member must be an instruction");
2869 assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
2870 if (!ScheduleStart) {
2871 // It's the first instruction in the new region.
2872 initScheduleData(I, I->getNextNode(), nullptr, nullptr);
2874 ScheduleEnd = I->getNextNode();
2875 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
2876 DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
2879 // Search up and down at the same time, because we don't know if the new
2880 // instruction is above or below the existing scheduling region.
2881 BasicBlock::reverse_iterator UpIter(ScheduleStart->getIterator());
2882 BasicBlock::reverse_iterator UpperEnd = BB->rend();
2883 BasicBlock::iterator DownIter(ScheduleEnd);
2884 BasicBlock::iterator LowerEnd = BB->end();
2886 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
2887 DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
2891 if (UpIter != UpperEnd) {
2892 if (&*UpIter == I) {
2893 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
2895 DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n");
2900 if (DownIter != LowerEnd) {
2901 if (&*DownIter == I) {
2902 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
2904 ScheduleEnd = I->getNextNode();
2905 assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
2906 DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
2911 assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
2912 "instruction not found in block");
2917 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
2919 ScheduleData *PrevLoadStore,
2920 ScheduleData *NextLoadStore) {
2921 ScheduleData *CurrentLoadStore = PrevLoadStore;
2922 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
2923 ScheduleData *SD = ScheduleDataMap[I];
2925 // Allocate a new ScheduleData for the instruction.
2926 if (ChunkPos >= ChunkSize) {
2927 ScheduleDataChunks.push_back(
2928 llvm::make_unique<ScheduleData[]>(ChunkSize));
2931 SD = &(ScheduleDataChunks.back()[ChunkPos++]);
2932 ScheduleDataMap[I] = SD;
2935 assert(!isInSchedulingRegion(SD) &&
2936 "new ScheduleData already in scheduling region");
2937 SD->init(SchedulingRegionID);
2939 if (I->mayReadOrWriteMemory()) {
2940 // Update the linked list of memory accessing instructions.
2941 if (CurrentLoadStore) {
2942 CurrentLoadStore->NextLoadStore = SD;
2944 FirstLoadStoreInRegion = SD;
2946 CurrentLoadStore = SD;
2949 if (NextLoadStore) {
2950 if (CurrentLoadStore)
2951 CurrentLoadStore->NextLoadStore = NextLoadStore;
2953 LastLoadStoreInRegion = CurrentLoadStore;
2957 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
2958 bool InsertInReadyList,
2960 assert(SD->isSchedulingEntity());
2962 SmallVector<ScheduleData *, 10> WorkList;
2963 WorkList.push_back(SD);
2965 while (!WorkList.empty()) {
2966 ScheduleData *SD = WorkList.back();
2967 WorkList.pop_back();
2969 ScheduleData *BundleMember = SD;
2970 while (BundleMember) {
2971 assert(isInSchedulingRegion(BundleMember));
2972 if (!BundleMember->hasValidDependencies()) {
2974 DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n");
2975 BundleMember->Dependencies = 0;
2976 BundleMember->resetUnscheduledDeps();
2978 // Handle def-use chain dependencies.
2979 for (User *U : BundleMember->Inst->users()) {
2980 if (isa<Instruction>(U)) {
2981 ScheduleData *UseSD = getScheduleData(U);
2982 if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
2983 BundleMember->Dependencies++;
2984 ScheduleData *DestBundle = UseSD->FirstInBundle;
2985 if (!DestBundle->IsScheduled) {
2986 BundleMember->incrementUnscheduledDeps(1);
2988 if (!DestBundle->hasValidDependencies()) {
2989 WorkList.push_back(DestBundle);
2993 // I'm not sure if this can ever happen. But we need to be safe.
2994 // This lets the instruction/bundle never be scheduled and
2995 // eventually disable vectorization.
2996 BundleMember->Dependencies++;
2997 BundleMember->incrementUnscheduledDeps(1);
3001 // Handle the memory dependencies.
3002 ScheduleData *DepDest = BundleMember->NextLoadStore;
3004 Instruction *SrcInst = BundleMember->Inst;
3005 MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
3006 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
3007 unsigned numAliased = 0;
3008 unsigned DistToSrc = 1;
3011 assert(isInSchedulingRegion(DepDest));
3013 // We have two limits to reduce the complexity:
3014 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
3015 // SLP->isAliased (which is the expensive part in this loop).
3016 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
3017 // the whole loop (even if the loop is fast, it's quadratic).
3018 // It's important for the loop break condition (see below) to
3019 // check this limit even between two read-only instructions.
3020 if (DistToSrc >= MaxMemDepDistance ||
3021 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
3022 (numAliased >= AliasedCheckLimit ||
3023 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
3025 // We increment the counter only if the locations are aliased
3026 // (instead of counting all alias checks). This gives a better
3027 // balance between reduced runtime and accurate dependencies.
3030 DepDest->MemoryDependencies.push_back(BundleMember);
3031 BundleMember->Dependencies++;
3032 ScheduleData *DestBundle = DepDest->FirstInBundle;
3033 if (!DestBundle->IsScheduled) {
3034 BundleMember->incrementUnscheduledDeps(1);
3036 if (!DestBundle->hasValidDependencies()) {
3037 WorkList.push_back(DestBundle);
3040 DepDest = DepDest->NextLoadStore;
3042 // Example, explaining the loop break condition: Let's assume our
3043 // starting instruction is i0 and MaxMemDepDistance = 3.
3046 // i0,i1,i2,i3,i4,i5,i6,i7,i8
3049 // MaxMemDepDistance let us stop alias-checking at i3 and we add
3050 // dependencies from i0 to i3,i4,.. (even if they are not aliased).
3051 // Previously we already added dependencies from i3 to i6,i7,i8
3052 // (because of MaxMemDepDistance). As we added a dependency from
3053 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
3054 // and we can abort this loop at i6.
3055 if (DistToSrc >= 2 * MaxMemDepDistance)
3061 BundleMember = BundleMember->NextInBundle;
3063 if (InsertInReadyList && SD->isReady()) {
3064 ReadyInsts.push_back(SD);
3065 DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n");
3070 void BoUpSLP::BlockScheduling::resetSchedule() {
3071 assert(ScheduleStart &&
3072 "tried to reset schedule on block which has not been scheduled");
3073 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
3074 ScheduleData *SD = getScheduleData(I);
3075 assert(isInSchedulingRegion(SD));
3076 SD->IsScheduled = false;
3077 SD->resetUnscheduledDeps();
3082 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
3084 if (!BS->ScheduleStart)
3087 DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
3089 BS->resetSchedule();
3091 // For the real scheduling we use a more sophisticated ready-list: it is
3092 // sorted by the original instruction location. This lets the final schedule
3093 // be as close as possible to the original instruction order.
3094 struct ScheduleDataCompare {
3095 bool operator()(ScheduleData *SD1, ScheduleData *SD2) {
3096 return SD2->SchedulingPriority < SD1->SchedulingPriority;
3099 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
3101 // Ensure that all dependency data is updated and fill the ready-list with
3102 // initial instructions.
3104 int NumToSchedule = 0;
3105 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
3106 I = I->getNextNode()) {
3107 ScheduleData *SD = BS->getScheduleData(I);
3109 SD->isPartOfBundle() == (ScalarToTreeEntry.count(SD->Inst) != 0) &&
3110 "scheduler and vectorizer have different opinion on what is a bundle");
3111 SD->FirstInBundle->SchedulingPriority = Idx++;
3112 if (SD->isSchedulingEntity()) {
3113 BS->calculateDependencies(SD, false, this);
3117 BS->initialFillReadyList(ReadyInsts);
3119 Instruction *LastScheduledInst = BS->ScheduleEnd;
3121 // Do the "real" scheduling.
3122 while (!ReadyInsts.empty()) {
3123 ScheduleData *picked = *ReadyInsts.begin();
3124 ReadyInsts.erase(ReadyInsts.begin());
3126 // Move the scheduled instruction(s) to their dedicated places, if not
3128 ScheduleData *BundleMember = picked;
3129 while (BundleMember) {
3130 Instruction *pickedInst = BundleMember->Inst;
3131 if (LastScheduledInst->getNextNode() != pickedInst) {
3132 BS->BB->getInstList().remove(pickedInst);
3133 BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
3136 LastScheduledInst = pickedInst;
3137 BundleMember = BundleMember->NextInBundle;
3140 BS->schedule(picked, ReadyInsts);
3143 assert(NumToSchedule == 0 && "could not schedule all instructions");
3145 // Avoid duplicate scheduling of the block.
3146 BS->ScheduleStart = nullptr;
3149 /// The SLPVectorizer Pass.
3150 struct SLPVectorizer : public FunctionPass {
3151 typedef SmallVector<StoreInst *, 8> StoreList;
3152 typedef MapVector<Value *, StoreList> StoreListMap;
3154 /// Pass identification, replacement for typeid
3157 explicit SLPVectorizer() : FunctionPass(ID) {
3158 initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
3161 ScalarEvolution *SE;
3162 TargetTransformInfo *TTI;
3163 TargetLibraryInfo *TLI;
3167 AssumptionCache *AC;
3169 bool runOnFunction(Function &F) override {
3170 if (skipOptnoneFunction(F))
3173 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
3174 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
3175 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
3176 TLI = TLIP ? &TLIP->getTLI() : nullptr;
3177 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3178 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
3179 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
3180 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
3183 bool Changed = false;
3185 // If the target claims to have no vector registers don't attempt
3187 if (!TTI->getNumberOfRegisters(true))
3190 // Use the vector register size specified by the target unless overridden
3191 // by a command-line option.
3192 // TODO: It would be better to limit the vectorization factor based on
3193 // data type rather than just register size. For example, x86 AVX has
3194 // 256-bit registers, but it does not support integer operations
3195 // at that width (that requires AVX2).
3196 if (MaxVectorRegSizeOption.getNumOccurrences())
3197 MaxVecRegSize = MaxVectorRegSizeOption;
3199 MaxVecRegSize = TTI->getRegisterBitWidth(true);
3201 // Don't vectorize when the attribute NoImplicitFloat is used.
3202 if (F.hasFnAttribute(Attribute::NoImplicitFloat))
3205 DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
3207 // Use the bottom up slp vectorizer to construct chains that start with
3208 // store instructions.
3209 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC);
3211 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
3212 // delete instructions.
3214 // Scan the blocks in the function in post order.
3215 for (auto BB : post_order(&F.getEntryBlock())) {
3216 // Vectorize trees that end at stores.
3217 if (unsigned count = collectStores(BB, R)) {
3219 DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n");
3220 Changed |= vectorizeStoreChains(R);
3223 // Vectorize trees that end at reductions.
3224 Changed |= vectorizeChainsInBlock(BB, R);
3228 R.optimizeGatherSequence();
3229 DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
3230 DEBUG(verifyFunction(F));
3235 void getAnalysisUsage(AnalysisUsage &AU) const override {
3236 FunctionPass::getAnalysisUsage(AU);
3237 AU.addRequired<AssumptionCacheTracker>();
3238 AU.addRequired<ScalarEvolutionWrapperPass>();
3239 AU.addRequired<AAResultsWrapperPass>();
3240 AU.addRequired<TargetTransformInfoWrapperPass>();
3241 AU.addRequired<LoopInfoWrapperPass>();
3242 AU.addRequired<DominatorTreeWrapperPass>();
3243 AU.addPreserved<LoopInfoWrapperPass>();
3244 AU.addPreserved<DominatorTreeWrapperPass>();
3245 AU.setPreservesCFG();
3250 /// \brief Collect memory references and sort them according to their base
3251 /// object. We sort the stores to their base objects to reduce the cost of the
3252 /// quadratic search on the stores. TODO: We can further reduce this cost
3253 /// if we flush the chain creation every time we run into a memory barrier.
3254 unsigned collectStores(BasicBlock *BB, BoUpSLP &R);
3256 /// \brief Try to vectorize a chain that starts at two arithmetic instrs.
3257 bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);
3259 /// \brief Try to vectorize a list of operands.
3260 /// \@param BuildVector A list of users to ignore for the purpose of
3261 /// scheduling and that don't need extracting.
3262 /// \returns true if a value was vectorized.
3263 bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
3264 ArrayRef<Value *> BuildVector = None,
3265 bool allowReorder = false);
3267 /// \brief Try to vectorize a chain that may start at the operands of \V;
3268 bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);
3270 /// \brief Vectorize the stores that were collected in StoreRefs.
3271 bool vectorizeStoreChains(BoUpSLP &R);
3273 /// \brief Scan the basic block and look for patterns that are likely to start
3274 /// a vectorization chain.
3275 bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);
3277 bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
3278 BoUpSLP &R, unsigned VecRegSize);
3280 bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
3283 StoreListMap StoreRefs;
3284 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
3287 /// \brief Check that the Values in the slice in VL array are still existent in
3288 /// the WeakVH array.
3289 /// Vectorization of part of the VL array may cause later values in the VL array
3290 /// to become invalid. We track when this has happened in the WeakVH array.
3291 static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH,
3292 unsigned SliceBegin, unsigned SliceSize) {
3293 VL = VL.slice(SliceBegin, SliceSize);
3294 VH = VH.slice(SliceBegin, SliceSize);
3295 return !std::equal(VL.begin(), VL.end(), VH.begin());
3298 bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
3299 int CostThreshold, BoUpSLP &R,
3300 unsigned VecRegSize) {
3301 unsigned ChainLen = Chain.size();
3302 DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
3304 Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
3305 auto &DL = cast<StoreInst>(Chain[0])->getModule()->getDataLayout();
3306 unsigned Sz = DL.getTypeSizeInBits(StoreTy);
3307 unsigned VF = VecRegSize / Sz;
3309 if (!isPowerOf2_32(Sz) || VF < 2)
3312 // Keep track of values that were deleted by vectorizing in the loop below.
3313 SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());
3315 bool Changed = false;
3316 // Look for profitable vectorizable trees at all offsets, starting at zero.
3317 for (unsigned i = 0, e = ChainLen; i < e; ++i) {
3321 // Check that a previous iteration of this loop did not delete the Value.
3322 if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
3325 DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
3327 ArrayRef<Value *> Operands = Chain.slice(i, VF);
3329 R.buildTree(Operands);
3331 int Cost = R.getTreeCost();
3333 DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
3334 if (Cost < CostThreshold) {
3335 DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
3338 // Move to the next bundle.
3347 bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
3348 int costThreshold, BoUpSLP &R) {
3349 SetVector<StoreInst *> Heads, Tails;
3350 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
3352 // We may run into multiple chains that merge into a single chain. We mark the
3353 // stores that we vectorized so that we don't visit the same store twice.
3354 BoUpSLP::ValueSet VectorizedStores;
3355 bool Changed = false;
3357 // Do a quadratic search on all of the given stores and find
3358 // all of the pairs of stores that follow each other.
3359 SmallVector<unsigned, 16> IndexQueue;
3360 for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
3361 const DataLayout &DL = Stores[i]->getModule()->getDataLayout();
3363 // If a store has multiple consecutive store candidates, search Stores
3364 // array according to the sequence: from i+1 to e, then from i-1 to 0.
3365 // This is because usually pairing with immediate succeeding or preceding
3366 // candidate create the best chance to find slp vectorization opportunity.
3368 for (j = i + 1; j < e; ++j)
3369 IndexQueue.push_back(j);
3370 for (j = i; j > 0; --j)
3371 IndexQueue.push_back(j - 1);
3373 for (auto &k : IndexQueue) {
3374 if (R.isConsecutiveAccess(Stores[i], Stores[k], DL)) {
3375 Tails.insert(Stores[k]);
3376 Heads.insert(Stores[i]);
3377 ConsecutiveChain[Stores[i]] = Stores[k];
3383 // For stores that start but don't end a link in the chain:
3384 for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
3386 if (Tails.count(*it))
3389 // We found a store instr that starts a chain. Now follow the chain and try
3391 BoUpSLP::ValueList Operands;
3393 // Collect the chain into a list.
3394 while (Tails.count(I) || Heads.count(I)) {
3395 if (VectorizedStores.count(I))
3397 Operands.push_back(I);
3398 // Move to the next value in the chain.
3399 I = ConsecutiveChain[I];
3402 // FIXME: Is division-by-2 the correct step? Should we assert that the
3403 // register size is a power-of-2?
3404 for (unsigned Size = MaxVecRegSize; Size >= MinVecRegSize; Size /= 2) {
3405 if (vectorizeStoreChain(Operands, costThreshold, R, Size)) {
3406 // Mark the vectorized stores so that we don't vectorize them again.
3407 VectorizedStores.insert(Operands.begin(), Operands.end());
3418 unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
3421 const DataLayout &DL = BB->getModule()->getDataLayout();
3422 for (Instruction &I : *BB) {
3423 StoreInst *SI = dyn_cast<StoreInst>(&I);
3427 // Don't touch volatile stores.
3428 if (!SI->isSimple())
3431 // Check that the pointer points to scalars.
3432 Type *Ty = SI->getValueOperand()->getType();
3433 if (!isValidElementType(Ty))
3436 // Find the base pointer.
3437 Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);
3439 // Save the store locations.
3440 StoreRefs[Ptr].push_back(SI);
3446 bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
3449 Value *VL[] = { A, B };
3450 return tryToVectorizeList(VL, R, None, true);
3453 bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
3454 ArrayRef<Value *> BuildVector,
3455 bool allowReorder) {
3459 DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size() << ".\n");
3461 // Check that all of the parts are scalar instructions of the same type.
3462 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
3466 unsigned Opcode0 = I0->getOpcode();
3467 const DataLayout &DL = I0->getModule()->getDataLayout();
3469 Type *Ty0 = I0->getType();
3470 unsigned Sz = DL.getTypeSizeInBits(Ty0);
3471 // FIXME: Register size should be a parameter to this function, so we can
3472 // try different vectorization factors.
3473 unsigned VF = MinVecRegSize / Sz;
3475 for (Value *V : VL) {
3476 Type *Ty = V->getType();
3477 if (!isValidElementType(Ty))
3479 Instruction *Inst = dyn_cast<Instruction>(V);
3480 if (!Inst || Inst->getOpcode() != Opcode0)
3484 bool Changed = false;
3486 // Keep track of values that were deleted by vectorizing in the loop below.
3487 SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());
3489 for (unsigned i = 0, e = VL.size(); i < e; ++i) {
3490 unsigned OpsWidth = 0;
3497 if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
3500 // Check that a previous iteration of this loop did not delete the Value.
3501 if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
3504 DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
3506 ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);
3508 ArrayRef<Value *> BuildVectorSlice;
3509 if (!BuildVector.empty())
3510 BuildVectorSlice = BuildVector.slice(i, OpsWidth);
3512 R.buildTree(Ops, BuildVectorSlice);
3513 // TODO: check if we can allow reordering also for other cases than
3514 // tryToVectorizePair()
3515 if (allowReorder && R.shouldReorder()) {
3516 assert(Ops.size() == 2);
3517 assert(BuildVectorSlice.empty());
3518 Value *ReorderedOps[] = { Ops[1], Ops[0] };
3519 R.buildTree(ReorderedOps, None);
3521 int Cost = R.getTreeCost();
3523 if (Cost < -SLPCostThreshold) {
3524 DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
3525 Value *VectorizedRoot = R.vectorizeTree();
3527 // Reconstruct the build vector by extracting the vectorized root. This
3528 // way we handle the case where some elements of the vector are undefined.
3529 // (return (inserelt <4 xi32> (insertelt undef (opd0) 0) (opd1) 2))
3530 if (!BuildVectorSlice.empty()) {
3531 // The insert point is the last build vector instruction. The vectorized
3532 // root will precede it. This guarantees that we get an instruction. The
3533 // vectorized tree could have been constant folded.
3534 Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
3535 unsigned VecIdx = 0;
3536 for (auto &V : BuildVectorSlice) {
3537 IRBuilder<true, NoFolder> Builder(
3538 InsertAfter->getParent(), ++BasicBlock::iterator(InsertAfter));
3539 InsertElementInst *IE = cast<InsertElementInst>(V);
3540 Instruction *Extract = cast<Instruction>(Builder.CreateExtractElement(
3541 VectorizedRoot, Builder.getInt32(VecIdx++)));
3542 IE->setOperand(1, Extract);
3543 IE->removeFromParent();
3544 IE->insertAfter(Extract);
3548 // Move to the next bundle.
3557 bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
3561 // Try to vectorize V.
3562 if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
3565 BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
3566 BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
3568 if (B && B->hasOneUse()) {
3569 BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
3570 BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
3571 if (tryToVectorizePair(A, B0, R)) {
3574 if (tryToVectorizePair(A, B1, R)) {
3580 if (A && A->hasOneUse()) {
3581 BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
3582 BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
3583 if (tryToVectorizePair(A0, B, R)) {
3586 if (tryToVectorizePair(A1, B, R)) {
3593 /// \brief Generate a shuffle mask to be used in a reduction tree.
3595 /// \param VecLen The length of the vector to be reduced.
3596 /// \param NumEltsToRdx The number of elements that should be reduced in the
3598 /// \param IsPairwise Whether the reduction is a pairwise or splitting
3599 /// reduction. A pairwise reduction will generate a mask of
3600 /// <0,2,...> or <1,3,..> while a splitting reduction will generate
3601 /// <2,3, undef,undef> for a vector of 4 and NumElts = 2.
3602 /// \param IsLeft True will generate a mask of even elements, odd otherwise.
3603 static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
3604 bool IsPairwise, bool IsLeft,
3605 IRBuilder<> &Builder) {
3606 assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");
3608 SmallVector<Constant *, 32> ShuffleMask(
3609 VecLen, UndefValue::get(Builder.getInt32Ty()));
3612 // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
3613 for (unsigned i = 0; i != NumEltsToRdx; ++i)
3614 ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
3616 // Move the upper half of the vector to the lower half.
3617 for (unsigned i = 0; i != NumEltsToRdx; ++i)
3618 ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);
3620 return ConstantVector::get(ShuffleMask);
3624 /// Model horizontal reductions.
3626 /// A horizontal reduction is a tree of reduction operations (currently add and
3627 /// fadd) that has operations that can be put into a vector as its leaf.
3628 /// For example, this tree:
3635 /// This tree has "mul" as its reduced values and "+" as its reduction
3636 /// operations. A reduction might be feeding into a store or a binary operation
3651 class HorizontalReduction {
3652 SmallVector<Value *, 16> ReductionOps;
3653 SmallVector<Value *, 32> ReducedVals;
3655 BinaryOperator *ReductionRoot;
3656 PHINode *ReductionPHI;
3658 /// The opcode of the reduction.
3659 unsigned ReductionOpcode;
3660 /// The opcode of the values we perform a reduction on.
3661 unsigned ReducedValueOpcode;
3662 /// Should we model this reduction as a pairwise reduction tree or a tree that
3663 /// splits the vector in halves and adds those halves.
3664 bool IsPairwiseReduction;
3667 /// The width of one full horizontal reduction operation.
3668 unsigned ReduxWidth;
3670 HorizontalReduction()
3671 : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0),
3672 ReducedValueOpcode(0), IsPairwiseReduction(false), ReduxWidth(0) {}
3674 /// \brief Try to find a reduction tree.
3675 bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) {
3677 std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
3678 "Thi phi needs to use the binary operator");
3680 // We could have a initial reductions that is not an add.
3681 // r *= v1 + v2 + v3 + v4
3682 // In such a case start looking for a tree rooted in the first '+'.
3684 if (B->getOperand(0) == Phi) {
3686 B = dyn_cast<BinaryOperator>(B->getOperand(1));
3687 } else if (B->getOperand(1) == Phi) {
3689 B = dyn_cast<BinaryOperator>(B->getOperand(0));
3696 Type *Ty = B->getType();
3697 if (!isValidElementType(Ty))
3700 const DataLayout &DL = B->getModule()->getDataLayout();
3701 ReductionOpcode = B->getOpcode();
3702 ReducedValueOpcode = 0;
3703 // FIXME: Register size should be a parameter to this function, so we can
3704 // try different vectorization factors.
3705 ReduxWidth = MinVecRegSize / DL.getTypeSizeInBits(Ty);
3712 // We currently only support adds.
3713 if (ReductionOpcode != Instruction::Add &&
3714 ReductionOpcode != Instruction::FAdd)
3717 // Post order traverse the reduction tree starting at B. We only handle true
3718 // trees containing only binary operators or selects.
3719 SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
3720 Stack.push_back(std::make_pair(B, 0));
3721 while (!Stack.empty()) {
3722 Instruction *TreeN = Stack.back().first;
3723 unsigned EdgeToVist = Stack.back().second++;
3724 bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;
3726 // Only handle trees in the current basic block.
3727 if (TreeN->getParent() != B->getParent())
3730 // Each tree node needs to have one user except for the ultimate
3732 if (!TreeN->hasOneUse() && TreeN != B)
3736 if (EdgeToVist == 2 || IsReducedValue) {
3737 if (IsReducedValue) {
3738 // Make sure that the opcodes of the operations that we are going to
3740 if (!ReducedValueOpcode)
3741 ReducedValueOpcode = TreeN->getOpcode();
3742 else if (ReducedValueOpcode != TreeN->getOpcode())
3744 ReducedVals.push_back(TreeN);
3746 // We need to be able to reassociate the adds.
3747 if (!TreeN->isAssociative())
3749 ReductionOps.push_back(TreeN);
3756 // Visit left or right.
3757 Value *NextV = TreeN->getOperand(EdgeToVist);
3758 // We currently only allow BinaryOperator's and SelectInst's as reduction
3759 // values in our tree.
3760 if (isa<BinaryOperator>(NextV) || isa<SelectInst>(NextV))
3761 Stack.push_back(std::make_pair(cast<Instruction>(NextV), 0));
3762 else if (NextV != Phi)
3768 /// \brief Attempt to vectorize the tree found by
3769 /// matchAssociativeReduction.
3770 bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
3771 if (ReducedVals.empty())
3774 unsigned NumReducedVals = ReducedVals.size();
3775 if (NumReducedVals < ReduxWidth)
3778 Value *VectorizedTree = nullptr;
3779 IRBuilder<> Builder(ReductionRoot);
3780 FastMathFlags Unsafe;
3781 Unsafe.setUnsafeAlgebra();
3782 Builder.SetFastMathFlags(Unsafe);
3785 for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
3786 V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps);
3789 int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
3790 if (Cost >= -SLPCostThreshold)
3793 DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
3796 // Vectorize a tree.
3797 DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
3798 Value *VectorizedRoot = V.vectorizeTree();
3800 // Emit a reduction.
3801 Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
3802 if (VectorizedTree) {
3803 Builder.SetCurrentDebugLocation(Loc);
3804 VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
3805 ReducedSubTree, "bin.rdx");
3807 VectorizedTree = ReducedSubTree;
3810 if (VectorizedTree) {
3811 // Finish the reduction.
3812 for (; i < NumReducedVals; ++i) {
3813 Builder.SetCurrentDebugLocation(
3814 cast<Instruction>(ReducedVals[i])->getDebugLoc());
3815 VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
3820 assert(ReductionRoot && "Need a reduction operation");
3821 ReductionRoot->setOperand(0, VectorizedTree);
3822 ReductionRoot->setOperand(1, ReductionPHI);
3824 ReductionRoot->replaceAllUsesWith(VectorizedTree);
3826 return VectorizedTree != nullptr;
3829 unsigned numReductionValues() const {
3830 return ReducedVals.size();
3834 /// \brief Calculate the cost of a reduction.
3835 int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
3836 Type *ScalarTy = FirstReducedVal->getType();
3837 Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);
3839 int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
3840 int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);
3842 IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
3843 int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;
3845 int ScalarReduxCost =
3846 ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);
3848 DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
3849 << " for reduction that starts with " << *FirstReducedVal
3851 << (IsPairwiseReduction ? "pairwise" : "splitting")
3852 << " reduction)\n");
3854 return VecReduxCost - ScalarReduxCost;
3857 static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
3858 Value *R, const Twine &Name = "") {
3859 if (Opcode == Instruction::FAdd)
3860 return Builder.CreateFAdd(L, R, Name);
3861 return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
3864 /// \brief Emit a horizontal reduction of the vectorized value.
3865 Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
3866 assert(VectorizedValue && "Need to have a vectorized tree node");
3867 assert(isPowerOf2_32(ReduxWidth) &&
3868 "We only handle power-of-two reductions for now");
3870 Value *TmpVec = VectorizedValue;
3871 for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
3872 if (IsPairwiseReduction) {
3874 createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
3876 createRdxShuffleMask(ReduxWidth, i, true, false, Builder);
3878 Value *LeftShuf = Builder.CreateShuffleVector(
3879 TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
3880 Value *RightShuf = Builder.CreateShuffleVector(
3881 TmpVec, UndefValue::get(TmpVec->getType()), (RightMask),
3883 TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
3887 createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
3888 Value *Shuf = Builder.CreateShuffleVector(
3889 TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
3890 TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx");
3894 // The result is in the first element of the vector.
3895 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
3899 /// \brief Recognize construction of vectors like
3900 /// %ra = insertelement <4 x float> undef, float %s0, i32 0
3901 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1
3902 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2
3903 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3
3905 /// Returns true if it matches
3907 static bool findBuildVector(InsertElementInst *FirstInsertElem,
3908 SmallVectorImpl<Value *> &BuildVector,
3909 SmallVectorImpl<Value *> &BuildVectorOpds) {
3910 if (!isa<UndefValue>(FirstInsertElem->getOperand(0)))
3913 InsertElementInst *IE = FirstInsertElem;
3915 BuildVector.push_back(IE);
3916 BuildVectorOpds.push_back(IE->getOperand(1));
3918 if (IE->use_empty())
3921 InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
3925 // If this isn't the final use, make sure the next insertelement is the only
3926 // use. It's OK if the final constructed vector is used multiple times
3927 if (!IE->hasOneUse())
3936 static bool PhiTypeSorterFunc(Value *V, Value *V2) {
3937 return V->getType() < V2->getType();
3940 /// \brief Try and get a reduction value from a phi node.
3942 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
3943 /// if they come from either \p ParentBB or a containing loop latch.
3945 /// \returns A candidate reduction value if possible, or \code nullptr \endcode
3946 /// if not possible.
3947 static Value *getReductionValue(PHINode *P, BasicBlock *ParentBB,
3949 Value *Rdx = nullptr;
3951 // Return the incoming value if it comes from the same BB as the phi node.
3952 if (P->getIncomingBlock(0) == ParentBB) {
3953 Rdx = P->getIncomingValue(0);
3954 } else if (P->getIncomingBlock(1) == ParentBB) {
3955 Rdx = P->getIncomingValue(1);
3961 // Otherwise, check whether we have a loop latch to look at.
3962 Loop *BBL = LI->getLoopFor(ParentBB);
3965 BasicBlock *BBLatch = BBL->getLoopLatch();
3969 // There is a loop latch, return the incoming value if it comes from
3970 // that. This reduction pattern occassionaly turns up.
3971 if (P->getIncomingBlock(0) == BBLatch) {
3972 Rdx = P->getIncomingValue(0);
3973 } else if (P->getIncomingBlock(1) == BBLatch) {
3974 Rdx = P->getIncomingValue(1);
3980 /// \brief Attempt to reduce a horizontal reduction.
3981 /// If it is legal to match a horizontal reduction feeding
3982 /// the phi node P with reduction operators BI, then check if it
3984 /// \returns true if a horizontal reduction was matched and reduced.
3985 /// \returns false if a horizontal reduction was not matched.
3986 static bool canMatchHorizontalReduction(PHINode *P, BinaryOperator *BI,
3987 BoUpSLP &R, TargetTransformInfo *TTI) {
3988 if (!ShouldVectorizeHor)
3991 HorizontalReduction HorRdx;
3992 if (!HorRdx.matchAssociativeReduction(P, BI))
3995 // If there is a sufficient number of reduction values, reduce
3996 // to a nearby power-of-2. Can safely generate oversized
3997 // vectors and rely on the backend to split them to legal sizes.
3999 std::max((uint64_t)4, PowerOf2Floor(HorRdx.numReductionValues()));
4001 return HorRdx.tryToReduce(R, TTI);
4004 bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
4005 bool Changed = false;
4006 SmallVector<Value *, 4> Incoming;
4007 SmallSet<Value *, 16> VisitedInstrs;
4009 bool HaveVectorizedPhiNodes = true;
4010 while (HaveVectorizedPhiNodes) {
4011 HaveVectorizedPhiNodes = false;
4013 // Collect the incoming values from the PHIs.
4015 for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie;
4017 PHINode *P = dyn_cast<PHINode>(instr);
4021 if (!VisitedInstrs.count(P))
4022 Incoming.push_back(P);
4026 std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);
4028 // Try to vectorize elements base on their type.
4029 for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
4033 // Look for the next elements with the same type.
4034 SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
4035 while (SameTypeIt != E &&
4036 (*SameTypeIt)->getType() == (*IncIt)->getType()) {
4037 VisitedInstrs.insert(*SameTypeIt);
4041 // Try to vectorize them.
4042 unsigned NumElts = (SameTypeIt - IncIt);
4043 DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts << ")\n");
4044 if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) {
4045 // Success start over because instructions might have been changed.
4046 HaveVectorizedPhiNodes = true;
4051 // Start over at the next instruction of a different type (or the end).
4056 VisitedInstrs.clear();
4058 for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
4059 // We may go through BB multiple times so skip the one we have checked.
4060 if (!VisitedInstrs.insert(&*it).second)
4063 if (isa<DbgInfoIntrinsic>(it))
4066 // Try to vectorize reductions that use PHINodes.
4067 if (PHINode *P = dyn_cast<PHINode>(it)) {
4068 // Check that the PHI is a reduction PHI.
4069 if (P->getNumIncomingValues() != 2)
4072 Value *Rdx = getReductionValue(P, BB, LI);
4074 // Check if this is a Binary Operator.
4075 BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
4079 // Try to match and vectorize a horizontal reduction.
4080 if (canMatchHorizontalReduction(P, BI, R, TTI)) {
4087 Value *Inst = BI->getOperand(0);
4089 Inst = BI->getOperand(1);
4091 if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
4092 // We would like to start over since some instructions are deleted
4093 // and the iterator may become invalid value.
4103 if (ShouldStartVectorizeHorAtStore)
4104 if (StoreInst *SI = dyn_cast<StoreInst>(it))
4105 if (BinaryOperator *BinOp =
4106 dyn_cast<BinaryOperator>(SI->getValueOperand())) {
4107 if (canMatchHorizontalReduction(nullptr, BinOp, R, TTI) ||
4108 tryToVectorize(BinOp, R)) {
4116 // Try to vectorize horizontal reductions feeding into a return.
4117 if (ReturnInst *RI = dyn_cast<ReturnInst>(it))
4118 if (RI->getNumOperands() != 0)
4119 if (BinaryOperator *BinOp =
4120 dyn_cast<BinaryOperator>(RI->getOperand(0))) {
4121 DEBUG(dbgs() << "SLP: Found a return to vectorize.\n");
4122 if (tryToVectorizePair(BinOp->getOperand(0),
4123 BinOp->getOperand(1), R)) {
4131 // Try to vectorize trees that start at compare instructions.
4132 if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
4133 if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
4135 // We would like to start over since some instructions are deleted
4136 // and the iterator may become invalid value.
4142 for (int i = 0; i < 2; ++i) {
4143 if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
4144 if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
4146 // We would like to start over since some instructions are deleted
4147 // and the iterator may become invalid value.
4157 // Try to vectorize trees that start at insertelement instructions.
4158 if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
4159 SmallVector<Value *, 16> BuildVector;
4160 SmallVector<Value *, 16> BuildVectorOpds;
4161 if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
4164 // Vectorize starting with the build vector operands ignoring the
4165 // BuildVector instructions for the purpose of scheduling and user
4167 if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
4180 bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
4181 bool Changed = false;
4182 // Attempt to sort and vectorize each of the store-groups.
4183 for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
4185 if (it->second.size() < 2)
4188 DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
4189 << it->second.size() << ".\n");
4191 // Process the stores in chunks of 16.
4192 // TODO: The limit of 16 inhibits greater vectorization factors.
4193 // For example, AVX2 supports v32i8. Increasing this limit, however,
4194 // may cause a significant compile-time increase.
4195 for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI+=16) {
4196 unsigned Len = std::min<unsigned>(CE - CI, 16);
4197 Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len),
4198 -SLPCostThreshold, R);
4204 } // end anonymous namespace
4206 char SLPVectorizer::ID = 0;
4207 static const char lv_name[] = "SLP Vectorizer";
4208 INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
4209 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
4210 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
4211 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
4212 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
4213 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
4214 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
4217 Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }