From 944e082905874dcaa334925de5ead6c3aa32e7d0 Mon Sep 17 00:00:00 2001
From: Adam Nemet
Date: Tue, 14 Jul 2015 22:32:44 +0000
Subject: [PATCH] [LAA] Lift RuntimePointerCheck out of LoopAccessInfo, NFC

I am planning to add more nested classes inside RuntimePointerCheck, so all
this triple-nesting would be hard to follow.

Also rename it to RuntimePointerChecking (i.e. append 'ing').

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@242218 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/Analysis/LoopAccessAnalysis.h | 241 ++++++++++-----------
 lib/Analysis/LoopAccessAnalysis.cpp        |  84 ++++---
 lib/Transforms/Scalar/LoopDistribute.cpp   |   5 +-
 lib/Transforms/Utils/LoopVersioning.cpp    |   2 +-
 lib/Transforms/Vectorize/LoopVectorize.cpp |  17 +-
 5 files changed, 173 insertions(+), 176 deletions(-)

diff --git a/include/llvm/Analysis/LoopAccessAnalysis.h b/include/llvm/Analysis/LoopAccessAnalysis.h
index 15bc100c329..cfd75dcd8c8 100644
--- a/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -292,6 +292,122 @@ private:
   bool couldPreventStoreLoadForward(unsigned Distance, unsigned TypeByteSize);
 };
 
+/// This struct holds information about the memory runtime legality check that
+/// a group of pointers do not overlap.
+struct RuntimePointerChecking {
+  RuntimePointerChecking(ScalarEvolution *SE) : Need(false), SE(SE) {}
+
+  /// Reset the state of the pointer runtime information.
+  void reset() {
+    Need = false;
+    Pointers.clear();
+    Starts.clear();
+    Ends.clear();
+    IsWritePtr.clear();
+    DependencySetId.clear();
+    AliasSetId.clear();
+    Exprs.clear();
+  }
+
+  /// Insert a pointer and calculate the start and end SCEVs.
+  void insert(Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
+              unsigned ASId, const ValueToValueMap &Strides);
+
+  /// \brief No run-time memory checking is necessary.
+  bool empty() const { return Pointers.empty(); }
+
+  /// A grouping of pointers. A single memcheck is required between
+  /// two groups.
+  struct CheckingPtrGroup {
+    /// \brief Create a new pointer checking group containing a single
+    /// pointer, with index \p Index in RtCheck.
+    CheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
+        : RtCheck(RtCheck), High(RtCheck.Ends[Index]),
+          Low(RtCheck.Starts[Index]) {
+      Members.push_back(Index);
+    }
+
+    /// \brief Tries to add the pointer recorded in RtCheck at index
+    /// \p Index to this pointer checking group. We can only add a pointer
+    /// to a checking group if we will still be able to get
+    /// the upper and lower bounds of the check. Returns true in case
+    /// of success, false otherwise.
+    bool addPointer(unsigned Index);
+
+    /// Constitutes the context of this pointer checking group. For each
+    /// pointer that is a member of this group we will retain the index
+    /// at which it appears in RtCheck.
+    RuntimePointerChecking &RtCheck;
+    /// The SCEV expression which represents the upper bound of all the
+    /// pointers in this group.
+    const SCEV *High;
+    /// The SCEV expression which represents the lower bound of all the
+    /// pointers in this group.
+    const SCEV *Low;
+    /// Indices of all the pointers that constitute this grouping.
+    SmallVector<unsigned, 2> Members;
+  };
+
+  /// \brief Groups pointers such that a single memcheck is required
+  /// between two different groups. This will clear the CheckingGroups vector
+  /// and re-compute it. We will only group dependencies if \p UseDependencies
+  /// is true, otherwise we will create a separate group for each pointer.
+  void groupChecks(MemoryDepChecker::DepCandidates &DepCands,
+                   bool UseDependencies);
+
+  /// \brief Decide whether we need to issue a run-time check for pointer at
+  /// index \p I and \p J to prove their independence.
+  ///
+  /// If \p PtrPartition is set, it contains the partition number for
+  /// pointers (-1 if the pointer belongs to multiple partitions). In this
+  /// case omit checks between pointers belonging to the same partition.
+  bool needsChecking(unsigned I, unsigned J,
+                     const SmallVectorImpl<int> *PtrPartition) const;
+
+  /// \brief Decide if we need to add a check between two groups of pointers,
+  /// according to needsChecking.
+  bool needsChecking(const CheckingPtrGroup &M, const CheckingPtrGroup &N,
+                     const SmallVectorImpl<int> *PtrPartition) const;
+
+  /// \brief Return true if any pointer requires run-time checking according
+  /// to needsChecking.
+  bool needsAnyChecking(const SmallVectorImpl<int> *PtrPartition) const;
+
+  /// \brief Returns the number of run-time checks required according to
+  /// needsChecking.
+  unsigned getNumberOfChecks(const SmallVectorImpl<int> *PtrPartition) const;
+
+  /// \brief Print the list of run-time memory checks necessary.
+  ///
+  /// If \p PtrPartition is set, it contains the partition number for
+  /// pointers (-1 if the pointer belongs to multiple partitions). In this
+  /// case omit checks between pointers belonging to the same partition.
+  void print(raw_ostream &OS, unsigned Depth = 0,
+             const SmallVectorImpl<int> *PtrPartition = nullptr) const;
+
+  /// This flag indicates if we need to add the runtime check.
+  bool Need;
+  /// Holds the pointers that we need to check.
+  SmallVector<TrackingVH<Value>, 2> Pointers;
+  /// Holds the pointer value at the beginning of the loop.
+  SmallVector<const SCEV*, 2> Starts;
+  /// Holds the pointer value at the end of the loop.
+  SmallVector<const SCEV*, 2> Ends;
+  /// Holds the information if this pointer is used for writing to memory.
+  SmallVector<bool, 2> IsWritePtr;
+  /// Holds the id of the set of pointers that could be dependent because of a
+  /// shared underlying object.
+  SmallVector<unsigned, 2> DependencySetId;
+  /// Holds the id of the disjoint alias set to which this pointer belongs.
+  SmallVector<unsigned, 2> AliasSetId;
+  /// Holds at position i the SCEV for the access i
+  SmallVector<const SCEV *, 2> Exprs;
+  /// Holds a partitioning of pointers into "check groups".
+  SmallVector<CheckingPtrGroup, 2> CheckingGroups;
+  /// Holds a pointer to the ScalarEvolution analysis.
+  ScalarEvolution *SE;
+};
+
 /// \brief Drive the analysis of memory accesses in the loop
 ///
 /// This class is responsible for analyzing the memory accesses of a loop. It
@@ -308,123 +424,6 @@ private:
 /// RuntimePointerCheck class.
 class LoopAccessInfo {
 public:
-  /// This struct holds information about the memory runtime legality check that
-  /// a group of pointers do not overlap.
-  struct RuntimePointerCheck {
-    RuntimePointerCheck(ScalarEvolution *SE) : Need(false), SE(SE) {}
-
-    /// Reset the state of the pointer runtime information.
-    void reset() {
-      Need = false;
-      Pointers.clear();
-      Starts.clear();
-      Ends.clear();
-      IsWritePtr.clear();
-      DependencySetId.clear();
-      AliasSetId.clear();
-      Exprs.clear();
-    }
-
-    /// Insert a pointer and calculate the start and end SCEVs.
-    void insert(Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
-                unsigned ASId, const ValueToValueMap &Strides);
-
-    /// \brief No run-time memory checking is necessary.
-    bool empty() const { return Pointers.empty(); }
-
-    /// A grouping of pointers. A single memcheck is required between
-    /// two groups.
-    struct CheckingPtrGroup {
-      /// \brief Create a new pointer checking group containing a single
-      /// pointer, with index \p Index in RtCheck.
-      CheckingPtrGroup(unsigned Index, RuntimePointerCheck &RtCheck)
-          : RtCheck(RtCheck), High(RtCheck.Ends[Index]),
-            Low(RtCheck.Starts[Index]) {
-        Members.push_back(Index);
-      }
-
-      /// \brief Tries to add the pointer recorded in RtCheck at index
-      /// \p Index to this pointer checking group. We can only add a pointer
-      /// to a checking group if we will still be able to get
-      /// the upper and lower bounds of the check. Returns true in case
-      /// of success, false otherwise.
-      bool addPointer(unsigned Index);
-
-      /// Constitutes the context of this pointer checking group. For each
-      /// pointer that is a member of this group we will retain the index
-      /// at which it appears in RtCheck.
-      RuntimePointerCheck &RtCheck;
-      /// The SCEV expression which represents the upper bound of all the
-      /// pointers in this group.
-      const SCEV *High;
-      /// The SCEV expression which represents the lower bound of all the
-      /// pointers in this group.
-      const SCEV *Low;
-      /// Indices of all the pointers that constitute this grouping.
-      SmallVector<unsigned, 2> Members;
-    };
-
-    /// \brief Groups pointers such that a single memcheck is required
-    /// between two different groups. This will clear the CheckingGroups vector
-    /// and re-compute it. We will only group dependencies if \p UseDependencies
-    /// is true, otherwise we will create a separate group for each pointer.
-    void groupChecks(MemoryDepChecker::DepCandidates &DepCands,
-                     bool UseDependencies);
-
-    /// \brief Decide whether we need to issue a run-time check for pointer at
-    /// index \p I and \p J to prove their independence.
-    ///
-    /// If \p PtrPartition is set, it contains the partition number for
-    /// pointers (-1 if the pointer belongs to multiple partitions). In this
-    /// case omit checks between pointers belonging to the same partition.
-    bool needsChecking(unsigned I, unsigned J,
-                       const SmallVectorImpl<int> *PtrPartition) const;
-
-    /// \brief Decide if we need to add a check between two groups of pointers,
-    /// according to needsChecking.
-    bool needsChecking(const CheckingPtrGroup &M,
-                       const CheckingPtrGroup &N,
-                       const SmallVectorImpl<int> *PtrPartition) const;
-
-    /// \brief Return true if any pointer requires run-time checking according
-    /// to needsChecking.
-    bool needsAnyChecking(const SmallVectorImpl<int> *PtrPartition) const;
-
-    /// \brief Returns the number of run-time checks required according to
-    /// needsChecking.
-    unsigned getNumberOfChecks(const SmallVectorImpl<int> *PtrPartition) const;
-
-    /// \brief Print the list of run-time memory checks necessary.
-    ///
-    /// If \p PtrPartition is set, it contains the partition number for
-    /// pointers (-1 if the pointer belongs to multiple partitions). In this
-    /// case omit checks between pointers belonging to the same partition.
-    void print(raw_ostream &OS, unsigned Depth = 0,
-               const SmallVectorImpl<int> *PtrPartition = nullptr) const;
-
-    /// This flag indicates if we need to add the runtime check.
-    bool Need;
-    /// Holds the pointers that we need to check.
-    SmallVector<TrackingVH<Value>, 2> Pointers;
-    /// Holds the pointer value at the beginning of the loop.
-    SmallVector<const SCEV*, 2> Starts;
-    /// Holds the pointer value at the end of the loop.
-    SmallVector<const SCEV*, 2> Ends;
-    /// Holds the information if this pointer is used for writing to memory.
-    SmallVector<bool, 2> IsWritePtr;
-    /// Holds the id of the set of pointers that could be dependent because of a
-    /// shared underlying object.
-    SmallVector<unsigned, 2> DependencySetId;
-    /// Holds the id of the disjoint alias set to which this pointer belongs.
-    SmallVector<unsigned, 2> AliasSetId;
-    /// Holds at position i the SCEV for the access i
-    SmallVector<const SCEV *, 2> Exprs;
-    /// Holds a partitioning of pointers into "check groups".
-    SmallVector<CheckingPtrGroup, 2> CheckingGroups;
-    /// Holds a pointer to the ScalarEvolution analysis.
-    ScalarEvolution *SE;
-  };
-
   LoopAccessInfo(Loop *L, ScalarEvolution *SE, const DataLayout &DL,
                  const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                  DominatorTree *DT, LoopInfo *LI,
@@ -434,15 +433,15 @@ public:
   /// no memory dependence cycles.
   bool canVectorizeMemory() const { return CanVecMem; }
 
-  const RuntimePointerCheck *getRuntimePointerCheck() const {
-    return &PtrRtCheck;
+  const RuntimePointerChecking *getRuntimePointerChecking() const {
+    return &PtrRtChecking;
   }
 
   /// \brief Number of memchecks required to prove independence of otherwise
   /// may-alias pointers.
   unsigned getNumRuntimePointerChecks(
       const SmallVectorImpl<int> *PtrPartition = nullptr) const {
-    return PtrRtCheck.getNumberOfChecks(PtrPartition);
+    return PtrRtChecking.getNumberOfChecks(PtrPartition);
   }
 
   /// Return true if the block BB needs to be predicated in order for the loop
@@ -512,7 +511,7 @@ private:
 
   /// We need to check that all of the pointers in this list are disjoint
   /// at runtime.
-  RuntimePointerCheck PtrRtCheck;
+  RuntimePointerChecking PtrRtChecking;
 
   /// \brief the Memory Dependence Checker which can determine the
   /// loop-independent and loop-carried dependences between memory accesses.
diff --git a/lib/Analysis/LoopAccessAnalysis.cpp b/lib/Analysis/LoopAccessAnalysis.cpp
index 79dac048921..db7f74264c1 100644
--- a/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/lib/Analysis/LoopAccessAnalysis.cpp
@@ -119,9 +119,9 @@ const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
   return SE->getSCEV(Ptr);
 }
 
-void LoopAccessInfo::RuntimePointerCheck::insert(
-    Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId, unsigned ASId,
-    const ValueToValueMap &Strides) {
+void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
+                                    unsigned DepSetId, unsigned ASId,
+                                    const ValueToValueMap &Strides) {
   // Get the stride replaced scev.
   const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
@@ -137,7 +137,7 @@ void LoopAccessInfo::RuntimePointerCheck::insert(
   Exprs.push_back(Sc);
 }
 
-bool LoopAccessInfo::RuntimePointerCheck::needsChecking(
+bool RuntimePointerChecking::needsChecking(
     const CheckingPtrGroup &M, const CheckingPtrGroup &N,
     const SmallVectorImpl<int> *PtrPartition) const {
   for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
@@ -161,8 +161,7 @@ static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
   return I;
 }
 
-bool LoopAccessInfo::RuntimePointerCheck::CheckingPtrGroup::addPointer(
-    unsigned Index) {
+bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) {
   // Compare the starts and ends with the known minimum and maximum
   // of this set. We need to know how we compare against the min/max
   // of the set in order to be able to emit memchecks.
@@ -186,9 +185,8 @@ bool LoopAccessInfo::RuntimePointerCheck::CheckingPtrGroup::addPointer(
   return true;
 }
 
-void LoopAccessInfo::RuntimePointerCheck::groupChecks(
-    MemoryDepChecker::DepCandidates &DepCands,
-    bool UseDependencies) {
+void RuntimePointerChecking::groupChecks(
+    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
   // We build the groups from dependency candidates equivalence classes
   // because:
   //    - We know that pointers in the same equivalence class share
@@ -283,7 +281,7 @@ void LoopAccessInfo::RuntimePointerCheck::groupChecks(
   }
 }
 
-bool LoopAccessInfo::RuntimePointerCheck::needsChecking(
+bool RuntimePointerChecking::needsChecking(
     unsigned I, unsigned J, const SmallVectorImpl<int> *PtrPartition) const {
   // No need to check if two readonly pointers intersect.
   if (!IsWritePtr[I] && !IsWritePtr[J])
@@ -307,7 +305,7 @@ bool LoopAccessInfo::RuntimePointerCheck::needsChecking(
   return true;
 }
 
-void LoopAccessInfo::RuntimePointerCheck::print(
+void RuntimePointerChecking::print(
     raw_ostream &OS, unsigned Depth,
     const SmallVectorImpl<int> *PtrPartition) const {
 
@@ -353,7 +351,7 @@ void LoopAccessInfo::RuntimePointerCheck::print(
   }
 }
 
-unsigned LoopAccessInfo::RuntimePointerCheck::getNumberOfChecks(
+unsigned RuntimePointerChecking::getNumberOfChecks(
     const SmallVectorImpl<int> *PtrPartition) const {
 
   unsigned NumPartitions = CheckingGroups.size();
@@ -366,7 +364,7 @@ unsigned LoopAccessInfo::RuntimePointerCheck::getNumberOfChecks(
   return CheckCount;
 }
 
-bool LoopAccessInfo::RuntimePointerCheck::needsAnyChecking(
+bool RuntimePointerChecking::needsAnyChecking(
     const SmallVectorImpl<int> *PtrPartition) const {
   unsigned NumPointers = Pointers.size();
 
@@ -414,9 +412,8 @@ public:
   ///
   /// Returns true if we need no check or if we do and we can generate them
   /// (i.e. the pointers have computable bounds).
-  bool canCheckPtrAtRT(LoopAccessInfo::RuntimePointerCheck &RtCheck,
-                       ScalarEvolution *SE, Loop *TheLoop,
-                       const ValueToValueMap &Strides,
+  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
+                       Loop *TheLoop, const ValueToValueMap &Strides,
                        bool ShouldCheckStride = false);
 
   /// \brief Goes over all memory accesses, checks whether a RT check is needed
@@ -492,9 +489,10 @@ static bool hasComputableBounds(ScalarEvolution *SE,
   return AR->isAffine();
 }
 
-bool AccessAnalysis::canCheckPtrAtRT(
-    LoopAccessInfo::RuntimePointerCheck &RtCheck, ScalarEvolution *SE,
-    Loop *TheLoop, const ValueToValueMap &StridesMap, bool ShouldCheckStride) {
+bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
+                                     ScalarEvolution *SE, Loop *TheLoop,
+                                     const ValueToValueMap &StridesMap,
+                                     bool ShouldCheckStride) {
   // Find pointers with computable bounds. We are going to use this information
   // to place a runtime bound check.
   bool CanDoRT = true;
@@ -1320,8 +1318,8 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
   unsigned NumReads = 0;
   unsigned NumReadWrites = 0;
 
-  PtrRtCheck.Pointers.clear();
-  PtrRtCheck.Need = false;
+  PtrRtChecking.Pointers.clear();
+  PtrRtChecking.Need = false;
 
   const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
 
@@ -1481,7 +1479,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
   // Find pointers with computable bounds. We are going to use this information
   // to place a runtime bound check.
   bool CanDoRTIfNeeded =
-      Accesses.canCheckPtrAtRT(PtrRtCheck, SE, TheLoop, Strides);
+      Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop, Strides);
   if (!CanDoRTIfNeeded) {
     emitAnalysis(LoopAccessReport() << "cannot identify array bounds");
     DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
@@ -1505,11 +1503,11 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
       // Clear the dependency checks. We assume they are not needed.
       Accesses.resetDepChecks(DepChecker);
 
-      PtrRtCheck.reset();
-      PtrRtCheck.Need = true;
+      PtrRtChecking.reset();
+      PtrRtChecking.Need = true;
 
       CanDoRTIfNeeded =
-          Accesses.canCheckPtrAtRT(PtrRtCheck, SE, TheLoop, Strides, true);
+          Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop, Strides, true);
 
       // Check that we found the bounds for the pointer.
       if (!CanDoRTIfNeeded) {
@@ -1526,7 +1524,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
 
   if (CanVecMem)
     DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop.  We"
-                 << (PtrRtCheck.Need ? "" : " don't")
+                 << (PtrRtChecking.Need ? "" : " don't")
                  << " need runtime memory checks.\n");
   else {
     emitAnalysis(LoopAccessReport() <<
@@ -1566,7 +1564,7 @@ static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
 
 std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeCheck(
     Instruction *Loc, const SmallVectorImpl<int> *PtrPartition) const {
-  if (!PtrRtCheck.Need)
+  if (!PtrRtChecking.Need)
     return std::make_pair(nullptr, nullptr);
 
   SmallVector<TrackingVH<Value>, 2> Starts;
@@ -1576,10 +1574,10 @@ std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeCheck(
   SCEVExpander Exp(*SE, DL, "induction");
   Instruction *FirstInst = nullptr;
 
-  for (unsigned i = 0; i < PtrRtCheck.CheckingGroups.size(); ++i) {
-    const RuntimePointerCheck::CheckingPtrGroup &CG =
-        PtrRtCheck.CheckingGroups[i];
-    Value *Ptr = PtrRtCheck.Pointers[CG.Members[0]];
+  for (unsigned i = 0; i < PtrRtChecking.CheckingGroups.size(); ++i) {
+    const RuntimePointerChecking::CheckingPtrGroup &CG =
+        PtrRtChecking.CheckingGroups[i];
+    Value *Ptr = PtrRtChecking.Pointers[CG.Members[0]];
     const SCEV *Sc = SE->getSCEV(Ptr);
 
     if (SE->isLoopInvariant(Sc, TheLoop)) {
@@ -1606,14 +1604,14 @@ std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeCheck(
   IRBuilder<> ChkBuilder(Loc);
   // Our instructions might fold to a constant.
   Value *MemoryRuntimeCheck = nullptr;
-  for (unsigned i = 0; i < PtrRtCheck.CheckingGroups.size(); ++i) {
-    for (unsigned j = i + 1; j < PtrRtCheck.CheckingGroups.size(); ++j) {
-      const RuntimePointerCheck::CheckingPtrGroup &CGI =
-          PtrRtCheck.CheckingGroups[i];
-      const RuntimePointerCheck::CheckingPtrGroup &CGJ =
-          PtrRtCheck.CheckingGroups[j];
-
-      if (!PtrRtCheck.needsChecking(CGI, CGJ, PtrPartition))
+  for (unsigned i = 0; i < PtrRtChecking.CheckingGroups.size(); ++i) {
+    for (unsigned j = i + 1; j < PtrRtChecking.CheckingGroups.size(); ++j) {
+      const RuntimePointerChecking::CheckingPtrGroup &CGI =
+          PtrRtChecking.CheckingGroups[i];
+      const RuntimePointerChecking::CheckingPtrGroup &CGJ =
+          PtrRtChecking.CheckingGroups[j];
+
+      if (!PtrRtChecking.needsChecking(CGI, CGJ, PtrPartition))
         continue;
 
       unsigned AS0 = Starts[i]->getType()->getPointerAddressSpace();
@@ -1664,8 +1662,8 @@ LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                                const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                                DominatorTree *DT, LoopInfo *LI,
                                const ValueToValueMap &Strides)
-    : PtrRtCheck(SE), DepChecker(SE, L), TheLoop(L), SE(SE), DL(DL), TLI(TLI),
-      AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0),
+    : PtrRtChecking(SE), DepChecker(SE, L), TheLoop(L), SE(SE), DL(DL),
+      TLI(TLI), AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0),
       MaxSafeDepDistBytes(-1U), CanVecMem(false),
       StoreToLoopInvariantAddress(false) {
   if (canAnalyzeLoop())
@@ -1674,7 +1672,7 @@ LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
 
 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
   if (CanVecMem) {
-    if (PtrRtCheck.Need)
+    if (PtrRtChecking.Need)
       OS.indent(Depth) << "Memory dependences are safe with run-time checks\n";
     else
       OS.indent(Depth) << "Memory dependences are safe\n";
@@ -1693,7 +1691,7 @@ void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
     OS.indent(Depth) << "Too many interesting dependences, not recorded\n";
 
   // List the pair of accesses need run-time checks to prove independence.
-  PtrRtCheck.print(OS, Depth);
+  PtrRtChecking.print(OS, Depth);
   OS << "\n";
 
   OS.indent(Depth) << "Store to invariant address was "
diff --git a/lib/Transforms/Scalar/LoopDistribute.cpp b/lib/Transforms/Scalar/LoopDistribute.cpp
index 75983222010..3380af29ca8 100644
--- a/lib/Transforms/Scalar/LoopDistribute.cpp
+++ b/lib/Transforms/Scalar/LoopDistribute.cpp
@@ -432,8 +432,7 @@ public:
   /// partitions its entry is set to -1.
   SmallVector<int, 8>
   computePartitionSetForPointers(const LoopAccessInfo &LAI) {
-    const LoopAccessInfo::RuntimePointerCheck *RtPtrCheck =
-        LAI.getRuntimePointerCheck();
+    const RuntimePointerChecking *RtPtrCheck = LAI.getRuntimePointerChecking();
 
     unsigned N = RtPtrCheck->Pointers.size();
     SmallVector<int, 8> PtrToPartitions(N);
@@ -753,7 +752,7 @@ private:
       LoopVersioning LVer(LAI, L, LI, DT, &PtrToPartition);
       if (LVer.needsRuntimeChecks()) {
         DEBUG(dbgs() << "\nPointers:\n");
-        DEBUG(LAI.getRuntimePointerCheck()->print(dbgs(), 0, &PtrToPartition));
+        DEBUG(LAI.getRuntimePointerChecking()->print(dbgs(), 0, &PtrToPartition));
         LVer.versionLoop(this);
         LVer.addPHINodes(DefsUsedOutside);
       }
diff --git a/lib/Transforms/Utils/LoopVersioning.cpp b/lib/Transforms/Utils/LoopVersioning.cpp
index edfe67bfa49..832079d2cf6 100644
--- a/lib/Transforms/Utils/LoopVersioning.cpp
+++ b/lib/Transforms/Utils/LoopVersioning.cpp
@@ -32,7 +32,7 @@ LoopVersioning::LoopVersioning(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
 }
 
 bool LoopVersioning::needsRuntimeChecks() const {
-  return LAI.getRuntimePointerCheck()->needsAnyChecking(PtrToPartition);
+  return LAI.getRuntimePointerChecking()->needsAnyChecking(PtrToPartition);
 }
 
 void LoopVersioning::versionLoop(Pass *P) {
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 08b1c3ab71a..69ca2688c81 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -924,8 +924,8 @@ public:
   bool isUniformAfterVectorization(Instruction* I) { return Uniforms.count(I); }
 
   /// Returns the information that we collected about runtime memory check.
-  const LoopAccessInfo::RuntimePointerCheck *getRuntimePointerCheck() const {
-    return LAI->getRuntimePointerCheck();
+  const RuntimePointerChecking *getRuntimePointerChecking() const {
+    return LAI->getRuntimePointerChecking();
   }
 
   const LoopAccessInfo *getLAI() const {
@@ -3873,10 +3873,11 @@ bool LoopVectorizationLegality::canVectorize() {
   // Collect all of the variables that remain uniform after vectorization.
   collectLoopUniforms();
 
-  DEBUG(dbgs() << "LV: We can vectorize this loop" <<
-        (LAI->getRuntimePointerCheck()->Need ? " (with a runtime bound check)" :
-         "")
-        <<"!\n");
+  DEBUG(dbgs() << "LV: We can vectorize this loop"
+               << (LAI->getRuntimePointerChecking()->Need
+                       ? " (with a runtime bound check)"
+                       : "")
+               << "!\n");
 
   // Analyze interleaved memory accesses.
   if (EnableInterleavedMemAccesses)
@@ -4449,7 +4450,7 @@ LoopVectorizationCostModel::VectorizationFactor
 LoopVectorizationCostModel::selectVectorizationFactor(bool OptForSize) {
   // Width 1 means no vectorize
   VectorizationFactor Factor = { 1U, 0U };
-  if (OptForSize && Legal->getRuntimePointerCheck()->Need) {
+  if (OptForSize && Legal->getRuntimePointerChecking()->Need) {
     emitAnalysis(VectorizationReport() <<
                  "runtime pointer checks needed. Enable vectorization of this "
                  "loop with '#pragma clang loop vectorize(enable)' when "
@@ -4713,7 +4714,7 @@ unsigned LoopVectorizationCostModel::selectInterleaveCount(bool OptForSize,
   // Note that if we've already vectorized the loop we will have done the
   // runtime check and so interleaving won't require further checks.
   bool InterleavingRequiresRuntimePointerCheck =
-      (VF == 1 && Legal->getRuntimePointerCheck()->Need);
 
   // We want to interleave small loops in order to reduce the loop overhead and
   // potentially expose ILP opportunities.
-- 
2.34.1
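
Usage note (not part of the commit): after this patch, out-of-tree callers must
switch from the nested LoopAccessInfo::RuntimePointerCheck and
getRuntimePointerCheck() to the namespace-scope RuntimePointerChecking and
getRuntimePointerChecking(), exactly as the in-tree callers above were updated.
The sketch below shows the renamed API from the client side; the helper name
reportRuntimeChecks and the surrounding setup are hypothetical, while the
accessor and the Need / getNumberOfChecks / print members come from the patch.

// Minimal illustrative sketch, assuming an already-analyzed LoopAccessInfo
// (e.g. obtained from the LoopAccessAnalysis pass); not part of the patch.
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hypothetical helper: report the runtime memory checks for one loop.
static void reportRuntimeChecks(const LoopAccessInfo &LAI) {
  // Renamed accessor; it now returns the namespace-scope struct.
  const RuntimePointerChecking *RtPtrChecking = LAI.getRuntimePointerChecking();

  if (!RtPtrChecking->Need) {
    dbgs() << "No runtime memory checks needed.\n";
    return;
  }

  // Passing nullptr for PtrPartition counts and prints all checks, matching
  // the default arguments declared in the patch.
  dbgs() << "Runtime pointer checks required: "
         << RtPtrChecking->getNumberOfChecks(nullptr) << '\n';
  RtPtrChecking->print(dbgs(), /*Depth=*/2, /*PtrPartition=*/nullptr);
}

Since the struct's members and method signatures are unchanged, existing code
only needs the mechanical renames shown in the hunks above.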