//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/VectorUtils.h"
using namespace llvm;

#define DEBUG_TYPE "loop-accesses"
static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;
static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;
static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;
/// \brief We collect interesting dependences up to this threshold.
static cl::opt<unsigned> MaxInterestingDependence(
    "max-interesting-dependences", cl::Hidden,
    cl::desc("Maximum number of interesting dependences collected by "
             "loop-access analysis (default = 100)"),
    cl::init(100));
bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}
void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message,
                                    const Function *TheFunction,
                                    const Loop *TheLoop,
                                    const char *PassName) {
  DebugLoc DL = TheLoop->getStartLoc();
  if (const Instruction *I = Message.getInstr())
    DL = I->getDebugLoc();
  emitOptimizationRemarkAnalysis(TheFunction->getContext(), PassName,
                                 *TheFunction, DL, Message.str());
}
Value *llvm::stripIntegerCast(Value *V) {
  if (CastInst *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}
const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = SE->getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI != PtrToStride.end()) {
    Value *StrideVal = SI->second;

    // Strip casts.
    StrideVal = stripIntegerCast(StrideVal);

    // Replace symbolic stride by one.
    Value *One = ConstantInt::get(StrideVal->getType(), 1);
    ValueToValueMap RewriteMap;
    RewriteMap[StrideVal] = One;

    const SCEV *ByOne =
        SCEVParameterRewriter::rewrite(OrigSCEV, *SE, RewriteMap, true);
    DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *ByOne
                 << "\n");
    return ByOne;
  }

  // Otherwise, just return the SCEV of the original pointer.
  return SE->getSCEV(Ptr);
}
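// Example: once versioning has established that a symbolic stride %stride is
// one, a pointer SCEV such as {%A,+,%stride} is rewritten here to {%A,+,1},
// which the bounds and dependence logic below can treat as a unit-stride
// access.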
void LoopAccessInfo::RuntimePointerCheck::insert(
    ScalarEvolution *SE, Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
    unsigned ASId, const ValueToValueMap &Strides) {
  // Get the stride replaced scev.
  const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
  assert(AR && "Invalid addrec expression");
  const SCEV *Ex = SE->getBackedgeTakenCount(Lp);
  const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE);
  Pointers.push_back(Ptr);
  Starts.push_back(AR->getStart());
  Ends.push_back(ScEnd);
  IsWritePtr.push_back(WritePtr);
  DependencySetId.push_back(DepSetId);
  AliasSetId.push_back(ASId);
}
bool LoopAccessInfo::RuntimePointerCheck::needsChecking(
    unsigned I, unsigned J, const SmallVectorImpl<int> *PtrPartition) const {
  // No need to check if two readonly pointers intersect.
  if (!IsWritePtr[I] && !IsWritePtr[J])
    return false;

  // Only need to check pointers between two different dependency sets.
  if (DependencySetId[I] == DependencySetId[J])
    return false;

  // Only need to check pointers in the same alias set.
  if (AliasSetId[I] != AliasSetId[J])
    return false;

  // If PtrPartition is set omit checks between pointers of the same partition.
  // Partition number -1 means that the pointer is used in multiple partitions.
  // In this case we can't omit the check.
  if (PtrPartition && (*PtrPartition)[I] != -1 &&
      (*PtrPartition)[I] == (*PtrPartition)[J])
    return false;

  return true;
}
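// Example: two reads never need a runtime check, and neither does a pair of
// pointers that canCheckPtrAtRT placed in the same dependence set (say, a
// store to A[i] and a load of A[i + 1]); only pairs with at least one write,
// from different dependence sets within the same alias set, are compared.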
void LoopAccessInfo::RuntimePointerCheck::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<int> *PtrPartition) const {
  unsigned NumPointers = Pointers.size();
  if (NumPointers == 0)
    return;

  OS.indent(Depth) << "Run-time memory checks:\n";
  unsigned N = 0;
  for (unsigned I = 0; I < NumPointers; ++I)
    for (unsigned J = I + 1; J < NumPointers; ++J)
      if (needsChecking(I, J, PtrPartition)) {
        OS.indent(Depth) << N++ << ":\n";
        OS.indent(Depth + 2) << *Pointers[I];
        if (PtrPartition)
          OS << " (Partition: " << (*PtrPartition)[I] << ")";
        OS << "\n";
        OS.indent(Depth + 2) << *Pointers[J];
        if (PtrPartition)
          OS << " (Partition: " << (*PtrPartition)[J] << ")";
        OS << "\n";
      }
}
namespace {

/// \brief Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// \brief Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

  AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA,
                 MemoryDepChecker::DepCandidates &DA)
      : DL(Dl), AST(*AA), DepCands(DA), IsRTCheckNeeded(false) {}
  /// \brief Register a load and whether it is only read from.
  void addLoad(AliasAnalysis::Location &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, AliasAnalysis::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }
  /// \brief Register a store.
  void addStore(AliasAnalysis::Location &Loc) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, AliasAnalysis::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }
  /// \brief Check whether we can check the pointers at runtime for
  /// non-intersection.
  bool canCheckPtrAtRT(LoopAccessInfo::RuntimePointerCheck &RtCheck,
                       unsigned &NumComparisons, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckStride = false);

  /// \brief Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  bool isRTCheckNeeded() { return IsRTCheckNeeded; }

  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
  void resetDepChecks() { CheckDeps.clear(); }

  MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; }
private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// \brief Go over all memory accesses and check whether runtime pointer
  /// checks are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// Set of accesses that need a further dependence check.
  MemAccessInfoSet CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  bool IsRTCheckNeeded;
};

} // end anonymous namespace
/// \brief Check whether a pointer can participate in a runtime bounds check.
static bool hasComputableBounds(ScalarEvolution *SE,
                                const ValueToValueMap &Strides, Value *Ptr) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR)
    return false;

  return AR->isAffine();
}
/// \brief Check the stride of the pointer and ensure that it does not wrap in
/// the address space.
static int isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
                        const ValueToValueMap &StridesMap);
bool AccessAnalysis::canCheckPtrAtRT(
    LoopAccessInfo::RuntimePointerCheck &RtCheck, unsigned &NumComparisons,
    ScalarEvolution *SE, Loop *TheLoop, const ValueToValueMap &StridesMap,
    bool ShouldCheckStride) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to access from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    unsigned NumReadPtrChecks = 0;
    unsigned NumWritePtrChecks = 0;

    // We assign consecutive id to access from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;
      if (hasComputableBounds(SE, StridesMap, Ptr) &&
          // When we run after a failing dependency check we have to make sure
          // we don't have wrapping pointers.
          (!ShouldCheckStride ||
           isStridedPtr(SE, Ptr, TheLoop, StridesMap) == 1)) {
        // The id of the dependence set.
        unsigned DepId;

        if (IsDepCheckNeeded) {
          Value *Leader = DepCands.getLeaderValue(Access).getPointer();
          unsigned &LeaderId = DepSetId[Leader];
          if (!LeaderId)
            LeaderId = RunningDepId++;
          DepId = LeaderId;
        } else
          // Each access has its own dependence set.
          DepId = RunningDepId++;

        RtCheck.insert(SE, TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap);

        DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
      } else {
        CanDoRT = false;
      }
    }
    if (IsDepCheckNeeded && CanDoRT && RunningDepId == 2)
      NumComparisons += 0; // Only one dependence set.
    else {
      NumComparisons += (NumWritePtrChecks * (NumReadPtrChecks +
                                              NumWritePtrChecks - 1));
    }

    ++ASId;
  }
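  // Example: an alias set with 2 write pointers and 3 read pointers spanning
  // more than one dependence set costs 2 * (3 + 2 - 1) = 8 comparisons: each
  // write is checked against every read and against the other write.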
  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.DependencySetId[i] == RtCheck.DependencySetId[j])
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.AliasSetId[i] != RtCheck.AliasSetId[j])
        continue;

      Value *PtrI = RtCheck.Pointers[i];
      Value *PtrJ = RtCheck.Pointers[j];

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
                       " different address spaces\n");
        return false;
      }
    }
  }

  return CanDoRT;
}
void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  DEBUG(dbgs() << "  AST: "; AST.dump());
  DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
  DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ?
                                         "read-only" : "read")) << ")\n";
  });
  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).
    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");
          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write, check other reads and writes for conflicts. If
          // this is a read, only check other writes for conflicts (but only if
          // there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.insert(Access);
            IsRTCheckNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;
          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<Value *, 16> ValueVector;
          ValueVector TempObjects;
          GetUnderlyingObjects(Ptr, TempObjects, DL);
          for (Value *UnderlyingObj : TempObjects) {
            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
          }
        }
      }
    }
  }
}
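// Example: A[i] and A[i + 1] share the underlying object A, so the loop above
// unions their accesses into one DepCands equivalence class; canCheckPtrAtRT
// then gives the class a single dependence-set id and no runtime check is
// emitted between its members.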
static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}
/// \brief Check whether the access through \p Ptr has a constant stride.
static int isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
                        const ValueToValueMap &StridesMap) {
  const Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  const PointerType *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
                 << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR) {
    DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer "
                 << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }
  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " <<
          *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }
  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = AR->getNoWrapFlags(SCEV::NoWrapMask);
  bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
  if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
    DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                 << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }
  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Calculate the pointer stride and check if it is consecutive.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr <<
          " SCEV: " << *PtrScev << "\n");
    return 0;
  }
  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getValue()->getValue();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;
  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
      Stride != 1 && Stride != -1)
    return 0;

  return Stride;
}
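// Example: for an i32 access A[i] the pointer SCEV is {%A,+,4}; the constant
// step (4 bytes) divided by the i32 allocation size (4 bytes) yields stride 1,
// i.e. a consecutive access. A step of 8 bytes would yield stride 2, and a
// non-constant or non-divisible step yields 0.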
bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return true;

  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}
bool MemoryDepChecker::Dependence::isInterestingDependence(DepType Type) {
  switch (Type) {
  case NoDep:
    return false;

  case Forward:
  case BackwardVectorizable:
  case Unknown:
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}
bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
    return false;

  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}
bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
                                                    unsigned TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // Positive dependences might cause troubles because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.
  const unsigned NumCyclesForStoreLoadThroughMemory = 8*TypeByteSize;
  // Maximum vector factor.
  unsigned MaxVFWithoutSLForwardIssues =
      VectorizerParams::MaxVectorWidth * TypeByteSize;
  if (MaxSafeDepDistBytes < MaxVFWithoutSLForwardIssues)
    MaxVFWithoutSLForwardIssues = MaxSafeDepDistBytes;
  for (unsigned vf = 2*TypeByteSize; vf <= MaxVFWithoutSLForwardIssues;
       vf *= 2) {
    if (Distance % vf && Distance / vf < NumCyclesForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (vf >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2*TypeByteSize) {
    DEBUG(dbgs() << "LAA: Distance " << Distance <<
          " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}
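// Example: with i32 accesses (TypeByteSize = 4), Distance = 12 fails the test
// at vf = 8 (12 % 8 != 0 and 12 / 8 < 32), clamping the maximum VF to 4 bytes,
// less than two elements, so a store-load forwarding conflict is reported.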
MemoryDepChecker::Dependence::DepType
MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                              const MemAccessInfo &B, unsigned BIdx,
                              const ValueToValueMap &Strides) {
  assert(AIdx < BIdx && "Must pass arguments in program order");

  Value *APtr = A.getPointer();
  Value *BPtr = B.getPointer();
  bool AIsWrite = A.getInt();
  bool BIsWrite = B.getInt();

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return Dependence::NoDep;

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return Dependence::Unknown;
  const SCEV *AScev = replaceSymbolicStrideSCEV(SE, Strides, APtr);
  const SCEV *BScev = replaceSymbolicStrideSCEV(SE, Strides, BPtr);

  int StrideAPtr = isStridedPtr(SE, APtr, InnermostLoop, Strides);
  int StrideBPtr = isStridedPtr(SE, BPtr, InnermostLoop, Strides);

  const SCEV *Src = AScev;
  const SCEV *Sink = BScev;

  // If the induction step is negative we have to invert source and sink of the
  // dependence.
  if (StrideAPtr < 0) {
    std::swap(APtr, BPtr);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AIdx, BIdx);
    std::swap(StrideAPtr, StrideBPtr);
  }
  const SCEV *Dist = SE->getMinusSCEV(Sink, Src);

  DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
               << "(Induction step: " << StrideAPtr << ")\n");
  DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
               << *InstMap[BIdx] << ": " << *Dist << "\n");

  // Need consecutive accesses. We don't want to vectorize
  // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
  // the address space.
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
    DEBUG(dbgs() << "Non-consecutive pointer access\n");
    return Dependence::Unknown;
  }

  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    ShouldRetryWithRuntimeCheck = true;
    return Dependence::Unknown;
  }
  Type *ATy = APtr->getType()->getPointerElementType();
  Type *BTy = BPtr->getType()->getPointerElementType();
  auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
  unsigned TypeByteSize = DL.getTypeAllocSize(ATy);

  // Negative distances are not plausible dependencies.
  const APInt &Val = C->getValue()->getValue();
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    if (IsTrueDataDependence &&
        (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
         ATy != BTy))
      return Dependence::ForwardButPreventsForwarding;

    DEBUG(dbgs() << "LAA: Dependence is negative: NoDep\n");
    return Dependence::Forward;
  }
  // Write to the same location with the same size.
  // Could be improved to assert type sizes are the same (i32 == float, etc).
  if (Val == 0) {
    if (ATy == BTy)
      return Dependence::NoDep;
    DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (ATy != BTy) {
    DEBUG(dbgs() <<
          "LAA: ReadWrite-Write positive dependency with different types\n");
    return Dependence::Unknown;
  }
  unsigned Distance = (unsigned) Val.getZExtValue();

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);

  // The distance must be bigger than the size needed for a vectorized version
  // of the operation and the size of the vectorized operation must not be
  // bigger than the current maximum size.
  if (Distance < 2*TypeByteSize ||
      2*TypeByteSize > MaxSafeDepDistBytes ||
      Distance < TypeByteSize * ForcedUnroll * ForcedFactor) {
    DEBUG(dbgs() << "LAA: Failure because of Positive distance "
                 << Val.getSExtValue() << '\n');
    return Dependence::Backward;
  }
  // Positive distance bigger than max vectorization factor.
  MaxSafeDepDistBytes = Distance < MaxSafeDepDistBytes ?
      Distance : MaxSafeDepDistBytes;

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue() <<
        " with max VF = " << MaxSafeDepDistBytes / TypeByteSize << '\n');

  return Dependence::BackwardVectorizable;
}
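// Example: for i32 accesses in
//   for (i = 4; i < N; i++) A[i] = A[i - 4] + 1;
// the dependence distance is 16 bytes, so the result is BackwardVectorizable
// and MaxSafeDepDistBytes becomes 16, i.e. a maximum VF of 16 / 4 = 4 elements.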
bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoSet &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1U;
  while (!CheckDeps.empty()) {
    MemAccessInfo CurAccess = *CheckDeps.begin();

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
        AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI, AE;
    AI = AccessSets.member_begin(I), AE = AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      CheckDeps.erase(*AI);
      EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
               I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            // isDependent requires the accesses in program order.
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            SafeForVectorization &= Dependence::isSafeForVectorization(Type);
            // Gather dependences unless we accumulated MaxInterestingDependence
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
            if (RecordInterestingDependences) {
              if (Dependence::isInterestingDependence(Type))
                InterestingDependences.push_back(
                    Dependence(A.second, B.second, Type));

              if (InterestingDependences.size() >= MaxInterestingDependence) {
                RecordInterestingDependences = false;
                InterestingDependences.clear();
                DEBUG(dbgs() << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordInterestingDependences && !SafeForVectorization)
              return false;
          }
        ++OI;
      }
      AI++;
    }
  }

  DEBUG(dbgs() << "Total Interesting Dependences: "
               << InterestingDependences.size() << "\n");
  return SafeForVectorization;
}
SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  std::transform(IndexVector.begin(), IndexVector.end(),
                 std::back_inserter(Insts),
                 [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}
const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}
bool LoopAccessInfo::canAnalyzeLoop() {
  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    emitAnalysis(LoopAccessReport() << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }
  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // We need to have a loop header.
  DEBUG(dbgs() << "LAA: Found a loop: " <<
        TheLoop->getHeader()->getName() << '\n');

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop);
  if (ExitCount == SE->getCouldNotCompute()) {
    emitAnalysis(LoopAccessReport() <<
                 "could not determine number of loop iterations");
    DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}
void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {

  typedef SmallVector<Value*, 16> ValueVector;
  typedef SmallPtrSet<Value*, 16> ValueSet;

  // Holds the Load and Store *instructions*.
  ValueVector Loads;
  ValueVector Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtCheck.Pointers.clear();
  PtrRtCheck.Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
  // For each block.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
       be = TheLoop->block_end(); bb != be; ++bb) {

    // Scan the BB and collect legal loads and stores.
    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
         ++it) {
956 // but is not a load, then we quit. Notice that we don't handle function
957 // calls that read or write.
958 if (it->mayReadFromMemory()) {
959 // Many math library functions read the rounding mode. We will only
960 // vectorize a loop if it contains known function calls that don't set
961 // the flag. Therefore, it is safe to ignore this read from memory.
962 CallInst *Call = dyn_cast<CallInst>(it);
963 if (Call && getIntrinsicIDForCall(Call, TLI))
966 // If the function has an explicit vectorized counterpart, we can safely
967 // assume that it can be vectorized.
968 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
969 TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
972 LoadInst *Ld = dyn_cast<LoadInst>(it);
973 if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
974 emitAnalysis(LoopAccessReport(Ld)
975 << "read with atomic ordering or volatile read");
976 DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
982 DepChecker.addAccess(Ld);
      // Save 'store' instructions. Abort if other instructions write to memory.
      if (it->mayWriteToMemory()) {
        StoreInst *St = dyn_cast<StoreInst>(it);
        if (!St) {
          emitAnalysis(LoopAccessReport(it) <<
                       "instruction cannot be vectorized");
          CanVecMem = false;
          return;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          emitAnalysis(LoopAccessReport(St)
                       << "write with atomic ordering or volatile write");
          DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          CanVecMem = false;
          return;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker.addAccess(St);
      }
    } // Next instr.
  } // Next block.
  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }
  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          AA, DependentAccesses);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  ValueVector::iterator I, IE;
  for (I = Stores.begin(), IE = Stores.end(); I != IE; ++I) {
    StoreInst *ST = cast<StoreInst>(*I);
    Value* Ptr = ST->getPointerOperand();

    if (isUniform(Ptr)) {
      emitAnalysis(
          LoopAccessReport(ST)
          << "write to a loop invariant address could not be vectorized");
      DEBUG(dbgs() << "LAA: We don't allow storing to uniform addresses\n");
      CanVecMem = false;
      return;
    }

    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      AliasAnalysis::Location Loc = AA->getLocation(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }
  if (IsAnnotatedParallel) {
    DEBUG(dbgs()
          << "LAA: A loop annotated parallel, ignore memory dependency "
          << "checks.\n");
    CanVecMem = true;
    return;
  }
  for (I = Loads.begin(), IE = Loads.end(); I != IE; ++I) {
    LoadInst *LD = cast<LoadInst>(*I);
    Value* Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x, because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second || !isStridedPtr(SE, Ptr, TheLoop, Strides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }
    AliasAnalysis::Location Loc = AA->getLocation(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }
  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }
  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();
  bool NeedRTCheck = Accesses.isRTCheckNeeded();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = false;
  if (NeedRTCheck)
    CanDoRT = Accesses.canCheckPtrAtRT(PtrRtCheck, NumComparisons, SE, TheLoop,
                                       Strides);

  DEBUG(dbgs() << "LAA: We need to do " << NumComparisons <<
        " pointer comparisons.\n");

  // If we only have one set of dependences to check pointers among we don't
  // need a runtime check.
  if (NumComparisons == 0 && NeedRTCheck)
    NeedRTCheck = false;
  // Check that we found the bounds for the pointer.
  if (CanDoRT)
    DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");
  else if (NeedRTCheck) {
    emitAnalysis(LoopAccessReport() << "cannot identify array bounds");
    DEBUG(dbgs() << "LAA: We can't vectorize because we can't find " <<
          "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  PtrRtCheck.Need = NeedRTCheck;

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker.areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), Strides);
    MaxSafeDepDistBytes = DepChecker.getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker.shouldRetryWithRuntimeCheck()) {
      DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
      NeedRTCheck = true;

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks();

      PtrRtCheck.reset();
      PtrRtCheck.Need = true;

      CanDoRT = Accesses.canCheckPtrAtRT(PtrRtCheck, NumComparisons, SE,
                                         TheLoop, Strides, true);
      // Check that we found the bounds for the pointer.
      if (!CanDoRT && NumComparisons > 0) {
        emitAnalysis(LoopAccessReport()
                     << "cannot check memory dependencies at runtime");
        DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (CanVecMem)
    DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
                 << (NeedRTCheck ? "" : " don't")
                 << " need a runtime memory check.\n");
  else {
    emitAnalysis(LoopAccessReport() <<
                 "unsafe dependent memory operations in loop");
    DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}
bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  BasicBlock* Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}
void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) {
  assert(!Report && "Multiple reports generated");
  Report = Message;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}
// FIXME: this function is currently a duplicate of the one in
// LoopVectorize.cpp.
static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
                                 Instruction *Loc) {
  if (FirstInst)
    return FirstInst;
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == Loc->getParent() ? I : nullptr;
  return nullptr;
}
std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeCheck(
    Instruction *Loc, const SmallVectorImpl<int> *PtrPartition) const {
  Instruction *tnullptr = nullptr;
  if (!PtrRtCheck.Need)
    return std::pair<Instruction *, Instruction *>(tnullptr, tnullptr);

  unsigned NumPointers = PtrRtCheck.Pointers.size();
  SmallVector<TrackingVH<Value> , 2> Starts;
  SmallVector<TrackingVH<Value> , 2> Ends;

  LLVMContext &Ctx = Loc->getContext();
  SCEVExpander Exp(*SE, DL, "induction");
  Instruction *FirstInst = nullptr;
  for (unsigned i = 0; i < NumPointers; ++i) {
    Value *Ptr = PtrRtCheck.Pointers[i];
    const SCEV *Sc = SE->getSCEV(Ptr);

    if (SE->isLoopInvariant(Sc, TheLoop)) {
      DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" <<
            *Ptr << "\n");
      Starts.push_back(Ptr);
      Ends.push_back(Ptr);
    } else {
      DEBUG(dbgs() << "LAA: Adding RT check for range:" << *Ptr << '\n');
      unsigned AS = Ptr->getType()->getPointerAddressSpace();

      // Use this type for pointer arithmetic.
      Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);

      Value *Start = Exp.expandCodeFor(PtrRtCheck.Starts[i], PtrArithTy, Loc);
      Value *End = Exp.expandCodeFor(PtrRtCheck.Ends[i], PtrArithTy, Loc);
      Starts.push_back(Start);
      Ends.push_back(End);
    }
  }
  IRBuilder<> ChkBuilder(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i+1; j < NumPointers; ++j) {
      if (!PtrRtCheck.needsChecking(i, j, PtrPartition))
        continue;
      unsigned AS0 = Starts[i]->getType()->getPointerAddressSpace();
      unsigned AS1 = Starts[j]->getType()->getPointerAddressSpace();

      assert((AS0 == Ends[j]->getType()->getPointerAddressSpace()) &&
             (AS1 == Ends[i]->getType()->getPointerAddressSpace()) &&
             "Trying to bounds check pointers with different address spaces");

      Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
      Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

      Value *Start0 = ChkBuilder.CreateBitCast(Starts[i], PtrArithTy0, "bc");
      Value *Start1 = ChkBuilder.CreateBitCast(Starts[j], PtrArithTy1, "bc");
      Value *End0 = ChkBuilder.CreateBitCast(Ends[i], PtrArithTy1, "bc");
      Value *End1 = ChkBuilder.CreateBitCast(Ends[j], PtrArithTy0, "bc");

      Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
      FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
      Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
      FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
      Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
      FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
      if (MemoryRuntimeCheck) {
        IsConflict = ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict,
                                         "conflict.rdx");
        FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
      }
      MemoryRuntimeCheck = IsConflict;
    }
  }
  // We have to do this trickery because the IRBuilder might fold the check to a
  // constant expression in which case there is no Instruction anchored in a
  // basic block.
  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
                                                 ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = getFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}
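// For each pair of byte ranges [Start0, End0] and [Start1, End1] the emitted
// check is the interval-overlap test
//   (Start0 <= End1) && (Start1 <= End0)
// and the per-pair results are OR-ed into "memcheck.conflict", which is true
// iff some pair of ranges may overlap.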
LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT,
                               const ValueToValueMap &Strides)
    : DepChecker(SE, L), NumComparisons(0), TheLoop(L), SE(SE), DL(DL),
      TLI(TLI), AA(AA), DT(DT), NumLoads(0), NumStores(0),
      MaxSafeDepDistBytes(-1U), CanVecMem(false) {
  if (canAnalyzeLoop())
    analyzeLoop(Strides);
}
void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    if (PtrRtCheck.empty())
      OS.indent(Depth) << "Memory dependences are safe\n";
    else
      OS.indent(Depth) << "Memory dependences are safe with run-time checks\n";
  }

  if (Report)
    OS.indent(Depth) << "Report: " << Report->str() << "\n";

  if (auto *InterestingDependences = DepChecker.getInterestingDependences()) {
    OS.indent(Depth) << "Interesting Dependences:\n";
    for (auto &Dep : *InterestingDependences) {
      Dep.print(OS, Depth + 2, DepChecker.getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many interesting dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove independence.
  PtrRtCheck.print(OS, Depth);
  OS << "\n";
}
const LoopAccessInfo &
LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) {
  auto &LAI = LoopAccessInfoMap[L];

#ifndef NDEBUG
  assert((!LAI || LAI->NumSymbolicStrides == Strides.size()) &&
         "Symbolic strides changed for loop");
#endif

  if (!LAI) {
    const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
    LAI = llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, Strides);
#ifndef NDEBUG
    LAI->NumSymbolicStrides = Strides.size();
#endif
  }
  return *LAI.get();
}
void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessAnalysis &LAA = *const_cast<LoopAccessAnalysis *>(this);

  LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  ValueToValueMap NoSymbolicStrides;

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L, NoSymbolicStrides);
      LAI.print(OS, 4);
    }
}
bool LoopAccessAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolution>();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AA = &getAnalysis<AliasAnalysis>();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  return false;
}
void LoopAccessAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolution>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}
char LoopAccessAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"
INITIALIZE_PASS_BEGIN(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)
namespace llvm {
  Pass *createLAAPass() {
    return new LoopAccessAnalysis();
  }
}