1 //===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation -------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // The implementation of the loop memory dependence analysis that was
11 // originally developed for the loop vectorizer.
13 //===----------------------------------------------------------------------===//
15 #include "llvm/Analysis/LoopAccessAnalysis.h"
16 #include "llvm/Analysis/LoopInfo.h"
17 #include "llvm/Analysis/ScalarEvolutionExpander.h"
18 #include "llvm/Analysis/TargetLibraryInfo.h"
19 #include "llvm/Analysis/ValueTracking.h"
20 #include "llvm/IR/DiagnosticInfo.h"
21 #include "llvm/IR/Dominators.h"
22 #include "llvm/IR/IRBuilder.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Support/raw_ostream.h"
25 #include "llvm/Transforms/Utils/VectorUtils.h"
28 #define DEBUG_TYPE "loop-accesses"
30 static cl::opt<unsigned, true>
31 VectorizationFactor("force-vector-width", cl::Hidden,
32 cl::desc("Sets the SIMD width. Zero is autoselect."),
33 cl::location(VectorizerParams::VectorizationFactor));
34 unsigned VectorizerParams::VectorizationFactor;
36 static cl::opt<unsigned, true>
37 VectorizationInterleave("force-vector-interleave", cl::Hidden,
38 cl::desc("Sets the vectorization interleave count. "
39 "Zero is autoselect."),
41 VectorizerParams::VectorizationInterleave));
42 unsigned VectorizerParams::VectorizationInterleave;
44 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
45 "runtime-memory-check-threshold", cl::Hidden,
46 cl::desc("When performing memory disambiguation checks at runtime do not "
47 "generate more than this number of comparisons (default = 8)."),
48 cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
49 unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
51 /// Maximum SIMD width.
52 const unsigned VectorizerParams::MaxVectorWidth = 64;
54 /// \brief We collect interesting dependences up to this threshold.
55 static cl::opt<unsigned> MaxInterestingDependence(
56 "max-interesting-dependences", cl::Hidden,
57 cl::desc("Maximum number of interesting dependences collected by "
58 "loop-access analysis (default = 100)"),
61 bool VectorizerParams::isInterleaveForced() {
62 return ::VectorizationInterleave.getNumOccurrences() > 0;
65 void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message,
66 const Function *TheFunction,
68 const char *PassName) {
69 DebugLoc DL = TheLoop->getStartLoc();
70 if (const Instruction *I = Message.getInstr())
71 DL = I->getDebugLoc();
72 emitOptimizationRemarkAnalysis(TheFunction->getContext(), PassName,
73 *TheFunction, DL, Message.str());
76 Value *llvm::stripIntegerCast(Value *V) {
77 if (CastInst *CI = dyn_cast<CastInst>(V))
78 if (CI->getOperand(0)->getType()->isIntegerTy())
79 return CI->getOperand(0);
83 const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
84 const ValueToValueMap &PtrToStride,
85 Value *Ptr, Value *OrigPtr) {
87 const SCEV *OrigSCEV = SE->getSCEV(Ptr);
89 // If there is an entry in the map return the SCEV of the pointer with the
90 // symbolic stride replaced by one.
91 ValueToValueMap::const_iterator SI =
92 PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
93 if (SI != PtrToStride.end()) {
94 Value *StrideVal = SI->second;
97 StrideVal = stripIntegerCast(StrideVal);
99 // Replace symbolic stride by one.
100 Value *One = ConstantInt::get(StrideVal->getType(), 1);
101 ValueToValueMap RewriteMap;
102 RewriteMap[StrideVal] = One;
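// As a sketch with invented values: if the pointer's SCEV is
// {%base,+,(4 * %stride)}<%loop> and %stride is the symbolic stride recorded
// in the map, rewriting %stride to 1 below yields {%base,+,4}<%loop>, i.e. an
// access that advances by one 4-byte element per iteration.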
105 SCEVParameterRewriter::rewrite(OrigSCEV, *SE, RewriteMap, true);
106 DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *ByOne
111 // Otherwise, just return the SCEV of the original pointer.
112 return SE->getSCEV(Ptr);
115 void LoopAccessInfo::RuntimePointerCheck::insert(
116 ScalarEvolution *SE, Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
117 unsigned ASId, const ValueToValueMap &Strides) {
118 // Get the stride-replaced SCEV.
119 const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
120 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
121 assert(AR && "Invalid addrec expression");
122 const SCEV *Ex = SE->getBackedgeTakenCount(Lp);
123 const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE);
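// Example with made-up values: for an access whose AddRec is {%A,+,4} and a
// loop whose backedge-taken count is 99, Start is %A and ScEnd evaluates to
// %A + 396, the address accessed on the final iteration; [Start, ScEnd] is the
// range this pointer contributes to the runtime checks.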
124 Pointers.push_back(Ptr);
125 Starts.push_back(AR->getStart());
126 Ends.push_back(ScEnd);
127 IsWritePtr.push_back(WritePtr);
128 DependencySetId.push_back(DepSetId);
129 AliasSetId.push_back(ASId);
132 bool LoopAccessInfo::RuntimePointerCheck::needsChecking(
133 unsigned I, unsigned J, const SmallVectorImpl<int> *PtrPartition) const {
134 // No need to check if two readonly pointers intersect.
135 if (!IsWritePtr[I] && !IsWritePtr[J])
138 // Only need to check pointers between two different dependency sets.
139 if (DependencySetId[I] == DependencySetId[J])
142 // Only need to check pointers in the same alias set.
143 if (AliasSetId[I] != AliasSetId[J])
146 // If PtrPartition is set omit checks between pointers of the same partition.
147 // Partition number -1 means that the pointer is used in multiple partitions.
148 // In this case we can't omit the check.
149 if (PtrPartition && (*PtrPartition)[I] != -1 &&
150 (*PtrPartition)[I] == (*PtrPartition)[J])
156 void LoopAccessInfo::RuntimePointerCheck::print(
157 raw_ostream &OS, unsigned Depth,
158 const SmallVectorImpl<int> *PtrPartition) const {
159 unsigned NumPointers = Pointers.size();
160 if (NumPointers == 0)
163 OS.indent(Depth) << "Run-time memory checks:\n";
165 for (unsigned I = 0; I < NumPointers; ++I)
166 for (unsigned J = I + 1; J < NumPointers; ++J)
167 if (needsChecking(I, J, PtrPartition)) {
168 OS.indent(Depth) << N++ << ":\n";
169 OS.indent(Depth + 2) << *Pointers[I];
171 OS << " (Partition: " << (*PtrPartition)[I] << ")";
173 OS.indent(Depth + 2) << *Pointers[J];
175 OS << " (Partition: " << (*PtrPartition)[J] << ")";
180 bool LoopAccessInfo::RuntimePointerCheck::needsAnyChecking(
181 const SmallVectorImpl<int> *PtrPartition) const {
182 unsigned NumPointers = Pointers.size();
184 for (unsigned I = 0; I < NumPointers; ++I)
185 for (unsigned J = I + 1; J < NumPointers; ++J)
186 if (needsChecking(I, J, PtrPartition))
192 /// \brief Analyses memory accesses in a loop.
194 /// Checks whether run time pointer checks are needed and builds sets for data
195 /// dependence checking.
196 class AccessAnalysis {
198 /// \brief Read or write access location.
199 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
200 typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;
202 AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI,
203 MemoryDepChecker::DepCandidates &DA)
204 : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckNeeded(false) {}
206 /// \brief Register a load and whether it is only read from.
207 void addLoad(AliasAnalysis::Location &Loc, bool IsReadOnly) {
208 Value *Ptr = const_cast<Value*>(Loc.Ptr);
209 AST.add(Ptr, AliasAnalysis::UnknownSize, Loc.AATags);
210 Accesses.insert(MemAccessInfo(Ptr, false));
212 ReadOnlyPtr.insert(Ptr);
215 /// \brief Register a store.
216 void addStore(AliasAnalysis::Location &Loc) {
217 Value *Ptr = const_cast<Value*>(Loc.Ptr);
218 AST.add(Ptr, AliasAnalysis::UnknownSize, Loc.AATags);
219 Accesses.insert(MemAccessInfo(Ptr, true));
222 /// \brief Check whether we can check the pointers at runtime for
223 /// non-intersection.
224 bool canCheckPtrAtRT(LoopAccessInfo::RuntimePointerCheck &RtCheck,
225 unsigned &NumComparisons, ScalarEvolution *SE,
226 Loop *TheLoop, const ValueToValueMap &Strides,
227 bool ShouldCheckStride = false);
229 /// \brief Goes over all memory accesses, checks whether a RT check is needed
230 /// and builds sets of dependent accesses.
231 void buildDependenceSets() {
232 processMemAccesses();
235 bool isRTCheckNeeded() { return IsRTCheckNeeded; }
237 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
239 /// We decided that no dependence analysis would be used. Reset the state.
240 void resetDepChecks(MemoryDepChecker &DepChecker) {
242 DepChecker.clearInterestingDependences();
245 MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; }
248 typedef SetVector<MemAccessInfo> PtrAccessSet;
250 /// \brief Go over all memory accesses and check whether runtime pointer checks
251 /// are needed and build sets of dependency check candidates.
252 void processMemAccesses();
254 /// Set of all accesses.
255 PtrAccessSet Accesses;
257 const DataLayout &DL;
259 /// Set of accesses that need a further dependence check.
260 MemAccessInfoSet CheckDeps;
262 /// Set of pointers that are read only.
263 SmallPtrSet<Value*, 16> ReadOnlyPtr;
265 /// An alias set tracker to partition the access set by underlying object and
266 /// intrinsic property (such as TBAA metadata).
271 /// Sets of potentially dependent accesses - members of one set share an
272 /// underlying pointer. The set "CheckDeps" identifies which sets really need a
273 /// dependence check.
274 MemoryDepChecker::DepCandidates &DepCands;
276 bool IsRTCheckNeeded;
279 } // end anonymous namespace
281 /// \brief Check whether a pointer can participate in a runtime bounds check.
282 static bool hasComputableBounds(ScalarEvolution *SE,
283 const ValueToValueMap &Strides, Value *Ptr) {
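// Rough intuition (illustrative values): a pointer whose stride-replaced SCEV
// is an affine AddRec such as {%A,+,4} has bounds we can compute and check at
// runtime, whereas an indirect access such as &A[B[i]] does not produce an
// AddRec at all and therefore has no bounds we can check.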
284 const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
285 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
289 return AR->isAffine();
292 /// \brief Check the stride of the pointer and ensure that it does not wrap in
293 /// the address space.
294 static int isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
295 const ValueToValueMap &StridesMap);
297 bool AccessAnalysis::canCheckPtrAtRT(
298 LoopAccessInfo::RuntimePointerCheck &RtCheck, unsigned &NumComparisons,
299 ScalarEvolution *SE, Loop *TheLoop, const ValueToValueMap &StridesMap,
300 bool ShouldCheckStride) {
301 // Find pointers with computable bounds. We are going to use this information
302 // to place a runtime bound check.
305 bool IsDepCheckNeeded = isDependencyCheckNeeded();
308 // We assign a consecutive id to accesses from different alias sets.
309 // Accesses between different groups don't need to be checked.
311 for (auto &AS : AST) {
312 unsigned NumReadPtrChecks = 0;
313 unsigned NumWritePtrChecks = 0;
315 // We assign a consecutive id to accesses from different dependence sets.
316 // Accesses within the same set don't need a runtime check.
317 unsigned RunningDepId = 1;
318 DenseMap<Value *, unsigned> DepSetId;
321 Value *Ptr = A.getValue();
322 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
323 MemAccessInfo Access(Ptr, IsWrite);
330 if (hasComputableBounds(SE, StridesMap, Ptr) &&
331 // When we run after a failing dependency check we have to make sure
332 // we don't have wrapping pointers.
333 (!ShouldCheckStride ||
334 isStridedPtr(SE, Ptr, TheLoop, StridesMap) == 1)) {
335 // The id of the dependence set.
338 if (IsDepCheckNeeded) {
339 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
340 unsigned &LeaderId = DepSetId[Leader];
342 LeaderId = RunningDepId++;
345 // Each access has its own dependence set.
346 DepId = RunningDepId++;
348 RtCheck.insert(SE, TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap);
350 DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
352 DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
357 if (IsDepCheckNeeded && CanDoRT && RunningDepId == 2)
358 NumComparisons += 0; // Only one dependence set.
360 NumComparisons += (NumWritePtrChecks * (NumReadPtrChecks +
361 NumWritePtrChecks - 1));
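// The formula above, worked through with arbitrary counts: 2 write pointers
// and 3 read pointers in this alias set give 2 * (3 + 2 - 1) = 8. Every write
// is counted against every read and against every other write; write-write
// pairs are counted from both sides, so this is a conservative upper bound on
// the number of pairwise checks.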
367 // If the pointers that we would use for the bounds comparison have different
368 // address spaces, assume the values aren't directly comparable, so we can't
369 // use them for the runtime check. We also have to assume they could
370 // overlap. In the future there should be metadata for whether address spaces
371 // are disjoint.
372 unsigned NumPointers = RtCheck.Pointers.size();
373 for (unsigned i = 0; i < NumPointers; ++i) {
374 for (unsigned j = i + 1; j < NumPointers; ++j) {
375 // Only need to check pointers between two different dependency sets.
376 if (RtCheck.DependencySetId[i] == RtCheck.DependencySetId[j])
378 // Only need to check pointers in the same alias set.
379 if (RtCheck.AliasSetId[i] != RtCheck.AliasSetId[j])
382 Value *PtrI = RtCheck.Pointers[i];
383 Value *PtrJ = RtCheck.Pointers[j];
385 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
386 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
388 DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
389 " different address spaces\n");
398 void AccessAnalysis::processMemAccesses() {
399 // We process the set twice: first we process read-write pointers, last we
400 // process read-only pointers. This allows us to skip dependence tests for
401 // read-only pointers.
403 DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
404 DEBUG(dbgs() << " AST: "; AST.dump());
405 DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
407 for (auto A : Accesses)
408 dbgs() << "\t" << *A.getPointer() << " (" <<
409 (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ?
410 "read-only" : "read")) << ")\n";
413 // The AliasSetTracker has nicely partitioned our pointers by metadata
414 // compatibility and potential for underlying-object overlap. As a result, we
415 // only need to check for potential pointer dependencies within each alias
416 // set.
417 for (auto &AS : AST) {
418 // Note that both the alias-set tracker and the alias sets themselves use
419 // linked lists internally and so the iteration order here is deterministic
420 // (matching the original instruction order within each set).
422 bool SetHasWrite = false;
424 // Map of pointers to last access encountered.
425 typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap;
426 UnderlyingObjToAccessMap ObjToLastAccess;
428 // Set of accesses to check after all writes have been processed.
429 PtrAccessSet DeferredAccesses;
431 // Iterate over each alias set twice, once to process read/write pointers,
432 // and then to process read-only pointers.
433 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
434 bool UseDeferred = SetIteration > 0;
435 PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;
438 Value *Ptr = AV.getValue();
440 // For a single memory access in AliasSetTracker, Accesses may contain
441 // both read and write, and they both need to be handled for CheckDeps.
443 if (AC.getPointer() != Ptr)
446 bool IsWrite = AC.getInt();
448 // If we're using the deferred access set, then it contains only
449 // reads.
450 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
451 if (UseDeferred && !IsReadOnlyPtr)
453 // Otherwise, the pointer must be in the PtrAccessSet, either as a
454 // read or a write.
455 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
456 S.count(MemAccessInfo(Ptr, false))) &&
457 "Alias-set pointer not in the access set?");
459 MemAccessInfo Access(Ptr, IsWrite);
460 DepCands.insert(Access);
462 // Memorize read-only pointers for later processing and skip them in
463 // the first round (they need to be checked after we have seen all
464 // write pointers). Note: we also mark pointers that are not
465 // consecutive as "read-only" pointers (so that we check
466 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
467 if (!UseDeferred && IsReadOnlyPtr) {
468 DeferredAccesses.insert(Access);
472 // If this is a write - check other reads and writes for conflicts. If
473 // this is a read, only check other writes for conflicts (but only if
474 // there is no other write to the ptr - this is an optimization to
475 // catch "a[i] = a[i] + " without having to do a dependence check).
476 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
477 CheckDeps.insert(Access);
478 IsRTCheckNeeded = true;
484 // Create sets of pointers connected by a shared alias set and
485 // underlying object.
486 typedef SmallVector<Value *, 16> ValueVector;
487 ValueVector TempObjects;
489 GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
490 DEBUG(dbgs() << "Underlying objects for pointer " << *Ptr << "\n");
491 for (Value *UnderlyingObj : TempObjects) {
492 UnderlyingObjToAccessMap::iterator Prev =
493 ObjToLastAccess.find(UnderlyingObj);
494 if (Prev != ObjToLastAccess.end())
495 DepCands.unionSets(Access, Prev->second);
497 ObjToLastAccess[UnderlyingObj] = Access;
498 DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
506 static bool isInBoundsGep(Value *Ptr) {
507 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
508 return GEP->isInBounds();
512 /// \brief Check whether the access through \p Ptr has a constant stride.
513 static int isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
514 const ValueToValueMap &StridesMap) {
515 const Type *Ty = Ptr->getType();
516 assert(Ty->isPointerTy() && "Unexpected non-ptr");
518 // Make sure that the pointer does not point to aggregate types.
519 const PointerType *PtrTy = cast<PointerType>(Ty);
520 if (PtrTy->getElementType()->isAggregateType()) {
521 DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
526 const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, StridesMap, Ptr);
528 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
530 DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer "
531 << *Ptr << " SCEV: " << *PtrScev << "\n");
535 // The access function must stride over the innermost loop.
536 if (Lp != AR->getLoop()) {
537 DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " <<
538 *Ptr << " SCEV: " << *PtrScev << "\n");
541 // The address calculation must not wrap. Otherwise, a dependence could be
542 // inverted.
543 // An inbounds getelementptr that is an AddRec with a unit stride
544 // cannot wrap per definition. The unit stride requirement is checked later.
545 // A getelementptr without an inbounds attribute and unit stride would have
546 // to access the pointer value "0" which is undefined behavior in address
547 // space 0, therefore we can also vectorize this case.
548 bool IsInBoundsGEP = isInBoundsGep(Ptr);
549 bool IsNoWrapAddRec = AR->getNoWrapFlags(SCEV::NoWrapMask);
550 bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
551 if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
552 DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
553 << *Ptr << " SCEV: " << *PtrScev << "\n");
557 // Check the step is constant.
558 const SCEV *Step = AR->getStepRecurrence(*SE);
560 // Calculate the pointer stride and check if it is consecutive.
561 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
563 DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr <<
564 " SCEV: " << *PtrScev << "\n");
568 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
569 int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
570 const APInt &APStepVal = C->getValue()->getValue();
572 // Huge step value - give up.
573 if (APStepVal.getBitWidth() > 64)
576 int64_t StepVal = APStepVal.getSExtValue();
579 int64_t Stride = StepVal / Size;
580 int64_t Rem = StepVal % Size;
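// E.g. (values invented for illustration) an i32 array accessed as A[2*i] has
// StepVal == 8 and Size == 4, giving Stride == 2 and Rem == 0; a non-zero
// remainder would mean the step is not a whole number of elements, in which
// case no meaningful stride can be reported.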
584 // If the SCEV could wrap but we have an inbounds gep with a unit stride we
585 // know we can't "wrap around the address space". In case of address space
586 // zero we know that this won't happen without triggering undefined behavior.
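// Put differently: the argument above only holds for unit strides. With, say,
// a Stride of 2, the address could wrap around the address space while
// stepping over the one undefined/poison location, so when the AddRec has no
// no-wrap flags only Stride == 1 or Stride == -1 is accepted below.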
587 if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
588 Stride != 1 && Stride != -1)
594 bool MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
598 case BackwardVectorizable:
602 case ForwardButPreventsForwarding:
604 case BackwardVectorizableButPreventsForwarding:
607 llvm_unreachable("unexpected DepType!");
610 bool MemoryDepChecker::Dependence::isInterestingDependence(DepType Type) {
616 case BackwardVectorizable:
618 case ForwardButPreventsForwarding:
620 case BackwardVectorizableButPreventsForwarding:
623 llvm_unreachable("unexpected DepType!");
626 bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
630 case ForwardButPreventsForwarding:
634 case BackwardVectorizable:
636 case BackwardVectorizableButPreventsForwarding:
639 llvm_unreachable("unexpected DepType!");
642 bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
643 unsigned TypeByteSize) {
644 // If loads occur at a distance that is not a multiple of a feasible vector
645 // factor, store-load forwarding does not take place.
646 // Positive dependences might cause trouble because vectorizing them might
647 // prevent store-load forwarding, making vectorized code run a lot slower.
648 // a[i] = a[i-3] ^ a[i-8];
649 // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2] and
650 // hence on your typical architecture store-load forwarding does not take
651 // place. Vectorizing in such cases does not make sense.
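// A worked example with arbitrary numbers: for 4-byte elements and a
// dependence Distance of 12 bytes, NumCyclesForStoreLoadThroughMemory is 32
// and the smallest candidate VF tried below is 8 bytes; 12 is not a multiple
// of 8 and 12 / 8 == 1 < 32, so the maximum safe VF drops to 4 bytes. That is
// under the 2 * TypeByteSize == 8 byte minimum, and the distance is reported
// as a possible store-load forwarding problem.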
652 // Store-load forwarding distance.
653 const unsigned NumCyclesForStoreLoadThroughMemory = 8*TypeByteSize;
654 // Maximum vector factor.
655 unsigned MaxVFWithoutSLForwardIssues =
656 VectorizerParams::MaxVectorWidth * TypeByteSize;
657 if(MaxSafeDepDistBytes < MaxVFWithoutSLForwardIssues)
658 MaxVFWithoutSLForwardIssues = MaxSafeDepDistBytes;
660 for (unsigned vf = 2*TypeByteSize; vf <= MaxVFWithoutSLForwardIssues;
662 if (Distance % vf && Distance / vf < NumCyclesForStoreLoadThroughMemory) {
663 MaxVFWithoutSLForwardIssues = (vf >>=1);
668 if (MaxVFWithoutSLForwardIssues< 2*TypeByteSize) {
669 DEBUG(dbgs() << "LAA: Distance " << Distance <<
670 " that could cause a store-load forwarding conflict\n");
674 if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
675 MaxVFWithoutSLForwardIssues !=
676 VectorizerParams::MaxVectorWidth * TypeByteSize)
677 MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
681 /// \brief Check the dependence for two accesses with the same stride \p Stride.
682 /// \p Distance is the positive distance and \p TypeByteSize is type size in
683 /// bytes.
685 /// \returns true if they are independent.
686 static bool areStridedAccessesIndependent(unsigned Distance, unsigned Stride,
687 unsigned TypeByteSize) {
688 assert(Stride > 1 && "The stride must be greater than 1");
689 assert(TypeByteSize > 0 && "The type size in bytes must be non-zero");
690 assert(Distance > 0 && "The distance must be non-zero");
692 // Skip if the distance is not a multiple of the type byte size.
693 if (Distance % TypeByteSize)
696 unsigned ScaledDist = Distance / TypeByteSize;
698 // No dependence if the scaled distance is not a multiple of the stride.
700 // for (i = 0; i < 1024 ; i += 4)
701 // A[i+2] = A[i] + 1;
703 // Two accesses in memory (scaled distance is 2, stride is 4):
704 // | A[0] | | | | A[4] | | | |
705 // | | | A[2] | | | | A[6] | |
708 // for (i = 0; i < 1024 ; i += 3)
709 // A[i+4] = A[i] + 1;
711 // Two accesses in memory (scaled distance is 4, stride is 3):
712 // | A[0] | | | A[3] | | | A[6] | | |
713 // | | | | | A[4] | | | A[7] | |
714 return ScaledDist % Stride;
717 MemoryDepChecker::Dependence::DepType
718 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
719 const MemAccessInfo &B, unsigned BIdx,
720 const ValueToValueMap &Strides) {
721 assert (AIdx < BIdx && "Must pass arguments in program order");
723 Value *APtr = A.getPointer();
724 Value *BPtr = B.getPointer();
725 bool AIsWrite = A.getInt();
726 bool BIsWrite = B.getInt();
728 // Two reads are independent.
729 if (!AIsWrite && !BIsWrite)
730 return Dependence::NoDep;
732 // We cannot check pointers in different address spaces.
733 if (APtr->getType()->getPointerAddressSpace() !=
734 BPtr->getType()->getPointerAddressSpace())
735 return Dependence::Unknown;
737 const SCEV *AScev = replaceSymbolicStrideSCEV(SE, Strides, APtr);
738 const SCEV *BScev = replaceSymbolicStrideSCEV(SE, Strides, BPtr);
740 int StrideAPtr = isStridedPtr(SE, APtr, InnermostLoop, Strides);
741 int StrideBPtr = isStridedPtr(SE, BPtr, InnermostLoop, Strides);
743 const SCEV *Src = AScev;
744 const SCEV *Sink = BScev;
746 // If the induction step is negative we have to invert source and sink of the
747 // dependence.
748 if (StrideAPtr < 0) {
751 std::swap(APtr, BPtr);
752 std::swap(Src, Sink);
753 std::swap(AIsWrite, BIsWrite);
754 std::swap(AIdx, BIdx);
755 std::swap(StrideAPtr, StrideBPtr);
758 const SCEV *Dist = SE->getMinusSCEV(Sink, Src);
760 DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
761 << "(Induction step: " << StrideAPtr << ")\n");
762 DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
763 << *InstMap[BIdx] << ": " << *Dist << "\n");
765 // Need consecutive accesses. We don't want to vectorize
766 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
767 // the address space.
768 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){
769 DEBUG(dbgs() << "Non-consecutive pointer access\n");
770 return Dependence::Unknown;
773 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
775 DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
776 ShouldRetryWithRuntimeCheck = true;
777 return Dependence::Unknown;
780 Type *ATy = APtr->getType()->getPointerElementType();
781 Type *BTy = BPtr->getType()->getPointerElementType();
782 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
783 unsigned TypeByteSize = DL.getTypeAllocSize(ATy);
785 // Negative distances are not plausible dependencies.
786 const APInt &Val = C->getValue()->getValue();
787 if (Val.isNegative()) {
788 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
789 if (IsTrueDataDependence &&
790 (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
792 return Dependence::ForwardButPreventsForwarding;
794 DEBUG(dbgs() << "LAA: Dependence is negative: NoDep\n");
795 return Dependence::Forward;
798 // Write to the same location with the same size.
799 // Could be improved to assert type sizes are the same (i32 == float, etc).
802 return Dependence::NoDep;
803 DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
804 return Dependence::Unknown;
807 assert(Val.isStrictlyPositive() && "Expect a positive value");
811 "LAA: ReadWrite-Write positive dependency with different types\n");
812 return Dependence::Unknown;
815 unsigned Distance = (unsigned) Val.getZExtValue();
817 unsigned Stride = std::abs(StrideAPtr);
819 areStridedAccessesIndependent(Distance, Stride, TypeByteSize))
820 return Dependence::NoDep;
822 // Bail out early if passed-in parameters make vectorization not feasible.
823 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
824 VectorizerParams::VectorizationFactor : 1);
825 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
826 VectorizerParams::VectorizationInterleave : 1);
827 // The minimum number of iterations for a vectorized/unrolled version.
828 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
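// E.g. passing -force-vector-width=4 and -force-vector-interleave=2 gives
// MinNumIter = max(4 * 2, 2) = 8; with no forcing flags it stays at 2.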
830 // It's not vectorizable if the distance is smaller than the minimum distance
831 // needed for a vectorized/unrolled version. Vectorizing one iteration in
832 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
833 // TypeByteSize (no need to add the last gap distance).
835 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
837 // int *B = (int *)((char *)A + 14);
838 // for (i = 0 ; i < 1024 ; i += 2)
842 // Two accesses in memory (stride is 2):
843 // | A[0] | | A[2] | | A[4] | | A[6] | |
844 // | B[0] | | B[2] | | B[4] |
846 // Distance needed for vectorizing iterations except the last iteration:
847 // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
848 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
850 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
851 // 12, which is less than distance.
853 // If MinNumIter is 4 (Say if a user forces the vectorization factor to be 4),
854 // the minimum distance needed is 28, which is greater than distance. It is
855 // not safe to do vectorization.
856 unsigned MinDistanceNeeded =
857 TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
858 if (MinDistanceNeeded > Distance) {
859 DEBUG(dbgs() << "LAA: Failure because of positive distance " << Distance
861 return Dependence::Backward;
864 // Unsafe if the minimum distance needed is greater than max safe distance.
865 if (MinDistanceNeeded > MaxSafeDepDistBytes) {
866 DEBUG(dbgs() << "LAA: Failure because it needs at least "
867 << MinDistanceNeeded << " size in bytes");
868 return Dependence::Backward;
871 // Positive distance bigger than max vectorization factor.
872 // FIXME: Should use max factor instead of max distance in bytes, which could
873 // not handle different types.
874 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
875 // void foo (int *A, char *B) {
876 // for (unsigned i = 0; i < 1024; i++) {
877 // A[i+2] = A[i] + 1;
878 // B[i+2] = B[i] + 1;
882 // This case is currently unsafe according to the max safe distance. If we
883 // analyze the two accesses on array B, the max safe dependence distance
884 // is 2. Then we analyze the accesses on array A, and the minimum distance
885 // needed is 8, which is greater than 2 and therefore forbids vectorization.
886 // But actually both A and B could be vectorized with a factor of 2.
887 MaxSafeDepDistBytes =
888 Distance < MaxSafeDepDistBytes ? Distance : MaxSafeDepDistBytes;
890 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
891 if (IsTrueDataDependence &&
892 couldPreventStoreLoadForward(Distance, TypeByteSize))
893 return Dependence::BackwardVectorizableButPreventsForwarding;
895 DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
897 << MaxSafeDepDistBytes / (TypeByteSize * Stride) << '\n');
899 return Dependence::BackwardVectorizable;
902 bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
903 MemAccessInfoSet &CheckDeps,
904 const ValueToValueMap &Strides) {
906 MaxSafeDepDistBytes = -1U;
907 while (!CheckDeps.empty()) {
908 MemAccessInfo CurAccess = *CheckDeps.begin();
910 // Get the relevant memory access set.
911 EquivalenceClasses<MemAccessInfo>::iterator I =
912 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
914 // Check accesses within this set.
915 EquivalenceClasses<MemAccessInfo>::member_iterator AI, AE;
916 AI = AccessSets.member_begin(I), AE = AccessSets.member_end();
918 // Check every access pair.
920 CheckDeps.erase(*AI);
921 EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
923 // Check every accessing instruction pair in program order.
924 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
925 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
926 for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
927 I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
928 auto A = std::make_pair(&*AI, *I1);
929 auto B = std::make_pair(&*OI, *I2);
935 Dependence::DepType Type =
936 isDependent(*A.first, A.second, *B.first, B.second, Strides);
937 SafeForVectorization &= Dependence::isSafeForVectorization(Type);
939 // Gather dependences unless we accumulated MaxInterestingDependence
940 // dependences. In that case return as soon as we find the first
941 // unsafe dependence. This puts a limit on this quadratic
942 // algorithm.
943 if (RecordInterestingDependences) {
944 if (Dependence::isInterestingDependence(Type))
945 InterestingDependences.push_back(
946 Dependence(A.second, B.second, Type));
948 if (InterestingDependences.size() >= MaxInterestingDependence) {
949 RecordInterestingDependences = false;
950 InterestingDependences.clear();
951 DEBUG(dbgs() << "Too many dependences, stopped recording\n");
954 if (!RecordInterestingDependences && !SafeForVectorization)
963 DEBUG(dbgs() << "Total Interesting Dependences: "
964 << InterestingDependences.size() << "\n");
965 return SafeForVectorization;
968 SmallVector<Instruction *, 4>
969 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
970 MemAccessInfo Access(Ptr, isWrite);
971 auto &IndexVector = Accesses.find(Access)->second;
973 SmallVector<Instruction *, 4> Insts;
974 std::transform(IndexVector.begin(), IndexVector.end(),
975 std::back_inserter(Insts),
976 [&](unsigned Idx) { return this->InstMap[Idx]; });
980 const char *MemoryDepChecker::Dependence::DepName[] = {
981 "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
982 "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};
984 void MemoryDepChecker::Dependence::print(
985 raw_ostream &OS, unsigned Depth,
986 const SmallVectorImpl<Instruction *> &Instrs) const {
987 OS.indent(Depth) << DepName[Type] << ":\n";
988 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
989 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
992 bool LoopAccessInfo::canAnalyzeLoop() {
993 // We need to have a loop header.
994 DEBUG(dbgs() << "LAA: Found a loop: " <<
995 TheLoop->getHeader()->getName() << '\n');
997 // We can only analyze innermost loops.
998 if (!TheLoop->empty()) {
999 DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
1000 emitAnalysis(LoopAccessReport() << "loop is not the innermost loop");
1004 // We must have a single backedge.
1005 if (TheLoop->getNumBackEdges() != 1) {
1006 DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
1008 LoopAccessReport() <<
1009 "loop control flow is not understood by analyzer");
1013 // We must have a single exiting block.
1014 if (!TheLoop->getExitingBlock()) {
1015 DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
1017 LoopAccessReport() <<
1018 "loop control flow is not understood by analyzer");
1022 // We only handle bottom-tested loops, i.e. loops in which the condition is
1023 // checked at the end of each iteration. With that we can assume that all
1024 // instructions in the loop are executed the same number of times.
1025 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
1026 DEBUG(dbgs() << "LAA: loop control flow is not understood by analyzer\n");
1028 LoopAccessReport() <<
1029 "loop control flow is not understood by analyzer");
1033 // ScalarEvolution needs to be able to find the exit count.
1034 const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop);
1035 if (ExitCount == SE->getCouldNotCompute()) {
1036 emitAnalysis(LoopAccessReport() <<
1037 "could not determine number of loop iterations");
1038 DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
1045 void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
1047 typedef SmallVector<Value*, 16> ValueVector;
1048 typedef SmallPtrSet<Value*, 16> ValueSet;
1050 // Holds the Load and Store *instructions*.
1054 // Holds all the different accesses in the loop.
1055 unsigned NumReads = 0;
1056 unsigned NumReadWrites = 0;
1058 PtrRtCheck.Pointers.clear();
1059 PtrRtCheck.Need = false;
1061 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
1064 for (Loop::block_iterator bb = TheLoop->block_begin(),
1065 be = TheLoop->block_end(); bb != be; ++bb) {
1067 // Scan the BB and collect legal loads and stores.
1068 for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
1071 // If this is a load, save it. If this instruction can read from memory
1072 // but is not a load, then we quit. Notice that we don't handle function
1073 // calls that read or write.
1074 if (it->mayReadFromMemory()) {
1075 // Many math library functions read the rounding mode. We will only
1076 // vectorize a loop if it contains known function calls that don't set
1077 // the flag. Therefore, it is safe to ignore this read from memory.
1078 CallInst *Call = dyn_cast<CallInst>(it);
1079 if (Call && getIntrinsicIDForCall(Call, TLI))
1082 // If the function has an explicit vectorized counterpart, we can safely
1083 // assume that it can be vectorized.
1084 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
1085 TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
1088 LoadInst *Ld = dyn_cast<LoadInst>(it);
1089 if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
1090 emitAnalysis(LoopAccessReport(Ld)
1091 << "read with atomic ordering or volatile read");
1092 DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
1097 Loads.push_back(Ld);
1098 DepChecker.addAccess(Ld);
1102 // Save 'store' instructions. Abort if other instructions write to memory.
1103 if (it->mayWriteToMemory()) {
1104 StoreInst *St = dyn_cast<StoreInst>(it);
1106 emitAnalysis(LoopAccessReport(it) <<
1107 "instruction cannot be vectorized");
1111 if (!St->isSimple() && !IsAnnotatedParallel) {
1112 emitAnalysis(LoopAccessReport(St)
1113 << "write with atomic ordering or volatile write");
1114 DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
1119 Stores.push_back(St);
1120 DepChecker.addAccess(St);
1125 // Now we have two lists that hold the loads and the stores.
1126 // Next, we find the pointers that they use.
1128 // Check if we see any stores. If there are no stores, then we don't
1129 // care if the pointers are *restrict*.
1130 if (!Stores.size()) {
1131 DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
1136 MemoryDepChecker::DepCandidates DependentAccesses;
1137 AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
1138 AA, LI, DependentAccesses);
1140 // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
1141 // multiple times on the same object. If the ptr is accessed twice, once
1142 // for read and once for write, it will only appear once (on the write
1143 // list). This is okay, since we are going to check for conflicts between
1144 // writes and between reads and writes, but not between reads and reads.
1147 ValueVector::iterator I, IE;
1148 for (I = Stores.begin(), IE = Stores.end(); I != IE; ++I) {
1149 StoreInst *ST = cast<StoreInst>(*I);
1150 Value* Ptr = ST->getPointerOperand();
1151 // Check for store to loop invariant address.
1152 StoreToLoopInvariantAddress |= isUniform(Ptr);
1153 // If we did *not* see this pointer before, insert it to the read-write
1154 // list. At this phase it is only a 'write' list.
1155 if (Seen.insert(Ptr).second) {
1158 AliasAnalysis::Location Loc = MemoryLocation::get(ST);
1159 // The TBAA metadata could have a control dependency on the predication
1160 // condition, so we cannot rely on it when determining whether or not we
1161 // need runtime pointer checks.
1162 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
1163 Loc.AATags.TBAA = nullptr;
1165 Accesses.addStore(Loc);
1169 if (IsAnnotatedParallel) {
1171 << "LAA: A loop annotated parallel, ignore memory dependency "
1177 for (I = Loads.begin(), IE = Loads.end(); I != IE; ++I) {
1178 LoadInst *LD = cast<LoadInst>(*I);
1179 Value* Ptr = LD->getPointerOperand();
1180 // If we did *not* see this pointer before, insert it to the
1181 // read list. If we *did* see it before, then it is already in
1182 // the read-write list. This allows us to vectorize expressions
1183 // such as A[i] += x, because the address of A[i] is a read-write
1184 // pointer. This only works if the index of A[i] is consecutive.
1185 // If the index is unknown (for example A[B[i]]) then we may
1186 // read a few words, modify, and write a few words, and some of the
1187 // words may be written to the same address.
1188 bool IsReadOnlyPtr = false;
1189 if (Seen.insert(Ptr).second || !isStridedPtr(SE, Ptr, TheLoop, Strides)) {
1191 IsReadOnlyPtr = true;
1194 AliasAnalysis::Location Loc = MemoryLocation::get(LD);
1195 // The TBAA metadata could have a control dependency on the predication
1196 // condition, so we cannot rely on it when determining whether or not we
1197 // need runtime pointer checks.
1198 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
1199 Loc.AATags.TBAA = nullptr;
1201 Accesses.addLoad(Loc, IsReadOnlyPtr);
1204 // If we write (or read-write) to a single destination and there are no
1205 // other reads in this loop, then it is safe to vectorize.
1206 if (NumReadWrites == 1 && NumReads == 0) {
1207 DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
1212 // Build dependence sets and check whether we need a runtime pointer bounds
1213 // check.
1214 Accesses.buildDependenceSets();
1215 bool NeedRTCheck = Accesses.isRTCheckNeeded();
1217 // Find pointers with computable bounds. We are going to use this information
1218 // to place a runtime bound check.
1219 bool CanDoRT = false;
1221 CanDoRT = Accesses.canCheckPtrAtRT(PtrRtCheck, NumComparisons, SE, TheLoop,
1224 DEBUG(dbgs() << "LAA: We need to do " << NumComparisons <<
1225 " pointer comparisons.\n");
1227 // If there is only one set of dependences among which to check pointers, we
1228 // don't need a runtime check.
1229 if (NumComparisons == 0 && NeedRTCheck)
1230 NeedRTCheck = false;
1232 // Check that we found the bounds for the pointer.
1234 DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");
1235 else if (NeedRTCheck) {
1236 emitAnalysis(LoopAccessReport() << "cannot identify array bounds");
1237 DEBUG(dbgs() << "LAA: We can't vectorize because we can't find " <<
1238 "the array bounds.\n");
1244 PtrRtCheck.Need = NeedRTCheck;
1247 if (Accesses.isDependencyCheckNeeded()) {
1248 DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
1249 CanVecMem = DepChecker.areDepsSafe(
1250 DependentAccesses, Accesses.getDependenciesToCheck(), Strides);
1251 MaxSafeDepDistBytes = DepChecker.getMaxSafeDepDistBytes();
1253 if (!CanVecMem && DepChecker.shouldRetryWithRuntimeCheck()) {
1254 DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
1257 // Clear the dependency checks. We assume they are not needed.
1258 Accesses.resetDepChecks(DepChecker);
1261 PtrRtCheck.Need = true;
1263 CanDoRT = Accesses.canCheckPtrAtRT(PtrRtCheck, NumComparisons, SE,
1264 TheLoop, Strides, true);
1265 // Check that we found the bounds for the pointer.
1266 if (!CanDoRT && NumComparisons > 0) {
1267 emitAnalysis(LoopAccessReport()
1268 << "cannot check memory dependencies at runtime");
1269 DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
1280 DEBUG(dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
1281 << (NeedRTCheck ? "" : " don't")
1282 << " need a runtime memory check.\n");
1284 emitAnalysis(LoopAccessReport() <<
1285 "unsafe dependent memory operations in loop");
1286 DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
1290 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
1291 DominatorTree *DT) {
1292 assert(TheLoop->contains(BB) && "Unknown block used");
1294 // Blocks that do not dominate the latch need predication.
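// For example, the "then" block of an if inside the loop body does not
// dominate the latch (there is a path around it) and therefore needs
// predication, whereas the loop header always dominates the latch and never
// does.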
1295 BasicBlock* Latch = TheLoop->getLoopLatch();
1296 return !DT->dominates(BB, Latch);
1299 void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) {
1300 assert(!Report && "Multiple reports generated");
1304 bool LoopAccessInfo::isUniform(Value *V) const {
1305 return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
1308 // FIXME: this function is currently a duplicate of the one in
1309 // LoopVectorize.cpp.
1310 static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
1314 if (Instruction *I = dyn_cast<Instruction>(V))
1315 return I->getParent() == Loc->getParent() ? I : nullptr;
1319 std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeCheck(
1320 Instruction *Loc, const SmallVectorImpl<int> *PtrPartition) const {
1321 if (!PtrRtCheck.Need)
1322 return std::make_pair(nullptr, nullptr);
1324 unsigned NumPointers = PtrRtCheck.Pointers.size();
1325 SmallVector<TrackingVH<Value> , 2> Starts;
1326 SmallVector<TrackingVH<Value> , 2> Ends;
1328 LLVMContext &Ctx = Loc->getContext();
1329 SCEVExpander Exp(*SE, DL, "induction");
1330 Instruction *FirstInst = nullptr;
1332 for (unsigned i = 0; i < NumPointers; ++i) {
1333 Value *Ptr = PtrRtCheck.Pointers[i];
1334 const SCEV *Sc = SE->getSCEV(Ptr);
1336 if (SE->isLoopInvariant(Sc, TheLoop)) {
1337 DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" <<
1339 Starts.push_back(Ptr);
1340 Ends.push_back(Ptr);
1342 DEBUG(dbgs() << "LAA: Adding RT check for range:" << *Ptr << '\n');
1343 unsigned AS = Ptr->getType()->getPointerAddressSpace();
1345 // Use this type for pointer arithmetic.
1346 Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);
1348 Value *Start = Exp.expandCodeFor(PtrRtCheck.Starts[i], PtrArithTy, Loc);
1349 Value *End = Exp.expandCodeFor(PtrRtCheck.Ends[i], PtrArithTy, Loc);
1350 Starts.push_back(Start);
1351 Ends.push_back(End);
1355 IRBuilder<> ChkBuilder(Loc);
1356 // Our instructions might fold to a constant.
1357 Value *MemoryRuntimeCheck = nullptr;
1358 for (unsigned i = 0; i < NumPointers; ++i) {
1359 for (unsigned j = i+1; j < NumPointers; ++j) {
1360 if (!PtrRtCheck.needsChecking(i, j, PtrPartition))
1363 unsigned AS0 = Starts[i]->getType()->getPointerAddressSpace();
1364 unsigned AS1 = Starts[j]->getType()->getPointerAddressSpace();
1366 assert((AS0 == Ends[j]->getType()->getPointerAddressSpace()) &&
1367 (AS1 == Ends[i]->getType()->getPointerAddressSpace()) &&
1368 "Trying to bounds check pointers with different address spaces");
1370 Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
1371 Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);
1373 Value *Start0 = ChkBuilder.CreateBitCast(Starts[i], PtrArithTy0, "bc");
1374 Value *Start1 = ChkBuilder.CreateBitCast(Starts[j], PtrArithTy1, "bc");
1375 Value *End0 = ChkBuilder.CreateBitCast(Ends[i], PtrArithTy1, "bc");
1376 Value *End1 = ChkBuilder.CreateBitCast(Ends[j], PtrArithTy0, "bc");
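// The check below treats the ranges [Start0, End0] and [Start1, End1] as
// conflicting exactly when each one begins no later than the other ends,
// i.e. Start0 <= End1 && Start1 <= End0. E.g. (made-up addresses)
// [0x100, 0x200] and [0x180, 0x280] conflict, while [0x100, 0x200] and
// [0x300, 0x400] do not because 0x300 > 0x200.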
1378 Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
1379 FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
1380 Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
1381 FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
1382 Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
1383 FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
1384 if (MemoryRuntimeCheck) {
1385 IsConflict = ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict,
1387 FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
1389 MemoryRuntimeCheck = IsConflict;
1393 if (!MemoryRuntimeCheck)
1394 return std::make_pair(nullptr, nullptr);
1396 // We have to do this trickery because the IRBuilder might fold the check to a
1397 // constant expression in which case there is no Instruction anchored in a
1398 // basic block.
1399 Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
1400 ConstantInt::getTrue(Ctx));
1401 ChkBuilder.Insert(Check, "memcheck.conflict");
1402 FirstInst = getFirstInst(FirstInst, Check, Loc);
1403 return std::make_pair(FirstInst, Check);
1406 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
1407 const DataLayout &DL,
1408 const TargetLibraryInfo *TLI, AliasAnalysis *AA,
1409 DominatorTree *DT, LoopInfo *LI,
1410 const ValueToValueMap &Strides)
1411 : DepChecker(SE, L), NumComparisons(0), TheLoop(L), SE(SE), DL(DL),
1412 TLI(TLI), AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0),
1413 MaxSafeDepDistBytes(-1U), CanVecMem(false),
1414 StoreToLoopInvariantAddress(false) {
1415 if (canAnalyzeLoop())
1416 analyzeLoop(Strides);
1419 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
1421 if (PtrRtCheck.Need)
1422 OS.indent(Depth) << "Memory dependences are safe with run-time checks\n";
1424 OS.indent(Depth) << "Memory dependences are safe\n";
1428 OS.indent(Depth) << "Report: " << Report->str() << "\n";
1430 if (auto *InterestingDependences = DepChecker.getInterestingDependences()) {
1431 OS.indent(Depth) << "Interesting Dependences:\n";
1432 for (auto &Dep : *InterestingDependences) {
1433 Dep.print(OS, Depth + 2, DepChecker.getMemoryInstructions());
1437 OS.indent(Depth) << "Too many interesting dependences, not recorded\n";
1439 // List the pairs of accesses that need run-time checks to prove independence.
1440 PtrRtCheck.print(OS, Depth);
1443 OS.indent(Depth) << "Store to invariant address was "
1444 << (StoreToLoopInvariantAddress ? "" : "not ")
1445 << "found in loop.\n";
1448 const LoopAccessInfo &
1449 LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) {
1450 auto &LAI = LoopAccessInfoMap[L];
1453 assert((!LAI || LAI->NumSymbolicStrides == Strides.size()) &&
1454 "Symbolic strides changed for loop");
1458 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
1459 LAI = llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, LI,
1462 LAI->NumSymbolicStrides = Strides.size();
1468 void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const {
1469 LoopAccessAnalysis &LAA = *const_cast<LoopAccessAnalysis *>(this);
1471 ValueToValueMap NoSymbolicStrides;
1473 for (Loop *TopLevelLoop : *LI)
1474 for (Loop *L : depth_first(TopLevelLoop)) {
1475 OS.indent(2) << L->getHeader()->getName() << ":\n";
1476 auto &LAI = LAA.getInfo(L, NoSymbolicStrides);
1481 bool LoopAccessAnalysis::runOnFunction(Function &F) {
1482 SE = &getAnalysis<ScalarEvolution>();
1483 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
1484 TLI = TLIP ? &TLIP->getTLI() : nullptr;
1485 AA = &getAnalysis<AliasAnalysis>();
1486 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1487 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
1492 void LoopAccessAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
1493 AU.addRequired<ScalarEvolution>();
1494 AU.addRequired<AliasAnalysis>();
1495 AU.addRequired<DominatorTreeWrapperPass>();
1496 AU.addRequired<LoopInfoWrapperPass>();
1498 AU.setPreservesAll();
1501 char LoopAccessAnalysis::ID = 0;
1502 static const char laa_name[] = "Loop Access Analysis";
1503 #define LAA_NAME "loop-accesses"
1505 INITIALIZE_PASS_BEGIN(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)
1506 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
1507 INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
1508 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1509 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
1510 INITIALIZE_PASS_END(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)
1513 Pass *createLAAPass() {
1514 return new LoopAccessAnalysis();