//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/VectorUtils.h"

using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
    VectorizationFactor("force-vector-width", cl::Hidden,
                        cl::desc("Sets the SIMD width. Zero is autoselect."),
                        cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
    VectorizationInterleave("force-vector-interleave", cl::Hidden,
                            cl::desc("Sets the vectorization interleave count. "
                                     "Zero is autoselect."),
                            cl::location(
                                VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

void LoopAccessReport::emitAnalysis(const LoopAccessReport &Message,
                                    const Function *TheFunction,
                                    const Loop *TheLoop,
                                    const char *PassName) {
  DebugLoc DL = TheLoop->getStartLoc();
  if (const Instruction *I = Message.getInstr())
    DL = I->getDebugLoc();
  emitOptimizationRemarkAnalysis(TheFunction->getContext(), PassName,
                                 *TheFunction, DL, Message.str());
}

Value *llvm::stripIntegerCast(Value *V) {
  if (CastInst *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}

const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = SE->getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI != PtrToStride.end()) {
    Value *StrideVal = SI->second;

    // Strip casts.
    StrideVal = stripIntegerCast(StrideVal);

    // Replace symbolic stride by one.
    Value *One = ConstantInt::get(StrideVal->getType(), 1);
    ValueToValueMap RewriteMap;
    RewriteMap[StrideVal] = One;

    const SCEV *ByOne =
        SCEVParameterRewriter::rewrite(OrigSCEV, *SE, RewriteMap, true);
    DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *ByOne
                 << "\n");
    return ByOne;
  }

  // Otherwise, just return the SCEV of the original pointer.
  return SE->getSCEV(Ptr);
}

void LoopAccessInfo::RuntimePointerCheck::insert(
    ScalarEvolution *SE, Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
    unsigned ASId, const ValueToValueMap &Strides) {
  // Get the stride replaced scev.
  const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
  assert(AR && "Invalid addrec expression");
  const SCEV *Ex = SE->getBackedgeTakenCount(Lp);
  const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE);
  Pointers.push_back(Ptr);
  Starts.push_back(AR->getStart());
  Ends.push_back(ScEnd);
  IsWritePtr.push_back(WritePtr);
  DependencySetId.push_back(DepSetId);
  AliasSetId.push_back(ASId);
}
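
// As an illustration (hypothetical values): for a pointer whose SCEV is
// {%a,+,4} and a backedge-taken count of (n - 1), the recorded bounds are
// Start = %a and End = %a + 4 * (n - 1), i.e. the first and last address the
// loop touches through this pointer; the runtime check later compares such
// [Start, End] ranges for overlap.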

bool LoopAccessInfo::RuntimePointerCheck::needsChecking(unsigned I,
                                                        unsigned J) const {
  // No need to check if two readonly pointers intersect.
  if (!IsWritePtr[I] && !IsWritePtr[J])
    return false;
  // Only need to check pointers between two different dependency sets.
  if (DependencySetId[I] == DependencySetId[J])
    return false;
  // Only need to check pointers in the same alias set.
  if (AliasSetId[I] != AliasSetId[J])
    return false;
  return true;
}

void LoopAccessInfo::RuntimePointerCheck::print(raw_ostream &OS,
                                                unsigned Depth) const {
  unsigned NumPointers = Pointers.size();
  if (NumPointers == 0)
    return;

  OS.indent(Depth) << "Run-time memory checks:\n";
  unsigned N = 0;
  for (unsigned I = 0; I < NumPointers; ++I)
    for (unsigned J = I + 1; J < NumPointers; ++J)
      if (needsChecking(I, J)) {
        OS.indent(Depth) << N++ << ":\n";
        OS.indent(Depth + 2) << *Pointers[I] << "\n";
        OS.indent(Depth + 2) << *Pointers[J] << "\n";
      }
}

namespace {

/// \brief Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// \brief Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

  /// \brief Set of potential dependent memory accesses.
  typedef EquivalenceClasses<MemAccessInfo> DepCandidates;

  AccessAnalysis(const DataLayout *Dl, AliasAnalysis *AA, DepCandidates &DA) :
    DL(Dl), AST(*AA), DepCands(DA), IsRTCheckNeeded(false) {}

  /// \brief Register a load and whether it is only read from.
  void addLoad(AliasAnalysis::Location &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, AliasAnalysis::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// \brief Register a store.
  void addStore(AliasAnalysis::Location &Loc) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, AliasAnalysis::UnknownSize, Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// \brief Check whether we can check the pointers at runtime for
  /// non-intersection.
  bool canCheckPtrAtRT(LoopAccessInfo::RuntimePointerCheck &RtCheck,
                       unsigned &NumComparisons, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckStride = false);

  /// \brief Goes over all memory accesses, checks whether a RT check is
  /// needed and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  bool isRTCheckNeeded() { return IsRTCheckNeeded; }

  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
  void resetDepChecks() { CheckDeps.clear(); }

  MemAccessInfoSet &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// \brief Go over all memory accesses and check whether runtime pointer
  /// checks are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  /// Set of accesses that need a further dependence check.
  MemAccessInfoSet CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  const DataLayout *DL;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  DepCandidates &DepCands;

  bool IsRTCheckNeeded;
};

} // end anonymous namespace

/// \brief Check whether a pointer can participate in a runtime bounds check.
static bool hasComputableBounds(ScalarEvolution *SE,
                                const ValueToValueMap &Strides, Value *Ptr) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR)
    return false;

  return AR->isAffine();
}
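
// For instance (illustrative): a pointer whose SCEV is the affine AddRec
// {%base,+,4}<%loop> has computable bounds, whereas an indirect access such
// as "A[B[i]]" does not form an AddRec and is rejected.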

/// \brief Check the stride of the pointer and ensure that it does not wrap in
/// the address space.
static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
                        const Loop *Lp, const ValueToValueMap &StridesMap);

bool AccessAnalysis::canCheckPtrAtRT(
    LoopAccessInfo::RuntimePointerCheck &RtCheck, unsigned &NumComparisons,
    ScalarEvolution *SE, Loop *TheLoop, const ValueToValueMap &StridesMap,
    bool ShouldCheckStride) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();
  NumComparisons = 0;

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    unsigned NumReadPtrChecks = 0;
    unsigned NumWritePtrChecks = 0;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (hasComputableBounds(SE, StridesMap, Ptr) &&
          // When we run after a failing dependency check we have to make sure
          // we don't have wrapping pointers.
          (!ShouldCheckStride ||
           isStridedPtr(SE, DL, Ptr, TheLoop, StridesMap) == 1)) {
        // The id of the dependence set.
        unsigned DepId;

        if (IsDepCheckNeeded) {
          Value *Leader = DepCands.getLeaderValue(Access).getPointer();
          unsigned &LeaderId = DepSetId[Leader];
          if (!LeaderId)
            LeaderId = RunningDepId++;
          DepId = LeaderId;
        } else
          // Each access has its own dependence set.
          DepId = RunningDepId++;

        RtCheck.insert(SE, TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap);

        DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
      } else {
        CanDoRT = false;
      }
    }

    if (IsDepCheckNeeded && CanDoRT && RunningDepId == 2)
      NumComparisons += 0; // Only one dependence set.
    else {
      NumComparisons += (NumWritePtrChecks * (NumReadPtrChecks +
                                              NumWritePtrChecks - 1));
    }

    ++ASId;
  }
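
  // Worked example (hypothetical counts): an alias set containing 2 write
  // pointers and 3 read pointers, split across several dependence sets,
  // contributes 2 * (3 + 2 - 1) = 8 to NumComparisons, a conservative
  // estimate of the write/read and write/write pairs that must be compared.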

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.DependencySetId[i] == RtCheck.DependencySetId[j])
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.AliasSetId[i] != RtCheck.AliasSetId[j])
        continue;

      Value *PtrI = RtCheck.Pointers[i];
      Value *PtrJ = RtCheck.Pointers[j];

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        DEBUG(dbgs() << "LAA: Runtime check would require comparison between"
                        " different address spaces\n");
        return false;
      }
    }
  }

  return CanDoRT;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  DEBUG(dbgs() << "  AST: "; AST.dump());
  DEBUG(dbgs() << "LAA:   Accesses:\n");
  DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " (" <<
                (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ?
                                         "read-only" : "read")) << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write - check other reads and writes for conflicts. If
          // this is a read only check other writes for conflicts (but only if
          // there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.insert(Access);
            IsRTCheckNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<Value *, 16> ValueVector;
          ValueVector TempObjects;
          GetUnderlyingObjects(Ptr, TempObjects, DL);
          for (Value *UnderlyingObj : TempObjects) {
            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
          }
        }
      }
    }
  }
}

namespace {

/// \brief Checks memory dependences among accesses to the same underlying
/// object to determine whether vectorization is legal or not (and at
/// which vectorization factor).
///
/// This class works under the assumption that we already checked that memory
/// locations with different underlying pointers are "must-not alias".
/// We use the ScalarEvolution framework to symbolically evaluate access
/// functions pairs. Since we currently don't restructure the loop we can rely
/// on the program order of memory accesses to determine their safety.
/// At the moment we will only deem accesses as safe for:
///  * A negative constant distance assuming program order.
///
///      Safe: tmp = a[i + 1];      OR     a[i + 1] = x;
///            a[i] = tmp;                 y = a[i];
///
///   The latter case is safe because later checks guarantee that there can't
///   be a cycle through a phi node (that is, we check that "x" and "y" are
///   not the same variable: a header phi can only be an induction or a
///   reduction, a reduction can't have a memory sink, an induction can't have
///   a memory source). This is important and must not be violated (or we have
///   to resort to checking for cycles through memory).
///
///  * A positive constant distance assuming program order that is bigger
///    than the biggest memory access.
///
///      tmp = a[i]       OR              b[i] = x
///      a[i+2] = tmp                     y = b[i+2];
///
///      Safe distance: 2 x sizeof(a[0]), and 2 x sizeof(b[0]), respectively.
///
///  * Zero distances and all accesses have the same size.
///
class MemoryDepChecker {
public:
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

  MemoryDepChecker(ScalarEvolution *Se, const DataLayout *Dl, const Loop *L)
      : SE(Se), DL(Dl), InnermostLoop(L), AccessIdx(0),
        ShouldRetryWithRuntimeCheck(false) {}

  /// \brief Register the location (instructions are given increasing numbers)
  /// of a write access.
  void addAccess(StoreInst *SI) {
    Value *Ptr = SI->getPointerOperand();
    Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
    InstMap.push_back(SI);
    ++AccessIdx;
  }

  /// \brief Register the location (instructions are given increasing numbers)
  /// of a read access.
  void addAccess(LoadInst *LI) {
    Value *Ptr = LI->getPointerOperand();
    Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
    InstMap.push_back(LI);
    ++AccessIdx;
  }

  /// \brief Check whether the dependencies between the accesses are safe.
  ///
  /// Only checks sets with elements in \p CheckDeps.
  bool areDepsSafe(AccessAnalysis::DepCandidates &AccessSets,
                   MemAccessInfoSet &CheckDeps, const ValueToValueMap &Strides);

  /// \brief The maximum number of bytes of a vector register we can vectorize
  /// the accesses safely with.
  unsigned getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }

  /// \brief In some cases, when the dependency check fails, we can still
  /// vectorize the loop with a dynamic array access check.
  bool shouldRetryWithRuntimeCheck() { return ShouldRetryWithRuntimeCheck; }

private:
  ScalarEvolution *SE;
  const DataLayout *DL;
  const Loop *InnermostLoop;

  /// \brief Maps access locations (ptr, read/write) to program order.
  DenseMap<MemAccessInfo, std::vector<unsigned> > Accesses;

  /// \brief Memory access instructions in program order.
  SmallVector<Instruction *, 16> InstMap;

  /// \brief The program order index to be used for the next instruction.
  unsigned AccessIdx;

  // We can access this many bytes in parallel safely.
  unsigned MaxSafeDepDistBytes;

  /// \brief If we see a non-constant dependence distance we can still try to
  /// vectorize this loop with runtime checks.
  bool ShouldRetryWithRuntimeCheck;

  /// \brief Check whether there is a plausible dependence between the two
  /// accesses.
  ///
  /// Access \p A must happen before \p B in program order. The two indices
  /// identify the index into the program order map.
  ///
  /// This function checks whether there is a plausible dependence (or the
  /// absence of such can't be proved) between the two accesses. If there is a
  /// plausible dependence but the dependence distance is bigger than one
  /// element access it records this distance in \p MaxSafeDepDistBytes (if
  /// this distance is smaller than any other distance encountered so far).
  /// Otherwise, this function returns true signaling a possible dependence.
  bool isDependent(const MemAccessInfo &A, unsigned AIdx,
                   const MemAccessInfo &B, unsigned BIdx,
                   const ValueToValueMap &Strides);

  /// \brief Check whether the data dependence could prevent store-load
  /// forwarding.
  bool couldPreventStoreLoadForward(unsigned Distance, unsigned TypeByteSize);
};

} // end anonymous namespace

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// \brief Check whether the access through \p Ptr has a constant stride.
static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
                        const Loop *Lp, const ValueToValueMap &StridesMap) {
  const Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  const PointerType *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
          << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (!AR) {
    DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer "
          << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " <<
          *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = AR->getNoWrapFlags(SCEV::NoWrapMask);
  bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
  if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
    DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
          << *Ptr << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Calculate the pointer stride and check if it is consecutive.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr <<
          " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  int64_t Size = DL->getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getValue()->getValue();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
      Stride != 1 && Stride != -1)
    return 0;

  return Stride;
}
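
// Illustrative example (hypothetical code, not from a test case): for an
// access "A[i]" into an array of i32 the AddRec step is 4 bytes and the
// element size is 4 bytes, so the function returns a stride of 1; "A[n - i]"
// gives -1; a non-affine subscript such as "A[B[i]]" is rejected and gives 0.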

bool MemoryDepChecker::couldPreventStoreLoadForward(unsigned Distance,
                                                    unsigned TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // Positive dependences might cause troubles because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.
  const unsigned NumCyclesForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  unsigned MaxVFWithoutSLForwardIssues =
      VectorizerParams::MaxVectorWidth * TypeByteSize;
  if (MaxSafeDepDistBytes < MaxVFWithoutSLForwardIssues)
    MaxVFWithoutSLForwardIssues = MaxSafeDepDistBytes;

  for (unsigned vf = 2 * TypeByteSize; vf <= MaxVFWithoutSLForwardIssues;
       vf *= 2) {
    if (Distance % vf && Distance / vf < NumCyclesForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (vf >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    DEBUG(dbgs() << "LAA: Distance " << Distance <<
          " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}
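
// Worked example (hypothetical values): with 4-byte elements and a Distance
// of 12 bytes, the first candidate VF of 8 bytes already fails (12 % 8 != 0
// and 12 / 8 is well below the 32-cycle forwarding window), so the maximum
// safe VF drops to 4 bytes; that is below 2 * TypeByteSize and the function
// reports a potential store-load forwarding conflict.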

bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
                                   const MemAccessInfo &B, unsigned BIdx,
                                   const ValueToValueMap &Strides) {
  assert (AIdx < BIdx && "Must pass arguments in program order");

  Value *APtr = A.getPointer();
  Value *BPtr = B.getPointer();
  bool AIsWrite = A.getInt();
  bool BIsWrite = B.getInt();

  // Two reads are independent.
  if (!AIsWrite && !BIsWrite)
    return false;

  // We cannot check pointers in different address spaces.
  if (APtr->getType()->getPointerAddressSpace() !=
      BPtr->getType()->getPointerAddressSpace())
    return true;

  const SCEV *AScev = replaceSymbolicStrideSCEV(SE, Strides, APtr);
  const SCEV *BScev = replaceSymbolicStrideSCEV(SE, Strides, BPtr);

  int StrideAPtr = isStridedPtr(SE, DL, APtr, InnermostLoop, Strides);
  int StrideBPtr = isStridedPtr(SE, DL, BPtr, InnermostLoop, Strides);

  const SCEV *Src = AScev;
  const SCEV *Sink = BScev;

  // If the induction step is negative we have to invert source and sink of the
  // dependence.
  if (StrideAPtr < 0) {
    std::swap(APtr, BPtr);
    std::swap(Src, Sink);
    std::swap(AIsWrite, BIsWrite);
    std::swap(AIdx, BIdx);
    std::swap(StrideAPtr, StrideBPtr);
  }

  const SCEV *Dist = SE->getMinusSCEV(Sink, Src);

  DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
               << "(Induction step: " << StrideAPtr << ")\n");
  DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
               << *InstMap[BIdx] << ": " << *Dist << "\n");

  // Need consecutive accesses. We don't want to vectorize
  // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
  // the address space.
  if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
    DEBUG(dbgs() << "Non-consecutive pointer access\n");
    return true;
  }

  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
  if (!C) {
    DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
    ShouldRetryWithRuntimeCheck = true;
    return true;
  }

  Type *ATy = APtr->getType()->getPointerElementType();
  Type *BTy = BPtr->getType()->getPointerElementType();
  unsigned TypeByteSize = DL->getTypeAllocSize(ATy);

  // Negative distances are not plausible dependencies.
  const APInt &Val = C->getValue()->getValue();
  if (Val.isNegative()) {
    bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
    if (IsTrueDataDependence &&
        (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
         ATy != BTy))
      return true;

    DEBUG(dbgs() << "LAA: Dependence is negative: NoDep\n");
    return false;
  }

  // Write to the same location with the same size.
  // Could be improved to assert type sizes are the same (i32 == float, etc).
  if (Val == 0) {
    if (ATy == BTy)
      return false;
    DEBUG(dbgs() << "LAA: Zero dependence difference but different types\n");
    return true;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  // Positive distance bigger than max vectorization factor.
  if (ATy != BTy) {
    DEBUG(dbgs() <<
          "LAA: ReadWrite-Write positive dependency with different types\n");
    return false;
  }

  unsigned Distance = (unsigned) Val.getZExtValue();

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);

  // The distance must be bigger than the size needed for a vectorized version
  // of the operation and the size of the vectorized operation must not be
  // bigger than the current maximum size.
  if (Distance < 2*TypeByteSize ||
      2*TypeByteSize > MaxSafeDepDistBytes ||
      Distance < TypeByteSize * ForcedUnroll * ForcedFactor) {
    DEBUG(dbgs() << "LAA: Failure because of Positive distance "
                 << Val.getSExtValue() << '\n');
    return true;
  }

  MaxSafeDepDistBytes = Distance < MaxSafeDepDistBytes ?
                        Distance : MaxSafeDepDistBytes;

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return true;

  DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue() <<
        " with max VF = " << MaxSafeDepDistBytes / TypeByteSize << '\n');

  return false;
}

bool MemoryDepChecker::areDepsSafe(AccessAnalysis::DepCandidates &AccessSets,
                                   MemAccessInfoSet &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1U;
  while (!CheckDeps.empty()) {
    MemAccessInfo CurAccess = *CheckDeps.begin();

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
      AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI, AE;
    AI = AccessSets.member_begin(I), AE = AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      CheckDeps.erase(*AI);
      EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
               I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
            if (*I1 < *I2 && isDependent(*AI, *I1, *OI, *I2, Strides))
              return false;
            if (*I2 < *I1 && isDependent(*OI, *I2, *AI, *I1, Strides))
              return false;
          }
        ++OI;
      }
      AI++;
    }
  }
  return true;
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    emitAnalysis(LoopAccessReport() << "loop is not the innermost loop");
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    emitAnalysis(
        LoopAccessReport() <<
        "loop control flow is not understood by analyzer");
    return false;
  }

  // We need to have a loop header.
  DEBUG(dbgs() << "LAA: Found a loop: " <<
        TheLoop->getHeader()->getName() << '\n');

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop);
  if (ExitCount == SE->getCouldNotCompute()) {
    emitAnalysis(LoopAccessReport() <<
                 "could not determine number of loop iterations");
    DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {

  typedef SmallVector<Value*, 16> ValueVector;
  typedef SmallPtrSet<Value*, 16> ValueSet;

  // Holds the Load and Store *instructions*.
  ValueVector Loads;
  ValueVector Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtCheck.Pointers.clear();
  PtrRtCheck.Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
  MemoryDepChecker DepChecker(SE, DL, TheLoop);

  // For each block.
  for (Loop::block_iterator bb = TheLoop->block_begin(),
       be = TheLoop->block_end(); bb != be; ++bb) {

    // Scan the BB and collect legal loads and stores.
    for (BasicBlock::iterator it = (*bb)->begin(), e = (*bb)->end(); it != e;
         ++it) {

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (it->mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        CallInst *Call = dyn_cast<CallInst>(it);
        if (Call && getIntrinsicIDForCall(Call, TLI))
          continue;

        LoadInst *Ld = dyn_cast<LoadInst>(it);
        if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
          emitAnalysis(LoopAccessReport(Ld)
                       << "read with atomic ordering or volatile read");
          DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          CanVecMem = false;
          return;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker.addAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to memory.
      if (it->mayWriteToMemory()) {
        StoreInst *St = dyn_cast<StoreInst>(it);
        if (!St) {
          emitAnalysis(LoopAccessReport(it) <<
                       "instruction cannot be vectorized");
          CanVecMem = false;
          return;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          emitAnalysis(LoopAccessReport(St)
                       << "write with atomic ordering or volatile write");
          DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          CanVecMem = false;
          return;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker.addAccess(St);
      }
    } // Next instr.
  } // Next block.

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  AccessAnalysis::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(DL, AA, DependentAccesses);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  ValueVector::iterator I, IE;
  for (I = Stores.begin(), IE = Stores.end(); I != IE; ++I) {
    StoreInst *ST = cast<StoreInst>(*I);
    Value* Ptr = ST->getPointerOperand();

    if (isUniform(Ptr)) {
      emitAnalysis(
          LoopAccessReport(ST)
          << "write to a loop invariant address could not be vectorized");
      DEBUG(dbgs() << "LAA: We don't allow storing to uniform addresses\n");
      CanVecMem = false;
      return;
    }

    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      AliasAnalysis::Location Loc = AA->getLocation(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    DEBUG(dbgs()
          << "LAA: A loop annotated parallel, ignore memory dependency "
          << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (I = Loads.begin(), IE = Loads.end(); I != IE; ++I) {
    LoadInst *LD = cast<LoadInst>(*I);
    Value* Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x; because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second ||
        !isStridedPtr(SE, DL, Ptr, TheLoop, Strides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    AliasAnalysis::Location Loc = AA->getLocation(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();
  bool NeedRTCheck = Accesses.isRTCheckNeeded();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  unsigned NumComparisons = 0;
  bool CanDoRT = false;
  if (NeedRTCheck)
    CanDoRT = Accesses.canCheckPtrAtRT(PtrRtCheck, NumComparisons, SE, TheLoop,
                                       Strides);

  DEBUG(dbgs() << "LAA: We need to do " << NumComparisons <<
        " pointer comparisons.\n");

  // If we only have one set of dependences to check pointers among we don't
  // need a runtime check.
  if (NumComparisons == 0 && NeedRTCheck)
    NeedRTCheck = false;

  // Check that we did not collect too many pointers or found an unsizeable
  // pointer.
  if (!CanDoRT || NumComparisons > RuntimeMemoryCheckThreshold) {
    PtrRtCheck.reset();
    CanDoRT = false;
  }

  if (CanDoRT)
    DEBUG(dbgs() << "LAA: We can perform a memory runtime check if needed.\n");

  if (NeedRTCheck && !CanDoRT) {
    emitAnalysis(LoopAccessReport() << "cannot identify array bounds");
    DEBUG(dbgs() << "LAA: We can't vectorize because we can't find " <<
          "the array bounds.\n");
    PtrRtCheck.reset();
    CanVecMem = false;
    return;
  }

  PtrRtCheck.Need = NeedRTCheck;

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker.areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), Strides);
    MaxSafeDepDistBytes = DepChecker.getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker.shouldRetryWithRuntimeCheck()) {
      DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
      NeedRTCheck = true;

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks();

      PtrRtCheck.reset();
      PtrRtCheck.Need = true;

      CanDoRT = Accesses.canCheckPtrAtRT(PtrRtCheck, NumComparisons, SE,
                                         TheLoop, Strides, true);
      // Check that we did not collect too many pointers or found an unsizeable
      // pointer.
      if (!CanDoRT || NumComparisons > RuntimeMemoryCheckThreshold) {
        if (!CanDoRT && NumComparisons > 0)
          emitAnalysis(LoopAccessReport()
                       << "cannot check memory dependencies at runtime");
        else
          emitAnalysis(LoopAccessReport()
                       << NumComparisons << " exceeds limit of "
                       << RuntimeMemoryCheckThreshold
                       << " dependent memory operations checked at runtime");
        DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (!CanVecMem)
    emitAnalysis(LoopAccessReport() <<
                 "unsafe dependent memory operations in loop");

  DEBUG(dbgs() << "LAA: We" << (NeedRTCheck ? "" : " don't") <<
        " need a runtime memory check.\n");
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  BasicBlock* Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) {
  assert(!Report && "Multiple reports generated");
  Report = Message;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}

// FIXME: this function is currently a duplicate of the one in
// LoopVectorize.cpp.
static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
                                 Instruction *Loc) {
  if (FirstInst)
    return FirstInst;
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == Loc->getParent() ? I : nullptr;
  return nullptr;
}

std::pair<Instruction *, Instruction *>
LoopAccessInfo::addRuntimeCheck(Instruction *Loc) const {
  Instruction *tnullptr = nullptr;
  if (!PtrRtCheck.Need)
    return std::pair<Instruction *, Instruction *>(tnullptr, tnullptr);

  unsigned NumPointers = PtrRtCheck.Pointers.size();
  SmallVector<TrackingVH<Value> , 2> Starts;
  SmallVector<TrackingVH<Value> , 2> Ends;

  LLVMContext &Ctx = Loc->getContext();
  SCEVExpander Exp(*SE, "induction");
  Instruction *FirstInst = nullptr;

  for (unsigned i = 0; i < NumPointers; ++i) {
    Value *Ptr = PtrRtCheck.Pointers[i];
    const SCEV *Sc = SE->getSCEV(Ptr);

    if (SE->isLoopInvariant(Sc, TheLoop)) {
      DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:" <<
            *Ptr << "\n");
      Starts.push_back(Ptr);
      Ends.push_back(Ptr);
    } else {
      DEBUG(dbgs() << "LAA: Adding RT check for range:" << *Ptr << '\n');
      unsigned AS = Ptr->getType()->getPointerAddressSpace();

      // Use this type for pointer arithmetic.
      Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);

      Value *Start = Exp.expandCodeFor(PtrRtCheck.Starts[i], PtrArithTy, Loc);
      Value *End = Exp.expandCodeFor(PtrRtCheck.Ends[i], PtrArithTy, Loc);
      Starts.push_back(Start);
      Ends.push_back(End);
    }
  }

  IRBuilder<> ChkBuilder(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i+1; j < NumPointers; ++j) {
      if (!PtrRtCheck.needsChecking(i, j))
        continue;

      unsigned AS0 = Starts[i]->getType()->getPointerAddressSpace();
      unsigned AS1 = Starts[j]->getType()->getPointerAddressSpace();

      assert((AS0 == Ends[j]->getType()->getPointerAddressSpace()) &&
             (AS1 == Ends[i]->getType()->getPointerAddressSpace()) &&
             "Trying to bounds check pointers with different address spaces");

      Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
      Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

      Value *Start0 = ChkBuilder.CreateBitCast(Starts[i], PtrArithTy0, "bc");
      Value *Start1 = ChkBuilder.CreateBitCast(Starts[j], PtrArithTy1, "bc");
      Value *End0 = ChkBuilder.CreateBitCast(Ends[i], PtrArithTy1, "bc");
      Value *End1 = ChkBuilder.CreateBitCast(Ends[j], PtrArithTy0, "bc");

      Value *Cmp0 = ChkBuilder.CreateICmpULE(Start0, End1, "bound0");
      FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
      Value *Cmp1 = ChkBuilder.CreateICmpULE(Start1, End0, "bound1");
      FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
      Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
      FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
      if (MemoryRuntimeCheck) {
        IsConflict = ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict,
                                         "conflict.rdx");
        FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
      }
      MemoryRuntimeCheck = IsConflict;
    }
  }

  // We have to do this trickery because the IRBuilder might fold the check to
  // a constant expression in which case there is no Instruction anchored in a
  // basic block.
  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
                                                 ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = getFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}
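
// The check emitted for a pair of ranges is, in essence (illustrative
// pseudo-IR, names simplified):
//   %bound0 = icmp ule i8* %Start0, %End1
//   %bound1 = icmp ule i8* %Start1, %End0
//   %found.conflict = and i1 %bound0, %bound1
// i.e. two [Start, End] ranges conflict exactly when each one starts no later
// than the other one ends; the individual pair checks are OR'ed together.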

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const DataLayout *DL,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT,
                               const ValueToValueMap &Strides)
    : TheLoop(L), SE(SE), DL(DL), TLI(TLI), AA(AA), DT(DT), NumLoads(0),
      NumStores(0), MaxSafeDepDistBytes(-1U), CanVecMem(false) {
  if (canAnalyzeLoop())
    analyzeLoop(Strides);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    if (PtrRtCheck.empty())
      OS.indent(Depth) << "Memory dependences are safe\n";
    else
      OS.indent(Depth)
          << "Memory dependences are safe with run-time checks\n";
  }

  if (Report)
    OS.indent(Depth) << "Report: " << Report->str() << "\n";

  // FIXME: Print unsafe dependences

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtCheck.print(OS, Depth);
  OS << "\n";
}

const LoopAccessInfo &
LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) {
  auto &LAI = LoopAccessInfoMap[L];

#ifndef NDEBUG
  assert((!LAI || LAI->NumSymbolicStrides == Strides.size()) &&
         "Symbolic strides changed for loop");
#endif

  if (!LAI) {
    LAI = llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, Strides);
#ifndef NDEBUG
    LAI->NumSymbolicStrides = Strides.size();
#endif
  }
  return *LAI;
}

void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessAnalysis &LAA = *const_cast<LoopAccessAnalysis *>(this);

  LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  ValueToValueMap NoSymbolicStrides;

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L, NoSymbolicStrides);
      LAI.print(OS, 4);
    }
}

bool LoopAccessAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolution>();
  DL = F.getParent()->getDataLayout();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AA = &getAnalysis<AliasAnalysis>();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  return false;
}

void LoopAccessAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolution>();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessAnalysis, LAA_NAME, laa_name, false, true)

namespace llvm {
  Pass *createLAAPass() {
    return new LoopAccessAnalysis();
  }
}