//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
using namespace llvm;

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions must use the same search depth,
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;
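// (aliasGEP asserts that the base pointer found by DecomposeGEPExpression
// matches the one found by GetUnderlyingObject; with differing depths, one
// could look through a GEP chain that the other gave up on.)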

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// isEscapeSource - Return true if the pointer is one which would have
/// been considered an escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
    return Size;
  return AliasAnalysis::UnknownSize;
}

/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meaning of "object" is slightly different in the
  // following contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //    char *p = (char*)malloc(100)
  //    char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". If q is passed to isObjectSmallerThan() as the 1st
  // parameter, then before llvm::getObjectSize() is called to get the size of
  // the entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/true);

  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}

/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
                         const DataLayout &DL, const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
enum ExtensionKind {
  EK_NotExtended,
  EK_SignExt,
  EK_ZeroExt
};

struct VariableGEPIndex {
  const Value *V;
  ExtensionKind Extension;
  int64_t Scale;

  bool operator==(const VariableGEPIndex &Other) const {
    return V == Other.V && Extension == Other.Extension &&
           Scale == Other.Scale;
  }

  bool operator!=(const VariableGEPIndex &Other) const {
    return !operator==(Other);
  }
};
}

/// GetLinearExpression - Analyze the specified value as a linear expression:
/// "A*V + B", where A and B are constant integers. Return the scale and offset
/// values as APInts and return V as a Value*, and return whether we looked
/// through any sign or zero extends. The incoming Value is known to have
/// IntegerType and it may already be sign or zero extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
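///
/// For example (illustrative), analyzing "sext i32 (mul i32 %x, 4) to i64"
/// yields %x as the returned Value, with Scale = 4, Offset = 0 and
/// Extension = EK_SignExt.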
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
                                  ExtensionKind &Extension,
                                  const DataLayout &DL, unsigned Depth,
                                  AssumptionCache *AC, DominatorTree *DT) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the
    // variable.
    Offset += Const->getValue();
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      switch (BOp->getOpcode()) {
      default: break;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          break;
        // FALL THROUGH.
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset += RHSC->getValue();
        return V;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset *= RHSC->getValue();
        Scale *= RHSC->getValue();
        return V;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset <<= RHSC->getValue().getLimitedValue();
        Scale <<= RHSC->getValue().getLimitedValue();
        return V;
      }
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
      (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned OldWidth = Scale.getBitWidth();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    Scale = Scale.trunc(SmallWidth);
    Offset = Offset.trunc(SmallWidth);
    Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;

    Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension, DL,
                                        Depth + 1, AC, DT);
    Scale = Scale.zext(OldWidth);

    // We have to sign-extend even if Extension == EK_ZeroExt as we can't
    // decompose a sign extension (i.e. zext(x - 1) != zext(x) - zext(-1)).
    Offset = Offset.sext(OldWidth);

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
/// into a base pointer with a constant offset and a number of scaled symbolic
/// offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
/// the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth).
/// When DataLayout is not around, it just looks through pointer casts.
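///
/// For example (illustrative, 64-bit pointers assumed), the expression
///   getelementptr [10 x { i32, i32 }]* %base, i64 0, i64 %i, i32 1
/// decomposes into the base pointer %base, BaseOffs = 4, and a single
/// VariableGEPIndex {%i, EK_NotExtended, 8}.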
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                       SmallVectorImpl<VariableGEPIndex> &VarIndices,
                       bool &MaxLookupReached, const DataLayout &DL,
                       AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  MaxLookupReached = false;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->mayBeOverridden()) {
          V = GA->getAliasee();
          continue;
        }
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      return V;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getOperand(0)->getType()->getPointerElementType()->isSized())
      return V;

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin()+1,
         E = GEPOp->op_end(); I != E; ++I) {
      Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0) continue;

        BaseOffs += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero()) continue;
        BaseOffs += DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = DL.getTypeAllocSize(*GTI);
      ExtensionKind Extension = EK_NotExtended;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (DL.getPointerSizeInBits(AS) > Width)
        Extension = EK_SignExt;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension, DL,
                                  0, AC, DT);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue()*Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
        if (VarIndices[i].V == Index &&
            VarIndices[i].Extension == Extension) {
          Scale += VarIndices[i].Scale;
          VarIndices.erase(VarIndices.begin()+i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      if (unsigned ShiftBits = 64 - DL.getPointerSizeInBits(AS)) {
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }

      if (Scale) {
        VariableGEPIndex Entry = {Index, Extension,
                                  static_cast<int64_t>(Scale)};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  MaxLookupReached = true;
  return V;
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

namespace {
/// BasicAliasAnalysis - This is the primary alias analysis implementation.
struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
  static char ID; // Class identification, replacement for typeinfo
  BasicAliasAnalysis() : ImmutablePass(ID) {
    initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  AliasResult alias(const Location &LocA, const Location &LocB) override {
    assert(AliasCache.empty() && "AliasCache must be cleared after use!");
    assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
           "BasicAliasAnalysis doesn't support interprocedural queries.");
    AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags,
                                   LocB.Ptr, LocB.Size, LocB.AATags);
    // AliasCache rarely has more than 1 or 2 elements, always use
    // shrink_and_clear so it quickly returns to the inline capacity of the
    // SmallDenseMap if it ever grows larger.
    // FIXME: This should really be shrink_to_inline_capacity_and_clear().
    AliasCache.shrink_and_clear();
    VisitedPhiBBs.clear();
    return Alias;
  }

  ModRefResult getModRefInfo(ImmutableCallSite CS,
                             const Location &Loc) override;

  ModRefResult getModRefInfo(ImmutableCallSite CS1,
                             ImmutableCallSite CS2) override;

  /// pointsToConstantMemory - Chase pointers until we find a (constant
  /// global) or not.
  bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;

  /// Get the location associated with a pointer argument of a callsite.
  Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
                          ModRefResult &Mask) override;

  /// getModRefBehavior - Return the behavior when calling the given
  /// call site.
  ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;

  /// getModRefBehavior - Return the behavior when calling the given function.
  /// For use when the call site is not known.
  ModRefBehavior getModRefBehavior(const Function *F) override;

  /// getAdjustedAnalysisPointer - This method is used when a pass implements
  /// an analysis interface through multiple inheritance. If needed, it
  /// should override this to adjust the this pointer as needed for the
  /// specified pass info.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &AliasAnalysis::ID)
      return (AliasAnalysis*)this;
    return this;
  }

private:
  // AliasCache - Track alias queries to guard against recursion.
  typedef std::pair<Location, Location> LocPair;
  typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
  AliasCacheTy AliasCache;

  /// \brief Track phi nodes we have visited. When interpreting "Value" pointer
  /// equality as value equality we need to make sure that the "Value" is not
  /// part of a cycle. Otherwise, two uses could come from different
  /// "iterations" of a cycle and see different values for the same "Value"
  /// pointer.
  /// The following example shows the problem:
  ///   %p = phi(%alloca1, %addr2)
  ///   %l = load %p
  ///   %addr1 = gep %alloca2, 0, %l
  ///   %addr2 = gep %alloca2, 0, (%l + 1)
  ///   alias(%p, %addr1) -> MayAlias !
  ///   store %l, ...
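  /// Here %p can be %addr2 computed from the *previous* iteration's %l, while
  /// %addr1 is computed from the current iteration's %l, so the two pointers
  /// may be equal even though they look symbolically distinct.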
  SmallPtrSet<const BasicBlock*, 8> VisitedPhiBBs;

  // Visited - Track instructions visited by pointsToConstantMemory.
  SmallPtrSet<const Value*, 16> Visited;

  /// \brief Check whether two Values can be considered equivalent.
  ///
  /// In addition to pointer equivalence of \p V1 and \p V2 this checks
  /// whether they can not be part of a cycle in the value graph by looking at
  /// all visited phi nodes and making sure that the phis cannot reach the
  /// value. We have to do this because we are looking through phi nodes (that
  /// is, we say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
  bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);

  /// \brief Dest and Src are the variable indices from two decomposed
  /// GetElementPtr instructions GEP1 and GEP2 which have common base
  /// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
  /// difference between the two pointers.
  void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                          const SmallVectorImpl<VariableGEPIndex> &Src);

  // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
  // instruction against another.
  AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
                       const AAMDNodes &V1AAInfo,
                       const Value *V2, uint64_t V2Size,
                       const AAMDNodes &V2AAInfo,
                       const Value *UnderlyingV1, const Value *UnderlyingV2);

  // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
  // instruction against another.
  AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
                       const AAMDNodes &PNAAInfo,
                       const Value *V2, uint64_t V2Size,
                       const AAMDNodes &V2AAInfo);

  /// aliasSelect - Disambiguate a Select instruction against another value.
  AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
                          const AAMDNodes &SIAAInfo,
                          const Value *V2, uint64_t V2Size,
                          const AAMDNodes &V2AAInfo);

  AliasResult aliasCheck(const Value *V1, uint64_t V1Size,
                         AAMDNodes V1AAInfo,
                         const Value *V2, uint64_t V2Size,
                         AAMDNodes V2AAInfo);
};
} // End of anonymous namespace

// Register this pass...
char BasicAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS_BEGIN(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                   "Basic Alias Analysis (stateless AA impl)",
                   false, true, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_PASS_END(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                   "Basic Alias Analysis (stateless AA impl)",
                   false, true, false)

ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}

/// pointsToConstantMemory - Returns whether the given pointer value
/// points to memory that is local to the function, with global constants being
/// considered local to all functions.
bool
BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), *DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}
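
// memset_pattern16 is Darwin's libc routine
//   void memset_pattern16(void *b, const void *pattern16, size_t len);
// which fills b[0..len) with a repeating 16-byte pattern; the check below only
// accepts that exact non-variadic (pointer, pointer, integer) shape.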
static bool isMemsetPattern16(const Function *MS,
                              const TargetLibraryInfo &TLI) {
  if (TLI.has(LibFunc::memset_pattern16) &&
      MS->getName() == "memset_pattern16") {
    FunctionType *MemsetType = MS->getFunctionType();
    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
        isa<PointerType>(MemsetType->getParamType(0)) &&
        isa<PointerType>(MemsetType->getParamType(1)) &&
        isa<IntegerType>(MemsetType->getParamType(2)))
      return true;
  }
  return false;
}

/// getModRefBehavior - Return the behavior when calling the given call site.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return DoesNotAccessMemory;

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = OnlyReadsMemory;

  // The AliasAnalysis base class has some smarts, let's use them.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}

/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return DoesNotAccessMemory;

  // For intrinsics, we can check the table.
  if (unsigned iid = F->getIntrinsicID()) {
#define GET_INTRINSIC_MODREF_BEHAVIOR
#include "llvm/IR/Intrinsics.gen"
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = OnlyReadsMemory;

  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  if (isMemsetPattern16(F, TLI))
    Min = OnlyAccessesArgumentPointees;

  // Otherwise be conservative.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}

AliasAnalysis::Location
BasicAliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
                                   ModRefResult &Mask) {
  Location Loc = AliasAnalysis::getArgLocation(CS, ArgIdx, Mask);
  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II != nullptr)
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove: {
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memory intrinsic");
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
        Loc.Size = LenCI->getZExtValue();
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Memory intrinsic location pointer not argument?");
      Mask = ArgIdx ? Ref : Mod;
      break;
    }
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start: {
      assert(ArgIdx == 1 && "Invalid argument index");
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Intrinsic location pointer not argument?");
      Loc.Size = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      break;
    }
    case Intrinsic::invariant_end: {
      assert(ArgIdx == 2 && "Invalid argument index");
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Intrinsic location pointer not argument?");
      Loc.Size = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
      break;
    }
    case Intrinsic::arm_neon_vld1: {
      assert(ArgIdx == 0 && "Invalid argument index");
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Intrinsic location pointer not argument?");
      // LLVM's vld1 and vst1 intrinsics currently only support a single
      // vector register.
      Loc.Size = DL->getTypeStoreSize(II->getType());
      break;
    }
    case Intrinsic::arm_neon_vst1: {
      assert(ArgIdx == 0 && "Invalid argument index");
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Intrinsic location pointer not argument?");
      Loc.Size = DL->getTypeStoreSize(II->getArgOperand(1)->getType());
      break;
    }
    }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  else if (CS.getCalledFunction() &&
           isMemsetPattern16(CS.getCalledFunction(), TLI)) {
    assert((ArgIdx == 0 || ArgIdx == 1) &&
           "Invalid argument index for memset_pattern16");
    if (ArgIdx == 1)
      Loc.Size = 16;
    else if (const ConstantInt *LenCI =
                 dyn_cast<ConstantInt>(CS.getArgument(2)))
      Loc.Size = LenCI->getZExtValue();
    assert(Loc.Ptr == CS.getArgument(ArgIdx) &&
           "memset_pattern16 location pointer not argument?");
    Mask = ArgIdx ? Ref : Mod;
  }
  // FIXME: Handle memset_pattern4 and memset_pattern8 also.

  return Loc;
}

static bool isAssumeIntrinsic(ImmutableCallSite CS) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II && II->getIntrinsicID() == Intrinsic::assume)
    return true;

  return false;
}

bool BasicAliasAnalysis::doInitialization(Module &M) {
  InitializeAliasAnalysis(this, &M.getDataLayout());
  return true;
}

/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
                                  const Location &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, *DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know that
  // the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(Location(*CI), Location(Object))) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return NoModRef;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS))
    return NoModRef;

  // The AliasAnalysis base class has some smarts, let's use them.
  return AliasAnalysis::getModRefInfo(CS, Loc);
}

AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS1,
                                  ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS1) || isAssumeIntrinsic(CS2))
    return NoModRef;

  // The AliasAnalysis base class has some smarts, let's use them.
  return AliasAnalysis::getModRefInfo(CS1, CS2);
}

/// \brief Provide ad-hoc rules to disambiguate accesses through two GEP
/// operators, both having the exact same pointer operand.
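///
/// For example (illustrative), with %struct = type { i32, i32 }:
///   %f0 = getelementptr [8 x %struct]* %p, i64 0, i64 %i, i32 0
///   %f1 = getelementptr [8 x %struct]* %p, i64 0, i64 %j, i32 1
/// index disjoint fields of array elements that either coincide exactly or
/// don't overlap at all, so 4-byte accesses through them can be shown NoAlias.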
static AliasAnalysis::AliasResult
aliasSameBasePointerGEPs(const GEPOperator *GEP1, uint64_t V1Size,
                         const GEPOperator *GEP2, uint64_t V2Size,
                         const DataLayout &DL) {

  assert(GEP1->getPointerOperand() == GEP2->getPointerOperand() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return AliasAnalysis::MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == AliasAnalysis::UnknownSize ||
      V2Size == AliasAnalysis::UnknownSize)
    return AliasAnalysis::MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices aren't constants, we can't say anything.
  // If they're identical, the other indices might also be dynamically
  // equal, so the GEPs can alias.
  if (!C1 || !C2 || C1 == C2)
    return AliasAnalysis::MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getPointerOperandType(), IntermediateIndices)))
      return AliasAnalysis::MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  StructType *LastIndexedStruct =
      dyn_cast<StructType>(GetElementPtrInst::getIndexedType(
          GEP1->getPointerOperandType(), IntermediateIndices));

  if (!LastIndexedStruct)
    return AliasAnalysis::MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.
  //
  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return AliasAnalysis::NoAlias;

  return AliasAnalysis::MayAlias;
}

/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, DL),
/// UnderlyingV2 is the same for V2.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                             const AAMDNodes &V1AAInfo,
                             const Value *V2, uint64_t V2Size,
                             const AAMDNodes &V2AAInfo,
                             const Value *UnderlyingV1,
                             const Value *UnderlyingV2) {
  int64_t GEP1BaseOffset;
  bool GEP1MaxLookupReached;
  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;

  // We have to get two AssumptionCaches here because GEP1 and V2 may be from
  // different functions.
  // FIXME: This really doesn't make any sense. We get a dominator tree below
  // that can only refer to a single function. But this function (aliasGEP) is
  // a method on an immutable pass that can be called when there *isn't*
  // a single function. The old pass management layer makes this "work", but
  // this isn't really a clean solution.
  AssumptionCacheTracker &ACT = getAnalysis<AssumptionCacheTracker>();
  AssumptionCache *AC1 = nullptr, *AC2 = nullptr;
  if (auto *GEP1I = dyn_cast<Instruction>(GEP1))
    AC1 = &ACT.getAssumptionCache(
        const_cast<Function &>(*GEP1I->getParent()->getParent()));
  if (auto *I2 = dyn_cast<Instruction>(V2))
    AC2 = &ACT.getAssumptionCache(
        const_cast<Function &>(*I2->getParent()->getParent()));

  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;

  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Do the base pointers alias?
    AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, AAMDNodes(),
                                       UnderlyingV2, UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size,
                                                V1AAInfo, UnderlyingV2,
                                                V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about the
        // relation of the resulting pointer.
        int64_t GEP2BaseOffset;
        bool GEP2MaxLookupReached;
        SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
        const Value *GEP2BasePtr =
            DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                                   GEP2MaxLookupReached, *DL, AC2, DT);
        const Value *GEP1BasePtr =
            DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                                   GEP1MaxLookupReached, *DL, AC1, DT);
        // DecomposeGEPExpression and GetUnderlyingObject should return the
        // same result except when DecomposeGEPExpression has no DataLayout.
        if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
          assert(!DL &&
                 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
          return MayAlias;
        }
        // If the max search depth is reached the result is undefined
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            GEP1VariableIndices == GEP2VariableIndices)
          return NoAlias;
        GEP1VariableIndices.clear();
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, *DL, AC1, DT);

    int64_t GEP2BaseOffset;
    bool GEP2MaxLookupReached;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
        DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                               GEP2MaxLookupReached, *DL, AC2, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
      assert(!DL &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }

    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (DL && GEP1->getPointerOperand() == GEP2->getPointerOperand()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, *DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached the result is undefined
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.
    //
    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == UnknownSize && V2Size == UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, AAMDNodes(),
                               V2, V2Size, V2AAInfo);
    // If V2 may alias GEP base pointer, conservatively returns MayAlias.
    // If V2 is known not to alias GEP base pointer, then the two values
    // cannot alias per GEP semantics: "A pointer value formed from a
    // getelementptr instruction is associated with the addresses associated
    // with the first operand of the getelementptr".
    if (R != MustAlias)
      return R;

    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, *DL, AC1, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1) {
      assert(!DL &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }

    // If the max search depth is reached the result is undefined
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two GEP Case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEP's (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
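  // For example, if GEP1 is at constant offset 8 from V2: a 4-byte access at
  // V2 cannot reach offset 8 (NoAlias), while a 16-byte access at V2 overlaps
  // it (PartialAlias).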
  if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...).
      if (V1Size != UnknownSize && V2Size != UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!GEP1VariableIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t) GEP1VariableIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = GEP1VariableIndices[i].V;

        bool SignKnownZero, SignKnownOne;
        ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, *DL,
                       0, AC1, nullptr, DT);

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = GEP1VariableIndices[i].Extension == EK_ZeroExt;
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = GEP1VariableIndices[i].Scale;
        AllPositive =
          (SignKnownZero && Scale >= 0) ||
          (SignKnownOne && Scale < 0);
      }
    }

    Modulo = Modulo ^ (Modulo & (Modulo - 1));
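    // (The xor/and above keeps only the lowest set bit of Modulo, e.g.
    // 0b0110 -> 0b0010. Every variable scale is a multiple of that power of
    // two, so the two addresses can only differ by GEP1BaseOffset modulo it.)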

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != UnknownSize && V2Size != UnknownSize &&
        ModOffset >= V2Size && V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t) GEP1BaseOffset)
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}

static AliasAnalysis::AliasResult
MergeAliasResults(AliasAnalysis::AliasResult A, AliasAnalysis::AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasAnalysis::PartialAlias && B == AliasAnalysis::MustAlias) ||
      (B == AliasAnalysis::PartialAlias && A == AliasAnalysis::MustAlias))
    return AliasAnalysis::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasAnalysis::MayAlias;
}

/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
/// instruction against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                const AAMDNodes &SIAAInfo,
                                const Value *V2, uint64_t V2Size,
                                const AAMDNodes &V2AAInfo) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
          aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(), SISize, SIAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo);
  return MergeAliasResults(ThisAlias, Alias);
}

// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
                             const AAMDNodes &PNAAInfo,
                             const Value *V2, uint64_t V2Size,
                             const AAMDNodes &V2AAInfo) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(Location(PN, PNSize, PNAAInfo),
                   Location(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value*, 4> UniqueSrc;
  SmallVector<Value*, 4> V1Srcs;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *PV1 = PN->getIncomingValue(i);
    if (isa<PHINode>(PV1))
      // If any of the sources is itself a PHI, return MayAlias conservatively
      // to avoid compile time explosion. The worst possible case is if both
      // sides are PHI nodes. In which case, this is O(m x n) time where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;
    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo,
                                 V1Srcs[0], PNSize, PNAAInfo);
  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo,
                                       V, PNSize, PNAAInfo);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

// aliasCheck - Provide a bunch of ad-hoc rules to disambiguate in common cases,
// such as array references.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
                               AAMDNodes V1AAInfo,
                               const Value *V2, uint64_t V2Size,
                               AAMDNodes V2AAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, *DL, MaxLookupSearchDepth);
  const Value *O2 = GetUnderlyingObject(V2, *DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so they
  // don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
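  // (For example, a 16-byte access cannot validly be performed through a
  // pointer into an 8-byte alloca, so such a pointer can be assumed not to
  // alias the alloca.)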
  if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
      (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
    return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(Location(V1, V1Size, V1AAInfo),
               Location(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
  // GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
                                  V2, V2Size, V2AAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V1AAInfo,
                                     V2, V2Size, V2AAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2)
    if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *DL, *TLI)) ||
        (V2Size != UnknownSize && isObjectSize(O2, V2Size, *DL, *TLI)))
      return AliasCache[Locs] = PartialAlias;

  AliasResult Result =
      AliasAnalysis::alias(Location(V1, V1Size, V1AAInfo),
                           Location(V2, V2Size, V2AAInfo));
  return AliasCache[Locs] = Result;
}

bool BasicAliasAnalysis::isValueEqualInPotentialCycles(const Value *V,
                                                       const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Use dominance or loop info if available.
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(P->begin(), Inst, DT, LI))
      return false;

  return true;
}

/// GetIndexDifference - Dest and Src are the variable indices from two
/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
/// difference between the two pointers.
void BasicAliasAnalysis::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    ExtensionKind Extension = Src[i].Extension;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].Extension != Extension)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = { V, Extension, -Scale };
      Dest.push_back(Entry);
    }
  }
}