//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

16 #include "llvm/Analysis/BasicAliasAnalysis.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/CFG.h"
21 #include "llvm/Analysis/CaptureTracking.h"
22 #include "llvm/Analysis/InstructionSimplify.h"
23 #include "llvm/Analysis/LoopInfo.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/ValueTracking.h"
26 #include "llvm/IR/Constants.h"
27 #include "llvm/IR/DataLayout.h"
28 #include "llvm/IR/DerivedTypes.h"
29 #include "llvm/IR/Dominators.h"
30 #include "llvm/IR/GlobalAlias.h"
31 #include "llvm/IR/GlobalVariable.h"
32 #include "llvm/IR/Instructions.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/LLVMContext.h"
35 #include "llvm/IR/Operator.h"
36 #include "llvm/Pass.h"
37 #include "llvm/Support/ErrorHandling.h"
/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. This affects the precision of basic alias analysis.
#define DEBUG_TYPE "basicaa"
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(). Both functions need to use the same search depth;
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V, or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  //  Consider this example:
  //     char *p = (char*)malloc(100)
  //     char *q = p+80;
  //
  //  In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  //  However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". In case q is passed to isObjectSmallerThan() as the 1st
  // parameter, before llvm::getObjectSize() is called to get the size of the
  // entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// reports whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
/*static*/ Value *BasicAliasAnalysis::GetLinearExpression(
    Value *V, APInt &Scale, APInt &Offset, ExtensionKind &Extension,
    const DataLayout &DL, unsigned Depth, AssumptionCache *AC,
    DominatorTree *DT) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      switch (BOp->getOpcode()) {
      default:
        break;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          break;
      // FALL THROUGH.
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset += RHSC->getValue();
        return V;
      case Instruction::Mul:
        // (Scale*V + Offset) * C == (Scale*C)*V + Offset*C.
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset *= RHSC->getValue();
        Scale *= RHSC->getValue();
        return V;
      case Instruction::Shl:
        // Shifting left by C multiplies both the scale and the offset by 2^C.
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset <<= RHSC->getValue().getLimitedValue();
        Scale <<= RHSC->getValue().getLimitedValue();
        return V;
      }
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
      (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned OldWidth = Scale.getBitWidth();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    Scale = Scale.trunc(SmallWidth);
    Offset = Offset.trunc(SmallWidth);
    Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;

    Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension, DL,
                                        Depth + 1, AC, DT);
    Scale = Scale.zext(OldWidth);
    Offset = Offset.zext(OldWidth);

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
/*static*/ const Value *BasicAliasAnalysis::DecomposeGEPExpression(
    const Value *V, int64_t &BaseOffs,
    SmallVectorImpl<VariableGEPIndex> &VarIndices, bool &MaxLookupReached,
    const DataLayout &DL, AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  MaxLookupReached = false;
  SearchTimes++;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator cases we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->mayBeOverridden()) {
          V = GA->getAliasee();
          continue;
        }
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      return V;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getOperand(0)->getType()->getPointerElementType()->isSized())
      return V;

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I) {
      Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        BaseOffs += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        BaseOffs += DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = DL.getTypeAllocSize(*GTI);
      ExtensionKind Extension = EK_NotExtended;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (DL.getPointerSizeInBits(AS) > Width)
        Extension = EK_SignExt;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension, DL,
                                  0, AC, DT);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();
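      // E.g. for a GEP index decomposed as 2*V+1 over 4-byte elements, this
      // folds the constant part into BaseOffs (+4) and leaves V with Scale 8.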

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
        if (VarIndices[i].V == Index && VarIndices[i].Extension == Extension) {
          Scale += VarIndices[i].Scale;
          VarIndices.erase(VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      if (unsigned ShiftBits = 64 - DL.getPointerSizeInBits(AS)) {
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }
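      // (The shl/ashr pair sign-extends Scale from the pointer width to 64
      // bits, discarding bits that would not fit in the address space.)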

      if (Scale) {
        VariableGEPIndex Entry = {Index, Extension,
                                  static_cast<int64_t>(Scale)};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  MaxLookupReached = true;
  SearchLimitReached++;
  return V;
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

// Register the pass...
char BasicAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS_BEGIN(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                         "Basic Alias Analysis (stateless AA impl)", false,
                         true, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_PASS_END(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                       "Basic Alias Analysis (stateless AA impl)", false, true,
                       false)

ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
                                                bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), *DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

// FIXME: This code is duplicated with MemoryLocation and should be hoisted to
// some common utility location.
static bool isMemsetPattern16(const Function *MS,
                              const TargetLibraryInfo &TLI) {
  if (TLI.has(LibFunc::memset_pattern16) &&
      MS->getName() == "memset_pattern16") {
    FunctionType *MemsetType = MS->getFunctionType();
    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
        isa<PointerType>(MemsetType->getParamType(0)) &&
        isa<PointerType>(MemsetType->getParamType(1)) &&
        isa<IntegerType>(MemsetType->getParamType(2)))
      return true;
  }
  return false;
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior
BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);

  // The AliasAnalysis base class has some smarts; let's use them.
  return FunctionModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior
BasicAliasAnalysis::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);

  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  if (isMemsetPattern16(F, TLI))
    Min = FMRB_OnlyAccessesArgumentPointees;

  // Otherwise be conservative.
  return FunctionModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}

ModRefInfo BasicAliasAnalysis::getArgModRefInfo(ImmutableCallSite CS,
                                                unsigned ArgIdx) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction()))
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memory intrinsic");
      return ArgIdx ? MRI_Ref : MRI_Mod;
    }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  if (CS.getCalledFunction() &&
      isMemsetPattern16(CS.getCalledFunction(), *TLI)) {
    assert((ArgIdx == 0 || ArgIdx == 1) &&
           "Invalid argument index for memset_pattern16");
    return ArgIdx ? MRI_Ref : MRI_Mod;
  }
  // FIXME: Handle memset_pattern4 and memset_pattern8 also.

  return AliasAnalysis::getArgModRefInfo(CS, ArgIdx);
}

static bool isAssumeIntrinsic(ImmutableCallSite CS) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II && II->getIntrinsicID() == Intrinsic::assume)
    return true;

  return false;
}

bool BasicAliasAnalysis::doInitialization(Module &M) {
  InitializeAliasAnalysis(this, &M.getDataLayout());
  return true;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
                                             const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, *DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know that
  // the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return MRI_NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(MemoryLocation(*CI), MemoryLocation(Object))) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return MRI_NoModRef;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS))
    return MRI_NoModRef;

  // The AliasAnalysis base class has some smarts; let's use them.
  return AliasAnalysis::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS1,
                                             ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS1) || isAssumeIntrinsic(CS2))
    return MRI_NoModRef;

  // The AliasAnalysis base class has some smarts; let's use them.
  return AliasAnalysis::getModRefInfo(CS1, CS2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            uint64_t V1Size,
                                            const GEPOperator *GEP2,
                                            uint64_t V2Size,
                                            const DataLayout &DL) {

  assert(GEP1->getPointerOperand() == GEP2->getPointerOperand() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices aren't constants, we can't say anything.
  // If they're identical, the other indices might also be dynamically
  // equal, so the GEPs can alias.
  if (!C1 || !C2 || C1 == C2)
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  StructType *LastIndexedStruct =
      dyn_cast<StructType>(GetElementPtrInst::getIndexedType(
          GEP1->getSourceElementType(), IntermediateIndices));

  if (!LastIndexedStruct)
    return MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.
  //
  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };
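  // (The last clause lets the higher-offset access run past the end of the
  // struct into the next element of an enclosing array, as long as the
  // overhang still ends before the lower-offset field begins.)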

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAliasAnalysis::aliasGEP(
    const GEPOperator *GEP1, uint64_t V1Size, const AAMDNodes &V1AAInfo,
    const Value *V2, uint64_t V2Size, const AAMDNodes &V2AAInfo,
    const Value *UnderlyingV1, const Value *UnderlyingV2) {
  int64_t GEP1BaseOffset;
  bool GEP1MaxLookupReached;
  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;

  // We have to get two AssumptionCaches here because GEP1 and V2 may be from
  // different functions.
  // FIXME: This really doesn't make any sense. We get a dominator tree below
  // that can only refer to a single function. But this function (aliasGEP) is
  // a method on an immutable pass that can be called when there *isn't*
  // a single function. The old pass management layer makes this "work", but
  // this isn't really a clean solution.
  AssumptionCacheTracker &ACT = getAnalysis<AssumptionCacheTracker>();
  AssumptionCache *AC1 = nullptr, *AC2 = nullptr;
  if (auto *GEP1I = dyn_cast<Instruction>(GEP1))
    AC1 = &ACT.getAssumptionCache(
        const_cast<Function &>(*GEP1I->getParent()->getParent()));
  if (auto *I2 = dyn_cast<Instruction>(V2))
    AC2 = &ACT.getAssumptionCache(
        const_cast<Function &>(*I2->getParent()->getParent()));

  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;

  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about the
        // relation of the resulting pointer.
        int64_t GEP2BaseOffset;
        bool GEP2MaxLookupReached;
        SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
        const Value *GEP2BasePtr =
            DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                                   GEP2MaxLookupReached, *DL, AC2, DT);
        const Value *GEP1BasePtr =
            DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                                   GEP1MaxLookupReached, *DL, AC1, DT);
        // DecomposeGEPExpression and GetUnderlyingObject should return the
        // same result except when DecomposeGEPExpression has no DataLayout.
        if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
          assert(!DL &&
                 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
          return MayAlias;
        }
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            GEP1VariableIndices == GEP2VariableIndices)
          return NoAlias;
        GEP1VariableIndices.clear();
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias)
      return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, *DL, AC1, DT);

    int64_t GEP2BaseOffset;
    bool GEP2MaxLookupReached;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
        DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                               GEP2MaxLookupReached, *DL, AC2, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
      assert(!DL && "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }

    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (DL && GEP1->getPointerOperand() == GEP2->getPointerOperand()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, *DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);
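    // From here on, GEP1BaseOffset and GEP1VariableIndices describe the
    // symbolic difference GEP1 - GEP2.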

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.
    //
    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, V2Size, V2AAInfo);
    if (R != MustAlias)
      // If V2 may alias GEP base pointer, conservatively returns MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "A pointer value formed from a
      // getelementptr instruction is associated with the addresses associated
      // with the first operand of the getelementptr".
      return R;

    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, *DL, AC1, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1) {
      assert(!DL && "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }
    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...).
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  // Try to distinguish something like &A[i][1] against &A[42][0].
  // Grab the least significant bit set in any of the scales.
  if (!GEP1VariableIndices.empty()) {
    uint64_t Modulo = 0;
    for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i)
      Modulo |= (uint64_t)GEP1VariableIndices[i].Scale;
    Modulo = Modulo ^ (Modulo & (Modulo - 1));
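    // (x ^ (x & (x - 1)) clears all but the lowest set bit, so Modulo is now
    // the largest power of two dividing every variable scale; every symbolic
    // term of the difference is therefore a multiple of Modulo.)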

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult BasicAliasAnalysis::aliasSelect(const SelectInst *SI,
                                            uint64_t SISize,
                                            const AAMDNodes &SIAAInfo,
                                            const Value *V2, uint64_t V2Size,
                                            const AAMDNodes &V2AAInfo) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(), SISize, SIAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another pointer.
AliasResult BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
                                         const AAMDNodes &PNAAInfo,
                                         const Value *V2, uint64_t V2Size,
                                         const AAMDNodes &V2AAInfo) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                   MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
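      // (Ordering the pair by pointer value gives a canonical cache key, so
      // the (PN, V2) and (V2, PN) queries share one AliasCache entry.)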
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value *, 4> UniqueSrc;
  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  for (Value *PV1 : PN->incoming_values()) {
    if (isa<PHINode>(PV1))
      // If any of the source itself is a PHI, return MayAlias conservatively
      // to avoid compile time explosion. The worst possible case is if both
      // sides are PHI nodes. In which case, this is O(m x n) time where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;

    if (EnableRecPhiAnalysis)
      if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
        // Check whether the incoming value is a GEP that advances the pointer
        // result of this PHI node (e.g. in a loop). If this is the case, we
        // would recurse and always get a MayAlias. Handle this case specially
        // below.
        if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
            isa<ConstantInt>(PV1GEP->idx_begin())) {
          isRecursive = true;
          continue;
        }
      }

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = MemoryLocation::UnknownSize;

  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize, PNAAInfo);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
                                           AAMDNodes V1AAInfo, const Value *V2,
                                           uint64_t V2Size,
                                           AAMDNodes V2AAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, *DL, MaxLookupSearchDepth);
  const Value *O2 = GetUnderlyingObject(V2, *DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so they
  // don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if ((V1Size != MemoryLocation::UnknownSize &&
       isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
      (V2Size != MemoryLocation::UnknownSize &&
       isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
    return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
               MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
  // GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if ((V1Size != MemoryLocation::UnknownSize &&
       isObjectSize(O1, V1Size, *DL, *TLI)) ||
      (V2Size != MemoryLocation::UnknownSize &&
       isObjectSize(O2, V2Size, *DL, *TLI)))
    return AliasCache[Locs] = PartialAlias;

  AliasResult Result =
      AliasAnalysis::alias(MemoryLocation(V1, V1Size, V1AAInfo),
                           MemoryLocation(V2, V2Size, V2AAInfo));
  return AliasCache[Locs] = Result;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAliasAnalysis::isValueEqualInPotentialCycles(const Value *V,
                                                       const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Use dominance or loop info if available.
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(P->begin(), Inst, DT, LI))
      return false;

  return true;
}

/// Computes the symbolic difference between two de-composed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
void BasicAliasAnalysis::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    ExtensionKind Extension = Src[i].Extension;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].Extension != Extension)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = {V, Extension, -Scale};
      Dest.push_back(Entry);
    }
  }
}