--- /dev/null
+//===- ScalarEvolutionAliasAnalysis.cpp - SCEV-based Alias Analysis -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ScalarEvolutionAliasAnalysis pass, which implements a
+// simple alias analysis in terms of ScalarEvolution queries.
+//
+// ScalarEvolution has a more complete understanding of pointer arithmetic
+// than BasicAliasAnalysis' collection of ad-hoc analyses.
+//
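+// The pass is registered as "scev-aa" and can be exercised with, for example,
+// "opt -scev-aa -aa-eval", as in the test included with this patch.
+//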
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
+using namespace llvm;
+
+namespace {
+ /// ScalarEvolutionAliasAnalysis - This is a simple alias analysis
+ /// implementation that uses ScalarEvolution to answer queries.
+ class VISIBILITY_HIDDEN ScalarEvolutionAliasAnalysis : public FunctionPass,
+ public AliasAnalysis {
+ ScalarEvolution *SE;
+
+ public:
+ static char ID; // Class identification, replacement for typeinfo
+ ScalarEvolutionAliasAnalysis() : FunctionPass(&ID), SE(0) {}
+
+ private:
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ virtual bool runOnFunction(Function &F);
+ virtual AliasResult alias(const Value *V1, unsigned V1Size,
+ const Value *V2, unsigned V2Size);
+
+ Value *GetUnderlyingIdentifiedObject(const SCEV *S);
+ };
+} // End of anonymous namespace
+
+// Register this pass...
+char ScalarEvolutionAliasAnalysis::ID = 0;
+static RegisterPass<ScalarEvolutionAliasAnalysis>
+X("scev-aa", "ScalarEvolution-based Alias Analysis", false, true);
+
+// Declare that we implement the AliasAnalysis interface
+static RegisterAnalysisGroup<AliasAnalysis> Y(X);
+
+FunctionPass *llvm::createScalarEvolutionAliasAnalysisPass() {
+ return new ScalarEvolutionAliasAnalysis();
+}
+
+void
+ScalarEvolutionAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequiredTransitive<ScalarEvolution>();
+ AU.setPreservesAll();
+ AliasAnalysis::getAnalysisUsage(AU);
+}
+
+bool
+ScalarEvolutionAliasAnalysis::runOnFunction(Function &F) {
+ InitializeAliasAnalysis(this);
+ SE = &getAnalysis<ScalarEvolution>();
+ return false;
+}
+
+Value *
+ScalarEvolutionAliasAnalysis::GetUnderlyingIdentifiedObject(const SCEV *S) {
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+ return GetUnderlyingIdentifiedObject(AR->getStart());
+ } else if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
+ // If there's a pointer operand, it'll be sorted at the end of the list.
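+    // That's why it's sufficient to examine only the last operand here.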
+ const SCEV *Last = A->getOperand(A->getNumOperands()-1);
+ if (isa<PointerType>(Last->getType()))
+ return GetUnderlyingIdentifiedObject(Last);
+ } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
+    // Determine if we've found an identified object.
+ Value *V = U->getValue();
+ if (isIdentifiedObject(V))
+ return V;
+ }
+  // No identified object found.
+ return 0;
+}
+
+AliasAnalysis::AliasResult
+ScalarEvolutionAliasAnalysis::alias(const Value *A, unsigned ASize,
+ const Value *B, unsigned BSize) {
+ // This is ScalarEvolutionAliasAnalysis. Get the SCEVs!
+ const SCEV *AS = SE->getSCEV(const_cast<Value *>(A));
+ const SCEV *BS = SE->getSCEV(const_cast<Value *>(B));
+
+ // If they evaluate to the same expression, it's a MustAlias.
+ if (AS == BS) return MustAlias;
+
+ // If something is known about the difference between the two addresses,
+ // see if it's enough to prove a NoAlias.
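+  // Specifically, if B - A is known to be at least ASize and A - B is known
+  // to be at least BSize (as unsigned values), then the accessed ranges
+  // [A, A+ASize) and [B, B+BSize) cannot overlap.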
+ if (SE->getEffectiveSCEVType(AS->getType()) ==
+ SE->getEffectiveSCEVType(BS->getType())) {
+ unsigned BitWidth = SE->getTypeSizeInBits(AS->getType());
+ APInt AI(BitWidth, ASize);
+ const SCEV *BA = SE->getMinusSCEV(BS, AS);
+ if (AI.ule(SE->getUnsignedRange(BA).getUnsignedMin())) {
+ APInt BI(BitWidth, BSize);
+ const SCEV *AB = SE->getMinusSCEV(AS, BS);
+ if (BI.ule(SE->getUnsignedRange(AB).getUnsignedMin()))
+ return NoAlias;
+ }
+ }
+
+ // If ScalarEvolution can find an underlying object, form a new query.
+ // The correctness of this depends on ScalarEvolution not recognizing
+ // inttoptr and ptrtoint operators.
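+  // When an underlying object is substituted for one of the pointers, pass
+  // ~0u as its size, since the original pointer may address any part of
+  // that object.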
+ Value *AO = GetUnderlyingIdentifiedObject(AS);
+ Value *BO = GetUnderlyingIdentifiedObject(BS);
+ if ((AO && AO != A) || (BO && BO != B))
+ if (alias(AO ? AO : A, AO ? ~0u : ASize,
+ BO ? BO : B, BO ? ~0u : BSize) == NoAlias)
+ return NoAlias;
+
+ // Forward the query to the next analysis.
+ return AliasAnalysis::alias(A, ASize, B, BSize);
+}
--- /dev/null
+; RUN: llvm-as < %s | opt -scev-aa -aa-eval -print-all-alias-modref-info \
+; RUN: |& FileCheck %s
+
+; At the time of this writing, all of these CHECK lines are cases that
+; plain -basicaa misses.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"
+
+; p[i] and p[i+1] don't alias.
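+; scev-aa proves this because the difference between the two addresses is a
+; constant 8 bytes, which is at least the size of each double access.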
+
+; CHECK: Function: loop: 3 pointers, 0 call sites
+; CHECK: NoAlias: double* %pi, double* %pi.next
+
+define void @loop(double* nocapture %p, i64 %n) nounwind {
+entry:
+ %j = icmp sgt i64 %n, 0
+ br i1 %j, label %bb, label %return
+
+bb:
+ %i = phi i64 [ 0, %entry ], [ %i.next, %bb ]
+ %pi = getelementptr double* %p, i64 %i
+ %i.next = add i64 %i, 1
+ %pi.next = getelementptr double* %p, i64 %i.next
+ %x = load double* %pi
+ %y = load double* %pi.next
+ %z = fmul double %x, %y
+ store double %z, double* %pi
+ %exitcond = icmp eq i64 %i.next, %n
+ br i1 %exitcond, label %return, label %bb
+
+return:
+ ret void
+}
+
+; Slightly more involved: p[j][i], p[j][i+1], and p[j+1][i] don't alias.
+
+; CHECK: Function: nestedloop: 4 pointers, 0 call sites
+; CHECK: NoAlias: double* %pi.j, double* %pi.next.j
+; CHECK: NoAlias: double* %pi.j, double* %pi.j.next
+; CHECK: NoAlias: double* %pi.j.next, double* %pi.next.j
+
+define void @nestedloop(double* nocapture %p, i64 %m) nounwind {
+entry:
+ %k = icmp sgt i64 %m, 0
+ br i1 %k, label %guard, label %return
+
+guard:
+ %l = icmp sgt i64 91, 0
+ br i1 %l, label %outer.loop, label %return
+
+outer.loop:
+ %j = phi i64 [ 0, %guard ], [ %j.next, %outer.latch ]
+ br label %bb
+
+bb:
+ %i = phi i64 [ 0, %outer.loop ], [ %i.next, %bb ]
+ %i.next = add i64 %i, 1
+
+ %e = add i64 %i, %j
+ %pi.j = getelementptr double* %p, i64 %e
+ %f = add i64 %i.next, %j
+ %pi.next.j = getelementptr double* %p, i64 %f
+ %x = load double* %pi.j
+ %y = load double* %pi.next.j
+ %z = fmul double %x, %y
+ store double %z, double* %pi.j
+
+ %o = add i64 %j, 91
+ %g = add i64 %i, %o
+ %pi.j.next = getelementptr double* %p, i64 %g
+ %a = load double* %pi.j.next
+ %b = fmul double %x, %a
+ store double %b, double* %pi.j.next
+
+ %exitcond = icmp eq i64 %i.next, 91
+ br i1 %exitcond, label %outer.latch, label %bb
+
+outer.latch:
+ %j.next = add i64 %j, 91
+ %h = icmp eq i64 %j.next, %m
+ br i1 %h, label %return, label %outer.loop
+
+return:
+ ret void
+}
+
+; Even more involved: the same as nestedloop, but with a variable extent.
+; When n is 1, p[j+1][i] does alias p[j][i+1], and there's no way to
+; prove that n is greater than 1, so that relation will always be
+; MayAlias. The loop is guarded by an n > 0 test, though, so
+; p[j+1][i] and p[j][i] could in principle be determined to be NoAlias;
+; the analysis currently doesn't do that.
+; TODO: Make the analysis smarter and turn that MayAlias into a NoAlias.
+
+; CHECK: Function: nestedloop_more: 4 pointers, 0 call sites
+; CHECK: NoAlias: double* %pi.j, double* %pi.next.j
+; CHECK: MayAlias: double* %pi.j, double* %pi.j.next
+
+define void @nestedloop_more(double* nocapture %p, i64 %n, i64 %m) nounwind {
+entry:
+ %k = icmp sgt i64 %m, 0
+ br i1 %k, label %guard, label %return
+
+guard:
+ %l = icmp sgt i64 %n, 0
+ br i1 %l, label %outer.loop, label %return
+
+outer.loop:
+ %j = phi i64 [ 0, %guard ], [ %j.next, %outer.latch ]
+ br label %bb
+
+bb:
+ %i = phi i64 [ 0, %outer.loop ], [ %i.next, %bb ]
+ %i.next = add i64 %i, 1
+
+ %e = add i64 %i, %j
+ %pi.j = getelementptr double* %p, i64 %e
+ %f = add i64 %i.next, %j
+ %pi.next.j = getelementptr double* %p, i64 %f
+ %x = load double* %pi.j
+ %y = load double* %pi.next.j
+ %z = fmul double %x, %y
+ store double %z, double* %pi.j
+
+ %o = add i64 %j, %n
+ %g = add i64 %i, %o
+ %pi.j.next = getelementptr double* %p, i64 %g
+ %a = load double* %pi.j.next
+ %b = fmul double %x, %a
+ store double %b, double* %pi.j.next
+
+ %exitcond = icmp eq i64 %i.next, %n
+ br i1 %exitcond, label %outer.latch, label %bb
+
+outer.latch:
+ %j.next = add i64 %j, %n
+ %h = icmp eq i64 %j.next, %m
+ br i1 %h, label %return, label %outer.loop
+
+return:
+ ret void
+}
+
+; ScalarEvolution expands field offsets into constants, which allows it to
+; do aggressive analysis. Contrast this with BasicAA, which works by
+; recognizing GEP idioms.
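+; For example, with this datalayout %Z, %C, %X, and %Y in @foo below all refer
+; to offset 8 within the alloca %A, which is how scev-aa concludes they must
+; alias each other.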
+
+%struct.A = type { %struct.B, i32, i32 }
+%struct.B = type { double }
+
+; CHECK: Function: foo: 7 pointers, 0 call sites
+; CHECK: NoAlias: %struct.B* %B, i32* %Z
+; CHECK: NoAlias: %struct.B* %B, %struct.B* %C
+; CHECK: MustAlias: %struct.B* %C, i32* %Z
+; CHECK: NoAlias: %struct.B* %B, i32* %X
+; CHECK: MustAlias: i32* %X, i32* %Z
+; CHECK: MustAlias: %struct.B* %C, i32* %Y
+; CHECK: MustAlias: i32* %X, i32* %Y
+
+define void @foo() {
+entry:
+ %A = alloca %struct.A
+ %B = getelementptr %struct.A* %A, i32 0, i32 0
+ %Q = bitcast %struct.B* %B to %struct.A*
+ %Z = getelementptr %struct.A* %Q, i32 0, i32 1
+ %C = getelementptr %struct.B* %B, i32 1
+ %X = bitcast %struct.B* %C to i32*
+ %Y = getelementptr %struct.A* %A, i32 0, i32 1
+ ret void
+}
+
+; CHECK: Function: bar: 7 pointers, 0 call sites
+; CHECK: NoAlias: %struct.B* %N, i32* %P
+; CHECK: NoAlias: %struct.B* %N, %struct.B* %R
+; CHECK: MustAlias: %struct.B* %R, i32* %P
+; CHECK: NoAlias: %struct.B* %N, i32* %W
+; CHECK: MustAlias: i32* %P, i32* %W
+; CHECK: MustAlias: %struct.B* %R, i32* %V
+; CHECK: MustAlias: i32* %V, i32* %W
+
+define void @bar() {
+ %M = alloca %struct.A
+ %N = getelementptr %struct.A* %M, i32 0, i32 0
+ %O = bitcast %struct.B* %N to %struct.A*
+ %P = getelementptr %struct.A* %O, i32 0, i32 1
+ %R = getelementptr %struct.B* %N, i32 1
+ %W = bitcast %struct.B* %R to i32*
+ %V = getelementptr %struct.A* %M, i32 0, i32 1
+ ret void
+}
+
+; CHECK: 13 no alias responses
+; CHECK: 26 may alias responses
+; CHECK: 18 must alias responses