UnknownModRefBehavior = Anywhere | ModRef
};
- /// Get the location associated with a pointer argument of a callsite.
- /// The mask bits are set to indicate the allowed aliasing ModRef kinds.
- /// Note that these mask bits do not necessarily account for the overall
- /// behavior of the function, but rather only provide additional
- /// per-argument information.
- virtual Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- ModRefResult &Mask);
+ /// Get the ModRef info associated with a pointer argument of a callsite. The
+ /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
+ /// that these bits do not necessarily account for the overall behavior of
+ /// the function, but rather only provide additional per-argument
+ /// information.
+ virtual ModRefResult getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
/// getModRefBehavior - Return the behavior when calling the given call site.
virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);
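As the new doc comment notes, the per-argument bits do not account for the callee's overall behavior, so a caller can tighten them with a separate query. A minimal sketch under that assumption (the helper name is hypothetical; the queries are existing AliasAnalysis methods):

  // Hypothetical helper: narrow the per-argument answer using the callsite's
  // overall ModRef behavior. If the callee only reads memory, any Mod bit
  // reported for an argument cannot actually occur.
  static AliasAnalysis::ModRefResult
  getTightArgModRefInfo(AliasAnalysis &AA, ImmutableCallSite CS,
                        unsigned ArgIdx) {
    AliasAnalysis::ModRefResult MR = AA.getArgModRefInfo(CS, ArgIdx);
    if (AA.onlyReadsMemory(CS))
      MR = AliasAnalysis::ModRefResult(MR & AliasAnalysis::Ref);
    return MR;
  }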
class StoreInst;
class MemTransferInst;
class MemIntrinsic;
+class TargetLibraryInfo;
/// Representation for a specific memory location.
///
/// Return a location representing the destination of a memory set or
/// transfer.
static MemoryLocation getForDest(const MemIntrinsic *MI);
+ /// Return a location representing a particular argument of a call.
+ static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx,
+ const TargetLibraryInfo &TLI);
+
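A usage sketch for the new helper, assuming a pass that already has TargetLibraryInfo at hand (variable names are illustrative):

  // For a call like memcpy(dst, src, 16), this yields a location for the
  // source operand with an exact 16-byte size; for a non-constant length it
  // falls back to UnknownSize.
  MemoryLocation SrcLoc = MemoryLocation::getForArgument(CS, /*ArgIdx=*/1, TLI);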
explicit MemoryLocation(const Value *Ptr = nullptr,
uint64_t Size = UnknownSize,
const AAMDNodes &AATags = AAMDNodes())
return AA->pointsToConstantMemory(Loc, OrLocal);
}
-AliasAnalysis::Location
-AliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- AliasAnalysis::ModRefResult &Mask) {
+AliasAnalysis::ModRefResult
+AliasAnalysis::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->getArgLocation(CS, ArgIdx, Mask);
+ return AA->getArgModRefInfo(CS, ArgIdx);
}
void AliasAnalysis::deleteValue(Value *V) {
const Value *Arg = *AI;
if (!Arg->getType()->isPointerTy())
continue;
- ModRefResult ArgMask;
- Location CSLoc =
- getArgLocation(CS, (unsigned) std::distance(CS.arg_begin(), AI),
- ArgMask);
- if (!isNoAlias(CSLoc, Loc)) {
+ unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
+ Location ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, *TLI);
+ if (!isNoAlias(ArgLoc, Loc)) {
+ ModRefResult ArgMask = getArgModRefInfo(CS, ArgIdx);
doesAlias = true;
AllArgsMask = ModRefResult(AllArgsMask | ArgMask);
}
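The accumulation above is a join over the two-bit ModRef lattice; for reference, the values come from the existing ModRefResult enum:

  // NoModRef = 0, Ref = 1, Mod = 2, ModRef = Ref | Mod = 3
  // Bitwise-or is therefore the lattice join, e.g. combining one argument
  // that is only read with another that is only written:
  //   ModRefResult(Ref | Mod) == ModRef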
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
continue;
- ModRefResult ArgMask;
- Location CS2Loc =
- getArgLocation(CS2, (unsigned) std::distance(CS2.arg_begin(), I),
- ArgMask);
- // ArgMask indicates what CS2 might do to CS2Loc, and the dependence of
+ unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
+ Location CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, *TLI);
+
+ // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence of
// CS1 on that location is the inverse.
+ ModRefResult ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
if (ArgMask == Mod)
ArgMask = ModRef;
else if (ArgMask == Ref)
ArgMask = Mod;
- R = ModRefResult((R | (getModRefInfo(CS1, CS2Loc) & ArgMask)) & Mask);
+ R = ModRefResult((R | (getModRefInfo(CS1, CS2ArgLoc) & ArgMask)) & Mask);
if (R == Mask)
break;
}
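The Mod/Ref inversion above can be stated as a standalone rule; a sketch (the function name is hypothetical, the logic mirrors the code):

  // If CS2 writes (Mod) the location, any access by CS1 conflicts -> ModRef.
  // If CS2 only reads (Ref), only a write by CS1 can conflict -> Mod.
  static AliasAnalysis::ModRefResult
  invertDependence(AliasAnalysis::ModRefResult What) {
    if (What == AliasAnalysis::Mod)
      return AliasAnalysis::ModRef;
    if (What == AliasAnalysis::Ref)
      return AliasAnalysis::Mod;
    return What; // NoModRef and ModRef pass through unchanged.
  }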
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
continue;
- ModRefResult ArgMask;
- Location CS1Loc = getArgLocation(
- CS1, (unsigned)std::distance(CS1.arg_begin(), I), ArgMask);
- // ArgMask indicates what CS1 might do to CS1Loc; if CS1 might Mod
- // CS1Loc, then we care about either a Mod or a Ref by CS2. If CS1
+ unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
+ Location CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, *TLI);
+
+ // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
+ // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
// might Ref, then we care only about a Mod by CS2.
- ModRefResult ArgR = getModRefInfo(CS2, CS1Loc);
+ ModRefResult ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
+ ModRefResult ArgR = getModRefInfo(CS2, CS1ArgLoc);
if (((ArgMask & Mod) != NoModRef && (ArgR & ModRef) != NoModRef) ||
((ArgMask & Ref) != NoModRef && (ArgR & Mod) != NoModRef))
R = ModRefResult((R | ArgMask) & Mask);
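One worked instance of the condition above (illustrative): two callsites that only read the shared memory never add a dependence:

  // ArgMask == Ref (CS1 only reads its argument)
  // ArgR    == Ref (CS2 only reads that memory)
  //   (ArgMask & Mod) == NoModRef  -> first clause is false
  //   (ArgR & Mod)    == NoModRef  -> second clause is false
  // so R is left unchanged: a read/read pair is never a conflict.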
bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
- /// Get the location associated with a pointer argument of a callsite.
- Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- ModRefResult &Mask) override;
+ /// Get the ModRef info associated with a pointer argument of a callsite.
+ ModRefResult getArgModRefInfo(ImmutableCallSite CS,
+ unsigned ArgIdx) override;
/// getModRefBehavior - Return the behavior when calling the given
/// call site.
return Worklist.empty();
}
+// FIXME: This code is duplicated with MemoryLocation and should be hoisted to
+// some common utility location.
static bool isMemsetPattern16(const Function *MS,
const TargetLibraryInfo &TLI) {
if (TLI.has(LibFunc::memset_pattern16) &&
return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}
-AliasAnalysis::Location
-BasicAliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- ModRefResult &Mask) {
- Location Loc = AliasAnalysis::getArgLocation(CS, ArgIdx, Mask);
- const TargetLibraryInfo &TLI =
- getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
- const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
- if (II != nullptr)
+AliasAnalysis::ModRefResult
+BasicAliasAnalysis::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction()))
switch (II->getIntrinsicID()) {
- default: break;
+ default:
+ break;
case Intrinsic::memset:
case Intrinsic::memcpy:
- case Intrinsic::memmove: {
+ case Intrinsic::memmove:
assert((ArgIdx == 0 || ArgIdx == 1) &&
"Invalid argument index for memory intrinsic");
- if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
- Loc.Size = LenCI->getZExtValue();
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Memory intrinsic location pointer not argument?");
- Mask = ArgIdx ? Ref : Mod;
- break;
- }
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- case Intrinsic::invariant_start: {
- assert(ArgIdx == 1 && "Invalid argument index");
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Intrinsic location pointer not argument?");
- Loc.Size = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
- break;
- }
- case Intrinsic::invariant_end: {
- assert(ArgIdx == 2 && "Invalid argument index");
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Intrinsic location pointer not argument?");
- Loc.Size = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
- break;
- }
- case Intrinsic::arm_neon_vld1: {
- assert(ArgIdx == 0 && "Invalid argument index");
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Intrinsic location pointer not argument?");
- // LLVM's vld1 and vst1 intrinsics currently only support a single
- // vector register.
- if (DL)
- Loc.Size = DL->getTypeStoreSize(II->getType());
- break;
- }
- case Intrinsic::arm_neon_vst1: {
- assert(ArgIdx == 0 && "Invalid argument index");
- assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
- "Intrinsic location pointer not argument?");
- if (DL)
- Loc.Size = DL->getTypeStoreSize(II->getArgOperand(1)->getType());
- break;
- }
+ return ArgIdx ? Ref : Mod;
}
// We can bound the aliasing properties of memset_pattern16 just as we can
// for memcpy/memset. This is particularly important because the
// LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
// whenever possible.
- else if (CS.getCalledFunction() &&
- isMemsetPattern16(CS.getCalledFunction(), TLI)) {
+ if (CS.getCalledFunction() &&
+ isMemsetPattern16(CS.getCalledFunction(), *TLI)) {
assert((ArgIdx == 0 || ArgIdx == 1) &&
"Invalid argument index for memset_pattern16");
- if (ArgIdx == 1)
- Loc.Size = 16;
- else if (const ConstantInt *LenCI =
- dyn_cast<ConstantInt>(CS.getArgument(2)))
- Loc.Size = LenCI->getZExtValue();
- assert(Loc.Ptr == CS.getArgument(ArgIdx) &&
- "memset_pattern16 location pointer not argument?");
- Mask = ArgIdx ? Ref : Mod;
+ return ArgIdx ? Ref : Mod;
}
// FIXME: Handle memset_pattern4 and memset_pattern8 also.
- return Loc;
+ return AliasAnalysis::getArgModRefInfo(CS, ArgIdx);
}
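Concretely, for a memory-transfer intrinsic the function above reports the destination as written and the source as read; expected results for an illustrative call (IR operands are hypothetical):

  // call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, ...)
  AA.getArgModRefInfo(CS, 0); // Mod: %dst is only written
  AA.getArgModRefInfo(CS, 1); // Ref: %src is only read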
static bool isAssumeIntrinsic(ImmutableCallSite CS) {
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MemoryLocation.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
return MemoryLocation(MTI->getRawDest(), Size, AATags);
}
+
+// FIXME: This code is duplicated with BasicAliasAnalysis and should be hoisted
+// to some common utility location.
+static bool isMemsetPattern16(const Function *MS,
+ const TargetLibraryInfo &TLI) {
+ if (TLI.has(LibFunc::memset_pattern16) &&
+ MS->getName() == "memset_pattern16") {
+ FunctionType *MemsetType = MS->getFunctionType();
+ if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
+ isa<PointerType>(MemsetType->getParamType(0)) &&
+ isa<PointerType>(MemsetType->getParamType(1)) &&
+ isa<IntegerType>(MemsetType->getParamType(2)))
+ return true;
+ }
+
+ return false;
+}
+
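For reference, the C signature the predicate above is matching is the Darwin libc helper, which this patch only recognizes, not introduces:

  // void memset_pattern16(void *b, const void *pattern16, size_t len);
  // i.e. exactly three parameters (pointer, pointer, integer) and no varargs.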
+MemoryLocation MemoryLocation::getForArgument(ImmutableCallSite CS,
+ unsigned ArgIdx,
+ const TargetLibraryInfo &TLI) {
+ AAMDNodes AATags;
+ CS->getAAMetadata(AATags);
+ const Value *Arg = CS.getArgument(ArgIdx);
+
+ // We may be able to produce an exact size for known intrinsics.
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
+ const DataLayout &DL = II->getModule()->getDataLayout();
+
+ switch (II->getIntrinsicID()) {
+ default:
+ break;
+ case Intrinsic::memset:
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ assert((ArgIdx == 0 || ArgIdx == 1) &&
+ "Invalid argument index for memory intrinsic");
+ if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
+ return MemoryLocation(Arg, LenCI->getZExtValue(), AATags);
+ break;
+
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start:
+ assert(ArgIdx == 1 && "Invalid argument index");
+ return MemoryLocation(
+ Arg, cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AATags);
+
+ case Intrinsic::invariant_end:
+ assert(ArgIdx == 2 && "Invalid argument index");
+ return MemoryLocation(
+ Arg, cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AATags);
+
+ case Intrinsic::arm_neon_vld1:
+ assert(ArgIdx == 0 && "Invalid argument index");
+ // LLVM's vld1 and vst1 intrinsics currently only support a single
+ // vector register.
+ return MemoryLocation(Arg, DL.getTypeStoreSize(II->getType()), AATags);
+
+ case Intrinsic::arm_neon_vst1:
+ assert(ArgIdx == 0 && "Invalid argument index");
+ return MemoryLocation(
+ Arg, DL.getTypeStoreSize(II->getArgOperand(1)->getType()), AATags);
+ }
+ }
+
+ // We can bound the aliasing properties of memset_pattern16 just as we can
+ // for memcpy/memset. This is particularly important because the
+ // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
+ // whenever possible.
+ if (CS.getCalledFunction() &&
+ isMemsetPattern16(CS.getCalledFunction(), TLI)) {
+ assert((ArgIdx == 0 || ArgIdx == 1) &&
+ "Invalid argument index for memset_pattern16");
+ if (ArgIdx == 1)
+ return MemoryLocation(Arg, 16, AATags);
+ if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
+ return MemoryLocation(Arg, LenCI->getZExtValue(), AATags);
+ }
+ // FIXME: Handle memset_pattern4 and memset_pattern8 also.
+
+ return MemoryLocation(Arg, UnknownSize, AATags);
+}
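A sketch of the sizes this produces for two illustrative calls (IR operands are hypothetical):

  // call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 32, i32 1, i1 false)
  MemoryLocation::getForArgument(CS, 0, TLI); // (%p, Size = 32)
  // call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 %len, i32 1, i1 false)
  MemoryLocation::getForArgument(CS, 0, TLI); // (%p, Size = UnknownSize)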
bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override {
return false;
}
- Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- ModRefResult &Mask) override {
- Mask = ModRef;
- AAMDNodes AATags;
- CS->getAAMetadata(AATags);
- return Location(CS.getArgument(ArgIdx), UnknownSize, AATags);
+ ModRefResult getArgModRefInfo(ImmutableCallSite CS,
+ unsigned ArgIdx) override {
+ // Conservatively assume any pointer argument may be both read and written.
+ return ModRef;
}
ModRefResult getModRefInfo(ImmutableCallSite CS,