return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI);
}
-/// Return true if Value is always a dereferenceable pointer.
-///
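+/// Returns true if the memory at \p Base + \p Offset is known to be aligned
+/// to at least \p Align bytes. The alignment of \p Base is taken from its
+/// alloca, global variable or argument definition when available; otherwise
+/// the ABI alignment of its pointee type is used.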
+static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
+ const DataLayout &DL) {
+ APInt BaseAlign(Offset.getBitWidth(), 0);
+ if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base))
+ BaseAlign = AI->getAlignment();
+ else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base))
+ BaseAlign = GV->getAlignment();
+ else if (const Argument *A = dyn_cast<Argument>(Base))
+ BaseAlign = A->getParamAlignment();
+
+ if (!BaseAlign) {
+ Type *Ty = Base->getType()->getPointerElementType();
+ BaseAlign = DL.getABITypeAlignment(Ty);
+ }
+
+ APInt Alignment(Offset.getBitWidth(), Align);
+
+ assert(Alignment.isPowerOf2() && "must be a power of 2!");
+ return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
+}
+
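+/// Returns true if \p Base itself (a zero-offset access through it) is known
+/// to be aligned to at least \p Align bytes.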
+static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
+ APInt Offset(DL.getTypeStoreSizeInBits(Base->getType()), 0);
+ return isAligned(Base, Offset, Align, DL);
+}
+
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
-static bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
- const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI,
- SmallPtrSetImpl<const Value *> &Visited) {
+static bool isDereferenceableAndAlignedPointer(
+ const Value *V, unsigned Align, const DataLayout &DL,
+ const Instruction *CtxI, const DominatorTree *DT,
+ const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
// Note that it is not safe to speculate into a malloc'd region because
// malloc may return null.
- // These are obviously ok.
- if (isa<AllocaInst>(V)) return true;
+ // These are obviously ok if aligned.
+ if (isa<AllocaInst>(V))
+ return isAligned(V, Align, DL);
// It's not always safe to follow a bitcast, for example:
// bitcast i8* (alloca i8) to i32*
if (STy->isSized() && DTy->isSized() &&
(DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
(DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
- return isDereferenceablePointer(BC->getOperand(0), DL, CtxI,
- DT, TLI, Visited);
+ return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, DL,
+ CtxI, DT, TLI, Visited);
}
// Global variables which can't collapse to null are ok.
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
- return !GV->hasExternalWeakLinkage();
+ if (!GV->hasExternalWeakLinkage())
+ return isAligned(V, Align, DL);
// byval arguments are okay.
if (const Argument *A = dyn_cast<Argument>(V))
if (A->hasByValAttr())
- return true;
-
+ return isAligned(V, Align, DL);
+
if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI))
- return true;
+ return isAligned(V, Align, DL);
// For GEPs, determine if the indexing lands within the allocated object.
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
Type *Ty = VTy->getPointerElementType();
const Value *Base = GEP->getPointerOperand();
- // Conservatively require that the base pointer be fully dereferenceable.
+ // Conservatively require that the base pointer be fully dereferenceable
+ // and aligned.
if (!Visited.insert(Base).second)
return false;
- if (!isDereferenceablePointer(Base, DL, CtxI,
- DT, TLI, Visited))
+ if (!isDereferenceableAndAlignedPointer(Base, Align, DL, CtxI, DT, TLI,
+ Visited))
return false;
-
+
APInt Offset(DL.getPointerTypeSizeInBits(VTy), 0);
if (!GEP->accumulateConstantOffset(DL, Offset))
return false;
-
- // Check if the load is within the bounds of the underlying object.
+
+    // Check if the load is within the bounds of the underlying object
+    // and the offset is aligned.
uint64_t LoadSize = DL.getTypeStoreSize(Ty);
Type *BaseType = Base->getType()->getPointerElementType();
- return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType));
+ assert(isPowerOf2_32(Align) && "must be a power of 2!");
+ return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)) &&
+ !(Offset & APInt(Offset.getBitWidth(), Align-1));
}
// For gc.relocate, look through relocations
if (const IntrinsicInst *I = dyn_cast<IntrinsicInst>(V))
if (I->getIntrinsicID() == Intrinsic::experimental_gc_relocate) {
GCRelocateOperands RelocateInst(I);
- return isDereferenceablePointer(RelocateInst.getDerivedPtr(), DL, CtxI,
- DT, TLI, Visited);
+ return isDereferenceableAndAlignedPointer(
+ RelocateInst.getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
}
if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
- return isDereferenceablePointer(ASC->getOperand(0), DL, CtxI,
- DT, TLI, Visited);
+ return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
+ CtxI, DT, TLI, Visited);
// If we don't know, assume the worst.
return false;
}
-bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
- const Instruction *CtxI,
- const DominatorTree *DT,
- const TargetLibraryInfo *TLI) {
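+/// Returns true if V is always dereferenceable for a simple load or store and
+/// is aligned to at least Align bytes; an Align of zero requests the ABI
+/// alignment of the pointee type.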
+bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+ const DataLayout &DL,
+ const Instruction *CtxI,
+ const DominatorTree *DT,
+ const TargetLibraryInfo *TLI) {
// When dereferenceability information is provided by a dereferenceable
// attribute, we know exactly how many bytes are dereferenceable. If we can
// determine the exact offset to the attributed variable, we can use that
// information here.
Type *VTy = V->getType();
Type *Ty = VTy->getPointerElementType();
+
+  // Require the ABI alignment for loads that do not specify an alignment.
+ if (Align == 0)
+ Align = DL.getABITypeAlignment(Ty);
+
if (Ty->isSized()) {
APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
-
+
if (Offset.isNonNegative())
- if (isDereferenceableFromAttribute(BV, Offset, Ty, DL,
- CtxI, DT, TLI))
+ if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI) &&
+ isAligned(BV, Offset, Align, DL))
return true;
}
SmallPtrSet<const Value *, 32> Visited;
- return ::isDereferenceablePointer(V, DL, CtxI, DT, TLI, Visited);
+ return ::isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI,
+ Visited);
+}
+
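+/// A dereferenceability query without an alignment requirement is answered
+/// as an alignment-1 query, which any dereferenceable pointer satisfies.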
+bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
+ const Instruction *CtxI,
+ const DominatorTree *DT,
+ const TargetLibraryInfo *TLI) {
+ return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
}
bool llvm::isSafeToSpeculativelyExecute(const Value *V,
LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
return false;
const DataLayout &DL = LI->getModule()->getDataLayout();
- return isDereferenceablePointer(LI->getPointerOperand(), DL, CtxI, DT, TLI);
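+    // A speculated load must also satisfy its stated alignment.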
+ return isDereferenceableAndAlignedPointer(
+ LI->getPointerOperand(), LI->getAlignment(), DL, CtxI, DT, TLI);
}
case Instruction::Call: {
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
%struct.A = type { [8 x i8], [5 x i8] }
@globalstruct = external global %struct.A
-define void @test(i32 addrspace(1)* dereferenceable(8) %dparam) gc "statepoint-example" {
+@globalptr.align1 = external global i8, align 1
+@globalptr.align16 = external global i8, align 16
+
+define void @test(i32 addrspace(1)* dereferenceable(8) %dparam,
+ i8 addrspace(1)* dereferenceable(32) align 1 %dparam.align1,
+ i8 addrspace(1)* dereferenceable(32) align 16 %dparam.align16)
+ gc "statepoint-example" {
; CHECK: The following are dereferenceable:
-; CHECK: %globalptr
-; CHECK: %alloca
-; CHECK: %dparam
-; CHECK: %relocate
-; CHECK-NOT: %nparam
-; CHECK-NOT: %nd_load
-; CHECK: %d4_load
-; CHECK-NOT: %d2_load
-; CHECK-NOT: %d_or_null_load
-; CHECK: %d_or_null_non_null_load
-; CHECK: %within_allocation
-; CHECK-NOT: %outside_allocation
entry:
+; CHECK: %globalptr{{.*}}(aligned)
%globalptr = getelementptr inbounds [6 x i8], [6 x i8]* @globalstr, i32 0, i32 0
%load1 = load i8, i8* %globalptr
+
+; CHECK: %alloca{{.*}}(aligned)
%alloca = alloca i1
%load2 = load i1, i1* %alloca
+
+; CHECK: %dparam{{.*}}(aligned)
%load3 = load i32, i32 addrspace(1)* %dparam
+
+; CHECK: %relocate{{.*}}(aligned)
%tok = tail call i32 (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0, i32 addrspace(1)* %dparam)
%relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %tok, i32 7, i32 7)
%load4 = load i32, i32 addrspace(1)* %relocate
+
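+; GEP past the dereferenceable(8) range of %dparam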
+; CHECK-NOT: %nparam
%nparam = getelementptr i32, i32 addrspace(1)* %dparam, i32 5
%load5 = load i32, i32 addrspace(1)* %nparam
; Load from a non-dereferenceable load
+; CHECK-NOT: %nd_load
%nd_load = load i32*, i32** @globali32ptr
%load6 = load i32, i32* %nd_load
; Load from a dereferenceable load
+; CHECK: %d4_load{{.*}}(aligned)
%d4_load = load i32*, i32** @globali32ptr, !dereferenceable !0
%load7 = load i32, i32* %d4_load
; Load from an offset not covered by the dereferenceable portion
+; CHECK-NOT: %d2_load
%d2_load = load i32*, i32** @globali32ptr, !dereferenceable !1
%load8 = load i32, i32* %d2_load
; Load from a potentially null pointer with dereferenceable_or_null
+; CHECK-NOT: %d_or_null_load
%d_or_null_load = load i32*, i32** @globali32ptr, !dereferenceable_or_null !0
%load9 = load i32, i32* %d_or_null_load
; Load from a non-null pointer with dereferenceable_or_null
+; CHECK: %d_or_null_non_null_load{{.*}}(aligned)
%d_or_null_non_null_load = load i32*, i32** @globali32ptr, !nonnull !2, !dereferenceable_or_null !0
%load10 = load i32, i32* %d_or_null_non_null_load
; It's OK to overrun static array size as long as we stay within underlying object size
+; CHECK: %within_allocation{{.*}}(aligned)
%within_allocation = getelementptr inbounds %struct.A, %struct.A* @globalstruct, i64 0, i32 0, i64 10
%load11 = load i8, i8* %within_allocation
; GEP is outside the underlying object size
+; CHECK-NOT: %outside_allocation
%outside_allocation = getelementptr inbounds %struct.A, %struct.A* @globalstruct, i64 0, i32 1, i64 10
%load12 = load i8, i8* %outside_allocation
+ ; Loads from aligned globals
+; CHECK: @globalptr.align1{{.*}}(unaligned)
+; CHECK: @globalptr.align16{{.*}}(aligned)
+ %load13 = load i8, i8* @globalptr.align1, align 16
+ %load14 = load i8, i8* @globalptr.align16, align 16
+
+ ; Loads from aligned arguments
+; CHECK: %dparam.align1{{.*}}(unaligned)
+; CHECK: %dparam.align16{{.*}}(aligned)
+ %load15 = load i8, i8 addrspace(1)* %dparam.align1, align 16
+ %load16 = load i8, i8 addrspace(1)* %dparam.align16, align 16
+
+ ; Loads from aligned allocas
+; CHECK: %alloca.align1{{.*}}(unaligned)
+; CHECK: %alloca.align16{{.*}}(aligned)
+ %alloca.align1 = alloca i1, align 1
+ %alloca.align16 = alloca i1, align 16
+ %load17 = load i1, i1* %alloca.align1, align 16
+ %load18 = load i1, i1* %alloca.align16, align 16
+
+ ; Loads from GEPs
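+ ; Only an aligned base combined with an offset that is a multiple of the
+ ; alignment is still known to be aligned.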
+; CHECK: %gep.align1.offset1{{.*}}(unaligned)
+; CHECK: %gep.align16.offset1{{.*}}(unaligned)
+; CHECK: %gep.align1.offset16{{.*}}(unaligned)
+; CHECK: %gep.align16.offset16{{.*}}(aligned)
+ %gep.align1.offset1 = getelementptr inbounds i8, i8 addrspace(1)* %dparam.align1, i32 1
+ %gep.align16.offset1 = getelementptr inbounds i8, i8 addrspace(1)* %dparam.align16, i32 1
+ %gep.align1.offset16 = getelementptr inbounds i8, i8 addrspace(1)* %dparam.align1, i32 16
+ %gep.align16.offset16 = getelementptr inbounds i8, i8 addrspace(1)* %dparam.align16, i32 16
+ %load19 = load i8, i8 addrspace(1)* %gep.align1.offset1, align 16
+ %load20 = load i8, i8 addrspace(1)* %gep.align16.offset1, align 16
+ %load21 = load i8, i8 addrspace(1)* %gep.align1.offset16, align 16
+ %load22 = load i8, i8 addrspace(1)* %gep.align16.offset16, align 16
+
ret void
}