/// \brief Check whether the access through \p Ptr has a constant stride.
static int isStridedPtr(ScalarEvolution *SE, DataLayout *DL, Value *Ptr,
const Loop *Lp) {
- const Type *PtrTy = Ptr->getType();
- assert(PtrTy->isPointerTy() && "Unexpected non ptr");
+ const Type *Ty = Ptr->getType();
+ assert(Ty->isPointerTy() && "Unexpected non ptr");
// Make sure that the pointer does not point to aggregate types.
- if (cast<PointerType>(Ptr->getType())->getElementType()->isAggregateType()) {
+ const PointerType *PtrTy = cast<PointerType>(Ty);
+ if (PtrTy->getElementType()->isAggregateType()) {
DEBUG(dbgs() << "LV: Bad stride - Not a pointer to a scalar type" << *Ptr
<< "\n");
return 0;
}
// The address calculation must not wrap. Otherwise, a dependence could be
- // inverted. An inbounds getelementptr that is a AddRec with a unit stride
+ // inverted.
+ // An inbounds getelementptr that is an AddRec with a unit stride
// cannot wrap per definition. The unit stride requirement is checked later.
+ // A getelementptr without an inbounds attribute and unit stride would have
+ // to access the pointer value "0", which is undefined behavior in address
+ // space 0; therefore we can also vectorize this case.
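+ // For illustration (hypothetical IR names; the test accompanying this patch
+ // exercises the same pattern):
+ //   %p = getelementptr float* %A, i32 %i   ; no 'inbounds', address space 0
+ //   %v = load float* %p
+ // With unit stride, the only way this address computation can wrap is by
+ // reaching the pointer value "0", which would be undefined behavior.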
bool IsInBoundsGEP = isInBoundsGep(Ptr);
bool IsNoWrapAddRec = AR->getNoWrapFlags(SCEV::NoWrapMask);
- if (!IsNoWrapAddRec && !IsInBoundsGEP) {
+ bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
+ if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
DEBUG(dbgs() << "LV: Bad stride - Pointer may wrap in the address space "
<< *Ptr << " SCEV: " << *PtrScev << "\n");
return 0;
}
- int64_t Size = DL->getTypeAllocSize(PtrTy->getPointerElementType());
+ int64_t Size = DL->getTypeAllocSize(PtrTy->getElementType());
const APInt &APStepVal = C->getValue()->getValue();
// Huge step value - give up.
if (APStepVal.getBitWidth() > 64)
  return 0;
int64_t StepVal = APStepVal.getSExtValue();
// Strided access.
int64_t Stride = StepVal / Size;
int64_t Rem = StepVal % Size;
if (Rem)
  return 0;
// If the SCEV could wrap but we have an inbounds gep with a unit stride we
- // know we can't "wrap around the address space".
- if (!IsNoWrapAddRec && IsInBoundsGEP && Stride != 1 && Stride != -1)
+ // know we can't "wrap around the address space". In case of address space
+ // zero we know that this won't happen without triggering undefined behavior.
+ if (!IsNoWrapAddRec && (IsInBoundsGEP || IsInAddressSpaceZero) &&
+ Stride != 1 && Stride != -1)
return 0;
return Stride;
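For context, a minimal standalone sketch of the stride derivation used above;
computeStride is a hypothetical helper that mirrors the StepVal / Size logic
in this function, not code from this patch:

#include <cstdint>
#include <cstdio>

// The byte step of the pointer AddRec must be an exact multiple of the
// element size; otherwise the access is not strided and we give up (0).
static int64_t computeStride(int64_t StepVal, int64_t Size) {
  if (StepVal % Size != 0)
    return 0;
  return StepVal / Size;
}

int main() {
  // A 4-byte float accessed with a 4-byte step: unit stride (1).
  std::printf("%lld\n", (long long)computeStride(4, 4));
  // A -4-byte step: reverse unit stride (-1), also accepted above.
  std::printf("%lld\n", (long long)computeStride(-4, 4));
  // A 6-byte step over 4-byte elements: not strided (0).
  std::printf("%lld\n", (long long)computeStride(6, 4));
  return 0;
}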
--- /dev/null
+; RUN: opt -S -loop-vectorize -force-vector-width=4 -force-vector-unroll=1 < %s | FileCheck %s
+target datalayout = "e-p:32:32:32-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f16:16:16-f32:32:32-f64:32:64-f128:128:128-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
+
+; We can vectorize this code because if the address computation were to wrap,
+; a load from address 0 would take place, which is undefined behavior in
+; address space 0 according to LLVM IR semantics.
+
+; PR16592
+
+; CHECK: safe
+; CHECK: <4 x float>
+
+define void @safe(float* %A, float* %B, float %K) {
+entry:
+ br label %"<bb 3>"
+
+"<bb 3>":
+ %i_15 = phi i32 [ 0, %entry ], [ %i_19, %"<bb 3>" ]
+ %pp3 = getelementptr float* %A, i32 %i_15
+ %D.1396_10 = load float* %pp3, align 4
+ %pp24 = getelementptr float* %B, i32 %i_15
+ %D.1398_15 = load float* %pp24, align 4
+ %D.1399_17 = fadd float %D.1398_15, %K
+ %D.1400_18 = fmul float %D.1396_10, %D.1399_17
+ store float %D.1400_18, float* %pp3, align 4
+ %i_19 = add nsw i32 %i_15, 1
+ %exitcond = icmp ne i32 %i_19, 64
+ br i1 %exitcond, label %"<bb 3>", label %return
+
+return:
+ ret void
+}
+
+; In a non-default address space we don't have this rule, so this loop must
+; not be vectorized.
+
+; CHECK: notsafe
+; CHECK-NOT: <4 x float>
+
+define void @notsafe(float addrspace(5) * %A, float* %B, float %K) {
+entry:
+ br label %"<bb 3>"
+
+"<bb 3>":
+ %i_15 = phi i32 [ 0, %entry ], [ %i_19, %"<bb 3>" ]
+ %pp3 = getelementptr float addrspace(5) * %A, i32 %i_15
+ %D.1396_10 = load float addrspace(5) * %pp3, align 4
+ %pp24 = getelementptr float* %B, i32 %i_15
+ %D.1398_15 = load float* %pp24, align 4
+ %D.1399_17 = fadd float %D.1398_15, %K
+ %D.1400_18 = fmul float %D.1396_10, %D.1399_17
+ store float %D.1400_18, float addrspace(5) * %pp3, align 4
+ %i_19 = add nsw i32 %i_15, 1
+ %exitcond = icmp ne i32 %i_19, 64
+ br i1 %exitcond, label %"<bb 3>", label %return
+
+return:
+ ret void
+}