return;
}
case Instruction::Load: {
+ // Check that a vectorized load would load the same memory as a scalar
+ // load. For example, we don't want to vectorize loads that are smaller
+ // than 8 bits. Even though we have a packed struct {<i2, i2, i2, i2>},
+ // LLVM treats loading/storing it as an i8 struct. If we vectorize
+ // loads/stores from such a struct, we read/write packed bits, disagreeing
+ // with the unvectorized version.
+ const DataLayout &DL = F->getParent()->getDataLayout();
+ Type *ScalarTy = VL[0]->getType();
+
+ if (DL.getTypeSizeInBits(ScalarTy) !=
+ DL.getTypeAllocSizeInBits(ScalarTy)) {
+ BS.cancelScheduling(VL);
+ newTreeEntry(VL, false);
+ DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
+ return;
+ }
// Check if the loads are consecutive or if we need to swizzle them.
for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
LoadInst *L = cast<LoadInst>(VL[i]);
if (!L->isSimple()) {
  BS.cancelScheduling(VL);
  newTreeEntry(VL, false);
  DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
  return;
}
- const DataLayout &DL = F->getParent()->getDataLayout();
+
if (!isConsecutiveAccess(VL[i], VL[i + 1], DL)) {
if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], DL)) {
++NumLoadsWantToChangeOrder;
ret void
}
+define i8 @test3(i8 *%addr) {
+; Check that we do not vectorize types that are padded to a bigger type.
+;
+; CHECK-LABEL: @test3
+; CHECK-NOT: <4 x i2>
+; CHECK: ret i8
+entry:
+ %a = bitcast i8* %addr to i2*
+ %a0 = getelementptr inbounds i2, i2* %a, i64 0
+ %a1 = getelementptr inbounds i2, i2* %a, i64 1
+ %a2 = getelementptr inbounds i2, i2* %a, i64 2
+ %a3 = getelementptr inbounds i2, i2* %a, i64 3
+ %l0 = load i2, i2* %a0, align 1
+ %l1 = load i2, i2* %a1, align 1
+ %l2 = load i2, i2* %a2, align 1
+ %l3 = load i2, i2* %a3, align 1
+ br label %bb1
+bb1: ; preds = %entry
+ %p0 = phi i2 [ %l0, %entry ]
+ %p1 = phi i2 [ %l1, %entry ]
+ %p2 = phi i2 [ %l2, %entry ]
+ %p3 = phi i2 [ %l3, %entry ]
+ %r = zext i2 %p2 to i8
+ ret i8 %r
+}
+
declare void @f(i64, i64)
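
For reference, here is a minimal standalone sketch of the DataLayout queries the new check relies on. It is illustrative only and not part of the patch: the file name, the main() driver, and the empty data-layout string are assumptions, and it presumes an LLVM tree roughly contemporary with this change (where these queries yield plain integer sizes); only getTypeSizeInBits/getTypeAllocSizeInBits come from the patch. For i2 the bit size is 2 but the alloc size is padded to a whole byte, so the two differ and the loads in @test3 are gathered rather than vectorized.

// size_check.cpp -- hypothetical demo, not part of this patch.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

int main() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("");                        // default (empty) layout string
  llvm::Type *I2 = llvm::Type::getIntNTy(Ctx, 2); // the i2 element type from @test3

  uint64_t SizeBits  = DL.getTypeSizeInBits(I2);      // 2
  uint64_t AllocBits = DL.getTypeAllocSizeInBits(I2); // 8: i2 is stored padded to a byte

  // Same comparison as the new check above: unequal sizes mean the type is
  // padded in memory, so SLP gathers the loads instead of vectorizing them.
  llvm::outs() << "size=" << SizeBits << " alloc=" << AllocBits
               << (SizeBits != AllocBits ? "  -> gather\n" : "  -> packed\n");
  return 0;
}

Comparing the two sizes, rather than special-casing sub-byte integers, also rejects any other type whose in-memory form carries padding. The regression test itself would presumably be exercised by the file's existing RUN line (not shown in this hunk), i.e. running the SLP vectorizer through opt and piping the output into FileCheck.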