-; RUN: llc -march=aarch64 -aarch64-neon-syntax=generic -lower-interleaved-accesses=true < %s | FileCheck %s
+; RUN: llc -march=aarch64 -aarch64-neon-syntax=generic -lower-interleaved-accesses=true < %s | FileCheck %s -check-prefix=NEON
+; RUN: llc -march=aarch64 -mattr=-neon -lower-interleaved-accesses=true < %s | FileCheck %s -check-prefix=NONEON
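; The NEON runs check that interleaved loads and stores are lowered to the
; ldN/stN instructions; the NONEON runs check that no ldN/stN instructions are
; generated when NEON is not available.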
-; CHECK-LABEL: load_factor2:
-; CHECK: ld2 { v0.8b, v1.8b }, [x0]
+; NEON-LABEL: load_factor2:
+; NEON: ld2 { v0.8b, v1.8b }, [x0]
+; NONEON-LABEL: load_factor2:
+; NONEON-NOT: ld2
define <8 x i8> @load_factor2(<16 x i8>* %ptr) {
%wide.vec = load <16 x i8>, <16 x i8>* %ptr, align 4
%strided.v0 = shufflevector <16 x i8> %wide.vec, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
ret <8 x i8> %add
}
-; CHECK-LABEL: load_factor3:
-; CHECK: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
+; NEON-LABEL: load_factor3:
+; NEON: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
+; NONEON-LABEL: load_factor3:
+; NONEON-NOT: ld3
define <4 x i32> @load_factor3(i32* %ptr) {
%base = bitcast i32* %ptr to <12 x i32>*
%wide.vec = load <12 x i32>, <12 x i32>* %base, align 4
ret <4 x i32> %add
}
-; CHECK-LABEL: load_factor4:
-; CHECK: ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+; NEON-LABEL: load_factor4:
+; NEON: ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+; NONEON-LABEL: load_factor4:
+; NONEON-NOT: ld4
define <4 x i32> @load_factor4(i32* %ptr) {
%base = bitcast i32* %ptr to <16 x i32>*
%wide.vec = load <16 x i32>, <16 x i32>* %base, align 4
ret <4 x i32> %add
}
-; CHECK-LABEL: store_factor2:
-; CHECK: st2 { v0.8b, v1.8b }, [x0]
+; NEON-LABEL: store_factor2:
+; NEON: st2 { v0.8b, v1.8b }, [x0]
+; NONEON-LABEL: store_factor2:
+; NONEON-NOT: st2
define void @store_factor2(<16 x i8>* %ptr, <8 x i8> %v0, <8 x i8> %v1) {
%interleaved.vec = shufflevector <8 x i8> %v0, <8 x i8> %v1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
store <16 x i8> %interleaved.vec, <16 x i8>* %ptr, align 4
ret void
}
-; CHECK-LABEL: store_factor3:
-; CHECK: st3 { v0.4s, v1.4s, v2.4s }, [x0]
+; NEON-LABEL: store_factor3:
+; NEON: st3 { v0.4s, v1.4s, v2.4s }, [x0]
+; NONEON-LABEL: store_factor3:
+; NONEON-NOT: st3
define void @store_factor3(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
%base = bitcast i32* %ptr to <12 x i32>*
%v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret void
}
-; CHECK-LABEL: store_factor4:
-; CHECK: st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+; NEON-LABEL: store_factor4:
+; NEON: st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+; NONEON-LABEL: store_factor4:
+; NONEON-NOT: st4
define void @store_factor4(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
%base = bitcast i32* %ptr to <16 x i32>*
%v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; The following cases test that interleaved accesses of pointer vectors can be
; matched to ldN/stN instructions.
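; (On AArch64 pointers are 64 bits wide, so the <2 x i32*> vectors below map to
; the .2d register lists in the checks.)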
-; CHECK-LABEL: load_ptrvec_factor2:
-; CHECK: ld2 { v0.2d, v1.2d }, [x0]
+; NEON-LABEL: load_ptrvec_factor2:
+; NEON: ld2 { v0.2d, v1.2d }, [x0]
+; NONEON-LABEL: load_ptrvec_factor2:
+; NONEON-NOT: ld2
define <2 x i32*> @load_ptrvec_factor2(i32** %ptr) {
%base = bitcast i32** %ptr to <4 x i32*>*
%wide.vec = load <4 x i32*>, <4 x i32*>* %base, align 4
ret <2 x i32*> %strided.v0
}
-; CHECK-LABEL: load_ptrvec_factor3:
-; CHECK: ld3 { v0.2d, v1.2d, v2.2d }, [x0]
+; NEON-LABEL: load_ptrvec_factor3:
+; NEON: ld3 { v0.2d, v1.2d, v2.2d }, [x0]
+; NONEON-LABEL: load_ptrvec_factor3:
+; NONEON-NOT: ld3
define void @load_ptrvec_factor3(i32** %ptr, <2 x i32*>* %ptr1, <2 x i32*>* %ptr2) {
%base = bitcast i32** %ptr to <6 x i32*>*
%wide.vec = load <6 x i32*>, <6 x i32*>* %base, align 4
ret void
}
-; CHECK-LABEL: load_ptrvec_factor4:
-; CHECK: ld4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+; NEON-LABEL: load_ptrvec_factor4:
+; NEON: ld4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+; NONEON-LABEL: load_ptrvec_factor4:
+; NONEON-NOT: ld4
define void @load_ptrvec_factor4(i32** %ptr, <2 x i32*>* %ptr1, <2 x i32*>* %ptr2) {
%base = bitcast i32** %ptr to <8 x i32*>*
%wide.vec = load <8 x i32*>, <8 x i32*>* %base, align 4
ret void
}
-; CHECK-LABEL: store_ptrvec_factor2:
-; CHECK: st2 { v0.2d, v1.2d }, [x0]
+; NEON-LABEL: store_ptrvec_factor2:
+; NEON: st2 { v0.2d, v1.2d }, [x0]
+; NONEON-LABEL: store_ptrvec_factor2:
+; NONEON-NOT: st2
define void @store_ptrvec_factor2(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1) {
%base = bitcast i32** %ptr to <4 x i32*>*
%interleaved.vec = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
ret void
}
-; CHECK-LABEL: store_ptrvec_factor3:
-; CHECK: st3 { v0.2d, v1.2d, v2.2d }, [x0]
+; NEON-LABEL: store_ptrvec_factor3:
+; NEON: st3 { v0.2d, v1.2d, v2.2d }, [x0]
+; NONEON-LABEL: store_ptrvec_factor3:
+; NONEON-NOT: st3
define void @store_ptrvec_factor3(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1, <2 x i32*> %v2) {
%base = bitcast i32** %ptr to <6 x i32*>*
%v0_v1 = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
ret void
}
-; CHECK-LABEL: store_ptrvec_factor4:
-; CHECK: st4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+; NEON-LABEL: store_ptrvec_factor4:
+; NEON: st4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+; NONEON-LABEL: store_ptrvec_factor4:
+; NONEON-NOT: st4
define void @store_ptrvec_factor4(i32* %ptr, <2 x i32*> %v0, <2 x i32*> %v1, <2 x i32*> %v2, <2 x i32*> %v3) {
%base = bitcast i32* %ptr to <8 x i32*>*
%v0_v1 = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; The following cases check that shuffle masks with undef indices can be matched
; to ldN/stN instructions.
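; (Only some of the indices are undef; the remaining defined indices still
; describe the strided access pattern.)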
-; CHECK-LABEL: load_undef_mask_factor2:
-; CHECK: ld2 { v0.4s, v1.4s }, [x0]
+; NEON-LABEL: load_undef_mask_factor2:
+; NEON: ld2 { v0.4s, v1.4s }, [x0]
+; NONEON-LABEL: load_undef_mask_factor2:
+; NONEON-NOT: ld2
define <4 x i32> @load_undef_mask_factor2(i32* %ptr) {
%base = bitcast i32* %ptr to <8 x i32>*
%wide.vec = load <8 x i32>, <8 x i32>* %base, align 4
ret <4 x i32> %add
}
-; CHECK-LABEL: load_undef_mask_factor3:
-; CHECK: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
+; NEON-LABEL: load_undef_mask_factor3:
+; NEON: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
+; NONEON-LABEL: load_undef_mask_factor3:
+; NONEON-NOT: ld3
define <4 x i32> @load_undef_mask_factor3(i32* %ptr) {
%base = bitcast i32* %ptr to <12 x i32>*
%wide.vec = load <12 x i32>, <12 x i32>* %base, align 4
ret <4 x i32> %add
}
-; CHECK-LABEL: load_undef_mask_factor4:
-; CHECK: ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+; NEON-LABEL: load_undef_mask_factor4:
+; NEON: ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+; NONEON-LABEL: load_undef_mask_factor4:
+; NONEON-NOT: ld4
define <4 x i32> @load_undef_mask_factor4(i32* %ptr) {
%base = bitcast i32* %ptr to <16 x i32>*
%wide.vec = load <16 x i32>, <16 x i32>* %base, align 4
ret <4 x i32> %add
}
-; CHECK-LABEL: store_undef_mask_factor2:
-; CHECK: st2 { v0.4s, v1.4s }, [x0]
+; NEON-LABEL: store_undef_mask_factor2:
+; NEON: st2 { v0.4s, v1.4s }, [x0]
+; NONEON-LABEL: store_undef_mask_factor2:
+; NONEON-NOT: st2
define void @store_undef_mask_factor2(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1) {
%base = bitcast i32* %ptr to <8 x i32>*
%interleaved.vec = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 6, i32 3, i32 7>
ret void
}
-; CHECK-LABEL: store_undef_mask_factor3:
-; CHECK: st3 { v0.4s, v1.4s, v2.4s }, [x0]
+; NEON-LABEL: store_undef_mask_factor3:
+; NEON: st3 { v0.4s, v1.4s, v2.4s }, [x0]
+; NONEON-LABEL: store_undef_mask_factor3:
+; NONEON-NOT: st3
define void @store_undef_mask_factor3(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
%base = bitcast i32* %ptr to <12 x i32>*
%v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret void
}
-; CHECK-LABEL: store_undef_mask_factor4:
-; CHECK: st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+; NEON-LABEL: store_undef_mask_factor4:
+; NEON: st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+; NONEON-LABEL: store_undef_mask_factor4:
+; NONEON-NOT: st4
define void @store_undef_mask_factor4(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
%base = bitcast i32* %ptr to <16 x i32>*
%v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; RUN: llc -mtriple=arm-eabi -mattr=+neon -lower-interleaved-accesses=true < %s | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+neon -lower-interleaved-accesses=true < %s | FileCheck %s -check-prefix=NEON
+; RUN: llc -mtriple=arm-eabi -mattr=-neon -lower-interleaved-accesses=true < %s | FileCheck %s -check-prefix=NONEON
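; The NEON runs check that interleaved loads and stores are lowered to the
; vldN/vstN instructions; the NONEON runs check that no vldN/vstN instructions
; are generated when NEON is not available.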
-; CHECK-LABEL: load_factor2:
-; CHECK: vld2.8 {d16, d17}, [r0]
+; NEON-LABEL: load_factor2:
+; NEON: vld2.8 {d16, d17}, [r0]
+; NONEON-LABEL: load_factor2:
+; NONEON-NOT: vld2
define <8 x i8> @load_factor2(<16 x i8>* %ptr) {
%wide.vec = load <16 x i8>, <16 x i8>* %ptr, align 4
%strided.v0 = shufflevector <16 x i8> %wide.vec, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
ret <8 x i8> %add
}
-; CHECK-LABEL: load_factor3:
-; CHECK: vld3.32 {d16, d17, d18}, [r0]
+; NEON-LABEL: load_factor3:
+; NEON: vld3.32 {d16, d17, d18}, [r0]
+; NONEON-LABEL: load_factor3:
+; NONEON-NOT: vld3
define <2 x i32> @load_factor3(i32* %ptr) {
%base = bitcast i32* %ptr to <6 x i32>*
%wide.vec = load <6 x i32>, <6 x i32>* %base, align 4
ret <2 x i32> %add
}
-; CHECK-LABEL: load_factor4:
-; CHECK: vld4.32 {d16, d18, d20, d22}, [r0]!
-; CHECK: vld4.32 {d17, d19, d21, d23}, [r0]
+; NEON-LABEL: load_factor4:
+; NEON: vld4.32 {d16, d18, d20, d22}, [r0]!
+; NEON: vld4.32 {d17, d19, d21, d23}, [r0]
+; NONEON-LABEL: load_factor4:
+; NONEON-NOT: vld4
define <4 x i32> @load_factor4(i32* %ptr) {
%base = bitcast i32* %ptr to <16 x i32>*
%wide.vec = load <16 x i32>, <16 x i32>* %base, align 4
ret <4 x i32> %add
}
-; CHECK-LABEL: store_factor2:
-; CHECK: vst2.8 {d16, d17}, [r0]
+; NEON-LABEL: store_factor2:
+; NEON: vst2.8 {d16, d17}, [r0]
+; NONEON-LABEL: store_factor2:
+; NONEON-NOT: vst2
define void @store_factor2(<16 x i8>* %ptr, <8 x i8> %v0, <8 x i8> %v1) {
%interleaved.vec = shufflevector <8 x i8> %v0, <8 x i8> %v1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
store <16 x i8> %interleaved.vec, <16 x i8>* %ptr, align 4
ret void
}
-; CHECK-LABEL: store_factor3:
-; CHECK: vst3.32 {d16, d18, d20}, [r0]!
-; CHECK: vst3.32 {d17, d19, d21}, [r0]
+; NEON-LABEL: store_factor3:
+; NEON: vst3.32 {d16, d18, d20}, [r0]!
+; NEON: vst3.32 {d17, d19, d21}, [r0]
+; NONEON-LABEL: store_factor3:
+; NONEON-NOT: vst3.32
define void @store_factor3(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
%base = bitcast i32* %ptr to <12 x i32>*
%v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret void
}
-; CHECK-LABEL: store_factor4:
-; CHECK: vst4.32 {d16, d18, d20, d22}, [r0]!
-; CHECK: vst4.32 {d17, d19, d21, d23}, [r0]
+; NEON-LABEL: store_factor4:
+; NEON: vst4.32 {d16, d18, d20, d22}, [r0]!
+; NEON: vst4.32 {d17, d19, d21, d23}, [r0]
+; NONEON-LABEL: store_factor4:
+; NONEON-NOT: vst4
define void @store_factor4(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
%base = bitcast i32* %ptr to <16 x i32>*
%v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; The following cases test that interleaved accesses of pointer vectors can be
; matched to vldN/vstN instructions.
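; (On ARM pointers are 32 bits wide, so the pointer-vector tests use the .32
; forms in the checks.)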
-; CHECK-LABEL: load_ptrvec_factor2:
-; CHECK: vld2.32 {d16, d17}, [r0]
+; NEON-LABEL: load_ptrvec_factor2:
+; NEON: vld2.32 {d16, d17}, [r0]
+; NONEON-LABEL: load_ptrvec_factor2:
+; NONEON-NOT: vld2
define <2 x i32*> @load_ptrvec_factor2(i32** %ptr) {
%base = bitcast i32** %ptr to <4 x i32*>*
%wide.vec = load <4 x i32*>, <4 x i32*>* %base, align 4
ret <2 x i32*> %strided.v0
}
-; CHECK-LABEL: load_ptrvec_factor3:
-; CHECK: vld3.32 {d16, d17, d18}, [r0]
+; NEON-LABEL: load_ptrvec_factor3:
+; NEON: vld3.32 {d16, d17, d18}, [r0]
+; NONEON-LABEL: load_ptrvec_factor3:
+; NONEON-NOT: vld3
define void @load_ptrvec_factor3(i32** %ptr, <2 x i32*>* %ptr1, <2 x i32*>* %ptr2) {
%base = bitcast i32** %ptr to <6 x i32*>*
%wide.vec = load <6 x i32*>, <6 x i32*>* %base, align 4
ret void
}
-; CHECK-LABEL: load_ptrvec_factor4:
-; CHECK: vld4.32 {d16, d17, d18, d19}, [r0]
+; NEON-LABEL: load_ptrvec_factor4:
+; NEON: vld4.32 {d16, d17, d18, d19}, [r0]
+; NONEON-LABEL: load_ptrvec_factor4:
+; NONEON-NOT: vld4
define void @load_ptrvec_factor4(i32** %ptr, <2 x i32*>* %ptr1, <2 x i32*>* %ptr2) {
%base = bitcast i32** %ptr to <8 x i32*>*
%wide.vec = load <8 x i32*>, <8 x i32*>* %base, align 4
ret void
}
-; CHECK-LABEL: store_ptrvec_factor2:
-; CHECK: vst2.32 {d16, d17}, [r0]
+; NEON-LABEL: store_ptrvec_factor2:
+; NEON: vst2.32 {d16, d17}, [r0]
+; NONEON-LABEL: store_ptrvec_factor2:
+; NONEON-NOT: vst2
define void @store_ptrvec_factor2(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1) {
%base = bitcast i32** %ptr to <4 x i32*>*
%interleaved.vec = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
ret void
}
-; CHECK-LABEL: store_ptrvec_factor3:
-; CHECK: vst3.32 {d16, d17, d18}, [r0]
+; NEON-LABEL: store_ptrvec_factor3:
+; NEON: vst3.32 {d16, d17, d18}, [r0]
+; NONEON-LABEL: store_ptrvec_factor3:
+; NONEON-NOT: vst3
define void @store_ptrvec_factor3(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1, <2 x i32*> %v2) {
%base = bitcast i32** %ptr to <6 x i32*>*
%v0_v1 = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
ret void
}
-; CHECK-LABEL: store_ptrvec_factor4:
-; CHECK: vst4.32 {d16, d17, d18, d19}, [r0]
+; NEON-LABEL: store_ptrvec_factor4:
+; NEON: vst4.32 {d16, d17, d18, d19}, [r0]
+; NONEON-LABEL: store_ptrvec_factor4:
+; NONEON-NOT: vst4
define void @store_ptrvec_factor4(i32* %ptr, <2 x i32*> %v0, <2 x i32*> %v1, <2 x i32*> %v2, <2 x i32*> %v3) {
%base = bitcast i32* %ptr to <8 x i32*>*
%v0_v1 = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; The following cases check that shuffle masks with undef indices can be matched
; to vldN/vstN instructions.
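; (As on AArch64, the defined indices alone still describe the strided access
; pattern.)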
-; CHECK-LABEL: load_undef_mask_factor2:
-; CHECK: vld2.32 {d16, d17, d18, d19}, [r0]
+; NEON-LABEL: load_undef_mask_factor2:
+; NEON: vld2.32 {d16, d17, d18, d19}, [r0]
+; NONEON-LABEL: load_undef_mask_factor2:
+; NONEON-NOT: vld2
define <4 x i32> @load_undef_mask_factor2(i32* %ptr) {
%base = bitcast i32* %ptr to <8 x i32>*
%wide.vec = load <8 x i32>, <8 x i32>* %base, align 4
ret <4 x i32> %add
}
-; CHECK-LABEL: load_undef_mask_factor3:
-; CHECK: vld3.32 {d16, d18, d20}, [r0]!
-; CHECK: vld3.32 {d17, d19, d21}, [r0]
+; NEON-LABEL: load_undef_mask_factor3:
+; NEON: vld3.32 {d16, d18, d20}, [r0]!
+; NEON: vld3.32 {d17, d19, d21}, [r0]
+; NONEON-LABEL: load_undef_mask_factor3:
+; NONEON-NOT: vld3
define <4 x i32> @load_undef_mask_factor3(i32* %ptr) {
%base = bitcast i32* %ptr to <12 x i32>*
%wide.vec = load <12 x i32>, <12 x i32>* %base, align 4
ret <4 x i32> %add
}
-; CHECK-LABEL: load_undef_mask_factor4:
-; CHECK: vld4.32 {d16, d18, d20, d22}, [r0]!
-; CHECK: vld4.32 {d17, d19, d21, d23}, [r0]
+; NEON-LABEL: load_undef_mask_factor4:
+; NEON: vld4.32 {d16, d18, d20, d22}, [r0]!
+; NEON: vld4.32 {d17, d19, d21, d23}, [r0]
+; NONEON-LABEL: load_undef_mask_factor4:
+; NONEON-NOT: vld4
define <4 x i32> @load_undef_mask_factor4(i32* %ptr) {
%base = bitcast i32* %ptr to <16 x i32>*
%wide.vec = load <16 x i32>, <16 x i32>* %base, align 4
ret <4 x i32> %add
}
-; CHECK-LABEL: store_undef_mask_factor2:
-; CHECK: vst2.32 {d16, d17, d18, d19}, [r0]
+; NEON-LABEL: store_undef_mask_factor2:
+; NEON: vst2.32 {d16, d17, d18, d19}, [r0]
+; NONEON-LABEL: store_undef_mask_factor2:
+; NONEON-NOT: vst2
define void @store_undef_mask_factor2(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1) {
%base = bitcast i32* %ptr to <8 x i32>*
%interleaved.vec = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 6, i32 3, i32 7>
ret void
}
-; CHECK-LABEL: store_undef_mask_factor3:
-; CHECK: vst3.32 {d16, d18, d20}, [r0]!
-; CHECK: vst3.32 {d17, d19, d21}, [r0]
+; NEON-LABEL: store_undef_mask_factor3:
+; NEON: vst3.32 {d16, d18, d20}, [r0]!
+; NEON: vst3.32 {d17, d19, d21}, [r0]
+; NONEON-LABEL: store_undef_mask_factor3:
+; NONEON-NOT: vst3
define void @store_undef_mask_factor3(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
%base = bitcast i32* %ptr to <12 x i32>*
%v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret void
}
-; CHECK-LABEL: store_undef_mask_factor4:
-; CHECK: vst4.32 {d16, d18, d20, d22}, [r0]!
-; CHECK: vst4.32 {d17, d19, d21, d23}, [r0]
+; NEON-LABEL: store_undef_mask_factor4:
+; NEON: vst4.32 {d16, d18, d20, d22}, [r0]!
+; NEON: vst4.32 {d17, d19, d21, d23}, [r0]
+; NONEON-LABEL: store_undef_mask_factor4:
+; NONEON-NOT: vst4
define void @store_undef_mask_factor4(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
%base = bitcast i32* %ptr to <16 x i32>*
%v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; The following test cases check that address spaces are properly handled.
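; (The wide load and the interleaved store below are performed through
; addrspace(1) pointers.)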
-; CHECK-LABEL: load_address_space
-; CHECK: vld3.32
+; NEON-LABEL: load_address_space
+; NEON: vld3.32
+; NONEON-LABEL: load_address_space
+; NONEON-NOT: vld3
define void @load_address_space(<4 x i32> addrspace(1)* %A, <2 x i32>* %B) {
%tmp = load <4 x i32>, <4 x i32> addrspace(1)* %A
%interleaved = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 0, i32 3>
ret void
}
-; CHECK-LABEL: store_address_space
-; CHECK: vst2.32
+; NEON-LABEL: store_address_space
+; NEON: vst2.32
+; NONEON-LABEL: store_address_space
+; NONEON-NOT: vst2
define void @store_address_space(<2 x i32>* %A, <2 x i32>* %B, <4 x i32> addrspace(1)* %C) {
%tmp0 = load <2 x i32>, <2 x i32>* %A
%tmp1 = load <2 x i32>, <2 x i32>* %B