1 ; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
2 ; Implementing vld / vst as REG_SEQUENCE eliminates the extra vmov's.
; Aggregate types used below: single-vector wrappers for plain loads, and
; multi-vector structs matching the return types of the vld2/vld3 intrinsics.
4 %struct.int16x8_t = type { <8 x i16> }
5 %struct.int32x4_t = type { <4 x i32> }
6 %struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
7 %struct.__neon_int16x8x2_t = type { <8 x i16>, <8 x i16> }
8 %struct.__neon_int32x4x2_t = type { <4 x i32>, <4 x i32> }
; t1: fixed-point multiply of 8 i16 lanes.  Loads <8 x i16> with vld1, splits
; it into two d-register halves via a <2 x double> bitcast + extractelement,
; widens each half to <4 x i32> (vmovl), multiplies by the two coefficient
; vectors, narrows back with a right shift (vshiftn with negative counts,
; i.e. a narrowing shift right by 12), recombines, and stores with vst1.
; NOTE(review): the function tail (ret / closing brace and the CHECK lines)
; is outside this view.
10 define arm_apcscc void @t1(i16* %i_ptr, i16* %o_ptr, %struct.int32x4_t* nocapture %vT0ptr, %struct.int32x4_t* nocapture %vT1ptr) nounwind {
; Load the two <4 x i32> coefficient vectors from the wrapper structs.
20 %0 = getelementptr inbounds %struct.int32x4_t* %vT0ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
21 %1 = load <4 x i32>* %0, align 16 ; <<4 x i32>> [#uses=1]
22 %2 = getelementptr inbounds %struct.int32x4_t* %vT1ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
23 %3 = load <4 x i32>* %2, align 16 ; <<4 x i32>> [#uses=1]
; vld1 the 8 input halfwords (intrinsic takes an i8* base).
24 %4 = bitcast i16* %i_ptr to i8* ; <i8*> [#uses=1]
25 %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4) ; <<8 x i16>> [#uses=1]
; View the q-register as two 64-bit halves so each half can be widened
; independently; this bitcast/extract pattern is what REG_SEQUENCE-based
; vld/vst lowering should fold away without extra vmovs (see file header).
26 %6 = bitcast <8 x i16> %5 to <2 x double> ; <<2 x double>> [#uses=2]
27 %7 = extractelement <2 x double> %6, i32 0 ; <double> [#uses=1]
28 %8 = bitcast double %7 to <4 x i16> ; <<4 x i16>> [#uses=1]
; Widen low half to <4 x i32> (vmovl; the intrinsic's "s" suffix indicates
; the signed form).
29 %9 = tail call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %8) ; <<4 x i32>> [#uses=1]
30 %10 = extractelement <2 x double> %6, i32 1 ; <double> [#uses=1]
31 %11 = bitcast double %10 to <4 x i16> ; <<4 x i16>> [#uses=1]
32 %12 = tail call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %11) ; <<4 x i32>> [#uses=1]
; Per-half coefficient multiply.
33 %13 = mul <4 x i32> %1, %9 ; <<4 x i32>> [#uses=1]
34 %14 = mul <4 x i32> %3, %12 ; <<4 x i32>> [#uses=1]
; Narrowing shift; the negative counts encode a shift *right* by 12
; (legacy vshiftn convention), i.e. a Q12 fixed-point rescale.
35 %15 = tail call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %13, <4 x i32> <i32 -12, i32 -12, i32 -12, i32 -12>) ; <<4 x i16>> [#uses=1]
36 %16 = tail call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %14, <4 x i32> <i32 -12, i32 -12, i32 -12, i32 -12>) ; <<4 x i16>> [#uses=1]
; Concatenate the two narrowed halves back into one <8 x i16> and store.
37 %17 = shufflevector <4 x i16> %15, <4 x i16> %16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; <<8 x i16>> [#uses=1]
38 %18 = bitcast i16* %o_ptr to i8* ; <i8*> [#uses=1]
39 tail call void @llvm.arm.neon.vst1.v8i16(i8* %18, <8 x i16> %17)
; t2: two independent <8 x i16> multiplies.  Loads two coefficient vectors,
; vld1-loads two consecutive 8-halfword chunks of the input, multiplies
; element-wise, and vst1-stores two consecutive output chunks.
; NOTE(review): the function tail (ret / closing brace and the CHECK lines)
; is outside this view.
43 define arm_apcscc void @t2(i16* %i_ptr, i16* %o_ptr, %struct.int16x8_t* nocapture %vT0ptr, %struct.int16x8_t* nocapture %vT1ptr) nounwind {
; Coefficient vectors.
54 %0 = getelementptr inbounds %struct.int16x8_t* %vT0ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
55 %1 = load <8 x i16>* %0, align 16 ; <<8 x i16>> [#uses=1]
56 %2 = getelementptr inbounds %struct.int16x8_t* %vT1ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
57 %3 = load <8 x i16>* %2, align 16 ; <<8 x i16>> [#uses=1]
; First input chunk at %i_ptr, second at %i_ptr + 8 halfwords.
58 %4 = bitcast i16* %i_ptr to i8* ; <i8*> [#uses=1]
59 %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4) ; <<8 x i16>> [#uses=1]
60 %6 = getelementptr inbounds i16* %i_ptr, i32 8 ; <i16*> [#uses=1]
61 %7 = bitcast i16* %6 to i8* ; <i8*> [#uses=1]
62 %8 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %7) ; <<8 x i16>> [#uses=1]
; Element-wise products.
63 %9 = mul <8 x i16> %1, %5 ; <<8 x i16>> [#uses=1]
64 %10 = mul <8 x i16> %3, %8 ; <<8 x i16>> [#uses=1]
; Store the two results at %o_ptr and %o_ptr + 8 halfwords.
65 %11 = bitcast i16* %o_ptr to i8* ; <i8*> [#uses=1]
66 tail call void @llvm.arm.neon.vst1.v8i16(i8* %11, <8 x i16> %9)
67 %12 = getelementptr inbounds i16* %o_ptr, i32 8 ; <i16*> [#uses=1]
68 %13 = bitcast i16* %12 to i8* ; <i8*> [#uses=1]
69 tail call void @llvm.arm.neon.vst1.v8i16(i8* %13, <8 x i16> %10)
; t3: de-interleaving load / re-interleaving store.  vld3 pulls three <8 x i8>
; vectors from %A; the three results are combined pairwise (sub, add, mul)
; and written back with vst3 to %B.  The all-lane consumption of the vld3
; result exercises the REG_SEQUENCE formation described in the file header.
; NOTE(review): ret / closing brace and CHECK lines are outside this view.
73 define <8 x i8> @t3(i8* %A, i8* %B) nounwind {
79 %tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A) ; <%struct.__neon_int8x8x3_t> [#uses=2]
; Extract the three de-interleaved vectors (note: fields 0, 2, 1 in that
; order — the shuffled order is deliberate to stress register assignment).
80 %tmp2 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 0 ; <<8 x i8>> [#uses=1]
81 %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 2 ; <<8 x i8>> [#uses=1]
82 %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 1 ; <<8 x i8>> [#uses=1]
83 %tmp5 = sub <8 x i8> %tmp3, %tmp4
84 %tmp6 = add <8 x i8> %tmp2, %tmp3 ; <<8 x i8>> [#uses=1]
85 %tmp7 = mul <8 x i8> %tmp4, %tmp2
; Interleave the three derived vectors back out to %B.
86 tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7)
; t4: two vld2 results live across a branch.  Both vld2 calls happen in the
; entry block; each successor consumes a different subset of the results, so
; the register allocator must keep the paired values in adjacent d-registers
; along both paths.
; NOTE(review): the `return1:` / `return2:` labels and several CHECK/ret
; lines fall in gaps of this view — the extractvalue groups below belong to
; those (unseen) successor blocks of the `br` at the end of entry.
90 define arm_apcscc void @t4(i32* %in, i32* %out) nounwind {
; Two interleaved-pair loads: at %in and at %in + 8 i32s.
98 %tmp1 = bitcast i32* %in to i8* ; <i8*> [#uses=1]
99 %tmp2 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
100 %tmp3 = getelementptr inbounds i32* %in, i32 8 ; <i32*> [#uses=1]
101 %tmp4 = bitcast i32* %tmp3 to i8* ; <i8*> [#uses=1]
102 %tmp5 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp4) ; <%struct.__neon_int32x4x2_t> [#uses=2]
103 %tmp8 = bitcast i32* %out to i8* ; <i8*> [#uses=1]
; Unpredictable branch keeps both successors (and both vld2 results) live.
104 br i1 undef, label %return1, label %return2
; First successor: adds corresponding fields of the two vld2 results and
; stores the pair with vst2.
109 ; CHECK-NEXT: vadd.i32
110 ; CHECK-NEXT: vadd.i32
111 ; CHECK-NEXT: vst2.32
112 %tmp52 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0 ; <<4 x i32>> [#uses=1]
113 %tmp57 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 1 ; <<4 x i32>> [#uses=1]
114 %tmp = extractvalue %struct.__neon_int32x4x2_t %tmp5, 0 ; <<4 x i32>> [#uses=1]
115 %tmp39 = extractvalue %struct.__neon_int32x4x2_t %tmp5, 1 ; <<4 x i32>> [#uses=1]
116 %tmp6 = add <4 x i32> %tmp52, %tmp ; <<4 x i32>> [#uses=1]
117 %tmp7 = add <4 x i32> %tmp57, %tmp39 ; <<4 x i32>> [#uses=1]
118 tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp6, <4 x i32> %tmp7)
; Second successor: mixes one field from each vld2 result; the CHECK pins
; the vst2 to a specific adjacent d-register quadruple.
126 ; CHECK: vst2.32 {d0, d1, d2, d3}
127 %tmp100 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0 ; <<4 x i32>> [#uses=1]
128 %tmp101 = extractvalue %struct.__neon_int32x4x2_t %tmp5, 1 ; <<4 x i32>> [#uses=1]
129 %tmp102 = add <4 x i32> %tmp100, %tmp101 ; <<4 x i32>> [#uses=1]
130 tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp102, <4 x i32> %tmp101)
131 call void @llvm.trap()
; t5: lane-wise vld2.  Loads lane 1 of two q-registers from %A, seeding both
; with the same vector %tmp1; the CHECK pins the even/odd d-register pair
; (d0[1], d2[1]) that REG_SEQUENCE formation must produce.  Result is the
; sum of the two updated vectors.
; NOTE(review): ret / closing brace is outside this view.
140 ; CHECK: vld2.16 {d0[1], d2[1]}, [r0]
143 %tmp0 = bitcast i16* %A to i8* ; <i8*> [#uses=1]
144 %tmp1 = load <8 x i16>* %B ; <<8 x i16>> [#uses=2]
; vld2lane: replaces lane 1 of each of the two passed-in vectors with data
; loaded from %tmp0, returning both updated vectors.
145 %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1) ; <%struct.__neon_int16x8x2_t> [#uses=2]
146 %tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 0 ; <<8 x i16>> [#uses=1]
147 %tmp4 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 1 ; <<8 x i16>> [#uses=1]
148 %tmp5 = add <8 x i16> %tmp3, %tmp4 ; <<8 x i16>> [#uses=1]
152 declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*) nounwind readonly
154 declare <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16>) nounwind readnone
156 declare <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
158 declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>) nounwind
160 declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>) nounwind
162 declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*) nounwind readonly
164 declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8*) nounwind readonly
166 declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind readonly
168 declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>) nounwind
170 declare void @llvm.trap() nounwind