; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s
2 ; arm64 has separate copy as aarch64-neon-vector-list-spill.ll
4 ; FIXME: We should not generate ld/st for such register spill/fill, because the
5 ; test case seems very simple and the register pressure is not high. If the
6 ; spill/fill algorithm is optimized, this test case may not be triggered. And
7 ; then we can delete it.
; Spill/fill of a D-register pair (2 x 64-bit) live across a call: the ld2
; result must survive the call to @foo, forcing a register-list spill (st1)
; and fill (ld1) around the call.
define i32 @spill.DPairReg(i8* %arg1, i32 %arg2) {
; CHECK-LABEL: spill.DPairReg:
; CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
entry:
  %vld = tail call { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2.v2i32(i8* %arg1, i32 4)
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:
  ; The call clobbers all caller-saved vector registers, so %vld must be
  ; spilled on this path.
  tail call void @foo()
  br label %if.end

if.end:
  %vld.extract = extractvalue { <2 x i32>, <2 x i32> } %vld, 0
  %res = extractelement <2 x i32> %vld.extract, i32 1
  ret i32 %res
}
; Spill/fill of a D-register triple (3 x 64-bit) live across a call.
define i16 @spill.DTripleReg(i8* %arg1, i32 %arg2) {
; CHECK-LABEL: spill.DTripleReg:
; CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
entry:
  %vld = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8* %arg1, i32 4)
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:
  ; Force the vld3 result across a call boundary.
  tail call void @foo()
  br label %if.end

if.end:
  %vld.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld, 0
  %res = extractelement <4 x i16> %vld.extract, i32 1
  ret i16 %res
}
; Spill/fill of a D-register quad (4 x 64-bit) live across a call.
define i16 @spill.DQuadReg(i8* %arg1, i32 %arg2) {
; CHECK-LABEL: spill.DQuadReg:
; CHECK: ld4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
entry:
  %vld = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4.v4i16(i8* %arg1, i32 4)
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:
  ; Force the vld4 result across a call boundary.
  tail call void @foo()
  br label %if.end

if.end:
  %vld.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld, 0
  %res = extractelement <4 x i16> %vld.extract, i32 0
  ret i16 %res
}
; Spill/fill of a Q-register pair (2 x 128-bit) live across a call.
; NOTE: the first CHECK previously said "ld3", which can never match the code
; generated for a vld2 intrinsic — fixed to "ld2".
define i32 @spill.QPairReg(i8* %arg1, i32 %arg2) {
; CHECK-LABEL: spill.QPairReg:
; CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
entry:
  %vld = tail call { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32(i8* %arg1, i32 4)
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:
  ; Force the vld2 result across a call boundary.
  tail call void @foo()
  br label %if.end

if.end:
  %vld.extract = extractvalue { <4 x i32>, <4 x i32> } %vld, 0
  %res = extractelement <4 x i32> %vld.extract, i32 1
  ret i32 %res
}
; Spill/fill of a Q-register triple (3 x 128-bit) live across a call.
define float @spill.QTripleReg(i8* %arg1, i32 %arg2) {
; CHECK-LABEL: spill.QTripleReg:
; CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
entry:
  %vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3.v4f32(i8* %arg1, i32 4)
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:
  ; Force the vld3 result across a call boundary.
  tail call void @foo()
  br label %if.end

if.end:
  %vld3.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 0
  %res = extractelement <4 x float> %vld3.extract, i32 1
  ret float %res
}
; Spill/fill of a Q-register quad (4 x 128-bit) live across a call.
define i8 @spill.QQuadReg(i8* %arg1, i32 %arg2) {
; CHECK-LABEL: spill.QQuadReg:
; CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
; CHECK: st1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
; CHECK: ld1 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
entry:
  %vld = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8* %arg1, i32 4)
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:
  ; Force the vld4 result across a call boundary.
  tail call void @foo()
  br label %if.end

if.end:
  %vld.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld, 0
  %res = extractelement <16 x i8> %vld.extract, i32 1
  ret i8 %res
}
; Declarations of the NEON structured-load intrinsics used above, plus the
; opaque external callee @foo whose call forces the spills. The @foo
; declaration was missing, making the module ill-formed.
declare { <2 x i32>, <2 x i32> } @llvm.arm.neon.vld2.v2i32(i8*, i32)
declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld3.v4i16(i8*, i32)
declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.arm.neon.vld4.v4i16(i8*, i32)
declare { <4 x i32>, <4 x i32> } @llvm.arm.neon.vld2.v4i32(i8*, i32)
declare { <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vld3.v4f32(i8*, i32)
declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld4.v16i8(i8*, i32)

declare void @foo()
137 ; FIXME: We should not generate ld/st for such register spill/fill, because the
138 ; test case seems very simple and the register pressure is not high. If the
139 ; spill/fill algorithm is optimized, this test case may not be triggered. And
140 ; then we can delete it.
141 ; check the spill for Register Class QPair_with_qsub_0_in_FPR128Lo
; Check spilling for register class QPair_with_qsub_0_in_FPR128Lo: the
; zeroinitializer pair fed to vst2lane constrains the registers, and the call
; to @foo forces the spill. Body was truncated (missing ret and '}') — restored.
define <8 x i16> @test_2xFPR128Lo(i64 %got, i8* %ptr, <1 x i64> %a) {
  tail call void @llvm.arm.neon.vst2lane.v1i64(i8* %ptr, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i32 0, i32 8)
  tail call void @foo()
  %sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
  %1 = bitcast <2 x i64> %sv to <8 x i16>
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  %3 = mul <8 x i16> %2, %2
  ret <8 x i16> %3
}
152 ; check the spill for Register Class QTriple_with_qsub_0_in_FPR128Lo
; Check spilling for register class QTriple_with_qsub_0_in_FPR128Lo.
; Body was truncated (missing ret and '}') — restored.
define <8 x i16> @test_3xFPR128Lo(i64 %got, i8* %ptr, <1 x i64> %a) {
  tail call void @llvm.arm.neon.vst3lane.v1i64(i8* %ptr, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i32 0, i32 8)
  tail call void @foo()
  %sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
  %1 = bitcast <2 x i64> %sv to <8 x i16>
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  %3 = mul <8 x i16> %2, %2
  ret <8 x i16> %3
}
163 ; check the spill for Register Class QQuad_with_qsub_0_in_FPR128Lo
; Check spilling for register class QQuad_with_qsub_0_in_FPR128Lo.
; Body was truncated (missing ret and '}') — restored.
define <8 x i16> @test_4xFPR128Lo(i64 %got, i8* %ptr, <1 x i64> %a) {
  tail call void @llvm.arm.neon.vst4lane.v1i64(i8* %ptr, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i32 0, i32 8)
  tail call void @foo()
  %sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
  %1 = bitcast <2 x i64> %sv to <8 x i16>
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  %3 = mul <8 x i16> %2, %2
  ret <8 x i16> %3
}
; Declarations of the NEON lane-store intrinsics used by the FPR128Lo tests
; above (store N <1 x i64> values to lane 0 with 8-byte alignment).
declare void @llvm.arm.neon.vst2lane.v1i64(i8*, <1 x i64>, <1 x i64>, i32, i32)
declare void @llvm.arm.neon.vst3lane.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, i32, i32)
declare void @llvm.arm.neon.vst4lane.v1i64(i8*, <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i32, i32)