; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; arm64 has a copy of this test in its own directory.

declare float @llvm.aarch64.neon.vminnmv(<4 x float>)
declare float @llvm.aarch64.neon.vmaxnmv(<4 x float>)
declare float @llvm.aarch64.neon.vminv(<4 x float>)
declare float @llvm.aarch64.neon.vmaxv(<4 x float>)

declare <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32>)
declare <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16>)
declare <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8>)
declare <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16>)
declare <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8>)

declare <1 x i32> @llvm.aarch64.neon.uminv.v1i32.v4i32(<4 x i32>)
declare <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v8i16(<8 x i16>)
declare <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v16i8(<16 x i8>)
declare <1 x i32> @llvm.aarch64.neon.sminv.v1i32.v4i32(<4 x i32>)
declare <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v8i16(<8 x i16>)
declare <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v16i8(<16 x i8>)
declare <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v4i16(<4 x i16>)
declare <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v8i8(<8 x i8>)
declare <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v4i16(<4 x i16>)
declare <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v8i8(<8 x i8>)

declare <1 x i32> @llvm.aarch64.neon.umaxv.v1i32.v4i32(<4 x i32>)
declare <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v8i16(<8 x i16>)
declare <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v16i8(<16 x i8>)
declare <1 x i32> @llvm.aarch64.neon.smaxv.v1i32.v4i32(<4 x i32>)
declare <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v8i16(<8 x i16>)
declare <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8>)
declare <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v4i16(<4 x i16>)
declare <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v8i8(<8 x i8>)
declare <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v4i16(<4 x i16>)
declare <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v8i8(<8 x i8>)

declare <1 x i64> @llvm.aarch64.neon.uaddlv.v1i64.v4i32(<4 x i32>)
declare <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v8i16(<8 x i16>)
declare <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v16i8(<16 x i8>)
declare <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32>)
declare <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v8i16(<8 x i16>)
declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v16i8(<16 x i8>)
declare <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v4i16(<4 x i16>)
declare <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v8i8(<8 x i8>)
declare <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v4i16(<4 x i16>)
declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8>)
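
; Across-lanes widening add reductions: SADDLV/UADDLV sum all lanes into a
; scalar result that is wider than the source elements.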
define i16 @test_vaddlv_s8(<8 x i8> %a) {
; CHECK: test_vaddlv_s8:
; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
  %saddlv.i = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8> %a)
  %0 = extractelement <1 x i16> %saddlv.i, i32 0
  ret i16 %0
}

define i32 @test_vaddlv_s16(<4 x i16> %a) {
; CHECK: test_vaddlv_s16:
; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
  %saddlv.i = tail call <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v4i16(<4 x i16> %a)
  %0 = extractelement <1 x i32> %saddlv.i, i32 0
  ret i32 %0
}

define i16 @test_vaddlv_u8(<8 x i8> %a) {
; CHECK: test_vaddlv_u8:
; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
  %uaddlv.i = tail call <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v8i8(<8 x i8> %a)
  %0 = extractelement <1 x i16> %uaddlv.i, i32 0
  ret i16 %0
}

define i32 @test_vaddlv_u16(<4 x i16> %a) {
; CHECK: test_vaddlv_u16:
; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
  %uaddlv.i = tail call <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v4i16(<4 x i16> %a)
  %0 = extractelement <1 x i32> %uaddlv.i, i32 0
  ret i32 %0
}
define i16 @test_vaddlvq_s8(<16 x i8> %a) {
; CHECK: test_vaddlvq_s8:
; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
  %saddlv.i = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v16i8(<16 x i8> %a)
  %0 = extractelement <1 x i16> %saddlv.i, i32 0
  ret i16 %0
}

define i32 @test_vaddlvq_s16(<8 x i16> %a) {
; CHECK: test_vaddlvq_s16:
; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
  %saddlv.i = tail call <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v8i16(<8 x i16> %a)
  %0 = extractelement <1 x i32> %saddlv.i, i32 0
  ret i32 %0
}

define i64 @test_vaddlvq_s32(<4 x i32> %a) {
; CHECK: test_vaddlvq_s32:
; CHECK: saddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %saddlv.i = tail call <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32> %a)
  %0 = extractelement <1 x i64> %saddlv.i, i32 0
  ret i64 %0
}

define i16 @test_vaddlvq_u8(<16 x i8> %a) {
; CHECK: test_vaddlvq_u8:
; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
  %uaddlv.i = tail call <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v16i8(<16 x i8> %a)
  %0 = extractelement <1 x i16> %uaddlv.i, i32 0
  ret i16 %0
}

define i32 @test_vaddlvq_u16(<8 x i16> %a) {
; CHECK: test_vaddlvq_u16:
; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
  %uaddlv.i = tail call <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v8i16(<8 x i16> %a)
  %0 = extractelement <1 x i32> %uaddlv.i, i32 0
  ret i32 %0
}

define i64 @test_vaddlvq_u32(<4 x i32> %a) {
; CHECK: test_vaddlvq_u32:
; CHECK: uaddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %uaddlv.i = tail call <1 x i64> @llvm.aarch64.neon.uaddlv.v1i64.v4i32(<4 x i32> %a)
  %0 = extractelement <1 x i64> %uaddlv.i, i32 0
  ret i64 %0
}
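
; Across-lanes maximum reductions: SMAXV/UMAXV.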
define i8 @test_vmaxv_s8(<8 x i8> %a) {
; CHECK: test_vmaxv_s8:
; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
  %smaxv.i = tail call <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v8i8(<8 x i8> %a)
  %0 = extractelement <1 x i8> %smaxv.i, i32 0
  ret i8 %0
}

define i16 @test_vmaxv_s16(<4 x i16> %a) {
; CHECK: test_vmaxv_s16:
; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
  %smaxv.i = tail call <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v4i16(<4 x i16> %a)
  %0 = extractelement <1 x i16> %smaxv.i, i32 0
  ret i16 %0
}

define i8 @test_vmaxv_u8(<8 x i8> %a) {
; CHECK: test_vmaxv_u8:
; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
  %umaxv.i = tail call <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v8i8(<8 x i8> %a)
  %0 = extractelement <1 x i8> %umaxv.i, i32 0
  ret i8 %0
}

define i16 @test_vmaxv_u16(<4 x i16> %a) {
; CHECK: test_vmaxv_u16:
; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
  %umaxv.i = tail call <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v4i16(<4 x i16> %a)
  %0 = extractelement <1 x i16> %umaxv.i, i32 0
  ret i16 %0
}
define i8 @test_vmaxvq_s8(<16 x i8> %a) {
; CHECK: test_vmaxvq_s8:
; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
  %smaxv.i = tail call <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8> %a)
  %0 = extractelement <1 x i8> %smaxv.i, i32 0
  ret i8 %0
}

define i16 @test_vmaxvq_s16(<8 x i16> %a) {
; CHECK: test_vmaxvq_s16:
; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
  %smaxv.i = tail call <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v8i16(<8 x i16> %a)
  %0 = extractelement <1 x i16> %smaxv.i, i32 0
  ret i16 %0
}

define i32 @test_vmaxvq_s32(<4 x i32> %a) {
; CHECK: test_vmaxvq_s32:
; CHECK: smaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %smaxv.i = tail call <1 x i32> @llvm.aarch64.neon.smaxv.v1i32.v4i32(<4 x i32> %a)
  %0 = extractelement <1 x i32> %smaxv.i, i32 0
  ret i32 %0
}

define i8 @test_vmaxvq_u8(<16 x i8> %a) {
; CHECK: test_vmaxvq_u8:
; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
  %umaxv.i = tail call <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v16i8(<16 x i8> %a)
  %0 = extractelement <1 x i8> %umaxv.i, i32 0
  ret i8 %0
}

define i16 @test_vmaxvq_u16(<8 x i16> %a) {
; CHECK: test_vmaxvq_u16:
; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
  %umaxv.i = tail call <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v8i16(<8 x i16> %a)
  %0 = extractelement <1 x i16> %umaxv.i, i32 0
  ret i16 %0
}

define i32 @test_vmaxvq_u32(<4 x i32> %a) {
; CHECK: test_vmaxvq_u32:
; CHECK: umaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %umaxv.i = tail call <1 x i32> @llvm.aarch64.neon.umaxv.v1i32.v4i32(<4 x i32> %a)
  %0 = extractelement <1 x i32> %umaxv.i, i32 0
  ret i32 %0
}
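
; Across-lanes minimum reductions: SMINV/UMINV.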
define i8 @test_vminv_s8(<8 x i8> %a) {
; CHECK: test_vminv_s8:
; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
  %sminv.i = tail call <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v8i8(<8 x i8> %a)
  %0 = extractelement <1 x i8> %sminv.i, i32 0
  ret i8 %0
}

define i16 @test_vminv_s16(<4 x i16> %a) {
; CHECK: test_vminv_s16:
; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
  %sminv.i = tail call <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v4i16(<4 x i16> %a)
  %0 = extractelement <1 x i16> %sminv.i, i32 0
  ret i16 %0
}

define i8 @test_vminv_u8(<8 x i8> %a) {
; CHECK: test_vminv_u8:
; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
  %uminv.i = tail call <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v8i8(<8 x i8> %a)
  %0 = extractelement <1 x i8> %uminv.i, i32 0
  ret i8 %0
}

define i16 @test_vminv_u16(<4 x i16> %a) {
; CHECK: test_vminv_u16:
; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
  %uminv.i = tail call <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v4i16(<4 x i16> %a)
  %0 = extractelement <1 x i16> %uminv.i, i32 0
  ret i16 %0
}
define i8 @test_vminvq_s8(<16 x i8> %a) {
; CHECK: test_vminvq_s8:
; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
  %sminv.i = tail call <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v16i8(<16 x i8> %a)
  %0 = extractelement <1 x i8> %sminv.i, i32 0
  ret i8 %0
}

define i16 @test_vminvq_s16(<8 x i16> %a) {
; CHECK: test_vminvq_s16:
; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
  %sminv.i = tail call <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v8i16(<8 x i16> %a)
  %0 = extractelement <1 x i16> %sminv.i, i32 0
  ret i16 %0
}

define i32 @test_vminvq_s32(<4 x i32> %a) {
; CHECK: test_vminvq_s32:
; CHECK: sminv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %sminv.i = tail call <1 x i32> @llvm.aarch64.neon.sminv.v1i32.v4i32(<4 x i32> %a)
  %0 = extractelement <1 x i32> %sminv.i, i32 0
  ret i32 %0
}

define i8 @test_vminvq_u8(<16 x i8> %a) {
; CHECK: test_vminvq_u8:
; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
  %uminv.i = tail call <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v16i8(<16 x i8> %a)
  %0 = extractelement <1 x i8> %uminv.i, i32 0
  ret i8 %0
}

define i16 @test_vminvq_u16(<8 x i16> %a) {
; CHECK: test_vminvq_u16:
; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
  %uminv.i = tail call <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v8i16(<8 x i16> %a)
  %0 = extractelement <1 x i16> %uminv.i, i32 0
  ret i16 %0
}

define i32 @test_vminvq_u32(<4 x i32> %a) {
; CHECK: test_vminvq_u32:
; CHECK: uminv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %uminv.i = tail call <1 x i32> @llvm.aarch64.neon.uminv.v1i32.v4i32(<4 x i32> %a)
  %0 = extractelement <1 x i32> %uminv.i, i32 0
  ret i32 %0
}
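
; Across-lanes add reductions: both the signed and unsigned variants lower to ADDV.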
define i8 @test_vaddv_s8(<8 x i8> %a) {
; CHECK: test_vaddv_s8:
; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
  %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8> %a)
  %0 = extractelement <1 x i8> %vaddv.i, i32 0
  ret i8 %0
}

define i16 @test_vaddv_s16(<4 x i16> %a) {
; CHECK: test_vaddv_s16:
; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
  %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16> %a)
  %0 = extractelement <1 x i16> %vaddv.i, i32 0
  ret i16 %0
}

define i8 @test_vaddv_u8(<8 x i8> %a) {
; CHECK: test_vaddv_u8:
; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
entry:
  %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8> %a)
  %0 = extractelement <1 x i8> %vaddv.i, i32 0
  ret i8 %0
}

define i16 @test_vaddv_u16(<4 x i16> %a) {
; CHECK: test_vaddv_u16:
; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
entry:
  %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16> %a)
  %0 = extractelement <1 x i16> %vaddv.i, i32 0
  ret i16 %0
}
define i8 @test_vaddvq_s8(<16 x i8> %a) {
; CHECK: test_vaddvq_s8:
; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
  %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8> %a)
  %0 = extractelement <1 x i8> %vaddv.i, i32 0
  ret i8 %0
}

define i16 @test_vaddvq_s16(<8 x i16> %a) {
; CHECK: test_vaddvq_s16:
; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
  %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16> %a)
  %0 = extractelement <1 x i16> %vaddv.i, i32 0
  ret i16 %0
}

define i32 @test_vaddvq_s32(<4 x i32> %a) {
; CHECK: test_vaddvq_s32:
; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %vaddv.i = tail call <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32> %a)
  %0 = extractelement <1 x i32> %vaddv.i, i32 0
  ret i32 %0
}

define i8 @test_vaddvq_u8(<16 x i8> %a) {
; CHECK: test_vaddvq_u8:
; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
entry:
  %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8> %a)
  %0 = extractelement <1 x i8> %vaddv.i, i32 0
  ret i8 %0
}

define i16 @test_vaddvq_u16(<8 x i16> %a) {
; CHECK: test_vaddvq_u16:
; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
entry:
  %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16> %a)
  %0 = extractelement <1 x i16> %vaddv.i, i32 0
  ret i16 %0
}

define i32 @test_vaddvq_u32(<4 x i32> %a) {
; CHECK: test_vaddvq_u32:
; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %vaddv.i = tail call <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32> %a)
  %0 = extractelement <1 x i32> %vaddv.i, i32 0
  ret i32 %0
}
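
; Floating-point across-lanes reductions: FMAXV/FMINV and the IEEE
; maxNum/minNum variants FMAXNMV/FMINNMV.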
define float @test_vmaxvq_f32(<4 x float> %a) {
; CHECK: test_vmaxvq_f32:
; CHECK: fmaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %0 = call float @llvm.aarch64.neon.vmaxv(<4 x float> %a)
  ret float %0
}

define float @test_vminvq_f32(<4 x float> %a) {
; CHECK: test_vminvq_f32:
; CHECK: fminv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %0 = call float @llvm.aarch64.neon.vminv(<4 x float> %a)
  ret float %0
}

define float @test_vmaxnmvq_f32(<4 x float> %a) {
; CHECK: test_vmaxnmvq_f32:
; CHECK: fmaxnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %0 = call float @llvm.aarch64.neon.vmaxnmv(<4 x float> %a)
  ret float %0
}

define float @test_vminnmvq_f32(<4 x float> %a) {
; CHECK: test_vminnmvq_f32:
; CHECK: fminnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
entry:
  %0 = call float @llvm.aarch64.neon.vminnmv(<4 x float> %a)
  ret float %0
}