1 ; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
; Scalar signed shift right by immediate: expect SSHR on a d-register, #63.
define i64 @test_vshrd_n_s64(i64 %a) {
; CHECK-LABEL: test_vshrd_n_s64
; CHECK: sshr {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vsshr = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsshr1 = call <1 x i64> @llvm.aarch64.neon.vshrds.n(<1 x i64> %vsshr, i32 63)
  %0 = extractelement <1 x i64> %vsshr1, i32 0
13 declare <1 x i64> @llvm.aarch64.neon.vshrds.n(<1 x i64>, i32)
; Scalar unsigned shift right by immediate: expect USHR on a d-register, #63.
define i64 @test_vshrd_n_u64(i64 %a) {
; CHECK-LABEL: test_vshrd_n_u64
; CHECK: ushr {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vushr = insertelement <1 x i64> undef, i64 %a, i32 0
  %vushr1 = call <1 x i64> @llvm.aarch64.neon.vshrdu.n(<1 x i64> %vushr, i32 63)
  %0 = extractelement <1 x i64> %vushr1, i32 0
25 declare <1 x i64> @llvm.aarch64.neon.vshrdu.n(<1 x i64>, i32)
; Scalar signed rounding shift right by immediate: expect SRSHR, #63.
define i64 @test_vrshrd_n_s64(i64 %a) {
; CHECK-LABEL: test_vrshrd_n_s64
; CHECK: srshr {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vsrshr = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsrshr1 = call <1 x i64> @llvm.aarch64.neon.vsrshr.v1i64(<1 x i64> %vsrshr, i32 63)
  %0 = extractelement <1 x i64> %vsrshr1, i32 0
37 declare <1 x i64> @llvm.aarch64.neon.vsrshr.v1i64(<1 x i64>, i32)
; Scalar unsigned rounding shift right by immediate: expect URSHR, #63.
define i64 @test_vrshrd_n_u64(i64 %a) {
; CHECK-LABEL: test_vrshrd_n_u64
; CHECK: urshr {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vurshr = insertelement <1 x i64> undef, i64 %a, i32 0
  %vurshr1 = call <1 x i64> @llvm.aarch64.neon.vurshr.v1i64(<1 x i64> %vurshr, i32 63)
  %0 = extractelement <1 x i64> %vurshr1, i32 0
49 declare <1 x i64> @llvm.aarch64.neon.vurshr.v1i64(<1 x i64>, i32)
; Scalar signed shift-right-and-accumulate: expect SSRA, #63 (two operands).
define i64 @test_vsrad_n_s64(i64 %a, i64 %b) {
; CHECK-LABEL: test_vsrad_n_s64
; CHECK: ssra {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vssra = insertelement <1 x i64> undef, i64 %a, i32 0
  %vssra1 = insertelement <1 x i64> undef, i64 %b, i32 0
  %vssra2 = call <1 x i64> @llvm.aarch64.neon.vsrads.n(<1 x i64> %vssra, <1 x i64> %vssra1, i32 63)
  %0 = extractelement <1 x i64> %vssra2, i32 0
62 declare <1 x i64> @llvm.aarch64.neon.vsrads.n(<1 x i64>, <1 x i64>, i32)
; Scalar unsigned shift-right-and-accumulate: expect USRA, #63 (two operands).
define i64 @test_vsrad_n_u64(i64 %a, i64 %b) {
; CHECK-LABEL: test_vsrad_n_u64
; CHECK: usra {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vusra = insertelement <1 x i64> undef, i64 %a, i32 0
  %vusra1 = insertelement <1 x i64> undef, i64 %b, i32 0
  %vusra2 = call <1 x i64> @llvm.aarch64.neon.vsradu.n(<1 x i64> %vusra, <1 x i64> %vusra1, i32 63)
  %0 = extractelement <1 x i64> %vusra2, i32 0
75 declare <1 x i64> @llvm.aarch64.neon.vsradu.n(<1 x i64>, <1 x i64>, i32)
; Scalar signed rounding shift-right-and-accumulate: expect SRSRA, #63.
define i64 @test_vrsrad_n_s64(i64 %a, i64 %b) {
; CHECK-LABEL: test_vrsrad_n_s64
; CHECK: srsra {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vsrsra = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsrsra1 = insertelement <1 x i64> undef, i64 %b, i32 0
  %vsrsra2 = call <1 x i64> @llvm.aarch64.neon.vrsrads.n(<1 x i64> %vsrsra, <1 x i64> %vsrsra1, i32 63)
  %0 = extractelement <1 x i64> %vsrsra2, i32 0
88 declare <1 x i64> @llvm.aarch64.neon.vrsrads.n(<1 x i64>, <1 x i64>, i32)
; Scalar unsigned rounding shift-right-and-accumulate: expect URSRA, #63.
define i64 @test_vrsrad_n_u64(i64 %a, i64 %b) {
; CHECK-LABEL: test_vrsrad_n_u64
; CHECK: ursra {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vursra = insertelement <1 x i64> undef, i64 %a, i32 0
  %vursra1 = insertelement <1 x i64> undef, i64 %b, i32 0
  %vursra2 = call <1 x i64> @llvm.aarch64.neon.vrsradu.n(<1 x i64> %vursra, <1 x i64> %vursra1, i32 63)
  %0 = extractelement <1 x i64> %vursra2, i32 0
101 declare <1 x i64> @llvm.aarch64.neon.vrsradu.n(<1 x i64>, <1 x i64>, i32)
; Scalar shift left by immediate (signed input): expect SHL, #63.
define i64 @test_vshld_n_s64(i64 %a) {
; CHECK-LABEL: test_vshld_n_s64
; CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vshl = insertelement <1 x i64> undef, i64 %a, i32 0
  %vshl1 = call <1 x i64> @llvm.aarch64.neon.vshld.n(<1 x i64> %vshl, i32 63)
  %0 = extractelement <1 x i64> %vshl1, i32 0
113 declare <1 x i64> @llvm.aarch64.neon.vshld.n(<1 x i64>, i32)
; Scalar shift left by immediate (unsigned input): same SHL as the signed case.
define i64 @test_vshld_n_u64(i64 %a) {
; CHECK-LABEL: test_vshld_n_u64
; CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vshl = insertelement <1 x i64> undef, i64 %a, i32 0
  %vshl1 = call <1 x i64> @llvm.aarch64.neon.vshld.n(<1 x i64> %vshl, i32 63)
  %0 = extractelement <1 x i64> %vshl1, i32 0
; Scalar signed saturating shift left, byte: expect SQSHL on a b-register, #7.
define i8 @test_vqshlb_n_s8(i8 %a) {
; CHECK-LABEL: test_vqshlb_n_s8
; CHECK: sqshl {{b[0-9]+}}, {{b[0-9]+}}, #7
  %vsqshl = insertelement <1 x i8> undef, i8 %a, i32 0
  %vsqshl1 = call <1 x i8> @llvm.aarch64.neon.vqshls.n.v1i8(<1 x i8> %vsqshl, i32 7)
  %0 = extractelement <1 x i8> %vsqshl1, i32 0
135 declare <1 x i8> @llvm.aarch64.neon.vqshls.n.v1i8(<1 x i8>, i32)
; Scalar signed saturating shift left, halfword: expect SQSHL on an h-register, #15.
define i16 @test_vqshlh_n_s16(i16 %a) {
; CHECK-LABEL: test_vqshlh_n_s16
; CHECK: sqshl {{h[0-9]+}}, {{h[0-9]+}}, #15
  %vsqshl = insertelement <1 x i16> undef, i16 %a, i32 0
  %vsqshl1 = call <1 x i16> @llvm.aarch64.neon.vqshls.n.v1i16(<1 x i16> %vsqshl, i32 15)
  %0 = extractelement <1 x i16> %vsqshl1, i32 0
147 declare <1 x i16> @llvm.aarch64.neon.vqshls.n.v1i16(<1 x i16>, i32)
; Scalar signed saturating shift left, word: expect SQSHL on an s-register, #31.
define i32 @test_vqshls_n_s32(i32 %a) {
; CHECK-LABEL: test_vqshls_n_s32
; CHECK: sqshl {{s[0-9]+}}, {{s[0-9]+}}, #31
  %vsqshl = insertelement <1 x i32> undef, i32 %a, i32 0
  %vsqshl1 = call <1 x i32> @llvm.aarch64.neon.vqshls.n.v1i32(<1 x i32> %vsqshl, i32 31)
  %0 = extractelement <1 x i32> %vsqshl1, i32 0
159 declare <1 x i32> @llvm.aarch64.neon.vqshls.n.v1i32(<1 x i32>, i32)
; Scalar signed saturating shift left, doubleword: expect SQSHL on a d-register, #63.
define i64 @test_vqshld_n_s64(i64 %a) {
; CHECK-LABEL: test_vqshld_n_s64
; CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vsqshl = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsqshl1 = call <1 x i64> @llvm.aarch64.neon.vqshls.n.v1i64(<1 x i64> %vsqshl, i32 63)
  %0 = extractelement <1 x i64> %vsqshl1, i32 0
171 declare <1 x i64> @llvm.aarch64.neon.vqshls.n.v1i64(<1 x i64>, i32)
; Scalar unsigned saturating shift left, byte: expect UQSHL on a b-register, #7.
define i8 @test_vqshlb_n_u8(i8 %a) {
; CHECK-LABEL: test_vqshlb_n_u8
; CHECK: uqshl {{b[0-9]+}}, {{b[0-9]+}}, #7
  %vuqshl = insertelement <1 x i8> undef, i8 %a, i32 0
  %vuqshl1 = call <1 x i8> @llvm.aarch64.neon.vqshlu.n.v1i8(<1 x i8> %vuqshl, i32 7)
  %0 = extractelement <1 x i8> %vuqshl1, i32 0
183 declare <1 x i8> @llvm.aarch64.neon.vqshlu.n.v1i8(<1 x i8>, i32)
; Scalar unsigned saturating shift left, halfword: expect UQSHL on an h-register, #15.
define i16 @test_vqshlh_n_u16(i16 %a) {
; CHECK-LABEL: test_vqshlh_n_u16
; CHECK: uqshl {{h[0-9]+}}, {{h[0-9]+}}, #15
  %vuqshl = insertelement <1 x i16> undef, i16 %a, i32 0
  %vuqshl1 = call <1 x i16> @llvm.aarch64.neon.vqshlu.n.v1i16(<1 x i16> %vuqshl, i32 15)
  %0 = extractelement <1 x i16> %vuqshl1, i32 0
195 declare <1 x i16> @llvm.aarch64.neon.vqshlu.n.v1i16(<1 x i16>, i32)
; Scalar unsigned saturating shift left, word: expect UQSHL on an s-register, #31.
define i32 @test_vqshls_n_u32(i32 %a) {
; CHECK-LABEL: test_vqshls_n_u32
; CHECK: uqshl {{s[0-9]+}}, {{s[0-9]+}}, #31
  %vuqshl = insertelement <1 x i32> undef, i32 %a, i32 0
  %vuqshl1 = call <1 x i32> @llvm.aarch64.neon.vqshlu.n.v1i32(<1 x i32> %vuqshl, i32 31)
  %0 = extractelement <1 x i32> %vuqshl1, i32 0
207 declare <1 x i32> @llvm.aarch64.neon.vqshlu.n.v1i32(<1 x i32>, i32)
; Scalar unsigned saturating shift left, doubleword: expect UQSHL on a d-register, #63.
define i64 @test_vqshld_n_u64(i64 %a) {
; CHECK-LABEL: test_vqshld_n_u64
; CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vuqshl = insertelement <1 x i64> undef, i64 %a, i32 0
  %vuqshl1 = call <1 x i64> @llvm.aarch64.neon.vqshlu.n.v1i64(<1 x i64> %vuqshl, i32 63)
  %0 = extractelement <1 x i64> %vuqshl1, i32 0
219 declare <1 x i64> @llvm.aarch64.neon.vqshlu.n.v1i64(<1 x i64>, i32)
; Scalar signed saturating shift left unsigned, byte: expect SQSHLU, #7.
define i8 @test_vqshlub_n_s8(i8 %a) {
; CHECK-LABEL: test_vqshlub_n_s8
; CHECK: sqshlu {{b[0-9]+}}, {{b[0-9]+}}, #7
  %vsqshlu = insertelement <1 x i8> undef, i8 %a, i32 0
  %vsqshlu1 = call <1 x i8> @llvm.aarch64.neon.vsqshlu.v1i8(<1 x i8> %vsqshlu, i32 7)
  %0 = extractelement <1 x i8> %vsqshlu1, i32 0
231 declare <1 x i8> @llvm.aarch64.neon.vsqshlu.v1i8(<1 x i8>, i32)
; Scalar signed saturating shift left unsigned, halfword: expect SQSHLU, #15.
define i16 @test_vqshluh_n_s16(i16 %a) {
; CHECK-LABEL: test_vqshluh_n_s16
; CHECK: sqshlu {{h[0-9]+}}, {{h[0-9]+}}, #15
  %vsqshlu = insertelement <1 x i16> undef, i16 %a, i32 0
  %vsqshlu1 = call <1 x i16> @llvm.aarch64.neon.vsqshlu.v1i16(<1 x i16> %vsqshlu, i32 15)
  %0 = extractelement <1 x i16> %vsqshlu1, i32 0
243 declare <1 x i16> @llvm.aarch64.neon.vsqshlu.v1i16(<1 x i16>, i32)
; Scalar signed saturating shift left unsigned, word: expect SQSHLU, #31.
define i32 @test_vqshlus_n_s32(i32 %a) {
; CHECK-LABEL: test_vqshlus_n_s32
; CHECK: sqshlu {{s[0-9]+}}, {{s[0-9]+}}, #31
  %vsqshlu = insertelement <1 x i32> undef, i32 %a, i32 0
  %vsqshlu1 = call <1 x i32> @llvm.aarch64.neon.vsqshlu.v1i32(<1 x i32> %vsqshlu, i32 31)
  %0 = extractelement <1 x i32> %vsqshlu1, i32 0
255 declare <1 x i32> @llvm.aarch64.neon.vsqshlu.v1i32(<1 x i32>, i32)
; Scalar signed saturating shift left unsigned, doubleword: expect SQSHLU, #63.
define i64 @test_vqshlud_n_s64(i64 %a) {
; CHECK-LABEL: test_vqshlud_n_s64
; CHECK: sqshlu {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vsqshlu = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsqshlu1 = call <1 x i64> @llvm.aarch64.neon.vsqshlu.v1i64(<1 x i64> %vsqshlu, i32 63)
  %0 = extractelement <1 x i64> %vsqshlu1, i32 0
267 declare <1 x i64> @llvm.aarch64.neon.vsqshlu.v1i64(<1 x i64>, i32)
; Scalar shift right and insert: expect SRI, #63 (two operands).
define i64 @test_vsrid_n_s64(i64 %a, i64 %b) {
; CHECK-LABEL: test_vsrid_n_s64
; CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vsri = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsri1 = insertelement <1 x i64> undef, i64 %b, i32 0
  %vsri2 = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> %vsri, <1 x i64> %vsri1, i32 63)
  %0 = extractelement <1 x i64> %vsri2, i32 0
280 declare <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64>, <1 x i64>, i32)
; Scalar shift right and insert (unsigned input): same SRI as the signed case.
define i64 @test_vsrid_n_u64(i64 %a, i64 %b) {
; CHECK-LABEL: test_vsrid_n_u64
; CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vsri = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsri1 = insertelement <1 x i64> undef, i64 %b, i32 0
  %vsri2 = call <1 x i64> @llvm.aarch64.neon.vsri.v1i64(<1 x i64> %vsri, <1 x i64> %vsri1, i32 63)
  %0 = extractelement <1 x i64> %vsri2, i32 0
; Scalar shift left and insert: expect SLI, #63 (two operands).
define i64 @test_vslid_n_s64(i64 %a, i64 %b) {
; CHECK-LABEL: test_vslid_n_s64
; CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vsli = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsli1 = insertelement <1 x i64> undef, i64 %b, i32 0
  %vsli2 = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %vsli, <1 x i64> %vsli1, i32 63)
  %0 = extractelement <1 x i64> %vsli2, i32 0
304 declare <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64>, <1 x i64>, i32)
; Scalar shift left and insert (unsigned input): same SLI as the signed case.
define i64 @test_vslid_n_u64(i64 %a, i64 %b) {
; CHECK-LABEL: test_vslid_n_u64
; CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63
  %vsli = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsli1 = insertelement <1 x i64> undef, i64 %b, i32 0
  %vsli2 = call <1 x i64> @llvm.aarch64.neon.vsli.v1i64(<1 x i64> %vsli, <1 x i64> %vsli1, i32 63)
  %0 = extractelement <1 x i64> %vsli2, i32 0
; Scalar signed saturating shift right narrow, h->b: expect SQSHRN, #8.
define i8 @test_vqshrnh_n_s16(i16 %a) {
; CHECK-LABEL: test_vqshrnh_n_s16
; CHECK: sqshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
  %vsqshrn = insertelement <1 x i16> undef, i16 %a, i32 0
  %vsqshrn1 = call <1 x i8> @llvm.aarch64.neon.vsqshrn.v1i8(<1 x i16> %vsqshrn, i32 8)
  %0 = extractelement <1 x i8> %vsqshrn1, i32 0
327 declare <1 x i8> @llvm.aarch64.neon.vsqshrn.v1i8(<1 x i16>, i32)
; Scalar signed saturating shift right narrow, s->h: expect SQSHRN, #16.
define i16 @test_vqshrns_n_s32(i32 %a) {
; CHECK-LABEL: test_vqshrns_n_s32
; CHECK: sqshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
  %vsqshrn = insertelement <1 x i32> undef, i32 %a, i32 0
  %vsqshrn1 = call <1 x i16> @llvm.aarch64.neon.vsqshrn.v1i16(<1 x i32> %vsqshrn, i32 16)
  %0 = extractelement <1 x i16> %vsqshrn1, i32 0
339 declare <1 x i16> @llvm.aarch64.neon.vsqshrn.v1i16(<1 x i32>, i32)
; Scalar signed saturating shift right narrow, d->s: expect SQSHRN, #32.
define i32 @test_vqshrnd_n_s64(i64 %a) {
; CHECK-LABEL: test_vqshrnd_n_s64
; CHECK: sqshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
  %vsqshrn = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsqshrn1 = call <1 x i32> @llvm.aarch64.neon.vsqshrn.v1i32(<1 x i64> %vsqshrn, i32 32)
  %0 = extractelement <1 x i32> %vsqshrn1, i32 0
351 declare <1 x i32> @llvm.aarch64.neon.vsqshrn.v1i32(<1 x i64>, i32)
; Scalar unsigned saturating shift right narrow, h->b: expect UQSHRN, #8.
define i8 @test_vqshrnh_n_u16(i16 %a) {
; CHECK-LABEL: test_vqshrnh_n_u16
; CHECK: uqshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
  %vuqshrn = insertelement <1 x i16> undef, i16 %a, i32 0
  %vuqshrn1 = call <1 x i8> @llvm.aarch64.neon.vuqshrn.v1i8(<1 x i16> %vuqshrn, i32 8)
  %0 = extractelement <1 x i8> %vuqshrn1, i32 0
363 declare <1 x i8> @llvm.aarch64.neon.vuqshrn.v1i8(<1 x i16>, i32)
; Scalar unsigned saturating shift right narrow, s->h: expect UQSHRN, #16.
define i16 @test_vqshrns_n_u32(i32 %a) {
; CHECK-LABEL: test_vqshrns_n_u32
; CHECK: uqshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
  %vuqshrn = insertelement <1 x i32> undef, i32 %a, i32 0
  %vuqshrn1 = call <1 x i16> @llvm.aarch64.neon.vuqshrn.v1i16(<1 x i32> %vuqshrn, i32 16)
  %0 = extractelement <1 x i16> %vuqshrn1, i32 0
375 declare <1 x i16> @llvm.aarch64.neon.vuqshrn.v1i16(<1 x i32>, i32)
; Scalar unsigned saturating shift right narrow, d->s: expect UQSHRN, #32.
define i32 @test_vqshrnd_n_u64(i64 %a) {
; CHECK-LABEL: test_vqshrnd_n_u64
; CHECK: uqshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
  %vuqshrn = insertelement <1 x i64> undef, i64 %a, i32 0
  %vuqshrn1 = call <1 x i32> @llvm.aarch64.neon.vuqshrn.v1i32(<1 x i64> %vuqshrn, i32 32)
  %0 = extractelement <1 x i32> %vuqshrn1, i32 0
387 declare <1 x i32> @llvm.aarch64.neon.vuqshrn.v1i32(<1 x i64>, i32)
; Scalar signed saturating rounded shift right narrow, h->b: expect SQRSHRN, #8.
define i8 @test_vqrshrnh_n_s16(i16 %a) {
; CHECK-LABEL: test_vqrshrnh_n_s16
; CHECK: sqrshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
  %vsqrshrn = insertelement <1 x i16> undef, i16 %a, i32 0
  %vsqrshrn1 = call <1 x i8> @llvm.aarch64.neon.vsqrshrn.v1i8(<1 x i16> %vsqrshrn, i32 8)
  %0 = extractelement <1 x i8> %vsqrshrn1, i32 0
399 declare <1 x i8> @llvm.aarch64.neon.vsqrshrn.v1i8(<1 x i16>, i32)
; Scalar signed saturating rounded shift right narrow, s->h: expect SQRSHRN, #16.
define i16 @test_vqrshrns_n_s32(i32 %a) {
; CHECK-LABEL: test_vqrshrns_n_s32
; CHECK: sqrshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
  %vsqrshrn = insertelement <1 x i32> undef, i32 %a, i32 0
  %vsqrshrn1 = call <1 x i16> @llvm.aarch64.neon.vsqrshrn.v1i16(<1 x i32> %vsqrshrn, i32 16)
  %0 = extractelement <1 x i16> %vsqrshrn1, i32 0
411 declare <1 x i16> @llvm.aarch64.neon.vsqrshrn.v1i16(<1 x i32>, i32)
; Scalar signed saturating rounded shift right narrow, d->s: expect SQRSHRN, #32.
define i32 @test_vqrshrnd_n_s64(i64 %a) {
; CHECK-LABEL: test_vqrshrnd_n_s64
; CHECK: sqrshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
  %vsqrshrn = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsqrshrn1 = call <1 x i32> @llvm.aarch64.neon.vsqrshrn.v1i32(<1 x i64> %vsqrshrn, i32 32)
  %0 = extractelement <1 x i32> %vsqrshrn1, i32 0
423 declare <1 x i32> @llvm.aarch64.neon.vsqrshrn.v1i32(<1 x i64>, i32)
; Scalar unsigned saturating rounded shift right narrow, h->b: expect UQRSHRN, #8.
define i8 @test_vqrshrnh_n_u16(i16 %a) {
; CHECK-LABEL: test_vqrshrnh_n_u16
; CHECK: uqrshrn {{b[0-9]+}}, {{h[0-9]+}}, #8
  %vuqrshrn = insertelement <1 x i16> undef, i16 %a, i32 0
  %vuqrshrn1 = call <1 x i8> @llvm.aarch64.neon.vuqrshrn.v1i8(<1 x i16> %vuqrshrn, i32 8)
  %0 = extractelement <1 x i8> %vuqrshrn1, i32 0
435 declare <1 x i8> @llvm.aarch64.neon.vuqrshrn.v1i8(<1 x i16>, i32)
; Scalar unsigned saturating rounded shift right narrow, s->h: expect UQRSHRN, #16.
define i16 @test_vqrshrns_n_u32(i32 %a) {
; CHECK-LABEL: test_vqrshrns_n_u32
; CHECK: uqrshrn {{h[0-9]+}}, {{s[0-9]+}}, #16
  %vuqrshrn = insertelement <1 x i32> undef, i32 %a, i32 0
  %vuqrshrn1 = call <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32> %vuqrshrn, i32 16)
  %0 = extractelement <1 x i16> %vuqrshrn1, i32 0
447 declare <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32>, i32)
; Scalar unsigned saturating rounded shift right narrow, d->s: expect UQRSHRN, #32.
define i32 @test_vqrshrnd_n_u64(i64 %a) {
; CHECK-LABEL: test_vqrshrnd_n_u64
; CHECK: uqrshrn {{s[0-9]+}}, {{d[0-9]+}}, #32
  %vuqrshrn = insertelement <1 x i64> undef, i64 %a, i32 0
  %vuqrshrn1 = call <1 x i32> @llvm.aarch64.neon.vuqrshrn.v1i32(<1 x i64> %vuqrshrn, i32 32)
  %0 = extractelement <1 x i32> %vuqrshrn1, i32 0
459 declare <1 x i32> @llvm.aarch64.neon.vuqrshrn.v1i32(<1 x i64>, i32)
; Scalar signed saturating shift right unsigned narrow, h->b: expect SQSHRUN, #8.
define i8 @test_vqshrunh_n_s16(i16 %a) {
; CHECK-LABEL: test_vqshrunh_n_s16
; CHECK: sqshrun {{b[0-9]+}}, {{h[0-9]+}}, #8
  %vsqshrun = insertelement <1 x i16> undef, i16 %a, i32 0
  %vsqshrun1 = call <1 x i8> @llvm.aarch64.neon.vsqshrun.v1i8(<1 x i16> %vsqshrun, i32 8)
  %0 = extractelement <1 x i8> %vsqshrun1, i32 0
471 declare <1 x i8> @llvm.aarch64.neon.vsqshrun.v1i8(<1 x i16>, i32)
; Scalar signed saturating shift right unsigned narrow, s->h: expect SQSHRUN, #16.
define i16 @test_vqshruns_n_s32(i32 %a) {
; CHECK-LABEL: test_vqshruns_n_s32
; CHECK: sqshrun {{h[0-9]+}}, {{s[0-9]+}}, #16
  %vsqshrun = insertelement <1 x i32> undef, i32 %a, i32 0
  %vsqshrun1 = call <1 x i16> @llvm.aarch64.neon.vsqshrun.v1i16(<1 x i32> %vsqshrun, i32 16)
  %0 = extractelement <1 x i16> %vsqshrun1, i32 0
483 declare <1 x i16> @llvm.aarch64.neon.vsqshrun.v1i16(<1 x i32>, i32)
; Scalar signed saturating shift right unsigned narrow, d->s: expect SQSHRUN, #32.
define i32 @test_vqshrund_n_s64(i64 %a) {
; CHECK-LABEL: test_vqshrund_n_s64
; CHECK: sqshrun {{s[0-9]+}}, {{d[0-9]+}}, #32
  %vsqshrun = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsqshrun1 = call <1 x i32> @llvm.aarch64.neon.vsqshrun.v1i32(<1 x i64> %vsqshrun, i32 32)
  %0 = extractelement <1 x i32> %vsqshrun1, i32 0
495 declare <1 x i32> @llvm.aarch64.neon.vsqshrun.v1i32(<1 x i64>, i32)
; Scalar signed saturating rounded shift right unsigned narrow, h->b: expect SQRSHRUN, #8.
define i8 @test_vqrshrunh_n_s16(i16 %a) {
; CHECK-LABEL: test_vqrshrunh_n_s16
; CHECK: sqrshrun {{b[0-9]+}}, {{h[0-9]+}}, #8
  %vsqrshrun = insertelement <1 x i16> undef, i16 %a, i32 0
  %vsqrshrun1 = call <1 x i8> @llvm.aarch64.neon.vsqrshrun.v1i8(<1 x i16> %vsqrshrun, i32 8)
  %0 = extractelement <1 x i8> %vsqrshrun1, i32 0
507 declare <1 x i8> @llvm.aarch64.neon.vsqrshrun.v1i8(<1 x i16>, i32)
; Scalar signed saturating rounded shift right unsigned narrow, s->h: expect SQRSHRUN, #16.
define i16 @test_vqrshruns_n_s32(i32 %a) {
; CHECK-LABEL: test_vqrshruns_n_s32
; CHECK: sqrshrun {{h[0-9]+}}, {{s[0-9]+}}, #16
  %vsqrshrun = insertelement <1 x i32> undef, i32 %a, i32 0
  %vsqrshrun1 = call <1 x i16> @llvm.aarch64.neon.vsqrshrun.v1i16(<1 x i32> %vsqrshrun, i32 16)
  %0 = extractelement <1 x i16> %vsqrshrun1, i32 0
519 declare <1 x i16> @llvm.aarch64.neon.vsqrshrun.v1i16(<1 x i32>, i32)
; Scalar signed saturating rounded shift right unsigned narrow, d->s: expect SQRSHRUN, #32.
define i32 @test_vqrshrund_n_s64(i64 %a) {
; CHECK-LABEL: test_vqrshrund_n_s64
; CHECK: sqrshrun {{s[0-9]+}}, {{d[0-9]+}}, #32
  %vsqrshrun = insertelement <1 x i64> undef, i64 %a, i32 0
  %vsqrshrun1 = call <1 x i32> @llvm.aarch64.neon.vsqrshrun.v1i32(<1 x i64> %vsqrshrun, i32 32)
  %0 = extractelement <1 x i32> %vsqrshrun1, i32 0
531 declare <1 x i32> @llvm.aarch64.neon.vsqrshrun.v1i32(<1 x i64>, i32)