; RUN: llc -show-mc-encoding -march=arm -mcpu=cortex-a8 -mattr=+neon < %s | FileCheck %s

; CHECK: vshls_8xi8
define <8 x i8> @vshls_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
; CHECK: vshl.u8 d16, d17, d16 @ encoding: [0xa1,0x04,0x40,0xf3]
  %tmp3 = shl <8 x i8> %tmp1, %tmp2
  ret <8 x i8> %tmp3
}

; CHECK: vshls_4xi16
define <4 x i16> @vshls_4xi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
; CHECK: vshl.u16 d16, d17, d16 @ encoding: [0xa1,0x04,0x50,0xf3]
  %tmp3 = shl <4 x i16> %tmp1, %tmp2
  ret <4 x i16> %tmp3
}

; CHECK: vshls_2xi32
define <2 x i32> @vshls_2xi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
; CHECK: vshl.u32 d16, d17, d16 @ encoding: [0xa1,0x04,0x60,0xf3]
  %tmp3 = shl <2 x i32> %tmp1, %tmp2
  ret <2 x i32> %tmp3
}

; CHECK: vshls_1xi64
define <1 x i64> @vshls_1xi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
  %tmp1 = load <1 x i64>* %A
  %tmp2 = load <1 x i64>* %B
; CHECK: vshl.u64 d16, d17, d16 @ encoding: [0xa1,0x04,0x70,0xf3]
  %tmp3 = shl <1 x i64> %tmp1, %tmp2
  ret <1 x i64> %tmp3
}

; CHECK: vshli_8xi8
define <8 x i8> @vshli_8xi8(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
; CHECK: vshl.i8 d16, d16, #7 @ encoding: [0x30,0x05,0xcf,0xf2]
  %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
  ret <8 x i8> %tmp2
}

; CHECK: vshli_4xi16
define <4 x i16> @vshli_4xi16(<4 x i16>* %A) nounwind {
  %tmp1 = load <4 x i16>* %A
; CHECK: vshl.i16 d16, d16, #15 @ encoding: [0x30,0x05,0xdf,0xf2]
  %tmp2 = shl <4 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15 >
  ret <4 x i16> %tmp2
}

; CHECK: vshli_2xi32
define <2 x i32> @vshli_2xi32(<2 x i32>* %A) nounwind {
  %tmp1 = load <2 x i32>* %A
; CHECK: vshl.i32 d16, d16, #31 @ encoding: [0x30,0x05,0xff,0xf2]
  %tmp2 = shl <2 x i32> %tmp1, < i32 31, i32 31 >
  ret <2 x i32> %tmp2
}

; CHECK: vshli_1xi64
define <1 x i64> @vshli_1xi64(<1 x i64>* %A) nounwind {
  %tmp1 = load <1 x i64>* %A
; CHECK: vshl.i64 d16, d16, #63 @ encoding: [0xb0,0x05,0xff,0xf2]
  %tmp2 = shl <1 x i64> %tmp1, < i64 63 >
  ret <1 x i64> %tmp2
}

; CHECK: vshls_16xi8
define <16 x i8> @vshls_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
  %tmp1 = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
; CHECK: vshl.u8 q8, q8, q9 @ encoding: [0xe0,0x04,0x42,0xf3]
  %tmp3 = shl <16 x i8> %tmp1, %tmp2
  ret <16 x i8> %tmp3
}

; CHECK: vshls_8xi16
define <8 x i16> @vshls_8xi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
  %tmp1 = load <8 x i16>* %A
  %tmp2 = load <8 x i16>* %B
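; Assumed check, not verified against llc output: the sibling q-register tests
; encode the size field as 0x42 (u8), 0x62 (u32), and 0x72 (u64), so the u16
; variant should be 0x52.
; CHECK: vshl.u16 q8, q8, q9 @ encoding: [0xe0,0x04,0x52,0xf3]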
  %tmp3 = shl <8 x i16> %tmp1, %tmp2
  ret <8 x i16> %tmp3
}

; CHECK: vshls_4xi32
define <4 x i32> @vshls_4xi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
  %tmp1 = load <4 x i32>* %A
  %tmp2 = load <4 x i32>* %B
; CHECK: vshl.u32 q8, q8, q9 @ encoding: [0xe0,0x04,0x62,0xf3]
  %tmp3 = shl <4 x i32> %tmp1, %tmp2
  ret <4 x i32> %tmp3
}

; CHECK: vshls_2xi64
define <2 x i64> @vshls_2xi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
  %tmp1 = load <2 x i64>* %A
  %tmp2 = load <2 x i64>* %B
; CHECK: vshl.u64 q8, q8, q9 @ encoding: [0xe0,0x04,0x72,0xf3]
  %tmp3 = shl <2 x i64> %tmp1, %tmp2
  ret <2 x i64> %tmp3
}

; CHECK: vshli_16xi8
define <16 x i8> @vshli_16xi8(<16 x i8>* %A) nounwind {
  %tmp1 = load <16 x i8>* %A
; CHECK: vshl.i8 q8, q8, #7 @ encoding: [0x70,0x05,0xcf,0xf2]
  %tmp2 = shl <16 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
  ret <16 x i8> %tmp2
}

; CHECK: vshli_8xi16
define <8 x i16> @vshli_8xi16(<8 x i16>* %A) nounwind {
  %tmp1 = load <8 x i16>* %A
; CHECK: vshl.i16 q8, q8, #15 @ encoding: [0x70,0x05,0xdf,0xf2]
  %tmp2 = shl <8 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
  ret <8 x i16> %tmp2
}

; CHECK: vshli_4xi32
define <4 x i32> @vshli_4xi32(<4 x i32>* %A) nounwind {
  %tmp1 = load <4 x i32>* %A
; CHECK: vshl.i32 q8, q8, #31 @ encoding: [0x70,0x05,0xff,0xf2]
  %tmp2 = shl <4 x i32> %tmp1, < i32 31, i32 31, i32 31, i32 31 >
  ret <4 x i32> %tmp2
}

; CHECK: vshli_2xi64
define <2 x i64> @vshli_2xi64(<2 x i64>* %A) nounwind {
  %tmp1 = load <2 x i64>* %A
; CHECK: vshl.i64 q8, q8, #63 @ encoding: [0xf0,0x05,0xff,0xf2]
  %tmp2 = shl <2 x i64> %tmp1, < i64 63, i64 63 >
  ret <2 x i64> %tmp2
}

; CHECK: vshru_8xi8
define <8 x i8> @vshru_8xi8(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
; CHECK: vshr.u8 d16, d16, #8 @ encoding: [0x30,0x00,0xc8,0xf3]
  %tmp2 = lshr <8 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
  ret <8 x i8> %tmp2
}

; CHECK: vshru_4xi16
define <4 x i16> @vshru_4xi16(<4 x i16>* %A) nounwind {
  %tmp1 = load <4 x i16>* %A
; CHECK: vshr.u16 d16, d16, #16 @ encoding: [0x30,0x00,0xd0,0xf3]
  %tmp2 = lshr <4 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16 >
  ret <4 x i16> %tmp2
}

; CHECK: vshru_2xi32
define <2 x i32> @vshru_2xi32(<2 x i32>* %A) nounwind {
  %tmp1 = load <2 x i32>* %A
; CHECK: vshr.u32 d16, d16, #32 @ encoding: [0x30,0x00,0xe0,0xf3]
  %tmp2 = lshr <2 x i32> %tmp1, < i32 32, i32 32 >
  ret <2 x i32> %tmp2
}

; CHECK: vshru_1xi64
define <1 x i64> @vshru_1xi64(<1 x i64>* %A) nounwind {
  %tmp1 = load <1 x i64>* %A
; CHECK: vshr.u64 d16, d16, #64 @ encoding: [0xb0,0x00,0xc0,0xf3]
  %tmp2 = lshr <1 x i64> %tmp1, < i64 64 >
  ret <1 x i64> %tmp2
}

; CHECK: vshru_16xi8
define <16 x i8> @vshru_16xi8(<16 x i8>* %A) nounwind {
  %tmp1 = load <16 x i8>* %A
; CHECK: vshr.u8 q8, q8, #8 @ encoding: [0x70,0x00,0xc8,0xf3]
  %tmp2 = lshr <16 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
  ret <16 x i8> %tmp2
}

; CHECK: vshru_8xi16
define <8 x i16> @vshru_8xi16(<8 x i16>* %A) nounwind {
  %tmp1 = load <8 x i16>* %A
; CHECK: vshr.u16 q8, q8, #16 @ encoding: [0x70,0x00,0xd0,0xf3]
  %tmp2 = lshr <8 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
  ret <8 x i16> %tmp2
}

; CHECK: vshru_4xi32
define <4 x i32> @vshru_4xi32(<4 x i32>* %A) nounwind {
  %tmp1 = load <4 x i32>* %A
; CHECK: vshr.u32 q8, q8, #32 @ encoding: [0x70,0x00,0xe0,0xf3]
  %tmp2 = lshr <4 x i32> %tmp1, < i32 32, i32 32, i32 32, i32 32 >
  ret <4 x i32> %tmp2
}

; CHECK: vshru_2xi64
define <2 x i64> @vshru_2xi64(<2 x i64>* %A) nounwind {
  %tmp1 = load <2 x i64>* %A
; CHECK: vshr.u64 q8, q8, #64 @ encoding: [0xf0,0x00,0xc0,0xf3]
  %tmp2 = lshr <2 x i64> %tmp1, < i64 64, i64 64 >
  ret <2 x i64> %tmp2
}

; CHECK: vshrs_8xi8
define <8 x i8> @vshrs_8xi8(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
; CHECK: vshr.s8 d16, d16, #8 @ encoding: [0x30,0x00,0xc8,0xf2]
  %tmp2 = ashr <8 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
  ret <8 x i8> %tmp2
}

; CHECK: vshrs_4xi16
define <4 x i16> @vshrs_4xi16(<4 x i16>* %A) nounwind {
  %tmp1 = load <4 x i16>* %A
; CHECK: vshr.s16 d16, d16, #16 @ encoding: [0x30,0x00,0xd0,0xf2]
  %tmp2 = ashr <4 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16 >
  ret <4 x i16> %tmp2
}

; CHECK: vshrs_2xi32
define <2 x i32> @vshrs_2xi32(<2 x i32>* %A) nounwind {
  %tmp1 = load <2 x i32>* %A
; CHECK: vshr.s32 d16, d16, #32 @ encoding: [0x30,0x00,0xe0,0xf2]
  %tmp2 = ashr <2 x i32> %tmp1, < i32 32, i32 32 >
  ret <2 x i32> %tmp2
}

; CHECK: vshrs_1xi64
define <1 x i64> @vshrs_1xi64(<1 x i64>* %A) nounwind {
  %tmp1 = load <1 x i64>* %A
; CHECK: vshr.s64 d16, d16, #64 @ encoding: [0xb0,0x00,0xc0,0xf2]
  %tmp2 = ashr <1 x i64> %tmp1, < i64 64 >
  ret <1 x i64> %tmp2
}

; CHECK: vshrs_16xi8
define <16 x i8> @vshrs_16xi8(<16 x i8>* %A) nounwind {
  %tmp1 = load <16 x i8>* %A
; CHECK: vshr.s8 q8, q8, #8 @ encoding: [0x70,0x00,0xc8,0xf2]
  %tmp2 = ashr <16 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
  ret <16 x i8> %tmp2
}

; CHECK: vshrs_8xi16
define <8 x i16> @vshrs_8xi16(<8 x i16>* %A) nounwind {
  %tmp1 = load <8 x i16>* %A
; CHECK: vshr.s16 q8, q8, #16 @ encoding: [0x70,0x00,0xd0,0xf2]
  %tmp2 = ashr <8 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
  ret <8 x i16> %tmp2
}

; CHECK: vshrs_4xi32
define <4 x i32> @vshrs_4xi32(<4 x i32>* %A) nounwind {
  %tmp1 = load <4 x i32>* %A
; CHECK: vshr.s32 q8, q8, #32 @ encoding: [0x70,0x00,0xe0,0xf2]
  %tmp2 = ashr <4 x i32> %tmp1, < i32 32, i32 32, i32 32, i32 32 >
  ret <4 x i32> %tmp2
}

; CHECK: vshrs_2xi64
define <2 x i64> @vshrs_2xi64(<2 x i64>* %A) nounwind {
  %tmp1 = load <2 x i64>* %A
; CHECK: vshr.s64 q8, q8, #64 @ encoding: [0xf0,0x00,0xc0,0xf2]
  %tmp2 = ashr <2 x i64> %tmp1, < i64 64, i64 64 >
  ret <2 x i64> %tmp2
}

declare <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vshiftls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

; CHECK: vshlls_8xi8
define <8 x i16> @vshlls_8xi8(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
; CHECK: vshll.s8 q8, d16, #7 @ encoding: [0x30,0x0a,0xcf,0xf2]
  %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
  ret <8 x i16> %tmp2
}

; CHECK: vshlls_4xi16
define <4 x i32> @vshlls_4xi16(<4 x i16>* %A) nounwind {
  %tmp1 = load <4 x i16>* %A
; CHECK: vshll.s16 q8, d16, #15 @ encoding: [0x30,0x0a,0xdf,0xf2]
  %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftls.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
  ret <4 x i32> %tmp2
}

; CHECK: vshlls_2xi32
define <2 x i64> @vshlls_2xi32(<2 x i32>* %A) nounwind {
  %tmp1 = load <2 x i32>* %A
; CHECK: vshll.s32 q8, d16, #31 @ encoding: [0x30,0x0a,0xff,0xf2]
  %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
  ret <2 x i64> %tmp2
}

declare <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

; CHECK: vshllu_8xi8
define <8 x i16> @vshllu_8xi8(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
; CHECK: vshll.u8 q8, d16, #7 @ encoding: [0x30,0x0a,0xcf,0xf3]
  %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
  ret <8 x i16> %tmp2
}

; CHECK: vshllu_4xi16
define <4 x i32> @vshllu_4xi16(<4 x i16>* %A) nounwind {
  %tmp1 = load <4 x i16>* %A
; CHECK: vshll.u16 q8, d16, #15 @ encoding: [0x30,0x0a,0xdf,0xf3]
  %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
  ret <4 x i32> %tmp2
}

; CHECK: vshllu_2xi32
define <2 x i64> @vshllu_2xi32(<2 x i32>* %A) nounwind {
  %tmp1 = load <2 x i32>* %A
; CHECK: vshll.u32 q8, d16, #31 @ encoding: [0x30,0x0a,0xff,0xf3]
  %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
  ret <2 x i64> %tmp2
}

; The following tests use the maximum shift count, so the signedness is
; irrelevant. Test both signed and unsigned versions.

; CHECK: vshlli_8xi8
define <8 x i16> @vshlli_8xi8(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
; CHECK: vshll.i8 q8, d16, #8 @ encoding: [0x20,0x03,0xf2,0xf3]
  %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >)
  ret <8 x i16> %tmp2
}

; CHECK: vshlli_4xi16
define <4 x i32> @vshlli_4xi16(<4 x i16>* %A) nounwind {
  %tmp1 = load <4 x i16>* %A
; CHECK: vshll.i16 q8, d16, #16 @ encoding: [0x20,0x03,0xf6,0xf3]
  %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 16, i16 16, i16 16, i16 16 >)
  ret <4 x i32> %tmp2
}

; CHECK: vshlli_2xi32
define <2 x i64> @vshlli_2xi32(<2 x i32>* %A) nounwind {
  %tmp1 = load <2 x i32>* %A
; CHECK: vshll.i32 q8, d16, #32 @ encoding: [0x20,0x03,0xfa,0xf3]
  %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 32, i32 32 >)
  ret <2 x i64> %tmp2
}

declare <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vshiftn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone

; CHECK: vshrns_8xi16
define <8 x i8> @vshrns_8xi16(<8 x i16>* %A) nounwind {
  %tmp1 = load <8 x i16>* %A
; CHECK: vshrn.i16 d16, q8, #8 @ encoding: [0x30,0x08,0xc8,0xf2]
  %tmp2 = call <8 x i8> @llvm.arm.neon.vshiftn.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
  ret <8 x i8> %tmp2
}

; CHECK: vshrns_4xi32
define <4 x i16> @vshrns_4xi32(<4 x i32>* %A) nounwind {
  %tmp1 = load <4 x i32>* %A
; CHECK: vshrn.i32 d16, q8, #16 @ encoding: [0x30,0x08,0xd0,0xf2]
  %tmp2 = call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
  ret <4 x i16> %tmp2
}

; CHECK: vshrns_2xi64
define <2 x i32> @vshrns_2xi64(<2 x i64>* %A) nounwind {
  %tmp1 = load <2 x i64>* %A
; CHECK: vshrn.i64 d16, q8, #32 @ encoding: [0x30,0x08,0xe0,0xf2]
  %tmp2 = call <2 x i32> @llvm.arm.neon.vshiftn.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
  ret <2 x i32> %tmp2
}

declare <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

; CHECK: vrshls_8xi8
define <8 x i8> @vrshls_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
; CHECK: vrshl.s8 d16, d16, d17 @ encoding: [0xa0,0x05,0x41,0xf2]
  %tmp3 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  ret <8 x i8> %tmp3
}

; CHECK: vrshls_4xi16
define <4 x i16> @vrshls_4xi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
; CHECK: vrshl.s16 d16, d16, d17 @ encoding: [0xa0,0x05,0x51,0xf2]
  %tmp3 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  ret <4 x i16> %tmp3
}

; CHECK: vrshls_2xi32
define <2 x i32> @vrshls_2xi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
; CHECK: vrshl.s32 d16, d16, d17 @ encoding: [0xa0,0x05,0x61,0xf2]
  %tmp3 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  ret <2 x i32> %tmp3
}

; CHECK: vrshls_1xi64
define <1 x i64> @vrshls_1xi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
  %tmp1 = load <1 x i64>* %A
  %tmp2 = load <1 x i64>* %B
; CHECK: vrshl.s64 d16, d16, d17 @ encoding: [0xa0,0x05,0x71,0xf2]
  %tmp3 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  ret <1 x i64> %tmp3
}

declare <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

; CHECK: vrshlu_8xi8
define <8 x i8> @vrshlu_8xi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
; CHECK: vrshl.u8 d16, d16, d17 @ encoding: [0xa0,0x05,0x41,0xf3]
  %tmp3 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  ret <8 x i8> %tmp3
}

; CHECK: vrshlu_4xi16
define <4 x i16> @vrshlu_4xi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
; CHECK: vrshl.u16 d16, d16, d17 @ encoding: [0xa0,0x05,0x51,0xf3]
  %tmp3 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
  ret <4 x i16> %tmp3
}

; CHECK: vrshlu_2xi32
define <2 x i32> @vrshlu_2xi32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
; CHECK: vrshl.u32 d16, d16, d17 @ encoding: [0xa0,0x05,0x61,0xf3]
  %tmp3 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
  ret <2 x i32> %tmp3
}

; CHECK: vrshlu_1xi64
define <1 x i64> @vrshlu_1xi64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
  %tmp1 = load <1 x i64>* %A
  %tmp2 = load <1 x i64>* %B
; CHECK: vrshl.u64 d16, d16, d17 @ encoding: [0xa0,0x05,0x71,0xf3]
  %tmp3 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
  ret <1 x i64> %tmp3
}

declare <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

; CHECK: vrshls_16xi8
define <16 x i8> @vrshls_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
  %tmp1 = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
; CHECK: vrshl.s8 q8, q8, q9 @ encoding: [0xe0,0x05,0x42,0xf2]
  %tmp3 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
  ret <16 x i8> %tmp3
}

; CHECK: vrshls_8xi16
define <8 x i16> @vrshls_8xi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
  %tmp1 = load <8 x i16>* %A
  %tmp2 = load <8 x i16>* %B
; CHECK: vrshl.s16 q8, q8, q9 @ encoding: [0xe0,0x05,0x52,0xf2]
  %tmp3 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
  ret <8 x i16> %tmp3
}

; CHECK: vrshls_4xi32
define <4 x i32> @vrshls_4xi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
  %tmp1 = load <4 x i32>* %A
  %tmp2 = load <4 x i32>* %B
; CHECK: vrshl.s32 q8, q8, q9 @ encoding: [0xe0,0x05,0x62,0xf2]
  %tmp3 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
  ret <4 x i32> %tmp3
}

; CHECK: vrshls_2xi64
define <2 x i64> @vrshls_2xi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
  %tmp1 = load <2 x i64>* %A
  %tmp2 = load <2 x i64>* %B
; CHECK: vrshl.s64 q8, q8, q9 @ encoding: [0xe0,0x05,0x72,0xf2]
  %tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
  ret <2 x i64> %tmp3
}

declare <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

; CHECK: vrshlu_16xi8
define <16 x i8> @vrshlu_16xi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
  %tmp1 = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
; CHECK: vrshl.u8 q8, q8, q9 @ encoding: [0xe0,0x05,0x42,0xf3]
  %tmp3 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
  ret <16 x i8> %tmp3
}

; CHECK: vrshlu_8xi16
define <8 x i16> @vrshlu_8xi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
  %tmp1 = load <8 x i16>* %A
  %tmp2 = load <8 x i16>* %B
; CHECK: vrshl.u16 q8, q8, q9 @ encoding: [0xe0,0x05,0x52,0xf3]
  %tmp3 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
  ret <8 x i16> %tmp3
}

; CHECK: vrshlu_4xi32
define <4 x i32> @vrshlu_4xi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
  %tmp1 = load <4 x i32>* %A
  %tmp2 = load <4 x i32>* %B
; CHECK: vrshl.u32 q8, q8, q9 @ encoding: [0xe0,0x05,0x62,0xf3]
  %tmp3 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
  ret <4 x i32> %tmp3
}

; CHECK: vrshlu_2xi64
define <2 x i64> @vrshlu_2xi64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
  %tmp1 = load <2 x i64>* %A
  %tmp2 = load <2 x i64>* %B
; CHECK: vrshl.u64 q8, q8, q9 @ encoding: [0xe0,0x05,0x72,0xf3]
  %tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
  ret <2 x i64> %tmp3
}

; CHECK: vrshrs_8xi8
define <8 x i8> @vrshrs_8xi8(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
; CHECK: vrshr.s8 d16, d16, #8 @ encoding: [0x30,0x02,0xc8,0xf2]
  %tmp2 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
  ret <8 x i8> %tmp2
}

; CHECK: vrshrs_4xi16
define <4 x i16> @vrshrs_4xi16(<4 x i16>* %A) nounwind {
  %tmp1 = load <4 x i16>* %A
; CHECK: vrshr.s16 d16, d16, #16 @ encoding: [0x30,0x02,0xd0,0xf2]
  %tmp2 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
  ret <4 x i16> %tmp2
}

; CHECK: vrshrs_2xi32
define <2 x i32> @vrshrs_2xi32(<2 x i32>* %A) nounwind {
  %tmp1 = load <2 x i32>* %A
; CHECK: vrshr.s32 d16, d16, #32 @ encoding: [0x30,0x02,0xe0,0xf2]
  %tmp2 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
  ret <2 x i32> %tmp2
}

; CHECK: vrshrs_1xi64
define <1 x i64> @vrshrs_1xi64(<1 x i64>* %A) nounwind {
  %tmp1 = load <1 x i64>* %A
; CHECK: vrshr.s64 d16, d16, #64 @ encoding: [0xb0,0x02,0xc0,0xf2]
  %tmp2 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
  ret <1 x i64> %tmp2
}

; CHECK: vrshru_8xi8
define <8 x i8> @vrshru_8xi8(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
; CHECK: vrshr.u8 d16, d16, #8 @ encoding: [0x30,0x02,0xc8,0xf3]
  %tmp2 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
  ret <8 x i8> %tmp2
}

; CHECK: vrshru_4xi16
define <4 x i16> @vrshru_4xi16(<4 x i16>* %A) nounwind {
  %tmp1 = load <4 x i16>* %A
; CHECK: vrshr.u16 d16, d16, #16 @ encoding: [0x30,0x02,0xd0,0xf3]
  %tmp2 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
  ret <4 x i16> %tmp2
}

; CHECK: vrshru_2xi32
define <2 x i32> @vrshru_2xi32(<2 x i32>* %A) nounwind {
  %tmp1 = load <2 x i32>* %A
; CHECK: vrshr.u32 d16, d16, #32 @ encoding: [0x30,0x02,0xe0,0xf3]
  %tmp2 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
  ret <2 x i32> %tmp2
}

; CHECK: vrshru_1xi64
define <1 x i64> @vrshru_1xi64(<1 x i64>* %A) nounwind {
  %tmp1 = load <1 x i64>* %A
; CHECK: vrshr.u64 d16, d16, #64 @ encoding: [0xb0,0x02,0xc0,0xf3]
  %tmp2 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
  ret <1 x i64> %tmp2
}

; CHECK: vrshrs_16xi8
define <16 x i8> @vrshrs_16xi8(<16 x i8>* %A) nounwind {
  %tmp1 = load <16 x i8>* %A
; CHECK: vrshr.s8 q8, q8, #8 @ encoding: [0x70,0x02,0xc8,0xf2]
  %tmp2 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
  ret <16 x i8> %tmp2
}

; CHECK: vrshrs_8xi16
define <8 x i16> @vrshrs_8xi16(<8 x i16>* %A) nounwind {
  %tmp1 = load <8 x i16>* %A
; CHECK: vrshr.s16 q8, q8, #16 @ encoding: [0x70,0x02,0xd0,0xf2]
  %tmp2 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
  ret <8 x i16> %tmp2
}

; CHECK: vrshrs_4xi32
define <4 x i32> @vrshrs_4xi32(<4 x i32>* %A) nounwind {
  %tmp1 = load <4 x i32>* %A
; CHECK: vrshr.s32 q8, q8, #32 @ encoding: [0x70,0x02,0xe0,0xf2]
  %tmp2 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
  ret <4 x i32> %tmp2
}

; CHECK: vrshrs_2xi64
define <2 x i64> @vrshrs_2xi64(<2 x i64>* %A) nounwind {
  %tmp1 = load <2 x i64>* %A
; CHECK: vrshr.s64 q8, q8, #64 @ encoding: [0xf0,0x02,0xc0,0xf2]
  %tmp2 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
  ret <2 x i64> %tmp2
}

; CHECK: vrshru_16xi8
define <16 x i8> @vrshru_16xi8(<16 x i8>* %A) nounwind {
  %tmp1 = load <16 x i8>* %A
; CHECK: vrshr.u8 q8, q8, #8 @ encoding: [0x70,0x02,0xc8,0xf3]
  %tmp2 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
  ret <16 x i8> %tmp2
}

; CHECK: vrshru_8xi16
define <8 x i16> @vrshru_8xi16(<8 x i16>* %A) nounwind {
  %tmp1 = load <8 x i16>* %A
; CHECK: vrshr.u16 q8, q8, #16 @ encoding: [0x70,0x02,0xd0,0xf3]
  %tmp2 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
  ret <8 x i16> %tmp2
}

; CHECK: vrshru_4xi32
define <4 x i32> @vrshru_4xi32(<4 x i32>* %A) nounwind {
  %tmp1 = load <4 x i32>* %A
; CHECK: vrshr.u32 q8, q8, #32 @ encoding: [0x70,0x02,0xe0,0xf3]
  %tmp2 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
  ret <4 x i32> %tmp2
}

; CHECK: vrshru_2xi64
define <2 x i64> @vrshru_2xi64(<2 x i64>* %A) nounwind {
  %tmp1 = load <2 x i64>* %A
; CHECK: vrshr.u64 q8, q8, #64 @ encoding: [0xf0,0x02,0xc0,0xf3]
  %tmp2 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
  ret <2 x i64> %tmp2
}

declare <8 x i8> @llvm.arm.neon.vrshiftn.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vrshiftn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vrshiftn.v2i32(<2 x i64>, <2 x i64>) nounwind readnone

; CHECK: vrshrns_8xi16
define <8 x i8> @vrshrns_8xi16(<8 x i16>* %A) nounwind {
  %tmp1 = load <8 x i16>* %A
; CHECK: vrshrn.i16 d16, q8, #8 @ encoding: [0x70,0x08,0xc8,0xf2]
  %tmp2 = call <8 x i8> @llvm.arm.neon.vrshiftn.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
  ret <8 x i8> %tmp2
}

; CHECK: vrshrns_4xi32
define <4 x i16> @vrshrns_4xi32(<4 x i32>* %A) nounwind {
  %tmp1 = load <4 x i32>* %A
; CHECK: vrshrn.i32 d16, q8, #16 @ encoding: [0x70,0x08,0xd0,0xf2]
  %tmp2 = call <4 x i16> @llvm.arm.neon.vrshiftn.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
  ret <4 x i16> %tmp2
}

; CHECK: vrshrns_2xi64
define <2 x i32> @vrshrns_2xi64(<2 x i64>* %A) nounwind {
  %tmp1 = load <2 x i64>* %A
; CHECK: vrshrn.i64 d16, q8, #32 @ encoding: [0x70,0x08,0xe0,0xf2]
  %tmp2 = call <2 x i32> @llvm.arm.neon.vrshiftn.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
  ret <2 x i32> %tmp2
}