; RUN: llc -show-mc-encoding -march=arm -mcpu=cortex-a8 -mattr=+neon < %s | FileCheck %s
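; Encoding tests for the NEON shift-and-accumulate and shift-and-insert
; instructions (VSRA, VRSRA, VSLI, VSRI), covering both D- and Q-register
; forms and both signed and unsigned element types.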
define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
; CHECK: vsra.s8 d16, d17, #8 @ encoding: [0x31,0x01,0xc8,0xf2]
	%tmp3 = ashr <8 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
	%tmp4 = add <8 x i8> %tmp1, %tmp3
	ret <8 x i8> %tmp4
}

define <4 x i16> @vsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
; CHECK: vsra.s16 d16, d17, #16 @ encoding: [0x31,0x01,0xd0,0xf2]
	%tmp3 = ashr <4 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16 >
	%tmp4 = add <4 x i16> %tmp1, %tmp3
	ret <4 x i16> %tmp4
}

define <2 x i32> @vsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
; CHECK: vsra.s32 d16, d17, #32 @ encoding: [0x31,0x01,0xe0,0xf2]
	%tmp3 = ashr <2 x i32> %tmp2, < i32 32, i32 32 >
	%tmp4 = add <2 x i32> %tmp1, %tmp3
	ret <2 x i32> %tmp4
}

define <1 x i64> @vsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
; CHECK: vsra.s64 d16, d17, #64 @ encoding: [0xb1,0x01,0xc0,0xf2]
	%tmp3 = ashr <1 x i64> %tmp2, < i64 64 >
	%tmp4 = add <1 x i64> %tmp1, %tmp3
	ret <1 x i64> %tmp4
}
define <16 x i8> @vsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
; CHECK: vsra.s8 q9, q8, #8 @ encoding: [0x70,0x21,0xc8,0xf2]
	%tmp3 = ashr <16 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
	%tmp4 = add <16 x i8> %tmp1, %tmp3
	ret <16 x i8> %tmp4
}

define <8 x i16> @vsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
; CHECK: vsra.s16 q9, q8, #16 @ encoding: [0x70,0x21,0xd0,0xf2]
	%tmp3 = ashr <8 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
	%tmp4 = add <8 x i16> %tmp1, %tmp3
	ret <8 x i16> %tmp4
}

define <4 x i32> @vsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
; CHECK: vsra.s32 q9, q8, #32 @ encoding: [0x70,0x21,0xe0,0xf2]
	%tmp3 = ashr <4 x i32> %tmp2, < i32 32, i32 32, i32 32, i32 32 >
	%tmp4 = add <4 x i32> %tmp1, %tmp3
	ret <4 x i32> %tmp4
}

define <2 x i64> @vsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
; CHECK: vsra.s64 q9, q8, #64 @ encoding: [0xf0,0x21,0xc0,0xf2]
	%tmp3 = ashr <2 x i64> %tmp2, < i64 64, i64 64 >
	%tmp4 = add <2 x i64> %tmp1, %tmp3
	ret <2 x i64> %tmp4
}
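; The unsigned forms below use lshr instead of ashr and differ in encoding
; only by the U bit: note the final encoding byte flips from 0xf2 to 0xf3.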
define <8 x i8> @vsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
; CHECK: vsra.u8 d16, d17, #8 @ encoding: [0x31,0x01,0xc8,0xf3]
	%tmp3 = lshr <8 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
	%tmp4 = add <8 x i8> %tmp1, %tmp3
	ret <8 x i8> %tmp4
}

define <4 x i16> @vsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
; CHECK: vsra.u16 d16, d17, #16 @ encoding: [0x31,0x01,0xd0,0xf3]
	%tmp3 = lshr <4 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16 >
	%tmp4 = add <4 x i16> %tmp1, %tmp3
	ret <4 x i16> %tmp4
}

define <2 x i32> @vsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
; CHECK: vsra.u32 d16, d17, #32 @ encoding: [0x31,0x01,0xe0,0xf3]
	%tmp3 = lshr <2 x i32> %tmp2, < i32 32, i32 32 >
	%tmp4 = add <2 x i32> %tmp1, %tmp3
	ret <2 x i32> %tmp4
}

define <1 x i64> @vsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
; CHECK: vsra.u64 d16, d17, #64 @ encoding: [0xb1,0x01,0xc0,0xf3]
	%tmp3 = lshr <1 x i64> %tmp2, < i64 64 >
	%tmp4 = add <1 x i64> %tmp1, %tmp3
	ret <1 x i64> %tmp4
}
define <16 x i8> @vsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
; CHECK: vsra.u8 q9, q8, #8 @ encoding: [0x70,0x21,0xc8,0xf3]
	%tmp3 = lshr <16 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
	%tmp4 = add <16 x i8> %tmp1, %tmp3
	ret <16 x i8> %tmp4
}

define <8 x i16> @vsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
; CHECK: vsra.u16 q9, q8, #16 @ encoding: [0x70,0x21,0xd0,0xf3]
	%tmp3 = lshr <8 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
	%tmp4 = add <8 x i16> %tmp1, %tmp3
	ret <8 x i16> %tmp4
}

define <4 x i32> @vsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
; CHECK: vsra.u32 q9, q8, #32 @ encoding: [0x70,0x21,0xe0,0xf3]
	%tmp3 = lshr <4 x i32> %tmp2, < i32 32, i32 32, i32 32, i32 32 >
	%tmp4 = add <4 x i32> %tmp1, %tmp3
	ret <4 x i32> %tmp4
}

define <2 x i64> @vsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
; CHECK: vsra.u64 q9, q8, #64 @ encoding: [0xf0,0x21,0xc0,0xf3]
	%tmp3 = lshr <2 x i64> %tmp2, < i64 64, i64 64 >
	%tmp4 = add <2 x i64> %tmp1, %tmp3
	ret <2 x i64> %tmp4
}
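; VRSRA (rounding shift right and accumulate) is expressed through the
; llvm.arm.neon.vrshifts/vrshiftu intrinsics, which take a per-element shift
; vector; a negative amount (e.g. -8) requests a right shift by that
; magnitude, so it prints as #8 in the assembly.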
define <8 x i8> @vrsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
; CHECK: vrsra.s8 d16, d17, #8 @ encoding: [0x31,0x03,0xc8,0xf2]
	%tmp3 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
	%tmp4 = add <8 x i8> %tmp1, %tmp3
	ret <8 x i8> %tmp4
}

define <4 x i16> @vrsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
; CHECK: vrsra.s16 d16, d17, #16 @ encoding: [0x31,0x03,0xd0,0xf2]
	%tmp3 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
	%tmp4 = add <4 x i16> %tmp1, %tmp3
	ret <4 x i16> %tmp4
}

define <2 x i32> @vrsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
; CHECK: vrsra.s32 d16, d17, #32 @ encoding: [0x31,0x03,0xe0,0xf2]
	%tmp3 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
	%tmp4 = add <2 x i32> %tmp1, %tmp3
	ret <2 x i32> %tmp4
}

define <1 x i64> @vrsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
; CHECK: vrsra.s64 d16, d17, #64 @ encoding: [0xb1,0x03,0xc0,0xf2]
	%tmp3 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp2, <1 x i64> < i64 -64 >)
	%tmp4 = add <1 x i64> %tmp1, %tmp3
	ret <1 x i64> %tmp4
}
define <8 x i8> @vrsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
; CHECK: vrsra.u8 d16, d17, #8 @ encoding: [0x31,0x03,0xc8,0xf3]
	%tmp3 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
	%tmp4 = add <8 x i8> %tmp1, %tmp3
	ret <8 x i8> %tmp4
}

define <4 x i16> @vrsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
; CHECK: vrsra.u16 d16, d17, #16 @ encoding: [0x31,0x03,0xd0,0xf3]
	%tmp3 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
	%tmp4 = add <4 x i16> %tmp1, %tmp3
	ret <4 x i16> %tmp4
}

define <2 x i32> @vrsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
; CHECK: vrsra.u32 d16, d17, #32 @ encoding: [0x31,0x03,0xe0,0xf3]
	%tmp3 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
	%tmp4 = add <2 x i32> %tmp1, %tmp3
	ret <2 x i32> %tmp4
}

define <1 x i64> @vrsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
; CHECK: vrsra.u64 d16, d17, #64 @ encoding: [0xb1,0x03,0xc0,0xf3]
	%tmp3 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp2, <1 x i64> < i64 -64 >)
	%tmp4 = add <1 x i64> %tmp1, %tmp3
	ret <1 x i64> %tmp4
}
define <16 x i8> @vrsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
; CHECK: vrsra.s8 q9, q8, #8 @ encoding: [0x70,0x23,0xc8,0xf2]
	%tmp3 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
	%tmp4 = add <16 x i8> %tmp1, %tmp3
	ret <16 x i8> %tmp4
}

define <8 x i16> @vrsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
; CHECK: vrsra.s16 q9, q8, #16 @ encoding: [0x70,0x23,0xd0,0xf2]
	%tmp3 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
	%tmp4 = add <8 x i16> %tmp1, %tmp3
	ret <8 x i16> %tmp4
}

define <4 x i32> @vrsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
; CHECK: vrsra.s32 q9, q8, #32 @ encoding: [0x70,0x23,0xe0,0xf2]
	%tmp3 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
	%tmp4 = add <4 x i32> %tmp1, %tmp3
	ret <4 x i32> %tmp4
}

define <2 x i64> @vrsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
; CHECK: vrsra.s64 q9, q8, #64 @ encoding: [0xf0,0x23,0xc0,0xf2]
	%tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
	%tmp4 = add <2 x i64> %tmp1, %tmp3
	ret <2 x i64> %tmp4
}
define <16 x i8> @vrsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
; CHECK: vrsra.u8 q9, q8, #8 @ encoding: [0x70,0x23,0xc8,0xf3]
	%tmp3 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
	%tmp4 = add <16 x i8> %tmp1, %tmp3
	ret <16 x i8> %tmp4
}

define <8 x i16> @vrsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
; CHECK: vrsra.u16 q9, q8, #16 @ encoding: [0x70,0x23,0xd0,0xf3]
	%tmp3 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
	%tmp4 = add <8 x i16> %tmp1, %tmp3
	ret <8 x i16> %tmp4
}

define <4 x i32> @vrsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
; CHECK: vrsra.u32 q9, q8, #32 @ encoding: [0x70,0x23,0xe0,0xf3]
	%tmp3 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
	%tmp4 = add <4 x i32> %tmp1, %tmp3
	ret <4 x i32> %tmp4
}

define <2 x i64> @vrsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
; CHECK: vrsra.u64 q9, q8, #64 @ encoding: [0xf0,0x23,0xc0,0xf3]
	%tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
	%tmp4 = add <2 x i64> %tmp1, %tmp3
	ret <2 x i64> %tmp4
}
declare <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
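; VSLI (shift left and insert) goes through the three-operand
; llvm.arm.neon.vshiftins intrinsic; positive per-element shift amounts
; select the left-shifting form.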
define <8 x i8> @vsli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
; CHECK: vsli.8 d17, d16, #7 @ encoding: [0x30,0x15,0xcf,0xf3]
	%tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
	ret <8 x i8> %tmp3
}

define <4 x i16> @vsli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
; CHECK: vsli.16 d17, d16, #15 @ encoding: [0x30,0x15,0xdf,0xf3]
	%tmp3 = call <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
	ret <4 x i16> %tmp3
}

define <2 x i32> @vsli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
; CHECK: vsli.32 d17, d16, #31 @ encoding: [0x30,0x15,0xff,0xf3]
	%tmp3 = call <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> < i32 31, i32 31 >)
	ret <2 x i32> %tmp3
}

define <1 x i64> @vsli64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
; CHECK: vsli.64 d17, d16, #63 @ encoding: [0xb0,0x15,0xff,0xf3]
	%tmp3 = call <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, <1 x i64> < i64 63 >)
	ret <1 x i64> %tmp3
}
define <16 x i8> @vsliQ8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
; CHECK: vsli.8 q8, q9, #7 @ encoding: [0x72,0x05,0xcf,0xf3]
	%tmp3 = call <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
	ret <16 x i8> %tmp3
}

define <8 x i16> @vsliQ16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
; CHECK: vsli.16 q8, q9, #15 @ encoding: [0x72,0x05,0xdf,0xf3]
	%tmp3 = call <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
	ret <8 x i16> %tmp3
}

define <4 x i32> @vsliQ32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
; CHECK: vsli.32 q8, q9, #31 @ encoding: [0x72,0x05,0xff,0xf3]
	%tmp3 = call <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
	ret <4 x i32> %tmp3
}

define <2 x i64> @vsliQ64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
; CHECK: vsli.64 q8, q9, #63 @ encoding: [0xf2,0x05,0xff,0xf3]
	%tmp3 = call <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, <2 x i64> < i64 63, i64 63 >)
	ret <2 x i64> %tmp3
}
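; VSRI (shift right and insert) reuses the same vshiftins intrinsic with
; negative shift amounts, so a per-element count of -8 becomes vsri.8 ... #8.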
define <8 x i8> @vsri8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
; CHECK: vsri.8 d17, d16, #8 @ encoding: [0x30,0x14,0xc8,0xf3]
	%tmp3 = call <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
	ret <8 x i8> %tmp3
}

define <4 x i16> @vsri16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
; CHECK: vsri.16 d17, d16, #16 @ encoding: [0x30,0x14,0xd0,0xf3]
	%tmp3 = call <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
	ret <4 x i16> %tmp3
}

define <2 x i32> @vsri32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
; CHECK: vsri.32 d17, d16, #32 @ encoding: [0x30,0x14,0xe0,0xf3]
	%tmp3 = call <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
	ret <2 x i32> %tmp3
}

define <1 x i64> @vsri64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
; CHECK: vsri.64 d17, d16, #64 @ encoding: [0xb0,0x14,0xc0,0xf3]
	%tmp3 = call <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2, <1 x i64> < i64 -64 >)
	ret <1 x i64> %tmp3
}
define <16 x i8> @vsriQ8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
; CHECK: vsri.8 q8, q9, #8 @ encoding: [0x72,0x04,0xc8,0xf3]
	%tmp3 = call <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
	ret <16 x i8> %tmp3
}

define <8 x i16> @vsriQ16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
; CHECK: vsri.16 q8, q9, #16 @ encoding: [0x72,0x04,0xd0,0xf3]
	%tmp3 = call <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
	ret <8 x i16> %tmp3
}

define <4 x i32> @vsriQ32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
; CHECK: vsri.32 q8, q9, #32 @ encoding: [0x72,0x04,0xe0,0xf3]
	%tmp3 = call <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
	ret <4 x i32> %tmp3
}

define <2 x i64> @vsriQ64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
; CHECK: vsri.64 q8, q9, #64 @ encoding: [0xf2,0x04,0xc0,0xf3]
	%tmp3 = call <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
	ret <2 x i64> %tmp3
}
declare <8 x i8> @llvm.arm.neon.vshiftins.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vshiftins.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vshiftins.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vshiftins.v1i64(<1 x i64>, <1 x i64>, <1 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vshiftins.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vshiftins.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vshiftins.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vshiftins.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) nounwind readnone