; RUN: llc < %s -march=arm -mcpu=cortex-a8 | FileCheck %s

define <8 x i8> @vmuli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vmuli8:
;CHECK: vmul.i8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = mul <8 x i8> %tmp1, %tmp2
  ret <8 x i8> %tmp3
}

define <4 x i16> @vmuli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vmuli16:
;CHECK: vmul.i16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = mul <4 x i16> %tmp1, %tmp2
  ret <4 x i16> %tmp3
}

define <2 x i32> @vmuli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK: vmuli32:
;CHECK: vmul.i32
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
  %tmp3 = mul <2 x i32> %tmp1, %tmp2
  ret <2 x i32> %tmp3
}

define <2 x float> @vmulf32(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK: vmulf32:
;CHECK: vmul.f32
  %tmp1 = load <2 x float>* %A
  %tmp2 = load <2 x float>* %B
  %tmp3 = fmul <2 x float> %tmp1, %tmp2
  ret <2 x float> %tmp3
}

define <8 x i8> @vmulp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vmulp8:
;CHECK: vmul.p8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = call <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
  ret <8 x i8> %tmp3
}

define <16 x i8> @vmulQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK: vmulQi8:
;CHECK: vmul.i8
  %tmp1 = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = mul <16 x i8> %tmp1, %tmp2
  ret <16 x i8> %tmp3
}

define <8 x i16> @vmulQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK: vmulQi16:
;CHECK: vmul.i16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = load <8 x i16>* %B
  %tmp3 = mul <8 x i16> %tmp1, %tmp2
  ret <8 x i16> %tmp3
}

define <4 x i32> @vmulQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK: vmulQi32:
;CHECK: vmul.i32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = load <4 x i32>* %B
  %tmp3 = mul <4 x i32> %tmp1, %tmp2
  ret <4 x i32> %tmp3
}

define <4 x float> @vmulQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK: vmulQf32:
;CHECK: vmul.f32
  %tmp1 = load <4 x float>* %A
  %tmp2 = load <4 x float>* %B
  %tmp3 = fmul <4 x float> %tmp1, %tmp2
  ret <4 x float> %tmp3
}

define <16 x i8> @vmulQp8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK: vmulQp8:
;CHECK: vmul.p8
  %tmp1 = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = call <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
  ret <16 x i8> %tmp3
}

declare <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8>, <16 x i8>) nounwind readnone

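; In the lane tests below, one multiply operand is splatted from a single
; lane with a shufflevector; the splat should fold into the by-scalar form
; of the instruction (e.g. vmul.i16 d0, d0, d1[1]), as the CHECK lines verify.
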
define arm_aapcs_vfpcc <2 x float> @test_vmul_lanef32(<2 x float> %arg0_float32x2_t, <2 x float> %arg1_float32x2_t) nounwind readnone {
entry:
; CHECK: test_vmul_lanef32:
; CHECK: vmul.f32 d0, d0, d1[0]
  %0 = shufflevector <2 x float> %arg1_float32x2_t, <2 x float> undef, <2 x i32> zeroinitializer ; <<2 x float>> [#uses=1]
  %1 = fmul <2 x float> %0, %arg0_float32x2_t ; <<2 x float>> [#uses=1]
  ret <2 x float> %1
}

define arm_aapcs_vfpcc <4 x i16> @test_vmul_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
entry:
; CHECK: test_vmul_lanes16:
; CHECK: vmul.i16 d0, d0, d1[1]
  %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
  %1 = mul <4 x i16> %0, %arg0_int16x4_t ; <<4 x i16>> [#uses=1]
  ret <4 x i16> %1
}

define arm_aapcs_vfpcc <2 x i32> @test_vmul_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
entry:
; CHECK: test_vmul_lanes32:
; CHECK: vmul.i32 d0, d0, d1[1]
  %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
  %1 = mul <2 x i32> %0, %arg0_int32x2_t ; <<2 x i32>> [#uses=1]
  ret <2 x i32> %1
}

define arm_aapcs_vfpcc <4 x float> @test_vmulQ_lanef32(<4 x float> %arg0_float32x4_t, <2 x float> %arg1_float32x2_t) nounwind readnone {
entry:
; CHECK: test_vmulQ_lanef32:
; CHECK: vmul.f32 q0, q0, d2[1]
  %0 = shufflevector <2 x float> %arg1_float32x2_t, <2 x float> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x float>> [#uses=1]
  %1 = fmul <4 x float> %0, %arg0_float32x4_t ; <<4 x float>> [#uses=1]
  ret <4 x float> %1
}

define arm_aapcs_vfpcc <8 x i16> @test_vmulQ_lanes16(<8 x i16> %arg0_int16x8_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
entry:
; CHECK: test_vmulQ_lanes16:
; CHECK: vmul.i16 q0, q0, d2[1]
  %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %1 = mul <8 x i16> %0, %arg0_int16x8_t ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %1
}

define arm_aapcs_vfpcc <4 x i32> @test_vmulQ_lanes32(<4 x i32> %arg0_int32x4_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
entry:
; CHECK: test_vmulQ_lanes32:
; CHECK: vmul.i32 q0, q0, d2[1]
  %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i32>> [#uses=1]
  %1 = mul <4 x i32> %0, %arg0_int32x4_t ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %1
}

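; The vmull tests below use widening multiplies: both operands are sign- or
; zero-extended from a narrow vector (or the llvm.arm.neon.vmulls/vmullu
; intrinsics are called directly), which should select vmull.s8/s16/s32 or
; vmull.u8/u16/u32.
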
define <8 x i16> @vmulls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vmulls8:
;CHECK: vmull.s8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
  %tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
  %tmp5 = mul <8 x i16> %tmp3, %tmp4
  ret <8 x i16> %tmp5
}

define <8 x i16> @vmulls8_int(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vmulls8_int:
;CHECK: vmull.s8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
  ret <8 x i16> %tmp3
}

define <4 x i32> @vmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vmulls16:
;CHECK: vmull.s16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
  %tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
  %tmp5 = mul <4 x i32> %tmp3, %tmp4
  ret <4 x i32> %tmp5
}

define <4 x i32> @vmulls16_int(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vmulls16_int:
;CHECK: vmull.s16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
  ret <4 x i32> %tmp3
}

define <2 x i64> @vmulls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK: vmulls32:
;CHECK: vmull.s32
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
  %tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
  %tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
  %tmp5 = mul <2 x i64> %tmp3, %tmp4
  ret <2 x i64> %tmp5
}

define <2 x i64> @vmulls32_int(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK: vmulls32_int:
;CHECK: vmull.s32
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
  %tmp3 = call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
  ret <2 x i64> %tmp3
}

define <8 x i16> @vmullu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vmullu8:
;CHECK: vmull.u8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
  %tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
  %tmp5 = mul <8 x i16> %tmp3, %tmp4
  ret <8 x i16> %tmp5
}

define <8 x i16> @vmullu8_int(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vmullu8_int:
;CHECK: vmull.u8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
  ret <8 x i16> %tmp3
}

define <4 x i32> @vmullu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vmullu16:
;CHECK: vmull.u16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
  %tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
  %tmp5 = mul <4 x i32> %tmp3, %tmp4
  ret <4 x i32> %tmp5
}

define <4 x i32> @vmullu16_int(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK: vmullu16_int:
;CHECK: vmull.u16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
  ret <4 x i32> %tmp3
}

define <2 x i64> @vmullu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK: vmullu32:
;CHECK: vmull.u32
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
  %tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
  %tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
  %tmp5 = mul <2 x i64> %tmp3, %tmp4
  ret <2 x i64> %tmp5
}

define <2 x i64> @vmullu32_int(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK: vmullu32_int:
;CHECK: vmull.u32
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
  %tmp3 = call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
  ret <2 x i64> %tmp3
}

define <8 x i16> @vmullp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vmullp8:
;CHECK: vmull.p8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
  ret <8 x i16> %tmp3
}

define arm_aapcs_vfpcc <4 x i32> @test_vmull_lanes16(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
entry:
; CHECK: test_vmull_lanes16
; CHECK: vmull.s16 q0, d0, d1[1]
  %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
  %1 = sext <4 x i16> %arg0_int16x4_t to <4 x i32>
  %2 = sext <4 x i16> %0 to <4 x i32>
  %3 = mul <4 x i32> %1, %2
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @test_vmull_lanes16_int(<4 x i16> %arg0_int16x4_t, <4 x i16> %arg1_int16x4_t) nounwind readnone {
entry:
; CHECK: test_vmull_lanes16_int
; CHECK: vmull.s16 q0, d0, d1[1]
  %0 = shufflevector <4 x i16> %arg1_int16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
  %1 = tail call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %arg0_int16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %1
}

define arm_aapcs_vfpcc <2 x i64> @test_vmull_lanes32(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
entry:
; CHECK: test_vmull_lanes32
; CHECK: vmull.s32 q0, d0, d1[1]
  %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
  %1 = sext <2 x i32> %arg0_int32x2_t to <2 x i64>
  %2 = sext <2 x i32> %0 to <2 x i64>
  %3 = mul <2 x i64> %1, %2
  ret <2 x i64> %3
}

define arm_aapcs_vfpcc <2 x i64> @test_vmull_lanes32_int(<2 x i32> %arg0_int32x2_t, <2 x i32> %arg1_int32x2_t) nounwind readnone {
entry:
; CHECK: test_vmull_lanes32_int
; CHECK: vmull.s32 q0, d0, d1[1]
  %0 = shufflevector <2 x i32> %arg1_int32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
  %1 = tail call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
  ret <2 x i64> %1
}

define arm_aapcs_vfpcc <4 x i32> @test_vmull_laneu16(<4 x i16> %arg0_uint16x4_t, <4 x i16> %arg1_uint16x4_t) nounwind readnone {
entry:
; CHECK: test_vmull_laneu16
; CHECK: vmull.u16 q0, d0, d1[1]
  %0 = shufflevector <4 x i16> %arg1_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
  %1 = zext <4 x i16> %arg0_uint16x4_t to <4 x i32>
  %2 = zext <4 x i16> %0 to <4 x i32>
  %3 = mul <4 x i32> %1, %2
  ret <4 x i32> %3
}

define arm_aapcs_vfpcc <4 x i32> @test_vmull_laneu16_int(<4 x i16> %arg0_uint16x4_t, <4 x i16> %arg1_uint16x4_t) nounwind readnone {
entry:
; CHECK: test_vmull_laneu16_int
; CHECK: vmull.u16 q0, d0, d1[1]
  %0 = shufflevector <4 x i16> %arg1_uint16x4_t, <4 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1> ; <<4 x i16>> [#uses=1]
  %1 = tail call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %arg0_uint16x4_t, <4 x i16> %0) ; <<4 x i32>> [#uses=1]
  ret <4 x i32> %1
}

define arm_aapcs_vfpcc <2 x i64> @test_vmull_laneu32(<2 x i32> %arg0_uint32x2_t, <2 x i32> %arg1_uint32x2_t) nounwind readnone {
entry:
; CHECK: test_vmull_laneu32
; CHECK: vmull.u32 q0, d0, d1[1]
  %0 = shufflevector <2 x i32> %arg1_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
  %1 = zext <2 x i32> %arg0_uint32x2_t to <2 x i64>
  %2 = zext <2 x i32> %0 to <2 x i64>
  %3 = mul <2 x i64> %1, %2
  ret <2 x i64> %3
}

define arm_aapcs_vfpcc <2 x i64> @test_vmull_laneu32_int(<2 x i32> %arg0_uint32x2_t, <2 x i32> %arg1_uint32x2_t) nounwind readnone {
entry:
; CHECK: test_vmull_laneu32_int
; CHECK: vmull.u32 q0, d0, d1[1]
  %0 = shufflevector <2 x i32> %arg1_uint32x2_t, <2 x i32> undef, <2 x i32> <i32 1, i32 1> ; <<2 x i32>> [#uses=1]
  %1 = tail call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %arg0_uint32x2_t, <2 x i32> %0) ; <<2 x i64>> [#uses=1]
  ret <2 x i64> %1
}

declare <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>) nounwind readnone

; VMULL needs to recognize BUILD_VECTORs with sign/zero-extended elements.
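; For example, in @vmull_extvec_s8 below the splat of -12 fits in an i8, so
; the <8 x i16> constant can be treated as a sign-extended <8 x i8>
; BUILD_VECTOR and the whole multiply selected as vmull.s8.
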
define <8 x i16> @vmull_extvec_s8(<8 x i8> %arg) nounwind {
; CHECK: vmull_extvec_s8
; CHECK: vmull.s8
  %tmp3 = sext <8 x i8> %arg to <8 x i16>
  %tmp4 = mul <8 x i16> %tmp3, <i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12, i16 -12>
  ret <8 x i16> %tmp4
}

define <8 x i16> @vmull_extvec_u8(<8 x i8> %arg) nounwind {
; CHECK: vmull_extvec_u8
; CHECK: vmull.u8
  %tmp3 = zext <8 x i8> %arg to <8 x i16>
  %tmp4 = mul <8 x i16> %tmp3, <i16 12, i16 12, i16 12, i16 12, i16 12, i16 12, i16 12, i16 12>
  ret <8 x i16> %tmp4
}

define <8 x i16> @vmull_noextvec_s8(<8 x i8> %arg) nounwind {
; Do not use VMULL if the BUILD_VECTOR element values are too big.
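; (-999 is outside the signed i8 range [-128, 127], so the <8 x i16>
; constant cannot be treated as a sign-extended <8 x i8> BUILD_VECTOR.)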
; CHECK: vmull_noextvec_s8
  %tmp3 = sext <8 x i8> %arg to <8 x i16>
  %tmp4 = mul <8 x i16> %tmp3, <i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999, i16 -999>
  ret <8 x i16> %tmp4
}

define <8 x i16> @vmull_noextvec_u8(<8 x i8> %arg) nounwind {
; Do not use VMULL if the BUILD_VECTOR element values are too big.
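; (999 is above the unsigned i8 maximum of 255, so the constant is not a
; zero-extended <8 x i8> BUILD_VECTOR either.)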
; CHECK: vmull_noextvec_u8
  %tmp3 = zext <8 x i8> %arg to <8 x i16>
  %tmp4 = mul <8 x i16> %tmp3, <i16 999, i16 999, i16 999, i16 999, i16 999, i16 999, i16 999, i16 999>
  ret <8 x i16> %tmp4
}

define <4 x i32> @vmull_extvec_s16(<4 x i16> %arg) nounwind {
; CHECK: vmull_extvec_s16
; CHECK: vmull.s16
  %tmp3 = sext <4 x i16> %arg to <4 x i32>
  %tmp4 = mul <4 x i32> %tmp3, <i32 -12, i32 -12, i32 -12, i32 -12>
  ret <4 x i32> %tmp4
}

define <4 x i32> @vmull_extvec_u16(<4 x i16> %arg) nounwind {
; CHECK: vmull_extvec_u16
; CHECK: vmull.u16
  %tmp3 = zext <4 x i16> %arg to <4 x i32>
  %tmp4 = mul <4 x i32> %tmp3, <i32 1234, i32 1234, i32 1234, i32 1234>
  ret <4 x i32> %tmp4
}

define <2 x i64> @vmull_extvec_s32(<2 x i32> %arg) nounwind {
; CHECK: vmull_extvec_s32
; CHECK: vmull.s32
  %tmp3 = sext <2 x i32> %arg to <2 x i64>
  %tmp4 = mul <2 x i64> %tmp3, <i64 -1234, i64 -1234>
  ret <2 x i64> %tmp4
}

define <2 x i64> @vmull_extvec_u32(<2 x i32> %arg) nounwind {
; CHECK: vmull_extvec_u32
; CHECK: vmull.u32
  %tmp3 = zext <2 x i32> %arg to <2 x i64>
  %tmp4 = mul <2 x i64> %tmp3, <i64 1234, i64 1234>
  ret <2 x i64> %tmp4
}

define void @distribute(i16* %dst, i8* %src, i32 %mul) nounwind {
entry:
; CHECK: distribute:
; CHECK: vmull.u8 [[REG1:(q[0-9]+)]], d{{.*}}, [[REG2:(d[0-9]+)]]
; CHECK: vmlal.u8 [[REG1]], d{{.*}}, [[REG2]]
  %0 = trunc i32 %mul to i8
  %1 = insertelement <8 x i8> undef, i8 %0, i32 0
  %2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
  %3 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %src, i32 1)
  %4 = bitcast <16 x i8> %3 to <2 x double>
  %5 = extractelement <2 x double> %4, i32 1
  %6 = bitcast double %5 to <8 x i8>
  %7 = zext <8 x i8> %6 to <8 x i16>
  %8 = zext <8 x i8> %2 to <8 x i16>
  %9 = extractelement <2 x double> %4, i32 0
  %10 = bitcast double %9 to <8 x i8>
  %11 = zext <8 x i8> %10 to <8 x i16>
  %12 = add <8 x i16> %7, %11
  %13 = mul <8 x i16> %12, %8
  %14 = bitcast i16* %dst to i8*
  tail call void @llvm.arm.neon.vst1.v8i16(i8* %14, <8 x i16> %13, i32 2)
  ret void
}

declare <16 x i8> @llvm.arm.neon.vld1.v16i8(i8*, i32) nounwind readonly

declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind

; Take advantage of the Cortex-A8 multiplier accumulator forwarding.
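; The distribute2 tests check that (a + b) * c is lowered as a multiply
; followed by a multiply-accumulate (vmul.i8 + vmla.i8) rather than a
; vadd.i8 feeding a multiply, so the accumulator forwarding path is used.
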
%struct.uint8x8_t = type { <8 x i8> }

define void @distribute2(%struct.uint8x8_t* nocapture %dst, i8* %src, i32 %mul) nounwind {
entry:
; CHECK: distribute2
; CHECK-NOT: vadd.i8
; CHECK: vmul.i8
; CHECK: vmla.i8
  %0 = trunc i32 %mul to i8
  %1 = insertelement <8 x i8> undef, i8 %0, i32 0
  %2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
  %3 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %src, i32 1)
  %4 = bitcast <16 x i8> %3 to <2 x double>
  %5 = extractelement <2 x double> %4, i32 1
  %6 = bitcast double %5 to <8 x i8>
  %7 = extractelement <2 x double> %4, i32 0
  %8 = bitcast double %7 to <8 x i8>
  %9 = add <8 x i8> %6, %8
  %10 = mul <8 x i8> %9, %2
  %11 = getelementptr inbounds %struct.uint8x8_t* %dst, i32 0, i32 0
  store <8 x i8> %10, <8 x i8>* %11, align 8
  ret void
}

define void @distribute2_commutative(%struct.uint8x8_t* nocapture %dst, i8* %src, i32 %mul) nounwind {
entry:
; CHECK: distribute2_commutative
; CHECK-NOT: vadd.i8
; CHECK: vmul.i8
; CHECK: vmla.i8
  %0 = trunc i32 %mul to i8
  %1 = insertelement <8 x i8> undef, i8 %0, i32 0
  %2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
  %3 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %src, i32 1)
  %4 = bitcast <16 x i8> %3 to <2 x double>
  %5 = extractelement <2 x double> %4, i32 1
  %6 = bitcast double %5 to <8 x i8>
  %7 = extractelement <2 x double> %4, i32 0
  %8 = bitcast double %7 to <8 x i8>
  %9 = add <8 x i8> %6, %8
  %10 = mul <8 x i8> %2, %9
  %11 = getelementptr inbounds %struct.uint8x8_t* %dst, i32 0, i32 0
  store <8 x i8> %10, <8 x i8>* %11, align 8
  ret void
}

; If one operand has a zero-extend and the other a sign-extend, vmull
; cannot be used.
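; (255 as an i16 element is only reachable by zero-extending an i8, while
; %vec is sign-extended, so the extensions are inconsistent.)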
define i16 @vmullWithInconsistentExtensions(<8 x i8> %vec) {
; CHECK: vmullWithInconsistentExtensions
; CHECK-NOT: vmull.s8
  %1 = sext <8 x i8> %vec to <8 x i16>
  %2 = mul <8 x i16> %1, <i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255, i16 255>
  %3 = extractelement <8 x i16> %2, i32 0
  ret i16 %3
}