; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
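
; Each shufflevector below should be matched to a NEON VREV64, VREV32, or
; VREV16 instruction, which reverses the elements within each 64-bit, 32-bit,
; or 16-bit group, respectively.
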
define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
;CHECK: test_vrev64D8:
;CHECK: vrev64.8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
  ret <8 x i8> %tmp2
}

define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
;CHECK: test_vrev64D16:
;CHECK: vrev64.16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  ret <4 x i16> %tmp2
}

define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
;CHECK: test_vrev64D32:
;CHECK: vrev64.32
  %tmp1 = load <2 x i32>* %A
  %tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
  ret <2 x i32> %tmp2
}

define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
;CHECK: test_vrev64Df:
;CHECK: vrev64.32
  %tmp1 = load <2 x float>* %A
  %tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
  ret <2 x float> %tmp2
}

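; The same vrev64 patterns on 128-bit (Q) vectors: each 64-bit half of the
; vector is reversed independently.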
define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
;CHECK: test_vrev64Q8:
;CHECK: vrev64.8
  %tmp1 = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
  ret <16 x i8> %tmp2
}

define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
;CHECK: test_vrev64Q16:
;CHECK: vrev64.16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
  ret <8 x i16> %tmp2
}

define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
;CHECK: test_vrev64Q32:
;CHECK: vrev64.32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x i32> %tmp2
}

define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
;CHECK: test_vrev64Qf:
;CHECK: vrev64.32
  %tmp1 = load <4 x float>* %A
  %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x float> %tmp2
}

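; VREV32 reverses the elements within each 32-bit word: the masks below
; reverse the four bytes of each word or swap the two halfwords of each word.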
define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
;CHECK: test_vrev32D8:
;CHECK: vrev32.8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
  ret <8 x i8> %tmp2
}

define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
;CHECK: test_vrev32D16:
;CHECK: vrev32.16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  ret <4 x i16> %tmp2
}

define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
;CHECK: test_vrev32Q8:
;CHECK: vrev32.8
  %tmp1 = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
  ret <16 x i8> %tmp2
}

define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
;CHECK: test_vrev32Q16:
;CHECK: vrev32.16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x i16> %tmp2
}

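; VREV16 reverses the bytes within each 16-bit halfword, i.e. it swaps
; adjacent byte pairs.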
define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
;CHECK: test_vrev16D8:
;CHECK: vrev16.8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x i8> %tmp2
}

define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
;CHECK: test_vrev16Q8:
;CHECK: vrev16.8
  %tmp1 = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
  ret <16 x i8> %tmp2
}

; Undef shuffle indices should not prevent matching to VREV:

define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
;CHECK: test_vrev64D8_undef:
;CHECK: vrev64.8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 undef, i32 undef, i32 4, i32 3, i32 2, i32 1, i32 0>
  ret <8 x i8> %tmp2
}

define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
;CHECK: test_vrev32Q16_undef:
;CHECK: vrev32.16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
  ret <8 x i16> %tmp2
}

; A vcombine feeding a VREV should not obscure things. Radar 8597007.

define void @test_with_vcombine(<4 x float>* %v) nounwind {
;CHECK: test_with_vcombine:
;CHECK-NOT: vext
;CHECK: vrev64.32
  %tmp1 = load <4 x float>* %v, align 16
  %tmp2 = bitcast <4 x float> %tmp1 to <2 x double>
  %tmp3 = extractelement <2 x double> %tmp2, i32 0
  %tmp4 = bitcast double %tmp3 to <2 x float>
  %tmp5 = extractelement <2 x double> %tmp2, i32 1
  %tmp6 = bitcast double %tmp5 to <2 x float>
  %tmp7 = fadd <2 x float> %tmp6, %tmp6
  %tmp8 = shufflevector <2 x float> %tmp4, <2 x float> %tmp7, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
  store <4 x float> %tmp8, <4 x float>* %v, align 16
  ret void
}

; Test the shuffle of a <4 x i16> which exposed a problem with the perfect
; shuffle table.

define void @test_vrev64(<4 x i16>* nocapture %source, <2 x i16>* nocapture %dst) nounwind ssp {
; CHECK: test_vrev64:
; CHECK: vext.16
; CHECK: vrev64.16
entry:
  %0 = bitcast <4 x i16>* %source to <8 x i16>*
  %tmp2 = load <8 x i16>* %0, align 4
  %tmp3 = extractelement <8 x i16> %tmp2, i32 6
  %tmp5 = insertelement <2 x i16> undef, i16 %tmp3, i32 0
  %tmp9 = extractelement <8 x i16> %tmp2, i32 5
  %tmp11 = insertelement <2 x i16> %tmp5, i16 %tmp9, i32 1
  store <2 x i16> %tmp11, <2 x i16>* %dst, align 4
  ret void
}