1 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
; Shuffle zero vector with %a: lane 0 <- zero element 2, lane 1 <- %a element 1
; (index 5 selects from the second operand); lanes 2-3 undef.
; NOTE(review): the ret/closing brace are not visible in this excerpt.
4 define <4 x float> @test1(<4 x float> %a) nounwind {
5 %b = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 2, i32 5, i32 undef, i32 undef>
8 ;; TODO: This test could be improved by removing the xor instruction and
9 ;; having vinsertps zero out the needed elements.
; Widen <2 x i64> %v to 3 lanes, then blend with zero: result is
; <%v[0], %v[1], 0> (indices 3,4 take from %1; index 2 takes a zero lane).
15 define <3 x i64> @test2(<2 x i64> %v) nounwind readnone {
18 %1 = shufflevector <2 x i64> %v, <2 x i64> %v, <3 x i32> <i32 0, i32 1, i32 undef>
19 %2 = shufflevector <3 x i64> zeroinitializer, <3 x i64> %1, <3 x i32> <i32 3, i32 4, i32 2>
; 256-bit blend: lanes 0-1 from %b (indices 4,5), lane 2 from %a, lane 3 undef.
24 define <4 x i64> @test3(<4 x i64> %a, <4 x i64> %b) nounwind {
25 %c = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 5, i32 2, i32 undef>
; Insert scalar %a into lane 0 of an all-zero <8 x float>.
32 define <8 x float> @test4(float %a) nounwind {
33 %b = insertelement <8 x float> zeroinitializer, float %a, i32 0
; Widen an aligned <4 x float> load to <8 x float> whose upper half is zero
; (indices 4 repeat element 0 of the constant vector, which is 0.0).
; The -NOT directive below asserts no vinsertf128 is emitted for this.
40 define <8 x float> @test5(float* nocapture %f) nounwind uwtable readonly ssp {
42 %0 = bitcast float* %f to <4 x float>*
43 %1 = load <4 x float>* %0, align 16
47 ; CHECK-NOT: vinsertf128
48 %shuffle.i = shufflevector <4 x float> %1, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
49 ret <8 x float> %shuffle.i
; Same pattern as test5 for doubles: widen a <2 x double> load to
; <4 x double> with a zero upper half, without a vinsertf128.
52 define <4 x double> @test6(double* nocapture %d) nounwind uwtable readonly ssp {
54 %0 = bitcast double* %d to <2 x double>*
55 %1 = load <2 x double>* %0, align 16
59 ; CHECK-NOT: vinsertf128
60 %shuffle.i = shufflevector <2 x double> %1, <2 x double> <double 0.000000e+00, double undef>, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
61 ret <4 x double> %shuffle.i
; Widen <4 x i16> to 16 lanes; only lanes 0-1 are defined (both element 1 of %a).
64 define <16 x i16> @test7(<4 x i16> %a) nounwind {
66 %b = shufflevector <4 x i16> %a, <4 x i16> undef, <16 x i32> <i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; Stress test: two chained <16 x i64> shuffles mixing a mostly-zero constant
; vector with a load from addrspace(1) null, stored to an undef pointer.
; Primarily exercises shuffle legalization on very wide vectors.
72 define void @test8() {
74 %0 = load <16 x i64> addrspace(1)* null, align 128
75 %1 = shufflevector <16 x i64> <i64 undef, i64 undef, i64 0, i64 undef, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 undef, i64 0, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i64> %0, <16 x i32> <i32 17, i32 18, i32 2, i32 undef, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 undef, i32 11, i32 undef, i32 undef, i32 undef, i32 26>
76 %2 = shufflevector <16 x i64> %1, <16 x i64> %0, <16 x i32> <i32 0, i32 1, i32 2, i32 30, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 undef, i32 11, i32 undef, i32 22, i32 20, i32 15>
77 store <16 x i64> %2, <16 x i64> addrspace(1)* undef, align 128
82 ; Extract a value from a shufflevector.
; %b lane 2 carries mask value 2, i.e. element 2 of %a, so the extract
; should fold to a direct element extraction.
83 define i32 @test9(<4 x i32> %a) nounwind {
86 %b = shufflevector <4 x i32> %a, <4 x i32> undef, <8 x i32> <i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 undef, i32 4>
87 %r = extractelement <8 x i32> %b, i32 2
92 ; Extract a value which is the result of an undef mask.
; Lane 2's mask is undef, so the whole function may compile to no
; instructions at all (the regex directive below asserts exactly that).
93 define i32 @test10(<4 x i32> %a) nounwind {
95 ; CHECK-NOT: {{^[^#]*[a-z]}}
97 %b = shufflevector <4 x i32> %a, <4 x i32> undef, <8 x i32> <i32 1, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
98 %r = extractelement <8 x i32> %b, i32 2
; Reverse the four float lanes (mask 3,2,1,0).
102 define <4 x float> @test11(<4 x float> %a) nounwind {
105 %tmp1 = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
106 ret <4 x float> %tmp1
; Same lane reversal as test11, but with the source loaded from memory.
109 define <4 x float> @test12(<4 x float>* %a) nounwind {
112 %tmp0 = load <4 x float>* %a
113 %tmp1 = shufflevector <4 x float> %tmp0, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
114 ret <4 x float> %tmp1
; Integer version of the lane reversal (mask 3,2,1,0).
117 define <4 x i32> @test13(<4 x i32> %a) nounwind {
120 %tmp1 = shufflevector <4 x i32> %a, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; Integer lane reversal from memory; expected to fold the load into
; vpshufd with immediate 27 (0b00011011 = reversed order).
124 define <4 x i32> @test14(<4 x i32>* %a) nounwind {
126 ; CHECK: vpshufd $27, (
127 %tmp0 = load <4 x i32>* %a
128 %tmp1 = shufflevector <4 x i32> %tmp0, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; Widen <2 x i32> to <4 x i32>; upper two lanes are undef.
135 define <4 x i32> @test15(<2 x i32>%x) nounwind readnone {
136 %x1 = shufflevector <2 x i32> %x, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
; Same as test5 but the load is only 8-byte aligned; still should not
; need a vinsertf128 to produce the zero upper half.
141 define <8 x float> @test16(float* nocapture %f) nounwind uwtable readonly ssp {
143 %0 = bitcast float* %f to <4 x float>*
144 %1 = load <4 x float>* %0, align 8
148 ; CHECK-NOT: vinsertf128
149 %shuffle.i = shufflevector <4 x float> %1, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
150 ret <8 x float> %shuffle.i
; Select every even byte of the 64-byte concatenation of %inval1/%inval2.
159 define <32 x i8> @shuf1(<32 x i8> %inval1, <32 x i8> %inval2) {
161 %0 = shufflevector <32 x i8> %inval1, <32 x i8> %inval2, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 31, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
165 ; handle the case where only half of the 256-bits is splittable
; Like shuf1 but with indices 30,31 straddling lanes 16-17 of the result,
; so only one 128-bit half decomposes into an even-byte pattern.
171 define <32 x i8> @shuf2(<32 x i8> %inval1, <32 x i8> %inval2) {
173 %0 = shufflevector <32 x i8> %inval1, <32 x i8> %inval2, <32 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30, i32 31, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
; Blend: lanes 0-2 from %a, lane 3 from %b.
180 define <4 x i32> @blend1(<4 x i32> %a, <4 x i32> %b) nounwind alwaysinline {
181 %t = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
; Blend: lanes 0,2 from %a, lanes 1,3 from %b.
188 define <4 x i32> @blend2(<4 x i32> %a, <4 x i32> %b) nounwind alwaysinline {
189 %t = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
; Float variant of blend2 (same 0,5,2,7 mask); the -NOT directive below
; belongs to the following test and rules out vblendps there.
196 define <4 x float> @blend2a(<4 x float> %a, <4 x float> %b) nounwind alwaysinline {
197 %t = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
202 ; CHECK-NOT: vblendps
; Not a blend: lane 0 takes element 1 of %a (mask 1,5,2,7), so a plain
; vblendps cannot implement it.
204 define <4 x i32> @blend3(<4 x i32> %a, <4 x i32> %b) nounwind alwaysinline {
205 %t = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 1, i32 5, i32 2, i32 7>
; 256-bit blend: lanes 0-2 from %a, lane 3 from %b.
212 define <4 x i64> @blend4(<4 x i64> %a, <4 x i64> %b) nounwind alwaysinline {
213 %t = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
; Permute 16-bit elements; every mask index stays within its own 128-bit
; half (0-7 from 0-7, 8-15 from 8-15), so the shuffle can be narrowed.
220 define <16 x i16> @narrow(<16 x i16> %a) nounwind alwaysinline {
221 %t = shufflevector <16 x i16> %a, <16 x i16> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 1, i32 6, i32 7, i32 4, i32 5, i32 10, i32 11, i32 8, i32 undef, i32 14, i32 15, i32 undef, i32 undef>
; Widen <4 x float> to 8 lanes with only lane 1 defined; should not
; require a vinsertf128.
225 ;CHECK-LABEL: test17:
226 ;CHECK-NOT: vinsertf128
228 define <8 x float> @test17(<4 x float> %y) {
229 %x = shufflevector <4 x float> %y, <4 x float> undef, <8 x i32> <i32 undef, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; Interleave the odd elements of %A and %B (a1,b1,a3,b3,a5,b5,a7,b7).
237 define <8 x float> @test18(<8 x float> %A, <8 x float>%B) nounwind {
238 %S = shufflevector <8 x float> %A, <8 x float> %B, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
; Interleave the even elements of %A and %B (a0,b0,a2,b2,a4,b4,a6,b6).
246 define <8 x float> @test19(<8 x float> %A, <8 x float>%B) nounwind {
247 %S = shufflevector <8 x float> %A, <8 x float> %B, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
; Swap eight doubles between %A (unaligned, read/written as two 128-bit
; halves combined via the vinsertf128 intrinsic) and %C (32-byte aligned,
; read/written as two 256-bit vectors). The directives below additionally
; pin the schedule: all four loads must be emitted before any store.
252 ; Make sure loads happen before stores.
253 ; CHECK: swap8doubles
254 ; CHECK: vmovups {{[0-9]*}}(%rdi), %xmm{{[0-9]+}}
255 ; CHECK: vmovups {{[0-9]*}}(%rdi), %xmm{{[0-9]+}}
256 ; CHECK: vinsertf128 $1, {{[0-9]*}}(%rdi), %ymm{{[0-9]+}}
257 ; CHECK: vinsertf128 $1, {{[0-9]*}}(%rdi), %ymm{{[0-9]+}}
258 ; CHECK: vmovaps {{[0-9]*}}(%rsi), %ymm{{[0-9]+}}
259 ; CHECK: vmovaps {{[0-9]*}}(%rsi), %ymm{{[0-9]+}}
260 ; CHECK: vmovaps %xmm{{[0-9]+}}, {{[0-9]*}}(%rdi)
261 ; CHECK: vextractf128
262 ; CHECK: vmovaps %xmm{{[0-9]+}}, {{[0-9]*}}(%rdi)
263 ; CHECK: vextractf128
264 ; CHECK: vmovaps %ymm{{[0-9]+}}, {{[0-9]*}}(%rsi)
265 ; CHECK: vmovaps %ymm{{[0-9]+}}, {{[0-9]*}}(%rsi)
266 define void @swap8doubles(double* nocapture %A, double* nocapture %C) nounwind uwtable ssp {
; Load A[0..1] and A[2..3]; widen the first to 256 bits (upper half zero)
; and insert the second into its upper half.
268 %add.ptr = getelementptr inbounds double* %A, i64 2
269 %v.i = bitcast double* %A to <2 x double>*
270 %0 = load <2 x double>* %v.i, align 1
271 %shuffle.i.i = shufflevector <2 x double> %0, <2 x double> <double 0.000000e+00, double undef>, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
272 %v1.i = bitcast double* %add.ptr to <2 x double>*
273 %1 = load <2 x double>* %v1.i, align 1
274 %2 = tail call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> %shuffle.i.i, <2 x double> %1, i8 1) nounwind
; Same for A[4..5] and A[6..7].
275 %add.ptr1 = getelementptr inbounds double* %A, i64 6
276 %add.ptr2 = getelementptr inbounds double* %A, i64 4
277 %v.i27 = bitcast double* %add.ptr2 to <2 x double>*
278 %3 = load <2 x double>* %v.i27, align 1
279 %shuffle.i.i28 = shufflevector <2 x double> %3, <2 x double> <double 0.000000e+00, double undef>, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
280 %v1.i29 = bitcast double* %add.ptr1 to <2 x double>*
281 %4 = load <2 x double>* %v1.i29, align 1
282 %5 = tail call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> %shuffle.i.i28, <2 x double> %4, i8 1) nounwind
; Load C[0..3] and C[4..7] as whole 256-bit vectors.
283 %6 = bitcast double* %C to <4 x double>*
284 %7 = load <4 x double>* %6, align 32
285 %add.ptr5 = getelementptr inbounds double* %C, i64 4
286 %8 = bitcast double* %add.ptr5 to <4 x double>*
287 %9 = load <4 x double>* %8, align 32
; Split each C vector into its low half (shufflevector) and high half
; (vextractf128 intrinsic), then write C's halves into A and A's
; combined vectors into C.
288 %shuffle.i26 = shufflevector <4 x double> %7, <4 x double> undef, <2 x i32> <i32 0, i32 1>
289 %10 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %7, i8 1)
290 %shuffle.i = shufflevector <4 x double> %9, <4 x double> undef, <2 x i32> <i32 0, i32 1>
291 %11 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %9, i8 1)
292 store <2 x double> %shuffle.i26, <2 x double>* %v.i, align 16
293 store <2 x double> %10, <2 x double>* %v1.i, align 16
294 store <2 x double> %shuffle.i, <2 x double>* %v.i27, align 16
295 store <2 x double> %11, <2 x double>* %v1.i29, align 16
296 store <4 x double> %2, <4 x double>* %6, align 32
297 store <4 x double> %5, <4 x double>* %8, align 32
; AVX 128-bit lane extract/insert intrinsics used by @swap8doubles.
300 declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone
301 declare <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double>, <2 x double>, i8) nounwind readnone
303 ; this test case just should not fail
; insertelement into a <3 x double> (including an undef scalar) and store
; through an undef pointer — purely a no-crash legalization test.
304 define void @test20() {
305 %a0 = insertelement <3 x double> <double 0.000000e+00, double 0.000000e+00, double undef>, double 0.000000e+00, i32 2
306 store <3 x double> %a0, <3 x double>* undef, align 1
307 %a1 = insertelement <3 x double> <double 0.000000e+00, double 0.000000e+00, double undef>, double undef, i32 2
308 store <3 x double> %a1, <3 x double>* undef, align 1
; Keep the low i64 of %i and zero the high i64 (index 2 selects the
; constant 0 from the second operand).
312 define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
313 ; CHECK-LABEL: test_insert_64_zext
316 %1 = shufflevector <2 x i64> %i, <2 x i64> <i64 0, i64 undef>, <2 x i32> <i32 0, i32 2>
320 ;; Ensure we don't use insertps from non v4x32 vectors.
321 ;; On SSE4.1 it works because bigger vectors use more than 1 register.
322 ;; On AVX they get passed in a single register.
323 ;; FIXME: We could probably optimize this case, if we're only using the
; Builds <x[0], 0, 0, x[0]> from element 0 of the <8 x i32> argument;
; the directive forbids lowering this via insertps.
325 define <4 x i32> @insert_from_diff_size(<8 x i32> %x) {
326 ; CHECK-LABEL: insert_from_diff_size:
327 ; CHECK-NOT: insertps
329 %vecext = extractelement <8 x i32> %x, i32 0
330 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
331 %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
332 %vecinit2 = insertelement <4 x i32> %vecinit1, i32 0, i32 2
333 %a.0 = extractelement <8 x i32> %x, i32 0
334 %vecinit3 = insertelement <4 x i32> %vecinit2, i32 %a.0, i32 3
335 ret <4 x i32> %vecinit3