1 ; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s --check-prefix=X32
2 ; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=sse4.1 -mcpu=penryn | FileCheck %s --check-prefix=X64
4 @g16 = external global i16
; pinsrd_1: insertelement of an i32 scalar into lane 1 should lower to a
; single pinsrd (from the stack slot on X32, from %edi on X64).
6 define <4 x i32> @pinsrd_1(i32 %s, <4 x i32> %tmp) nounwind {
9 ; X32-NEXT: pinsrd $1, {{[0-9]+}}(%esp), %xmm0
12 ; X64-LABEL: pinsrd_1:
14 ; X64-NEXT: pinsrd $1, %edi, %xmm0
16 %tmp1 = insertelement <4 x i32> %tmp, i32 %s, i32 1
; pinsrb_1: same pattern for an i8 element; should lower to a single pinsrb.
20 define <16 x i8> @pinsrb_1(i8 %s, <16 x i8> %tmp) nounwind {
21 ; X32-LABEL: pinsrb_1:
23 ; X32-NEXT: pinsrb $1, {{[0-9]+}}(%esp), %xmm0
26 ; X64-LABEL: pinsrb_1:
28 ; X64-NEXT: pinsrb $1, %edi, %xmm0
30 %tmp1 = insertelement <16 x i8> %tmp, i8 %s, i32 1
; pmovsxbd_1: an i32 load zero-extended into a vector and fed to the
; pmovsxbd intrinsic should fold the load into pmovsxbd's memory operand.
34 define <2 x i64> @pmovsxbd_1(i32* %p) nounwind {
35 ; X32-LABEL: pmovsxbd_1:
36 ; X32: ## BB#0: ## %entry
37 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
38 ; X32-NEXT: pmovsxbd (%eax), %xmm0
41 ; X64-LABEL: pmovsxbd_1:
42 ; X64: ## BB#0: ## %entry
43 ; X64-NEXT: pmovsxbd (%rdi), %xmm0
46 %0 = load i32* %p, align 4
47 %1 = insertelement <4 x i32> undef, i32 %0, i32 0
48 %2 = insertelement <4 x i32> %1, i32 0, i32 1
49 %3 = insertelement <4 x i32> %2, i32 0, i32 2
50 %4 = insertelement <4 x i32> %3, i32 0, i32 3
51 %5 = bitcast <4 x i32> %4 to <16 x i8>
52 %6 = tail call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %5) nounwind readnone
53 %7 = bitcast <4 x i32> %6 to <2 x i64>
; pmovsxwd_1: same load-folding check for pmovsxwd, starting from an i64 load
; inserted into lane 0 of a zeroinitializer vector.
57 define <2 x i64> @pmovsxwd_1(i64* %p) nounwind readonly {
58 ; X32-LABEL: pmovsxwd_1:
59 ; X32: ## BB#0: ## %entry
60 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
61 ; X32-NEXT: pmovsxwd (%eax), %xmm0
64 ; X64-LABEL: pmovsxwd_1:
65 ; X64: ## BB#0: ## %entry
66 ; X64-NEXT: pmovsxwd (%rdi), %xmm0
69 %0 = load i64* %p ; <i64> [#uses=1]
70 %tmp2 = insertelement <2 x i64> zeroinitializer, i64 %0, i32 0 ; <<2 x i64>> [#uses=1]
71 %1 = bitcast <2 x i64> %tmp2 to <8 x i16> ; <<8 x i16>> [#uses=1]
72 %2 = tail call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1) nounwind readnone ; <<4 x i32>> [#uses=1]
73 %3 = bitcast <4 x i32> %2 to <2 x i64> ; <<2 x i64>> [#uses=1]
; pmovzxbq_1: an i16 load from global @g16 fed to the pmovzxbq intrinsic
; should fold into pmovzxbq's memory operand (note the darwin non-lazy-ptr
; indirection to reach the external global on both targets).
77 define <2 x i64> @pmovzxbq_1() nounwind {
78 ; X32-LABEL: pmovzxbq_1:
79 ; X32: ## BB#0: ## %entry
80 ; X32-NEXT: movl L_g16$non_lazy_ptr, %eax
81 ; X32-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
84 ; X64-LABEL: pmovzxbq_1:
85 ; X64: ## BB#0: ## %entry
86 ; X64-NEXT: movq _g16@{{.*}}(%rip), %rax
87 ; X64-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
90 %0 = load i16* @g16, align 2 ; <i16> [#uses=1]
91 %1 = insertelement <8 x i16> undef, i16 %0, i32 0 ; <<8 x i16>> [#uses=1]
92 %2 = bitcast <8 x i16> %1 to <16 x i8> ; <<16 x i8>> [#uses=1]
93 %3 = tail call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %2) nounwind readnone ; <<2 x i64>> [#uses=1]
; Intrinsic declarations used by the pmovsx/pmovzx tests above.
97 declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>) nounwind readnone
98 declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone
99 declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
; extractps_1: extracting lane 3 of a <4 x float> and bitcasting to i32
; should use extractps $3 straight into a GPR.
101 define i32 @extractps_1(<4 x float> %v) nounwind {
102 ; X32-LABEL: extractps_1:
104 ; X32-NEXT: extractps $3, %xmm0, %eax
107 ; X64-LABEL: extractps_1:
109 ; X64-NEXT: extractps $3, %xmm0, %eax
111 %s = extractelement <4 x float> %v, i32 3
112 %i = bitcast float %s to i32
; extractps_2: same result reached the other way round — bitcast the vector
; to <4 x i32> first, then extract lane 3.
115 define i32 @extractps_2(<4 x float> %v) nounwind {
116 ; X32-LABEL: extractps_2:
118 ; X32-NEXT: extractps $3, %xmm0, %eax
121 ; X64-LABEL: extractps_2:
123 ; X64-NEXT: extractps $3, %xmm0, %eax
125 %t = bitcast <4 x float> %v to <4 x i32>
126 %s = extractelement <4 x i32> %t, i32 3
131 ; The non-store form of extractps puts its result into a GPR.
132 ; This makes it suitable for an extract from a <4 x float> that
133 ; is bitcasted to i32, but unsuitable for much of anything else.
; ext_1: extract lane 3 as a *float* (not bitcast to i32), then add 1.0;
; must NOT use extractps — expects shufps to move the lane to slot 0.
135 define float @ext_1(<4 x float> %v) nounwind {
138 ; X32-NEXT: pushl %eax
139 ; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
140 ; X32-NEXT: addss LCPI7_0, %xmm0
141 ; X32-NEXT: movss %xmm0, (%esp)
142 ; X32-NEXT: flds (%esp)
143 ; X32-NEXT: popl %eax
148 ; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
149 ; X64-NEXT: addss {{.*}}(%rip), %xmm0
151 %s = extractelement <4 x float> %v, i32 3
152 %t = fadd float %s, 1.0
; ext_2: plain float extract of lane 3, again via shufps (X32 returns the
; float through the x87 stack, hence the movss/flds pair).
155 define float @ext_2(<4 x float> %v) nounwind {
158 ; X32-NEXT: pushl %eax
159 ; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
160 ; X32-NEXT: movss %xmm0, (%esp)
161 ; X32-NEXT: flds (%esp)
162 ; X32-NEXT: popl %eax
167 ; X64-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
169 %s = extractelement <4 x float> %v, i32 3
; ext_3: integer-lane extract into a GPR should use pextrd.
172 define i32 @ext_3(<4 x i32> %v) nounwind {
175 ; X32-NEXT: pextrd $3, %xmm0, %eax
180 ; X64-NEXT: pextrd $3, %xmm0, %eax
182 %i = extractelement <4 x i32> %v, i32 3
; insertps_1: direct call of the insertps intrinsic with immediate 1
; (zero-mask bit 0) — lane 0 of the result is zeroed.
186 define <4 x float> @insertps_1(<4 x float> %t1, <4 x float> %t2) nounwind {
187 ; X32-LABEL: insertps_1:
189 ; X32-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1,2,3]
192 ; X64-LABEL: insertps_1:
194 ; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1,2,3]
196 %tmp1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %t1, <4 x float> %t2, i32 1) nounwind readnone
197 ret <4 x float> %tmp1
200 declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) nounwind readnone
; insertps_2: insertelement of a scalar float argument into lane 0 should
; become insertps (on X32 the float arrives on the stack, hence mem[0]).
202 define <4 x float> @insertps_2(<4 x float> %t1, float %t2) nounwind {
203 ; X32-LABEL: insertps_2:
205 ; X32-NEXT: insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
208 ; X64-LABEL: insertps_2:
210 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
212 %tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
213 ret <4 x float> %tmp1
; insertps_3: extract lane 0 of %t2 and insert into lane 0 of %t1 —
; should collapse to one insertps xmm1[0] -> xmm0[0].
215 define <4 x float> @insertps_3(<4 x float> %t1, <4 x float> %t2) nounwind {
216 ; X32-LABEL: insertps_3:
218 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
221 ; X64-LABEL: insertps_3:
223 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
225 %tmp2 = extractelement <4 x float> %t2, i32 0
226 %tmp1 = insertelement <4 x float> %t1, float %tmp2, i32 0
227 ret <4 x float> %tmp1
; ptestz_1: ptestz intrinsic — ptest plus a flag-to-i32 materialization
; (movzbl of the setcc result).
230 define i32 @ptestz_1(<2 x i64> %t1, <2 x i64> %t2) nounwind {
231 ; X32-LABEL: ptestz_1:
233 ; X32-NEXT: ptest %xmm1, %xmm0
235 ; X32-NEXT: movzbl %al, %eax
238 ; X64-LABEL: ptestz_1:
240 ; X64-NEXT: ptest %xmm1, %xmm0
242 ; X64-NEXT: movzbl %al, %eax
244 %tmp1 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
; ptestz_2: ptestc intrinsic — carry flag captured via sbbl/andl $1.
248 define i32 @ptestz_2(<2 x i64> %t1, <2 x i64> %t2) nounwind {
249 ; X32-LABEL: ptestz_2:
251 ; X32-NEXT: ptest %xmm1, %xmm0
252 ; X32-NEXT: sbbl %eax, %eax
253 ; X32-NEXT: andl $1, %eax
256 ; X64-LABEL: ptestz_2:
258 ; X64-NEXT: ptest %xmm1, %xmm0
259 ; X64-NEXT: sbbl %eax, %eax
260 ; X64-NEXT: andl $1, %eax
262 %tmp1 = call i32 @llvm.x86.sse41.ptestc(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
; ptestz_3: ptestnzc intrinsic — same ptest + movzbl shape as ptestz_1.
266 define i32 @ptestz_3(<2 x i64> %t1, <2 x i64> %t2) nounwind {
267 ; X32-LABEL: ptestz_3:
269 ; X32-NEXT: ptest %xmm1, %xmm0
271 ; X32-NEXT: movzbl %al, %eax
274 ; X64-LABEL: ptestz_3:
276 ; X64-NEXT: ptest %xmm1, %xmm0
278 ; X64-NEXT: movzbl %al, %eax
280 %tmp1 = call i32 @llvm.x86.sse41.ptestnzc(<2 x i64> %t1, <2 x i64> %t2) nounwind readnone
; Intrinsic declarations for the ptest tests above.
285 declare i32 @llvm.x86.sse41.ptestz(<2 x i64>, <2 x i64>) nounwind readnone
286 declare i32 @llvm.x86.sse41.ptestc(<2 x i64>, <2 x i64>) nounwind readnone
287 declare i32 @llvm.x86.sse41.ptestnzc(<2 x i64>, <2 x i64>) nounwind readnone
289 ; This used to compile to insertps $0 + insertps $16. insertps $0 is always
; buildvector: complex-add shape — extract real/imag lanes of %A and %B,
; add pairwise, rebuild a <2 x float>. Expects movshdup for the high lanes
; and a single insertps to merge the second sum, identical on X32 and X64.
291 define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind {
292 ; X32-LABEL: buildvector:
293 ; X32: ## BB#0: ## %entry
294 ; X32-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
295 ; X32-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
296 ; X32-NEXT: addss %xmm1, %xmm0
297 ; X32-NEXT: addss %xmm2, %xmm3
298 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
301 ; X64-LABEL: buildvector:
302 ; X64: ## BB#0: ## %entry
303 ; X64-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
304 ; X64-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
305 ; X64-NEXT: addss %xmm1, %xmm0
306 ; X64-NEXT: addss %xmm2, %xmm3
307 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
310 %tmp7 = extractelement <2 x float> %A, i32 0
311 %tmp5 = extractelement <2 x float> %A, i32 1
312 %tmp3 = extractelement <2 x float> %B, i32 0
313 %tmp1 = extractelement <2 x float> %B, i32 1
314 %add.r = fadd float %tmp7, %tmp3
315 %add.i = fadd float %tmp5, %tmp1
316 %tmp11 = insertelement <2 x float> undef, float %add.r, i32 0
317 %tmp9 = insertelement <2 x float> %tmp11, float %add.i, i32 1
318 ret <2 x float> %tmp9
; insertps_from_shufflevector_1: shuffle mask <0,1,2,4> pulling lane 0 of a
; loaded vector into lane 3 — the load should fold into insertps mem[0].
321 define <4 x float> @insertps_from_shufflevector_1(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
322 ; X32-LABEL: insertps_from_shufflevector_1:
323 ; X32: ## BB#0: ## %entry
324 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
325 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
328 ; X64-LABEL: insertps_from_shufflevector_1:
329 ; X64: ## BB#0: ## %entry
330 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
333 %0 = load <4 x float>* %pb, align 16
334 %vecinit6 = shufflevector <4 x float> %a, <4 x float> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
335 ret <4 x float> %vecinit6
; insertps_from_shufflevector_2: register-register variant — mask <0,1,5,3>
; takes lane 1 of %b into lane 2 of %a with one insertps.
338 define <4 x float> @insertps_from_shufflevector_2(<4 x float> %a, <4 x float> %b) {
339 ; X32-LABEL: insertps_from_shufflevector_2:
340 ; X32: ## BB#0: ## %entry
341 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
344 ; X64-LABEL: insertps_from_shufflevector_2:
345 ; X64: ## BB#0: ## %entry
346 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1],xmm0[3]
349 %vecinit6 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 5, i32 3>
350 ret <4 x float> %vecinit6
353 ; For loading an i32 from memory into an xmm register we use pinsrd
354 ; instead of insertps
; pinsrd_from_shufflevector_i32: integer version of the mask <0,1,2,4>
; shuffle from memory; currently lowers to pshufd + pblendw (see comment
; above for the intended pinsrd).
355 define <4 x i32> @pinsrd_from_shufflevector_i32(<4 x i32> %a, <4 x i32>* nocapture readonly %pb) {
356 ; X32-LABEL: pinsrd_from_shufflevector_i32:
357 ; X32: ## BB#0: ## %entry
358 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
359 ; X32-NEXT: pshufd {{.*#+}} xmm1 = mem[0,1,2,0]
360 ; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
363 ; X64-LABEL: pinsrd_from_shufflevector_i32:
364 ; X64: ## BB#0: ## %entry
365 ; X64-NEXT: pshufd {{.*#+}} xmm1 = mem[0,1,2,0]
366 ; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
369 %0 = load <4 x i32>* %pb, align 16
370 %vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
371 ret <4 x i32> %vecinit6
; insertps_from_shufflevector_i32_2: register-register integer shuffle
; <0,7,2,3> — lane 3 of %b into lane 1 of %a via pshufd + pblendw.
374 define <4 x i32> @insertps_from_shufflevector_i32_2(<4 x i32> %a, <4 x i32> %b) {
375 ; X32-LABEL: insertps_from_shufflevector_i32_2:
376 ; X32: ## BB#0: ## %entry
377 ; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
378 ; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
381 ; X64-LABEL: insertps_from_shufflevector_i32_2:
382 ; X64: ## BB#0: ## %entry
383 ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
384 ; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
387 %vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 7, i32 2, i32 3>
388 ret <4 x i32> %vecinit6
; insertps_from_load_ins_elt_undef: scalar float load -> insertelement into
; undef -> shuffle into lane 1; the load should fold into insertps mem[0].
391 define <4 x float> @insertps_from_load_ins_elt_undef(<4 x float> %a, float* %b) {
392 ; X32-LABEL: insertps_from_load_ins_elt_undef:
394 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
395 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
398 ; X64-LABEL: insertps_from_load_ins_elt_undef:
400 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
402 %1 = load float* %b, align 4
403 %2 = insertelement <4 x float> undef, float %1, i32 0
404 %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
405 ret <4 x float> %result
408 ; TODO: Like on pinsrd_from_shufflevector_i32, remove this mov instr
; insertps_from_load_ins_elt_undef_i32: integer version; currently goes
; through movd + pshufd + pblendw (see TODO above).
409 define <4 x i32> @insertps_from_load_ins_elt_undef_i32(<4 x i32> %a, i32* %b) {
410 ; X32-LABEL: insertps_from_load_ins_elt_undef_i32:
412 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
413 ; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
414 ; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
415 ; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
418 ; X64-LABEL: insertps_from_load_ins_elt_undef_i32:
420 ; X64-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
421 ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
422 ; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
424 %1 = load i32* %b, align 4
425 %2 = insertelement <4 x i32> undef, i32 %1, i32 0
426 %result = shufflevector <4 x i32> %a, <4 x i32> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
427 ret <4 x i32> %result
430 ;;;;;; Shuffles optimizable with a single insertps or blend instruction
; shuf_XYZ0: keep lanes X,Y,Z and zero lane 3 — lowered as xorps + blendps.
431 define <4 x float> @shuf_XYZ0(<4 x float> %x, <4 x float> %a) {
432 ; X32-LABEL: shuf_XYZ0:
434 ; X32-NEXT: xorps %xmm1, %xmm1
435 ; X32-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
438 ; X64-LABEL: shuf_XYZ0:
440 ; X64-NEXT: xorps %xmm1, %xmm1
441 ; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
443 %vecext = extractelement <4 x float> %x, i32 0
444 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
445 %vecext1 = extractelement <4 x float> %x, i32 1
446 %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
447 %vecext3 = extractelement <4 x float> %x, i32 2
448 %vecinit4 = insertelement <4 x float> %vecinit2, float %vecext3, i32 2
449 %vecinit5 = insertelement <4 x float> %vecinit4, float 0.0, i32 3
450 ret <4 x float> %vecinit5
; shuf_XY00: keep X,Y and zero the top half — a single movq does it.
453 define <4 x float> @shuf_XY00(<4 x float> %x, <4 x float> %a) {
454 ; X32-LABEL: shuf_XY00:
456 ; X32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
459 ; X64-LABEL: shuf_XY00:
461 ; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
463 %vecext = extractelement <4 x float> %x, i32 0
464 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
465 %vecext1 = extractelement <4 x float> %x, i32 1
466 %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
467 %vecinit3 = insertelement <4 x float> %vecinit2, float 0.0, i32 2
468 %vecinit4 = insertelement <4 x float> %vecinit3, float 0.0, i32 3
469 ret <4 x float> %vecinit4
; shuf_XYY0: X,Y,Y with zeroed lane 3 — one insertps with a zero mask.
472 define <4 x float> @shuf_XYY0(<4 x float> %x, <4 x float> %a) {
473 ; X32-LABEL: shuf_XYY0:
475 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
478 ; X64-LABEL: shuf_XYY0:
480 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
482 %vecext = extractelement <4 x float> %x, i32 0
483 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
484 %vecext1 = extractelement <4 x float> %x, i32 1
485 %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
486 %vecinit4 = insertelement <4 x float> %vecinit2, float %vecext1, i32 2
487 %vecinit5 = insertelement <4 x float> %vecinit4, float 0.0, i32 3
488 ret <4 x float> %vecinit5
; shuf_XYW0: X,Y,W with zeroed lane 3 — one insertps.
491 define <4 x float> @shuf_XYW0(<4 x float> %x, <4 x float> %a) {
492 ; X32-LABEL: shuf_XYW0:
494 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
497 ; X64-LABEL: shuf_XYW0:
499 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
501 %vecext = extractelement <4 x float> %x, i32 0
502 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
503 %vecext1 = extractelement <4 x float> %x, i32 1
504 %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
505 %vecext2 = extractelement <4 x float> %x, i32 3
506 %vecinit3 = insertelement <4 x float> %vecinit2, float %vecext2, i32 2
507 %vecinit4 = insertelement <4 x float> %vecinit3, float 0.0, i32 3
508 ret <4 x float> %vecinit4
; shuf_W00W: W,0,0,W — one insertps broadcasting lane 3 with zeroed middle.
511 define <4 x float> @shuf_W00W(<4 x float> %x, <4 x float> %a) {
512 ; X32-LABEL: shuf_W00W:
514 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
517 ; X64-LABEL: shuf_W00W:
519 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
521 %vecext = extractelement <4 x float> %x, i32 3
522 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
523 %vecinit2 = insertelement <4 x float> %vecinit, float 0.0, i32 1
524 %vecinit3 = insertelement <4 x float> %vecinit2, float 0.0, i32 2
525 %vecinit4 = insertelement <4 x float> %vecinit3, float %vecext, i32 3
526 ret <4 x float> %vecinit4
; shuf_X00A: X,0,0 then pull lane 0 of %a into lane 3 —
; currently xorps + blendps + insertps.
529 define <4 x float> @shuf_X00A(<4 x float> %x, <4 x float> %a) {
530 ; X32-LABEL: shuf_X00A:
532 ; X32-NEXT: xorps %xmm2, %xmm2
533 ; X32-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
534 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
537 ; X64-LABEL: shuf_X00A:
539 ; X64-NEXT: xorps %xmm2, %xmm2
540 ; X64-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
541 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
543 %vecext = extractelement <4 x float> %x, i32 0
544 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
545 %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
546 %vecinit2 = insertelement <4 x float> %vecinit1, float 0.0, i32 2
547 %vecinit4 = shufflevector <4 x float> %vecinit2, <4 x float> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
548 ret <4 x float> %vecinit4
; shuf_X00X: same but the lane-3 element comes from %x itself, which costs
; an extra movaps to preserve the source register.
551 define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
552 ; X32-LABEL: shuf_X00X:
554 ; X32-NEXT: xorps %xmm1, %xmm1
555 ; X32-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
556 ; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
557 ; X32-NEXT: movaps %xmm1, %xmm0
560 ; X64-LABEL: shuf_X00X:
562 ; X64-NEXT: xorps %xmm1, %xmm1
563 ; X64-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
564 ; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
565 ; X64-NEXT: movaps %xmm1, %xmm0
567 %vecext = extractelement <4 x float> %x, i32 0
568 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
569 %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
570 %vecinit2 = insertelement <4 x float> %vecinit1, float 0.0, i32 2
571 %vecinit4 = shufflevector <4 x float> %vecinit2, <4 x float> %x, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
572 ret <4 x float> %vecinit4
; shuf_X0YC: X,0,Y then lane 2 of %a in lane 3 — built from two chained
; shufflevectors; currently blendps + two insertps + movaps.
575 define <4 x float> @shuf_X0YC(<4 x float> %x, <4 x float> %a) {
576 ; X32-LABEL: shuf_X0YC:
578 ; X32-NEXT: xorps %xmm2, %xmm2
579 ; X32-NEXT: blendps {{.*#+}} xmm2 = xmm0[0],xmm2[1],xmm0[2,3]
580 ; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1],zero
581 ; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
582 ; X32-NEXT: movaps %xmm2, %xmm0
585 ; X64-LABEL: shuf_X0YC:
587 ; X64-NEXT: xorps %xmm2, %xmm2
588 ; X64-NEXT: blendps {{.*#+}} xmm2 = xmm0[0],xmm2[1],xmm0[2,3]
589 ; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1],zero
590 ; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
591 ; X64-NEXT: movaps %xmm2, %xmm0
593 %vecext = extractelement <4 x float> %x, i32 0
594 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
595 %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
596 %vecinit3 = shufflevector <4 x float> %vecinit1, <4 x float> %x, <4 x i32> <i32 0, i32 1, i32 5, i32 undef>
597 %vecinit5 = shufflevector <4 x float> %vecinit3, <4 x float> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 6>
598 ret <4 x float> %vecinit5
; i32_shuf_XYZ0: integer counterpart of shuf_XYZ0 — pxor + pblendw.
601 define <4 x i32> @i32_shuf_XYZ0(<4 x i32> %x, <4 x i32> %a) {
602 ; X32-LABEL: i32_shuf_XYZ0:
604 ; X32-NEXT: pxor %xmm1, %xmm1
605 ; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
608 ; X64-LABEL: i32_shuf_XYZ0:
610 ; X64-NEXT: pxor %xmm1, %xmm1
611 ; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
613 %vecext = extractelement <4 x i32> %x, i32 0
614 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
615 %vecext1 = extractelement <4 x i32> %x, i32 1
616 %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
617 %vecext3 = extractelement <4 x i32> %x, i32 2
618 %vecinit4 = insertelement <4 x i32> %vecinit2, i32 %vecext3, i32 2
619 %vecinit5 = insertelement <4 x i32> %vecinit4, i32 0, i32 3
620 ret <4 x i32> %vecinit5
; i32_shuf_XY00: integer counterpart of shuf_XY00 — a single movq.
623 define <4 x i32> @i32_shuf_XY00(<4 x i32> %x, <4 x i32> %a) {
624 ; X32-LABEL: i32_shuf_XY00:
626 ; X32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
629 ; X64-LABEL: i32_shuf_XY00:
631 ; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
633 %vecext = extractelement <4 x i32> %x, i32 0
634 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
635 %vecext1 = extractelement <4 x i32> %x, i32 1
636 %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
637 %vecinit3 = insertelement <4 x i32> %vecinit2, i32 0, i32 2
638 %vecinit4 = insertelement <4 x i32> %vecinit3, i32 0, i32 3
639 ret <4 x i32> %vecinit4
; i32_shuf_XYY0: integer X,Y,Y,0 — lowered via (float-domain) insertps.
642 define <4 x i32> @i32_shuf_XYY0(<4 x i32> %x, <4 x i32> %a) {
643 ; X32-LABEL: i32_shuf_XYY0:
645 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
648 ; X64-LABEL: i32_shuf_XYY0:
650 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
652 %vecext = extractelement <4 x i32> %x, i32 0
653 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
654 %vecext1 = extractelement <4 x i32> %x, i32 1
655 %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
656 %vecinit4 = insertelement <4 x i32> %vecinit2, i32 %vecext1, i32 2
657 %vecinit5 = insertelement <4 x i32> %vecinit4, i32 0, i32 3
658 ret <4 x i32> %vecinit5
; i32_shuf_XYW0: integer X,Y,W,0 — one insertps.
661 define <4 x i32> @i32_shuf_XYW0(<4 x i32> %x, <4 x i32> %a) {
662 ; X32-LABEL: i32_shuf_XYW0:
664 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
667 ; X64-LABEL: i32_shuf_XYW0:
669 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
671 %vecext = extractelement <4 x i32> %x, i32 0
672 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
673 %vecext1 = extractelement <4 x i32> %x, i32 1
674 %vecinit2 = insertelement <4 x i32> %vecinit, i32 %vecext1, i32 1
675 %vecext2 = extractelement <4 x i32> %x, i32 3
676 %vecinit3 = insertelement <4 x i32> %vecinit2, i32 %vecext2, i32 2
677 %vecinit4 = insertelement <4 x i32> %vecinit3, i32 0, i32 3
678 ret <4 x i32> %vecinit4
; i32_shuf_W00W: integer W,0,0,W — one insertps.
681 define <4 x i32> @i32_shuf_W00W(<4 x i32> %x, <4 x i32> %a) {
682 ; X32-LABEL: i32_shuf_W00W:
684 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
687 ; X64-LABEL: i32_shuf_W00W:
689 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
691 %vecext = extractelement <4 x i32> %x, i32 3
692 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
693 %vecinit2 = insertelement <4 x i32> %vecinit, i32 0, i32 1
694 %vecinit3 = insertelement <4 x i32> %vecinit2, i32 0, i32 2
695 %vecinit4 = insertelement <4 x i32> %vecinit3, i32 %vecext, i32 3
696 ret <4 x i32> %vecinit4
; i32_shuf_X00A: integer X,0,0,A — pxor/pblendw to zero, then
; pshufd + pblendw to merge lane 0 of %a into lane 3.
699 define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
700 ; X32-LABEL: i32_shuf_X00A:
702 ; X32-NEXT: pxor %xmm2, %xmm2
703 ; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
704 ; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
705 ; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
708 ; X64-LABEL: i32_shuf_X00A:
710 ; X64-NEXT: pxor %xmm2, %xmm2
711 ; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
712 ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
713 ; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
715 %vecext = extractelement <4 x i32> %x, i32 0
716 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
717 %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
718 %vecinit2 = insertelement <4 x i32> %vecinit1, i32 0, i32 2
719 %vecinit4 = shufflevector <4 x i32> %vecinit2, <4 x i32> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
720 ret <4 x i32> %vecinit4
; i32_shuf_X00X: same shape with the lane-3 element taken from %x.
723 define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
724 ; X32-LABEL: i32_shuf_X00X:
726 ; X32-NEXT: pxor %xmm1, %xmm1
727 ; X32-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
728 ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
729 ; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
732 ; X64-LABEL: i32_shuf_X00X:
734 ; X64-NEXT: pxor %xmm1, %xmm1
735 ; X64-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
736 ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
737 ; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
739 %vecext = extractelement <4 x i32> %x, i32 0
740 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
741 %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
742 %vecinit2 = insertelement <4 x i32> %vecinit1, i32 0, i32 2
743 %vecinit4 = shufflevector <4 x i32> %vecinit2, <4 x i32> %x, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
744 ret <4 x i32> %vecinit4
; i32_shuf_X0YC: integer X,0,Y,C — pmovzxdq handles the X,0,Y,0 part,
; then pshufd + pblendw merge lane 2 of %a.
747 define <4 x i32> @i32_shuf_X0YC(<4 x i32> %x, <4 x i32> %a) {
748 ; X32-LABEL: i32_shuf_X0YC:
750 ; X32-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
751 ; X32-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,7]
752 ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,2,2]
753 ; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5],xmm0[6,7]
756 ; X64-LABEL: i32_shuf_X0YC:
758 ; X64-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
759 ; X64-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,7]
760 ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,2,2]
761 ; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5],xmm0[6,7]
763 %vecext = extractelement <4 x i32> %x, i32 0
764 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
765 %vecinit1 = insertelement <4 x i32> %vecinit, i32 0, i32 1
766 %vecinit3 = shufflevector <4 x i32> %vecinit1, <4 x i32> %x, <4 x i32> <i32 0, i32 1, i32 5, i32 undef>
767 %vecinit5 = shufflevector <4 x i32> %vecinit3, <4 x i32> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 6>
768 ret <4 x i32> %vecinit5
771 ;; Test for a bug in the first implementation of LowerBuildVectorv4x32
; test_insertps_no_undef: the XYZ0 build-vector feeds an fcmp/select (max
; pattern), so the zeroed copy must be materialized (blendps into xmm1)
; rather than clobbering the source.
772 define < 4 x float> @test_insertps_no_undef(<4 x float> %x) {
773 ; X32-LABEL: test_insertps_no_undef:
775 ; X32-NEXT: xorps %xmm1, %xmm1
776 ; X32-NEXT: blendps {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3]
777 ; X32-NEXT: maxps %xmm1, %xmm0
780 ; X64-LABEL: test_insertps_no_undef:
782 ; X64-NEXT: xorps %xmm1, %xmm1
783 ; X64-NEXT: blendps {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3]
784 ; X64-NEXT: maxps %xmm1, %xmm0
786 %vecext = extractelement <4 x float> %x, i32 0
787 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
788 %vecext1 = extractelement <4 x float> %x, i32 1
789 %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
790 %vecext3 = extractelement <4 x float> %x, i32 2
791 %vecinit4 = insertelement <4 x float> %vecinit2, float %vecext3, i32 2
792 %vecinit5 = insertelement <4 x float> %vecinit4, float 0.0, i32 3
793 %mask = fcmp olt <4 x float> %vecinit5, %x
794 %res = select <4 x i1> %mask, <4 x float> %x, <4 x float>%vecinit5
; blendvb_fallback: select on an <8 x i1> mask — the mask is sign-extended
; (psllw/psraw $15) and the select lowered to pblendvb.
798 define <8 x i16> @blendvb_fallback(<8 x i1> %mask, <8 x i16> %x, <8 x i16> %y) {
799 ; X32-LABEL: blendvb_fallback:
801 ; X32-NEXT: psllw $15, %xmm0
802 ; X32-NEXT: psraw $15, %xmm0
803 ; X32-NEXT: pblendvb %xmm1, %xmm2
804 ; X32-NEXT: movdqa %xmm2, %xmm0
807 ; X64-LABEL: blendvb_fallback:
809 ; X64-NEXT: psllw $15, %xmm0
810 ; X64-NEXT: psraw $15, %xmm0
811 ; X64-NEXT: pblendvb %xmm1, %xmm2
812 ; X64-NEXT: movdqa %xmm2, %xmm0
814 %ret = select <8 x i1> %mask, <8 x i16> %x, <8 x i16> %y
818 ; On X32, account for the argument's move to registers
; insertps_from_vector_load: insertps intrinsic with a loaded second operand
; and CountS=0 (imm 48 = 0x30) — the load folds into insertps mem[0].
819 define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
820 ; X32-LABEL: insertps_from_vector_load:
822 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
823 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
826 ; X64-LABEL: insertps_from_vector_load:
828 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
830 %1 = load <4 x float>* %pb, align 16
831 %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
835 ;; Use a non-zero CountS for insertps
836 ;; Try to match a bit more of the instr, since we need the load's offset.
; insertps_from_vector_load_offset: imm 96 = 0x60 selects source element 1
; (non-zero CountS), so the folded load reads mem[1].
837 define <4 x float> @insertps_from_vector_load_offset(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
838 ; X32-LABEL: insertps_from_vector_load_offset:
840 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
841 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[1],xmm0[3]
844 ; X64-LABEL: insertps_from_vector_load_offset:
846 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[1],xmm0[3]
848 %1 = load <4 x float>* %pb, align 16
849 %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
853 ;; Try to match a bit more of the instr, since we need the load's offset.
; insertps_from_vector_load_offset_2: indexed load (GEP scaled by 16 via
; shl $4) folded into insertps with imm 192 = 0xC0 (source element 3).
854 define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x float>* nocapture readonly %pb, i64 %index) {
855 ; X32-LABEL: insertps_from_vector_load_offset_2:
857 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
858 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
859 ; X32-NEXT: shll $4, %ecx
860 ; X32-NEXT: insertps {{.*#+}} xmm0 = mem[3],xmm0[1,2,3]
863 ; X64-LABEL: insertps_from_vector_load_offset_2:
865 ; X64-NEXT: shlq $4, %rsi
866 ; X64-NEXT: insertps {{.*#+}} xmm0 = mem[3],xmm0[1,2,3]
868 %1 = getelementptr inbounds <4 x float>* %pb, i64 %index
869 %2 = load <4 x float>* %1, align 16
870 %3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
; insertps_from_broadcast_loadf32: a scalar float broadcast (movss + shufps
; splat) feeding insertps imm 48 — the splat source is an indexed load.
874 define <4 x float> @insertps_from_broadcast_loadf32(<4 x float> %a, float* nocapture readonly %fb, i64 %index) {
875 ; X32-LABEL: insertps_from_broadcast_loadf32:
877 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
878 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
879 ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
880 ; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
881 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
884 ; X64-LABEL: insertps_from_broadcast_loadf32:
886 ; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
887 ; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
888 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
890 %1 = getelementptr inbounds float* %fb, i64 %index
891 %2 = load float* %1, align 4
892 %3 = insertelement <4 x float> undef, float %2, i32 0
893 %4 = insertelement <4 x float> %3, float %2, i32 1
894 %5 = insertelement <4 x float> %4, float %2, i32 2
895 %6 = insertelement <4 x float> %5, float %2, i32 3
896 %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
; insertps_from_broadcast_loadv4f32: broadcast built from lane 0 of an
; unaligned vector load (movups + shufps splat), then insertps imm 48.
900 define <4 x float> @insertps_from_broadcast_loadv4f32(<4 x float> %a, <4 x float>* nocapture readonly %b) {
901 ; X32-LABEL: insertps_from_broadcast_loadv4f32:
903 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
904 ; X32-NEXT: movups (%eax), %xmm1
905 ; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
906 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
909 ; X64-LABEL: insertps_from_broadcast_loadv4f32:
911 ; X64-NEXT: movups (%rdi), %xmm1
912 ; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
913 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
915 %1 = load <4 x float>* %b, align 4
916 %2 = extractelement <4 x float> %1, i32 0
917 %3 = insertelement <4 x float> undef, float %2, i32 0
918 %4 = insertelement <4 x float> %3, float %2, i32 1
919 %5 = insertelement <4 x float> %4, float %2, i32 2
920 %6 = insertelement <4 x float> %5, float %2, i32 3
921 %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
925 ;; FIXME: We're emitting an extraneous pshufd/vbroadcast.
; insertps_from_broadcast_multiple_use: one broadcast value used by four
; insertps calls; the splat (movss + shufps into xmm4) should be done once
; and reused, then the four results summed with addps.
926 define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d, float* nocapture readonly %fb, i64 %index) {
927 ; X32-LABEL: insertps_from_broadcast_multiple_use:
929 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
930 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
931 ; X32-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
932 ; X32-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0,0,0]
933 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
934 ; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
935 ; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
936 ; X32-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
937 ; X32-NEXT: addps %xmm1, %xmm0
938 ; X32-NEXT: addps %xmm2, %xmm3
939 ; X32-NEXT: addps %xmm3, %xmm0
942 ; X64-LABEL: insertps_from_broadcast_multiple_use:
944 ; X64-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
945 ; X64-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,0,0,0]
946 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
947 ; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
948 ; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
949 ; X64-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
950 ; X64-NEXT: addps %xmm1, %xmm0
951 ; X64-NEXT: addps %xmm2, %xmm3
952 ; X64-NEXT: addps %xmm3, %xmm0
954 %1 = getelementptr inbounds float* %fb, i64 %index
955 %2 = load float* %1, align 4
956 %3 = insertelement <4 x float> undef, float %2, i32 0
957 %4 = insertelement <4 x float> %3, float %2, i32 1
958 %5 = insertelement <4 x float> %4, float %2, i32 2
959 %6 = insertelement <4 x float> %5, float %2, i32 3
960 %7 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %6, i32 48)
961 %8 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %b, <4 x float> %6, i32 48)
962 %9 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %c, <4 x float> %6, i32 48)
963 %10 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %d, <4 x float> %6, i32 48)
964 %11 = fadd <4 x float> %7, %8
965 %12 = fadd <4 x float> %9, %10
966 %13 = fadd <4 x float> %11, %12
; A shuffle with undef lanes (mask <4, undef, 0, 7>) whose only defined lanes
; are the loaded scalar and %a[0] should lower to a single unpcklpd of the
; movss'd load with %a, rather than an insertps.
970 define <4 x float> @insertps_with_undefs(<4 x float> %a, float* %b) {
971 ; X32-LABEL: insertps_with_undefs:
973 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
974 ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
975 ; X32-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
976 ; X32-NEXT: movapd %xmm1, %xmm0
979 ; X64-LABEL: insertps_with_undefs:
981 ; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
982 ; X64-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
983 ; X64-NEXT: movapd %xmm1, %xmm0
985 %1 = load float* %b, align 4
986 %2 = insertelement <4 x float> undef, float %1, i32 0
987 %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 4, i32 undef, i32 0, i32 7>
988 ret <4 x float> %result
991 ; Test for a bug in X86ISelLowering.cpp:getINSERTPS where we were using
992 ; the destination index to change the load, instead of the source index.
; The folded memory operand must be addressed by the *source* element index:
; the CHECK lines pin mem[2] (element 2 of the load) landing in lane 3.
993 define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
994 ; X32-LABEL: pr20087:
996 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
997 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[2]
1000 ; X64-LABEL: pr20087:
1002 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[2]
1004 %load = load <4 x float> *%ptr
; Result lanes: <%a[0], undef, %a[2], %load[2]>.
1005 %ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
1006 ret <4 x float> %ret
1009 ; Edge case for insertps where we end up with a shuffle with mask=<0, 7, -1, -1>
; The constant operands fold so the final shuffle blends element 3 of
; <0,1,2,3> with element 3 of <4,5,6,7>, i.e. <3, 7, x, x>; this lowers to
; pshufd + pblendw on constant-pool data, stored unaligned via movdqu.
1010 define void @insertps_pr20411(i32* noalias nocapture %RET) #1 {
1011 ; X32-LABEL: insertps_pr20411:
1013 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
1014 ; X32-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,0,1]
1015 ; X32-NEXT: pblendw {{.*#+}} xmm0 = mem[0,1],xmm0[2,3],mem[4,5,6,7]
1016 ; X32-NEXT: movdqu %xmm0, (%eax)
1019 ; X64-LABEL: insertps_pr20411:
1021 ; X64-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,0,1]
1022 ; X64-NEXT: pblendw {{.*#+}} xmm0 = mem[0,1],xmm0[2,3],mem[4,5,6,7]
1023 ; X64-NEXT: movdqu %xmm0, (%rdi)
1025 %gather_load = shufflevector <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
1026 %shuffle109 = shufflevector <4 x i32> <i32 4, i32 5, i32 6, i32 7>, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; 4 5 6 7
1027 %shuffle116 = shufflevector <8 x i32> %gather_load, <8 x i32> undef, <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef> ; 3 x x x
1028 %shuffle117 = shufflevector <4 x i32> %shuffle109, <4 x i32> %shuffle116, <4 x i32> <i32 4, i32 3, i32 undef, i32 undef> ; 3 7 x x
1029 %ptrcast = bitcast i32* %RET to <4 x i32>*
1030 store <4 x i32> %shuffle117, <4 x i32>* %ptrcast, align 4
; Build vector <%A[0], 0, %B[2], 0>: must fold to one insertps that moves
; B's lane 2 into lane 2 and uses the zero-mask to clear lanes 1 and 3.
1034 define <4 x float> @insertps_4(<4 x float> %A, <4 x float> %B) {
1035 ; X32-LABEL: insertps_4:
1036 ; X32: ## BB#0: ## %entry
1037 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
1040 ; X64-LABEL: insertps_4:
1041 ; X64: ## BB#0: ## %entry
1042 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
1045 %vecext = extractelement <4 x float> %A, i32 0
1046 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
1047 %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
1048 %vecext2 = extractelement <4 x float> %B, i32 2
1049 %vecinit3 = insertelement <4 x float> %vecinit1, float %vecext2, i32 2
1050 %vecinit4 = insertelement <4 x float> %vecinit3, float 0.000000e+00, i32 3
1051 ret <4 x float> %vecinit4
; Build vector <%A[0], %B[1], 0, 0>: one insertps (B lane 1 -> lane 1,
; zero-mask clears lanes 2 and 3).
1054 define <4 x float> @insertps_5(<4 x float> %A, <4 x float> %B) {
1055 ; X32-LABEL: insertps_5:
1056 ; X32: ## BB#0: ## %entry
1057 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
1060 ; X64-LABEL: insertps_5:
1061 ; X64: ## BB#0: ## %entry
1062 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[1],zero,zero
1065 %vecext = extractelement <4 x float> %A, i32 0
1066 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
1067 %vecext1 = extractelement <4 x float> %B, i32 1
1068 %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
1069 %vecinit3 = insertelement <4 x float> %vecinit2, float 0.000000e+00, i32 2
1070 %vecinit4 = insertelement <4 x float> %vecinit3, float 0.000000e+00, i32 3
1071 ret <4 x float> %vecinit4
; Build vector <0, %A[1], %B[2], 0>: one insertps (B lane 2 -> lane 2,
; zero-mask clears lanes 0 and 3).
1074 define <4 x float> @insertps_6(<4 x float> %A, <4 x float> %B) {
1075 ; X32-LABEL: insertps_6:
1076 ; X32: ## BB#0: ## %entry
1077 ; X32-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
1080 ; X64-LABEL: insertps_6:
1081 ; X64: ## BB#0: ## %entry
1082 ; X64-NEXT: insertps {{.*#+}} xmm0 = zero,xmm0[1],xmm1[2],zero
1085 %vecext = extractelement <4 x float> %A, i32 1
1086 %vecinit = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %vecext, i32 1
1087 %vecext1 = extractelement <4 x float> %B, i32 2
1088 %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 2
1089 %vecinit3 = insertelement <4 x float> %vecinit2, float 0.000000e+00, i32 3
1090 ret <4 x float> %vecinit3
; Build vector <%A[0], 0, %B[1], 0>: one insertps (B lane 1 -> lane 2,
; zero-mask clears lanes 1 and 3).
1093 define <4 x float> @insertps_7(<4 x float> %A, <4 x float> %B) {
1094 ; X32-LABEL: insertps_7:
1095 ; X32: ## BB#0: ## %entry
1096 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
1099 ; X64-LABEL: insertps_7:
1100 ; X64: ## BB#0: ## %entry
1101 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[1],zero
1104 %vecext = extractelement <4 x float> %A, i32 0
1105 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
1106 %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
1107 %vecext2 = extractelement <4 x float> %B, i32 1
1108 %vecinit3 = insertelement <4 x float> %vecinit1, float %vecext2, i32 2
1109 %vecinit4 = insertelement <4 x float> %vecinit3, float 0.000000e+00, i32 3
1110 ret <4 x float> %vecinit4
; Build vector <%A[0], %B[0], 0, 0>: one insertps (B lane 0 -> lane 1,
; zero-mask clears lanes 2 and 3).
1113 define <4 x float> @insertps_8(<4 x float> %A, <4 x float> %B) {
1114 ; X32-LABEL: insertps_8:
1115 ; X32: ## BB#0: ## %entry
1116 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
1119 ; X64-LABEL: insertps_8:
1120 ; X64: ## BB#0: ## %entry
1121 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],zero,zero
1124 %vecext = extractelement <4 x float> %A, i32 0
1125 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
1126 %vecext1 = extractelement <4 x float> %B, i32 0
1127 %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 1
1128 %vecinit3 = insertelement <4 x float> %vecinit2, float 0.000000e+00, i32 2
1129 %vecinit4 = insertelement <4 x float> %vecinit3, float 0.000000e+00, i32 3
1130 ret <4 x float> %vecinit4
; Build vector <0, %A[0], %B[2], 0>: one insertps, but the result is built
; in xmm1 (A's lane 0 inserted into B), so an extra movaps copies it back to
; the return register xmm0.
1133 define <4 x float> @insertps_9(<4 x float> %A, <4 x float> %B) {
1134 ; X32-LABEL: insertps_9:
1135 ; X32: ## BB#0: ## %entry
1136 ; X32-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
1137 ; X32-NEXT: movaps %xmm1, %xmm0
1140 ; X64-LABEL: insertps_9:
1141 ; X64: ## BB#0: ## %entry
1142 ; X64-NEXT: insertps {{.*#+}} xmm1 = zero,xmm0[0],xmm1[2],zero
1143 ; X64-NEXT: movaps %xmm1, %xmm0
1146 %vecext = extractelement <4 x float> %A, i32 0
1147 %vecinit = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %vecext, i32 1
1148 %vecext1 = extractelement <4 x float> %B, i32 2
1149 %vecinit2 = insertelement <4 x float> %vecinit, float %vecext1, i32 2
1150 %vecinit3 = insertelement <4 x float> %vecinit2, float 0.000000e+00, i32 3
1151 ret <4 x float> %vecinit3
; Build vector <%A[0], 0, %A[0], 0> where source and destination are the
; same register: one insertps with xmm0 as both operands and a zero-mask
; clearing lanes 1 and 3.
1154 define <4 x float> @insertps_10(<4 x float> %A)
1155 ; X32-LABEL: insertps_10:
1157 ; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
1160 ; X64-LABEL: insertps_10:
1162 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[0],zero
1165 %vecext = extractelement <4 x float> %A, i32 0
1166 %vecbuild1 = insertelement <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>, float %vecext, i32 0
1167 %vecbuild2 = insertelement <4 x float> %vecbuild1, float %vecext, i32 2
1168 ret <4 x float> %vecbuild2
; A zeroing build_vector followed by a shuffle with %A reduces to
; <0, %A[1], 0, %A[3]>; this should lower as a blendps against a zeroed
; register (xorps), not as multiple inserts.
1171 define <4 x float> @build_vector_to_shuffle_1(<4 x float> %A) {
1172 ; X32-LABEL: build_vector_to_shuffle_1:
1173 ; X32: ## BB#0: ## %entry
1174 ; X32-NEXT: xorps %xmm1, %xmm1
1175 ; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
1178 ; X64-LABEL: build_vector_to_shuffle_1:
1179 ; X64: ## BB#0: ## %entry
1180 ; X64-NEXT: xorps %xmm1, %xmm1
1181 ; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
1184 %vecext = extractelement <4 x float> %A, i32 1
1185 %vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
1186 %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2
1187 %vecinit3 = shufflevector <4 x float> %vecinit1, <4 x float> %A, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
1188 ret <4 x float> %vecinit3
; A zeroing build_vector keeping only %A[1], i.e. <0, %A[1], 0, 0>; should
; lower as a blendps against a zeroed register (xorps), not as inserts.
1191 define <4 x float> @build_vector_to_shuffle_2(<4 x float> %A) {
1192 ; X32-LABEL: build_vector_to_shuffle_2:
1193 ; X32: ## BB#0: ## %entry
1194 ; X32-NEXT: xorps %xmm1, %xmm1
1195 ; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1198 ; X64-LABEL: build_vector_to_shuffle_2:
1199 ; X64: ## BB#0: ## %entry
1200 ; X64-NEXT: xorps %xmm1, %xmm1
1201 ; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3]
1204 %vecext = extractelement <4 x float> %A, i32 1
1205 %vecinit = insertelement <4 x float> zeroinitializer, float %vecext, i32 1
1206 %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 2
1207 ret <4 x float> %vecinit1