; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X32 %s
; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse2 | FileCheck --check-prefix=X64 %s

; If there is no explicit MMX type usage, always promote to XMM.
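
; test0 below is the plain case: the load, shuffle, and store never name an
; MMX value, so (as the CHECK lines show) the whole sequence stays in XMM
; registers and no %mm register appears.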
define void @test0(<1 x i64>* %x) {
; X32-LABEL: test0:
; X32:       ## BB#0: ## %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X32-NEXT:    movq %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: test0:
; X64:       ## BB#0: ## %entry
; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X64-NEXT:    movq %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %tmp2 = load <1 x i64>, <1 x i64>* %x
  %tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32>
  %tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
  %tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64>
  store <1 x i64> %tmp10, <1 x i64>* %x
  ret void
}
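
; test1, in contrast, has explicit MMX usage: maskmovq takes x86_mmx
; operands, so the promoted XMM results must be copied into %mm0/%mm1
; through stack slots. %edi (%rdi on x86-64) is zeroed because maskmovq
; stores through it implicitly and the pointer here is null.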
define void @test1() {
; X32-LABEL: test1:
; X32:       ## BB#0: ## %entry
; X32-NEXT:    pushl %edi
; X32-NEXT:  Lcfi0:
; X32-NEXT:    .cfi_def_cfa_offset 8
; X32-NEXT:    subl $16, %esp
; X32-NEXT:  Lcfi1:
; X32-NEXT:    .cfi_def_cfa_offset 24
; X32-NEXT:  Lcfi2:
; X32-NEXT:    .cfi_offset %edi, -8
; X32-NEXT:    xorps %xmm0, %xmm0
; X32-NEXT:    movlps %xmm0, (%esp)
; X32-NEXT:    movq (%esp), %mm0
; X32-NEXT:    pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; X32-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; X32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X32-NEXT:    movq %xmm0, {{[0-9]+}}(%esp)
; X32-NEXT:    movq {{[0-9]+}}(%esp), %mm1
; X32-NEXT:    xorl %edi, %edi
; X32-NEXT:    maskmovq %mm1, %mm0
; X32-NEXT:    addl $16, %esp
; X32-NEXT:    popl %edi
; X32-NEXT:    retl
;
; X64-LABEL: test1:
; X64:       ## BB#0: ## %entry
; X64-NEXT:    xorps %xmm0, %xmm0
; X64-NEXT:    movlps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT:    movq -{{[0-9]+}}(%rsp), %mm0
; X64-NEXT:    pshuflw {{.*#+}} xmm0 = mem[0,2,2,3,4,5,6,7]
; X64-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT:    movq %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT:    movq -{{[0-9]+}}(%rsp), %mm1
; X64-NEXT:    xorl %edi, %edi
; X64-NEXT:    maskmovq %mm1, %mm0
; X64-NEXT:    retq
entry:
  %tmp528 = bitcast <8 x i8> zeroinitializer to <2 x i32>
  %tmp529 = and <2 x i32> %tmp528, bitcast (<4 x i16> < i16 -32640, i16 16448, i16 8224, i16 4112 > to <2 x i32>)
  %tmp542 = bitcast <2 x i32> %tmp529 to <4 x i16>
  %tmp543 = add <4 x i16> %tmp542, < i16 0, i16 16448, i16 24672, i16 28784 >
  %tmp555 = bitcast <4 x i16> %tmp543 to <8 x i8>
  %tmp556 = bitcast <8 x i8> %tmp555 to x86_mmx
  %tmp557 = bitcast <8 x i8> zeroinitializer to x86_mmx
  tail call void @llvm.x86.mmx.maskmovq(x86_mmx %tmp557, x86_mmx %tmp556, i8* null)
  ret void
}
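
; test2 splats element 0 of a <2 x i32> global (the zeroinitializer shuffle
; mask selects lane 0 twice). No MMX intrinsic is involved, so the broadcast
; is done in an XMM register on both targets (unpcklps on i686, pshufd on
; x86-64) and only 8 bytes are stored back.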
@tmp_V2i = common global <2 x i32> zeroinitializer

define void @test2() nounwind {
; X32-LABEL: test2:
; X32:       ## BB#0: ## %entry
; X32-NEXT:    movl L_tmp_V2i$non_lazy_ptr, %eax
; X32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0,0,1,1]
; X32-NEXT:    movlps %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: test2:
; X64:       ## BB#0: ## %entry
; X64-NEXT:    movq _tmp_V2i@{{.*}}(%rip), %rax
; X64-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; X64-NEXT:    movq %xmm0, (%rax)
; X64-NEXT:    retq
entry:
  %0 = load <2 x i32>, <2 x i32>* @tmp_V2i, align 8
  %1 = shufflevector <2 x i32> %0, <2 x i32> undef, <2 x i32> zeroinitializer
  store <2 x i32> %1, <2 x i32>* @tmp_V2i, align 8
  ret void
}

declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)