; RUN: llc < %s -march=x86 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X32 %s
; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck -check-prefix=X64 %s

; If there is no explicit MMX type usage, always promote to XMM.
; test0: pure <1 x i64> load/shuffle/store with no x86_mmx values involved,
; so codegen should promote everything to XMM registers (movlpd), never MMX.
define void @test0(<1 x i64>* %x) {
; X32-NEXT: movlpd %xmm
  %tmp2 = load <1 x i64>* %x
  ; Reinterpret as <2 x i32> and duplicate the high lane (punpckhdq pattern).
  %tmp6 = bitcast <1 x i64> %tmp2 to <2 x i32>
  %tmp9 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
  %tmp10 = bitcast <2 x i32> %tmp9 to <1 x i64>
  store <1 x i64> %tmp10, <1 x i64>* %x
  ; NOTE(review): restored terminator — a void function must end in `ret void`;
  ; the extracted chunk had dropped the function's closing lines.
  ret void
}
; test1: vector arithmetic on <2 x i32>/<4 x i16> followed by explicit bitcasts
; to x86_mmx feeding maskmovq — the explicit MMX intrinsic use forces the
; final values into MMX registers even though the arithmetic can stay in XMM.
define void @test1() {
  %tmp528 = bitcast <8 x i8> zeroinitializer to <2 x i32>
  %tmp529 = and <2 x i32> %tmp528, bitcast (<4 x i16> < i16 -32640, i16 16448, i16 8224, i16 4112 > to <2 x i32>)
  %tmp542 = bitcast <2 x i32> %tmp529 to <4 x i16>
  %tmp543 = add <4 x i16> %tmp542, < i16 0, i16 16448, i16 24672, i16 28784 >
  %tmp555 = bitcast <4 x i16> %tmp543 to <8 x i8>
  ; Explicit x86_mmx casts: the only point where MMX registers are required.
  %tmp556 = bitcast <8 x i8> %tmp555 to x86_mmx
  %tmp557 = bitcast <8 x i8> zeroinitializer to x86_mmx
  tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp557, x86_mmx %tmp556, i8* null)
  ; NOTE(review): restored terminator — the extracted chunk dropped the
  ; function's `ret void` and closing brace.
  ret void
}
; MMX masked store intrinsic (maskmovq): stores bytes of arg0 selected by the
; sign bits of arg1 to the address in arg2.
declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)