-; These are tests for SSE3 codegen. Yonah has SSE3 and earlier but not SSSE3+.
+; These are tests for SSE3 codegen. Nocona has SSE3 and earlier but not SSSE3+.
-; RUN: llc < %s -march=x86-64 -mcpu=yonah -mtriple=i686-apple-darwin9 -O3 \
+; RUN: llc < %s -march=x86-64 -mcpu=nocona -mtriple=i686-apple-darwin9 -O3 \
; RUN: | FileCheck %s --check-prefix=X64
; Test for v8xi16 lowering where we extract the first element of the vector and
; place it in the second element.
ret void
; X64: t0:
-; X64: movddup (%rsi), %xmm0
-; X64: xorl %eax, %eax
-; X64: pshuflw $0, %xmm0, %xmm0
-; X64: pinsrw $0, %eax, %xmm0
-; X64: movaps %xmm0, (%rdi)
+; X64: movdqa (%rsi), %xmm0
+; X64: pslldq $2, %xmm0
+; X64: movdqa %xmm0, (%rdi)
; X64: ret
}
ret <8 x i16> %tmp3
; X64: t1:
-; X64: movl (%rsi), %eax
-; X64: movaps (%rdi), %xmm0
-; X64: pinsrw $0, %eax, %xmm0
+; X64: movdqa (%rdi), %xmm0
+; X64: pinsrw $0, (%rsi), %xmm0
; X64: ret
}
%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 0, i32 7, i32 2, i32 3, i32 1, i32 5, i32 6, i32 5 >
ret <8 x i16> %tmp
; X64: t4:
-; X64: pextrw $7, %xmm0, %eax
-; X64: pshufhw $100, %xmm0, %xmm2
-; X64: pinsrw $1, %eax, %xmm2
-; X64: pextrw $1, %xmm0, %eax
-; X64: movaps %xmm2, %xmm0
+; X64: pextrw $7, [[XMM0:%xmm[0-9]+]], %eax
+; X64: pshufhw $100, [[XMM0]], [[XMM1:%xmm[0-9]+]]
+; X64: pinsrw $1, %eax, [[XMM1]]
+; X64: pextrw $1, [[XMM0]], %eax
; X64: pinsrw $4, %eax, %xmm0
; X64: ret
}
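+; Note: [[XMM0:%xmm[0-9]+]] binds a FileCheck variable to whichever xmm register
+; the compiler picks, and later [[XMM0]] uses must match that same register, so
+; the rewritten checks above no longer pin the exact shuffle registers.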
; X64: t8:
; X64: pshuflw $-58, (%rsi), %xmm0
; X64: pshufhw $-58, %xmm0, %xmm0
-; X64: movaps %xmm0, (%rdi)
+; X64: movdqa %xmm0, (%rdi)
; X64: ret
}
store <4 x i16> %6, <4 x i16>* @g2, align 8
ret void
; X64: t10:
-; X64: pextrw $4, %xmm0, %eax
-; X64: pextrw $6, %xmm0, %edx
-; X64: movlhps %xmm1, %xmm1
-; X64: pshuflw $8, %xmm1, %xmm1
-; X64: pinsrw $2, %eax, %xmm1
-; X64: pinsrw $3, %edx, %xmm1
+; X64: pextrw $4, [[X0:%xmm[0-9]+]], %ecx
+; X64: pextrw $6, [[X0]], %eax
+; X64: movlhps [[X0]], [[X0]]
+; X64: pshuflw $8, [[X0]], [[X0]]
+; X64: pinsrw $2, %ecx, [[X0]]
+; X64: pinsrw $3, %eax, [[X0]]
}
ret <8 x i16> %tmp7
; X64: t11:
-; X64: movlhps %xmm0, %xmm0
; X64: movd %xmm1, %eax
+; X64: movlhps %xmm0, %xmm0
; X64: pshuflw $1, %xmm0, %xmm0
; X64: pinsrw $1, %eax, %xmm0
; X64: ret
ret <8 x i16> %tmp9
; X64: t12:
-; X64: movlhps %xmm0, %xmm0
; X64: pextrw $3, %xmm1, %eax
+; X64: movlhps %xmm0, %xmm0
; X64: pshufhw $3, %xmm0, %xmm0
; X64: pinsrw $5, %eax, %xmm0
; X64: ret
}
-
+; FIXME: t15 is worse off since the scheduler's 2-address hack was disabled.
define <8 x i16> @t15(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
entry:
%tmp8 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 undef, i32 undef, i32 7, i32 2, i32 8, i32 undef, i32 undef , i32 undef >
%tmp9 = shufflevector <16 x i8> %tmp8, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 2, i32 17, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
ret <16 x i8> %tmp9
; X64: t16:
-; X64: pinsrw $0, %eax, %xmm1
; X64: pextrw $8, %xmm0, %eax
-; X64: pinsrw $1, %eax, %xmm1
-; X64: pextrw $1, %xmm1, %ecx
-; X64: movd %xmm1, %edx
-; X64: pinsrw $0, %edx, %xmm1
-; X64: pinsrw $1, %eax, %xmm0
+; X64: pslldq $2, %xmm0
+; X64: movd %xmm0, %ecx
+; X64: pextrw $1, %xmm0, %edx
+; X64: pinsrw $0, %ecx, %xmm0
; X64: ret
}
+
+; rdar://8520311
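+; The shuffle of the %tmp3 load only uses its low 64 bits, so the load should be
+; matched as a single movddup.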
+define <4 x i32> @t17() nounwind {
+entry:
+; X64: t17:
+; X64: movddup (%rax), %xmm0
+ %tmp1 = load <4 x float>* undef, align 16
+ %tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
+ %tmp3 = load <4 x float>* undef, align 16
+ %tmp4 = shufflevector <4 x float> %tmp2, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
+ %tmp5 = bitcast <4 x float> %tmp3 to <4 x i32>
+ %tmp6 = shufflevector <4 x i32> %tmp5, <4 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
+ %tmp7 = and <4 x i32> %tmp6, <i32 undef, i32 undef, i32 -1, i32 0>
+ ret <4 x i32> %tmp7
+}