; RUN: llc < %s -march=x86 -mcpu=core2 | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s

; Verify that the backend correctly combines SSE2 builtin intrinsics.
; Three consecutive arithmetic right shifts of the same value (by 3, 3, and 2)
; should be combined into a single 'psrad $8'. Note: the vector-count form
; (psra.d) reads its count from the low 64 bits of the second operand, so the
; count here is 3, not 7.
define <4 x i32> @test_psra_1(<4 x i32> %A) {
  %1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %A, i32 3)
  %2 = tail call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %1, <4 x i32> <i32 3, i32 0, i32 7, i32 0>)
  %3 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %2, i32 2)
  ret <4 x i32> %3
}
; CHECK-LABEL: test_psra_1
; CHECK: psrad $8, %xmm0
; CHECK-NEXT: ret
; Same combine as test_psra_1, but for 16-bit elements: shifts by 3, 3, and 2
; fold into a single 'psraw $8'.
define <8 x i16> @test_psra_2(<8 x i16> %A) {
  %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %A, i32 3)
  %2 = tail call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %1, <8 x i16> <i16 3, i16 0, i16 0, i16 0, i16 7, i16 0, i16 0, i16 0>)
  %3 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %2, i32 2)
  ret <8 x i16> %3
}
; CHECK-LABEL: test_psra_2
; CHECK: psraw $8, %xmm0
; CHECK-NEXT: ret
; Every shift count is zero (the vector-count form only reads the low 64 bits
; of its second operand), so the whole chain of shifts must be elided.
define <4 x i32> @test_psra_3(<4 x i32> %A) {
  %1 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %A, i32 0)
  %2 = tail call <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32> %1, <4 x i32> <i32 0, i32 0, i32 7, i32 0>)
  %3 = tail call <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32> %2, i32 0)
  ret <4 x i32> %3
}
; CHECK-LABEL: test_psra_3
; CHECK-NOT: psrad
; CHECK: ret
; 16-bit variant of test_psra_3: all shift counts are zero, so no psraw should
; survive in the output.
define <8 x i16> @test_psra_4(<8 x i16> %A) {
  %1 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %A, i32 0)
  %2 = tail call <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16> %1, <8 x i16> <i16 0, i16 0, i16 0, i16 0, i16 7, i16 0, i16 0, i16 0>)
  %3 = tail call <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16> %2, i32 0)
  ret <8 x i16> %3
}
; CHECK-LABEL: test_psra_4
; CHECK-NOT: psraw
; CHECK: ret
; Declarations for the SSE2 arithmetic-shift intrinsics exercised above.
declare <8 x i16> @llvm.x86.sse2.psra.w(<8 x i16>, <8 x i16>)
declare <8 x i16> @llvm.x86.sse2.psrai.w(<8 x i16>, i32)
declare <4 x i32> @llvm.x86.sse2.psra.d(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.x86.sse2.psrai.d(<4 x i32>, i32)