From 9836c47ea6184cadcbf8cc522e170960666935ee Mon Sep 17 00:00:00 2001
From: Andrea Di Biagio
Date: Thu, 15 May 2014 15:18:15 +0000
Subject: [PATCH] [X86] Teach the backend how to fold SSE4.1/AVX/AVX2 blend intrinsics.

Added target specific combine rules to fold blend intrinsics according
to the following rules:
 1) fold(blend A, A, Mask) -> A;
 2) fold(blend A, B, allZeros) -> A;
 3) fold(blend A, B, allOnes) -> B.

Added two new tests to verify that the new folding rules work for all
the optimized blend intrinsics.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@208895 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp           |  56 +++++-
 test/CodeGen/X86/combine-avx-intrinsics.ll   | 119 ++++++++++++
 test/CodeGen/X86/combine-avx2-intrinsics.ll  | 113 ++++++++++++
 test/CodeGen/X86/combine-sse41-intrinsics.ll | 182 +++++++++++++++++++
 4 files changed, 468 insertions(+), 2 deletions(-)
 create mode 100644 test/CodeGen/X86/combine-avx-intrinsics.ll
 create mode 100644 test/CodeGen/X86/combine-sse41-intrinsics.ll

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 4e9eecc15ed..8c9cc60f0f0 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -18473,10 +18473,61 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
-static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG) {
+static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
+                                                const X86Subtarget *Subtarget) {
   unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
   switch (IntNo) {
   default: return SDValue();
+  // SSE/AVX/AVX2 blend intrinsics.
+  case Intrinsic::x86_avx2_pblendvb:
+  case Intrinsic::x86_avx2_pblendw:
+  case Intrinsic::x86_avx2_pblendd_128:
+  case Intrinsic::x86_avx2_pblendd_256:
+    // Don't try to simplify this intrinsic if we don't have AVX2.
+    if (!Subtarget->hasAVX2())
+      return SDValue();
+    // FALL-THROUGH
+  case Intrinsic::x86_avx_blend_pd_256:
+  case Intrinsic::x86_avx_blend_ps_256:
+  case Intrinsic::x86_avx_blendv_pd_256:
+  case Intrinsic::x86_avx_blendv_ps_256:
+    // Don't try to simplify this intrinsic if we don't have AVX.
+    if (!Subtarget->hasAVX())
+      return SDValue();
+    // FALL-THROUGH
+  case Intrinsic::x86_sse41_pblendw:
+  case Intrinsic::x86_sse41_blendpd:
+  case Intrinsic::x86_sse41_blendps:
+  case Intrinsic::x86_sse41_blendvps:
+  case Intrinsic::x86_sse41_blendvpd:
+  case Intrinsic::x86_sse41_pblendvb: {
+    SDValue Op0 = N->getOperand(1);
+    SDValue Op1 = N->getOperand(2);
+    SDValue Mask = N->getOperand(3);
+
+    // Don't try to simplify this intrinsic if we don't have SSE4.1.
+    if (!Subtarget->hasSSE41())
+      return SDValue();
+
+    // fold (blend A, A, Mask) -> A
+    if (Op0 == Op1)
+      return Op0;
+    // fold (blend A, B, allZeros) -> A
+    if (ISD::isBuildVectorAllZeros(Mask.getNode()))
+      return Op0;
+    // fold (blend A, B, allOnes) -> B
+    if (ISD::isBuildVectorAllOnes(Mask.getNode()))
+      return Op1;
+
+    // Simplify the case where the mask is a constant i32 value.
+    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
+      if (C->isNullValue())
+        return Op0;
+      if (C->isAllOnesValue())
+        return Op1;
+    }
+  }
+
   // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
   case Intrinsic::x86_sse2_psrai_w:
   case Intrinsic::x86_sse2_psrai_d:
@@ -20343,7 +20394,8 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case X86ISD::VPERM2X128:
   case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
   case ISD::FMA:            return PerformFMACombine(N, DAG, Subtarget);
-  case ISD::INTRINSIC_WO_CHAIN: return PerformINTRINSIC_WO_CHAINCombine(N, DAG);
+  case ISD::INTRINSIC_WO_CHAIN:
+    return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
   }
 
   return SDValue();
diff --git a/test/CodeGen/X86/combine-avx-intrinsics.ll b/test/CodeGen/X86/combine-avx-intrinsics.ll
new file mode 100644
index 00000000000..f610f7fcb91
--- /dev/null
+++ b/test/CodeGen/X86/combine-avx-intrinsics.ll
@@ -0,0 +1,119 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s
+
+
+define <4 x double> @test_x86_avx_blend_pd_256(<4 x double> %a0) {
+  %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a0, i32 7)
+  ret <4 x double> %1
+}
+; CHECK-LABEL: test_x86_avx_blend_pd_256
+; CHECK-NOT: vblendpd
+; CHECK: ret
+
+
+define <8 x float> @test_x86_avx_blend_ps_256(<8 x float> %a0) {
+  %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a0, i32 7)
+  ret <8 x float> %1
+}
+; CHECK-LABEL: test_x86_avx_blend_ps_256
+; CHECK-NOT: vblendps
+; CHECK: ret
+
+
+define <4 x double> @test_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1) {
+  %1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a0, <4 x double> %a1)
+  ret <4 x double> %1
+}
+; CHECK-LABEL: test_x86_avx_blendv_pd_256
+; CHECK-NOT: vblendvpd
+; CHECK: ret
+
+
+define <8 x float> @test_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1) {
+  %1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a0, <8 x float> %a1)
+  ret <8 x float> %1
+}
+; CHECK-LABEL: test_x86_avx_blendv_ps_256
+; CHECK-NOT: vblendvps
+; CHECK: ret
+
+
+define <4 x double> @test2_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
+  %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 0)
+  ret <4 x double> %1
+}
+; CHECK-LABEL: test2_x86_avx_blend_pd_256
+; CHECK-NOT: vblendpd
+; CHECK: ret
+
+
+define <8 x float> @test2_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
+  %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 0)
+  ret <8 x float> %1
+}
+; CHECK-LABEL: test2_x86_avx_blend_ps_256
+; CHECK-NOT: vblendps
+; CHECK: ret
+
+
+define <4 x double> @test2_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1) {
+  %1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> zeroinitializer)
+  ret <4 x double> %1
+}
+; CHECK-LABEL: test2_x86_avx_blendv_pd_256
+; CHECK-NOT: vblendvpd
+; CHECK: ret
+
+
+define <8 x float> @test2_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1) {
+  %1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> zeroinitializer)
+  ret <8 x float> %1
+}
+; CHECK-LABEL: test2_x86_avx_blendv_ps_256
+; CHECK-NOT: vblendvps
+; CHECK: ret
+
+
+define <4 x double> @test3_x86_avx_blend_pd_256(<4 x double> %a0, <4 x double> %a1) {
+  %1 = call <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double> %a0, <4 x double> %a1, i32 -1)
+  ret <4 x double> %1
+}
+; CHECK-LABEL: test3_x86_avx_blend_pd_256
+; CHECK-NOT: vblendpd
+; CHECK: ret
+
+
+define <8 x float> @test3_x86_avx_blend_ps_256(<8 x float> %a0, <8 x float> %a1) {
+  %1 = call <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float> %a0, <8 x float> %a1, i32 -1)
+  ret <8 x float> %1
+}
+; CHECK-LABEL: test3_x86_avx_blend_ps_256
+; CHECK-NOT: vblendps
+; CHECK: ret
+
+
+define <4 x double> @test3_x86_avx_blendv_pd_256(<4 x double> %a0, <4 x double> %a1) {
+  %Mask = bitcast <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1> to <4 x double>
+  %1 = call <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %Mask)
+  ret <4 x double> %1
+}
+; CHECK-LABEL: test3_x86_avx_blendv_pd_256
+; CHECK-NOT: vblendvpd
+; CHECK: ret
+
+
+define <8 x float> @test3_x86_avx_blendv_ps_256(<8 x float> %a0, <8 x float> %a1) {
+  %Mask = bitcast <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1> to <8 x float>
+  %1 = call <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %Mask)
+  ret <8 x float> %1
+}
+; CHECK-LABEL: test3_x86_avx_blendv_ps_256
+; CHECK-NOT: vblendvps
+; CHECK: ret
+
+
+
+declare <4 x double> @llvm.x86.avx.blend.pd.256(<4 x double>, <4 x double>, i32)
+declare <8 x float> @llvm.x86.avx.blend.ps.256(<8 x float>, <8 x float>, i32)
+declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4 x double>)
+declare <8 x float> @llvm.x86.avx.blendv.ps.256(<8 x float>, <8 x float>, <8 x float>)
+
diff --git a/test/CodeGen/X86/combine-avx2-intrinsics.ll b/test/CodeGen/X86/combine-avx2-intrinsics.ll
index 0560a8d6ae2..8794f8b8684 100644
--- a/test/CodeGen/X86/combine-avx2-intrinsics.ll
+++ b/test/CodeGen/X86/combine-avx2-intrinsics.ll
@@ -44,6 +44,119 @@ define <8 x i32> @test_psra_4(<8 x i32> %A) {
 ; CHECK: ret
 
 
+define <32 x i8> @test_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1) {
+  %res = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a0, <32 x i8> %a1)
+  ret <32 x i8> %res
+}
+; CHECK-LABEL: test_x86_avx2_pblendvb
+; CHECK-NOT: vpblendvb
+; CHECK: ret
+
+
+define <16 x i16> @test_x86_avx2_pblendw(<16 x i16> %a0) {
+  %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a0, i32 7)
+  ret <16 x i16> %res
+}
+; CHECK-LABEL: test_x86_avx2_pblendw
+; CHECK-NOT: vpblendw
+; CHECK: ret
+
+
+define <4 x i32> @test_x86_avx2_pblendd_128(<4 x i32> %a0) {
+  %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a0, i32 7)
+  ret <4 x i32> %res
+}
+; CHECK-LABEL: test_x86_avx2_pblendd_128
+; CHECK-NOT: vpblendd
+; CHECK: ret
+
+
+define <8 x i32> @test_x86_avx2_pblendd_256(<8 x i32> %a0) {
+  %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a0, i32 7)
+  ret <8 x i32> %res
+}
+; CHECK-LABEL: test_x86_avx2_pblendd_256
+; CHECK-NOT: vpblendd
+; CHECK: ret
+
+
+define <32 x i8> @test2_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1) {
+  %res = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> zeroinitializer)
+  ret <32 x i8> %res
+}
+; CHECK-LABEL: test2_x86_avx2_pblendvb
+; CHECK-NOT: vpblendvb
+; CHECK: ret
+
+
+define <16 x i16> @test2_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
+  %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 0)
+  ret <16 x i16> %res
+}
+; CHECK-LABEL: test2_x86_avx2_pblendw
+; CHECK-NOT: vpblendw
+; CHECK: ret
+
+
+define <4 x i32> @test2_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
+  %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 0)
+  ret <4 x i32> %res
+}
+; CHECK-LABEL: test2_x86_avx2_pblendd_128
+; CHECK-NOT: vpblendd
+; CHECK: ret
+
+
+define <8 x i32> @test2_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
+  %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 0)
+  ret <8 x i32> %res
+}
+; CHECK-LABEL: test2_x86_avx2_pblendd_256
+; CHECK-NOT: vpblendd
+; CHECK: ret
+
+
+define <32 x i8> @test3_x86_avx2_pblendvb(<32 x i8> %a0, <32 x i8> %a1) {
+  %1 = bitcast <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1> to <32 x i8>
+  %res = call <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8> %a0, <32 x i8> %a1, <32 x i8> %1)
+  ret <32 x i8> %res
+}
+; CHECK-LABEL: test3_x86_avx2_pblendvb
+; CHECK-NOT: vpblendvb
+; CHECK: ret
+
+
+define <16 x i16> @test3_x86_avx2_pblendw(<16 x i16> %a0, <16 x i16> %a1) {
+  %res = call <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16> %a0, <16 x i16> %a1, i32 -1)
+  ret <16 x i16> %res
+}
+; CHECK-LABEL: test3_x86_avx2_pblendw
+; CHECK-NOT: vpblendw
+; CHECK: ret
+
+
+define <4 x i32> @test3_x86_avx2_pblendd_128(<4 x i32> %a0, <4 x i32> %a1) {
+  %res = call <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32> %a0, <4 x i32> %a1, i32 -1)
+  ret <4 x i32> %res
+}
+; CHECK-LABEL: test3_x86_avx2_pblendd_128
+; CHECK-NOT: vpblendd
+; CHECK: ret
+
+
+define <8 x i32> @test3_x86_avx2_pblendd_256(<8 x i32> %a0, <8 x i32> %a1) {
+  %res = call <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32> %a0, <8 x i32> %a1, i32 -1)
+  ret <8 x i32> %res
+}
+; CHECK-LABEL: test3_x86_avx2_pblendd_256
+; CHECK-NOT: vpblendd
+; CHECK: ret
+
+
+declare <32 x i8> @llvm.x86.avx2.pblendvb(<32 x i8>, <32 x i8>, <32 x i8>)
+declare <16 x i16> @llvm.x86.avx2.pblendw(<16 x i16>, <16 x i16>, i32)
+declare <4 x i32> @llvm.x86.avx2.pblendd.128(<4 x i32>, <4 x i32>, i32)
+declare <8 x i32> @llvm.x86.avx2.pblendd.256(<8 x i32>, <8 x i32>, i32)
 declare <16 x i16> @llvm.x86.avx2.psra.w(<16 x i16>, <8 x i16>)
 declare <16 x i16> @llvm.x86.avx2.psrai.w(<16 x i16>, i32)
 declare <8 x i32> @llvm.x86.avx2.psra.d(<8 x i32>, <4 x i32>)
diff --git a/test/CodeGen/X86/combine-sse41-intrinsics.ll b/test/CodeGen/X86/combine-sse41-intrinsics.ll
new file mode 100644
index 00000000000..5dd5246c373
--- /dev/null
+++ b/test/CodeGen/X86/combine-sse41-intrinsics.ll
@@ -0,0 +1,182 @@
+; RUN: llc < %s -march=x86-64 -mcpu=corei7 | FileCheck %s
+
+
+define <2 x double> @test_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1) {
+  %1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 0)
+  ret <2 x double> %1
+}
+; CHECK-LABEL: test_x86_sse41_blend_pd
+; CHECK-NOT: blendpd
+; CHECK: ret
+
+
+define <4 x float> @test_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
+  %1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 0)
+  ret <4 x float> %1
+}
+; CHECK-LABEL: test_x86_sse41_blend_ps
+; CHECK-NOT: blendps
+; CHECK: ret
+
+
+define <2 x double> @test_x86_sse41_blendv_pd(<2 x double> %a0, <2 x double> %a1) {
+  %1 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> zeroinitializer)
+  ret <2 x double> %1
+}
+; CHECK-LABEL: test_x86_sse41_blendv_pd
+; CHECK-NOT: blendvpd
+; CHECK: ret
+
+
+define <4 x float> @test_x86_sse41_blendv_ps(<4 x float> %a0, <4 x float> %a1) {
+  %1 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> zeroinitializer)
+  ret <4 x float> %1
+}
+; CHECK-LABEL: test_x86_sse41_blendv_ps
+; CHECK-NOT: blendvps
+; CHECK: ret
+
+
+define <16 x i8> @test_x86_sse41_pblendv_b(<16 x i8> %a0, <16 x i8> %a1) {
+  %1 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> zeroinitializer)
+  ret <16 x i8> %1
+}
+; CHECK-LABEL: test_x86_sse41_pblendv_b
+; CHECK-NOT: pblendvb
+; CHECK: ret
+
+
+define <8 x i16> @test_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
+  %1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 0)
+  ret <8 x i16> %1
+}
+; CHECK-LABEL: test_x86_sse41_pblend_w
+; CHECK-NOT: pblendw
+; CHECK: ret
+
+
+define <2 x double> @test2_x86_sse41_blend_pd(<2 x double> %a0, <2 x double> %a1) {
+  %1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 -1)
+  ret <2 x double> %1
+}
+; CHECK-LABEL: test2_x86_sse41_blend_pd
+; CHECK-NOT: blendpd
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test2_x86_sse41_blend_ps(<4 x float> %a0, <4 x float> %a1) {
+  %1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a1, i32 -1)
+  ret <4 x float> %1
+}
+; CHECK-LABEL: test2_x86_sse41_blend_ps
+; CHECK-NOT: blendps
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+
+define <2 x double> @test2_x86_sse41_blendv_pd(<2 x double> %a0, <2 x double> %a1) {
+  %Mask = bitcast <2 x i64> <i64 -1, i64 -1> to <2 x double>
+  %1 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a1, <2 x double> %Mask )
+  ret <2 x double> %1
+}
+; CHECK-LABEL: test2_x86_sse41_blendv_pd
+; CHECK-NOT: blendvpd
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+
+define <4 x float> @test2_x86_sse41_blendv_ps(<4 x float> %a0, <4 x float> %a1) {
+  %Mask = bitcast <2 x i64> <i64 -1, i64 -1> to <4 x float>
+  %1 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a1, <4 x float> %Mask)
+  ret <4 x float> %1
+}
+; CHECK-LABEL: test2_x86_sse41_blendv_ps
+; CHECK-NOT: blendvps
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+
+define <16 x i8> @test2_x86_sse41_pblendv_b(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %a2) {
+  %Mask = bitcast <2 x i64> <i64 -1, i64 -1> to <16 x i8>
+  %1 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> %Mask)
+  ret <16 x i8> %1
+}
+; CHECK-LABEL: test2_x86_sse41_pblendv_b
+; CHECK-NOT: pblendvb
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+
+define <8 x i16> @test2_x86_sse41_pblend_w(<8 x i16> %a0, <8 x i16> %a1) {
+  %1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a1, i32 -1)
+  ret <8 x i16> %1
+}
+; CHECK-LABEL: test2_x86_sse41_pblend_w
+; CHECK-NOT: pblendw
+; CHECK: movaps %xmm1, %xmm0
+; CHECK-NEXT: ret
+
+
+define <2 x double> @test3_x86_sse41_blend_pd(<2 x double> %a0) {
+  %1 = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a0, i32 7)
+  ret <2 x double> %1
+}
+; CHECK-LABEL: test3_x86_sse41_blend_pd
+; CHECK-NOT: blendpd
+; CHECK: ret
+
+
+define <4 x float> @test3_x86_sse41_blend_ps(<4 x float> %a0) {
+  %1 = call <4 x float> @llvm.x86.sse41.blendps(<4 x float> %a0, <4 x float> %a0, i32 7)
+  ret <4 x float> %1
+}
+; CHECK-LABEL: test3_x86_sse41_blend_ps
+; CHECK-NOT: blendps
+; CHECK: ret
+
+
+define <2 x double> @test3_x86_sse41_blendv_pd(<2 x double> %a0, <2 x double> %a1) {
+  %1 = call <2 x double> @llvm.x86.sse41.blendvpd(<2 x double> %a0, <2 x double> %a0, <2 x double> %a1 )
+  ret <2 x double> %1
+}
+; CHECK-LABEL: test3_x86_sse41_blendv_pd
+; CHECK-NOT: blendvpd
+; CHECK: ret
+
+
+define <4 x float> @test3_x86_sse41_blendv_ps(<4 x float> %a0, <4 x float> %a1) {
+  %1 = call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %a0, <4 x float> %a0, <4 x float> %a1)
+  ret <4 x float> %1
+}
+; CHECK-LABEL: test3_x86_sse41_blendv_ps
+; CHECK-NOT: blendvps
+; CHECK: ret
+
+
+define <16 x i8> @test3_x86_sse41_pblendv_b(<16 x i8> %a0, <16 x i8> %a1) {
+  %1 = call <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8> %a0, <16 x i8> %a0, <16 x i8> %a1)
+  ret <16 x i8> %1
+}
+; CHECK-LABEL: test3_x86_sse41_pblendv_b
+; CHECK-NOT: pblendvb
+; CHECK: ret
+
+
+define <8 x i16> @test3_x86_sse41_pblend_w(<8 x i16> %a0) {
+  %1 = call <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16> %a0, <8 x i16> %a0, i32 7)
+  ret <8 x i16> %1
+}
+; CHECK-LABEL: test3_x86_sse41_pblend_w
+; CHECK-NOT: pblendw
+; CHECK: ret
+
+
+declare <2 x double> @llvm.x86.sse41.blendpd(<2 x double>, <2 x double>, i32)
+declare <4 x float> @llvm.x86.sse41.blendps(<4 x float>, <4 x float>, i32)
+declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x double>)
+declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>)
+declare <16 x i8> @llvm.x86.sse41.pblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
+declare <8 x i16> @llvm.x86.sse41.pblendw(<8 x i16>, <8 x i16>, i32)
+declare <8 x i16> @llvm.x86.sse41.phminposuw(<8 x i16>)
+
-- 
2.34.1
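
For illustration only (not part of the patch above): the sketch below shows, at the
C intrinsics level, the kind of trivial blends these combine rules are meant to clean
up. The function names are made up for the example, and whether a given frontend
lowers these intrinsics to the llvm.x86.sse41.* calls folded above (rather than an
equivalent IR form) depends on the compiler version; it assumes an SSE4.1 target,
e.g. clang -msse4.1.

    // Hypothetical example, not taken from the patch.
    #include <smmintrin.h>

    // Rule 2: an all-zero immediate mask selects only elements of 'a',
    // so the blend can fold away and the function simply returns 'a'.
    __m128 keep_first(__m128 a, __m128 b) {
      return _mm_blend_ps(a, b, 0);
    }

    // Rule 3: an all-ones mask selects only elements of 'b',
    // so the variable blend can fold to a plain move of 'b'.
    __m128 keep_second(__m128 a, __m128 b) {
      __m128 all_ones = _mm_castsi128_ps(_mm_set1_epi32(-1));
      return _mm_blendv_ps(a, b, all_ones);
    }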