From 042bee88f3ab5fec282b6b4e60e8dede9e4c1088 Mon Sep 17 00:00:00 2001
From: Andrea Di Biagio
Date: Wed, 5 Nov 2014 13:04:14 +0000
Subject: [PATCH] [X86] Teach method 'isVectorClearMaskLegal' how to check for
 legal blend masks.

This patch improves the folding of vector AND nodes into blend operations for
targets that feature SSE4.1. A vector AND node where one of the operands is a
constant build_vector with elements that are either zero or all-ones can be
converted into a blend.

This allows for example to simplify the following code:

define <4 x i32> @test(<4 x i32> %A, <4 x i32> %B) {
  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
  %2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 -1>
  %3 = or <4 x i32> %1, %2
  ret <4 x i32> %3
}

Before this patch llc (-mcpu=corei7) generated:
        andps  LCPI1_0(%rip), %xmm0, %xmm0
        andps  LCPI1_1(%rip), %xmm1, %xmm1
        orps   %xmm1, %xmm0, %xmm0
        retq

With this patch we generate a single 'vpblendw'.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@221343 91177308-0d34-0410-b5e6-96231b3b80d8
---
(Two illustrative C++ sketches, not part of the patch, follow after the
trailer: the shuffle-mask construction and the pblendw immediate encoding.)

 lib/CodeGen/SelectionDAG/DAGCombiner.cpp |   2 +-
 lib/Target/X86/X86ISelLowering.cpp       |   4 +-
 test/CodeGen/X86/combine-and.ll          | 164 +++++++++++++++++++++++
 3 files changed, 168 insertions(+), 2 deletions(-)
 create mode 100644 test/CodeGen/X86/combine-and.ll

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 107f5a12693..33b2527287e 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11293,7 +11293,7 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
       if (cast<ConstantSDNode>(Elt)->isAllOnesValue())
         Indices.push_back(i);
       else if (cast<ConstantSDNode>(Elt)->isNullValue())
-        Indices.push_back(NumElts);
+        Indices.push_back(NumElts+i);
       else
         return SDValue();
     }
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index aa77a3e8ba5..bde948174d7 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -19383,7 +19383,9 @@ X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
     return (isMOVLMask(Mask, SVT)  ||
             isCommutedMOVLMask(Mask, SVT, true) ||
             isSHUFPMask(Mask, SVT) ||
-            isSHUFPMask(Mask, SVT, /* Commuted */ true));
+            isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
+            isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
+                        Subtarget->hasInt256()));
   }
   return false;
 }
diff --git a/test/CodeGen/X86/combine-and.ll b/test/CodeGen/X86/combine-and.ll
new file mode 100644
index 00000000000..59a7a1902aa
--- /dev/null
+++ b/test/CodeGen/X86/combine-and.ll
@@ -0,0 +1,164 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s
+;
+; Verify that the DAGCombiner is able to fold a vector AND into a blend
+; if one of the operands to the AND is a vector of all constants, and each
+; constant element is either zero or all-ones.
+
+
+define <4 x i32> @test1(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 0>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test1
+; CHECK: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test2(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 0>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test2
+; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test3(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 0, i32 0, i32 -1, i32 0>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test3
+; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5],xmm1[6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test4(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 0, i32 0, i32 0, i32 -1>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test4
+; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test5(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test5
+; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test6(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test6
+; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test7(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 0, i32 0, i32 -1, i32 -1>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test7
+; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test8(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 0, i32 -1>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test8
+; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5],xmm0[6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test9(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 0>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test9
+; CHECK: movq %xmm0, %xmm0
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test10(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 -1, i32 0>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test10
+; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test11(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 -1, i32 -1>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test11
+; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test12(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 -1, i32 0>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test12
+; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test13(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 -1, i32 -1, i32 0, i32 -1>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test13
+; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test14(<4 x i32> %A) {
+  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
+  ret <4 x i32> %1
+}
+; CHECK-LABEL: test14
+; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test15(<4 x i32> %A, <4 x i32> %B) {
+  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 -1>
+  %2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 0>
+  %3 = or <4 x i32> %1, %2
+  ret <4 x i32> %3
+}
+; CHECK-LABEL: test15
+; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test16(<4 x i32> %A, <4 x i32> %B) {
+  %1 = and <4 x i32> %A, <i32 -1, i32 0, i32 -1, i32 0>
+  %2 = and <4 x i32> %B, <i32 0, i32 -1, i32 0, i32 -1>
+  %3 = or <4 x i32> %1, %2
+  ret <4 x i32> %3
+}
+; CHECK-LABEL: test16
+; CHECK: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; CHECK-NEXT: retq
+
+
+define <4 x i32> @test17(<4 x i32> %A, <4 x i32> %B) {
+  %1 = and <4 x i32> %A, <i32 0, i32 -1, i32 0, i32 -1>
+  %2 = and <4 x i32> %B, <i32 -1, i32 0, i32 -1, i32 0>
+  %3 = or <4 x i32> %1, %2
+  ret <4 x i32> %3
+}
+; CHECK-LABEL: test17
+; CHECK: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
+; CHECK-NEXT: retq
--
2.34.1
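
Sketch 1 (not part of the patch): the DAGCombiner hunk above builds a
VECTOR_SHUFFLE mask over the pair (input, zero vector), where index i selects
lane i of the input and index NumElts+i selects lane i of the zero vector.
The one-character fix matters because pushing the fixed value NumElts produced
a mask such as {0,4,4,4} for <i32 -1, i32 0, i32 0, i32 0>; that mask is
semantically fine but not lane-aligned, so per-lane predicates like
isBlendMask could not recognize it, whereas NumElts+i yields {0,5,6,7}.
The minimal standalone C++ sketch below shows that construction using plain
STL types instead of LLVM's SelectionDAG API; the name buildClearMask is
invented for illustration.

    #include <cstdint>
    #include <optional>
    #include <vector>

    // Sketch of the clear-mask construction: AndMask holds the constant AND
    // operand, one 32-bit value per lane. The result is a shuffle mask over
    // the concatenation (Input, ZeroVector): index i selects Input[i], and
    // index NumElts + i selects ZeroVector[i].
    std::optional<std::vector<int>>
    buildClearMask(const std::vector<uint32_t> &AndMask) {
      const int NumElts = static_cast<int>(AndMask.size());
      std::vector<int> Indices;
      for (int i = 0; i != NumElts; ++i) {
        if (AndMask[i] == 0xFFFFFFFFu)
          Indices.push_back(i);           // all-ones: keep lane i of the input
        else if (AndMask[i] == 0u)
          Indices.push_back(NumElts + i); // zero: take lane i of the zero vector
        else
          return std::nullopt;            // mixed bits: not a clear mask, no fold
      }
      return Indices;
    }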
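
Sketch 2 (not part of the patch): the pblendw patterns in the CHECK lines
follow from the instruction's granularity. pblendw selects 16-bit words, so
each i32 lane of a <4 x i32> blend is controlled by two adjacent immediate
bits (dword lane i covers words 2*i and 2*i+1). The helper below is
hypothetical and only illustrates that lane-to-immediate mapping; it does not
correspond to an LLVM API.

    #include <cstdint>

    // Map a <4 x i32> blend onto a PBLENDW-style immediate: a set immediate
    // bit j takes result word j from the second source. A dword lane taken
    // from the second source therefore sets two adjacent bits.
    uint8_t pblendwImmForDwordBlend(const bool FromSecondSource[4]) {
      uint8_t Imm = 0;
      for (int i = 0; i < 4; ++i)
        if (FromSecondSource[i])
          Imm |= static_cast<uint8_t>(0x3 << (2 * i)); // words 2*i and 2*i+1
      return Imm;
    }

For example, in test2 only dword 1 is kept from %A; taking lanes 0, 2, and 3
from the zeroed register gives words [0,1] and [4,5,6,7] from xmm1 in the
CHECK line, i.e. immediate 0xF3 under this encoding.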