From: Andrea Di Biagio
Date: Tue, 15 Apr 2014 19:30:48 +0000 (+0000)
Subject: [X86] Improve the lowering of packed shifts by constant build_vector.
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=749e8fee34bec3ed166fe5b6b440a6fac678cd08;p=oota-llvm.git

[X86] Improve the lowering of packed shifts by constant build_vector.

This patch teaches the backend how to efficiently lower logical and
arithmetic packed shifts on both SSE and AVX/AVX2 machines.

When possible, instead of scalarizing a vector shift, the backend should try
to expand the shift into a sequence of two packed shifts by immediate count
followed by a MOVSS/MOVSD.

Example
  (v4i32 (srl A, (build_vector <X, Y, Y, Y>)))

Can be rewritten as:
  (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))

[with X and Y ConstantInt]

The advantage is that the two new shifts from the example would be lowered into
X86ISD::VSRLI nodes. This is always cheaper than scalarizing the vector into
four scalar shifts plus four pairs of vector insert/extract.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206316 91177308-0d34-0410-b5e6-96231b3b80d8
---
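[Editorial illustration, not part of the commit: given the IR below, an SSE
target now gets two shifts by immediate plus a MOVSS blend instead of four
extract/shift/insert sequences. The instruction order matches the CHECK lines
in the new test file; the function name and register names are only
illustrative.]

  ; A v4i32 logical shift right by the constant build_vector <3, 2, 2, 2>:
  define <4 x i32> @example(<4 x i32> %a) {
    %r = lshr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
    ret <4 x i32> %r
  }

  ; Expected SSE lowering (illustrative registers):
  ;   movaps %xmm0, %xmm1
  ;   psrld  $3, %xmm1        ; A >> X; only lane 0 of this result is kept
  ;   psrld  $2, %xmm0        ; A >> Y; lanes 1-3 of this result are kept
  ;   movss  %xmm1, %xmm0     ; blend: take lane 0 from the X-shifted value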
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index e78394e121c..815d27ff5ec 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -13359,6 +13359,79 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
     return DAG.getNode(ISD::MUL, dl, VT, Op, R);
   }
 
+  // If possible, lower this shift as a sequence of two shifts by
+  // constant plus a MOVSS/MOVSD instead of scalarizing it.
+  // Example:
+  //   (v4i32 (srl A, (build_vector <X, Y, Y, Y>)))
+  //
+  // Could be rewritten as:
+  //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
+  //
+  // The advantage is that the two shifts from the example would be
+  // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
+  // the vector shift into four scalar shifts plus four pairs of vector
+  // insert/extract.
+  if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
+      ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
+    unsigned TargetOpcode = X86ISD::MOVSS;
+    bool CanBeSimplified;
+    // The splat value for the first packed shift (the 'X' from the example).
+    SDValue Amt1 = Amt->getOperand(0);
+    // The splat value for the second packed shift (the 'Y' from the example).
+    SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
+                                        Amt->getOperand(2);
+
+    // See if it is possible to replace this node with a sequence of
+    // two shifts followed by a MOVSS/MOVSD.
+    if (VT == MVT::v4i32) {
+      // Check if it is legal to use a MOVSS.
+      CanBeSimplified = Amt2 == Amt->getOperand(2) &&
+                        Amt2 == Amt->getOperand(3);
+      if (!CanBeSimplified) {
+        // Otherwise, check if we can still simplify this node using a MOVSD.
+        CanBeSimplified = Amt1 == Amt->getOperand(1) &&
+                          Amt->getOperand(2) == Amt->getOperand(3);
+        TargetOpcode = X86ISD::MOVSD;
+        Amt2 = Amt->getOperand(2);
+      }
+    } else {
+      // Do similar checks for the case where the machine value type
+      // is MVT::v8i16.
+      CanBeSimplified = Amt1 == Amt->getOperand(1);
+      for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
+        CanBeSimplified = Amt2 == Amt->getOperand(i);
+
+      if (!CanBeSimplified) {
+        TargetOpcode = X86ISD::MOVSD;
+        CanBeSimplified = true;
+        Amt2 = Amt->getOperand(4);
+        for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
+          CanBeSimplified = Amt1 == Amt->getOperand(i);
+        for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
+          CanBeSimplified = Amt2 == Amt->getOperand(j);
+      }
+    }
+
+    if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
+        isa<ConstantSDNode>(Amt2)) {
+      // Replace this node with two shifts followed by a MOVSS/MOVSD.
+      EVT CastVT = MVT::v4i32;
+      SDValue Splat1 =
+        DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
+      SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
+      SDValue Splat2 =
+        DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
+      SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
+      if (TargetOpcode == X86ISD::MOVSD)
+        CastVT = MVT::v2i64;
+      SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
+      SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
+      SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
+                                            BitCast1, DAG);
+      return DAG.getNode(ISD::BITCAST, dl, VT, Result);
+    }
+  }
+
   if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
     assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
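[Editorial note on the getTargetShuffleNode call above, not part of the patch:
the MOVSS/MOVSD target shuffles keep only the lowest lane(s) of their second
operand, which is why the shift by Amt2 is passed first and the shift by Amt1
(the value occupying the low lanes of the amount vector) is passed second. A
sketch of the v4i32 case:]

  ; MOVSS(B, C) on v4i32 produces < C[0], B[1], B[2], B[3] >
  ; with B = (srl A, <Y,Y,Y,Y>) and C = (srl A, <X,X,X,X>):
  ;   lane 0      = A >> X
  ;   lanes 1..3  = A >> Y
  ; i.e. exactly (srl A, (build_vector <X, Y, Y, Y>)).
  ; MOVSD performs the same selection at 64-bit granularity, which is why
  ; the shifted values are first bitcast to v2i64 in that case.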
diff --git a/test/CodeGen/X86/lower-vec-shift.ll b/test/CodeGen/X86/lower-vec-shift.ll
new file mode 100644
index 00000000000..c28f82a0ef2
--- /dev/null
+++ b/test/CodeGen/X86/lower-vec-shift.ll
@@ -0,0 +1,125 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2
+
+
+; Verify that the following shifts are lowered into a sequence of two shifts plus
+; a blend. On pre-AVX2 targets, instead of scalarizing a logical or arithmetic
+; packed shift right by a constant build_vector, the backend should always try to
+; emit a simpler sequence of two shifts plus a blend when possible.
+
+define <8 x i16> @test1(<8 x i16> %a) {
+  %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
+  ret <8 x i16> %lshr
+}
+; CHECK-LABEL: test1
+; SSE: psrlw
+; SSE-NEXT: psrlw
+; SSE-NEXT: movss
+; AVX: vpsrlw
+; AVX-NEXT: vpsrlw
+; AVX-NEXT: vmovss
+; AVX2: vpsrlw
+; AVX2-NEXT: vpsrlw
+; AVX2-NEXT: vmovss
+; CHECK: ret
+
+
+define <8 x i16> @test2(<8 x i16> %a) {
+  %lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
+  ret <8 x i16> %lshr
+}
+; CHECK-LABEL: test2
+; SSE: psrlw
+; SSE-NEXT: psrlw
+; SSE-NEXT: movsd
+; AVX: vpsrlw
+; AVX-NEXT: vpsrlw
+; AVX-NEXT: vmovsd
+; AVX2: vpsrlw
+; AVX2-NEXT: vpsrlw
+; AVX2-NEXT: vmovsd
+; CHECK: ret
+
+
+define <4 x i32> @test3(<4 x i32> %a) {
+  %lshr = lshr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
+  ret <4 x i32> %lshr
+}
+; CHECK-LABEL: test3
+; SSE: psrld
+; SSE-NEXT: psrld
+; SSE-NEXT: movss
+; AVX: vpsrld
+; AVX-NEXT: vpsrld
+; AVX-NEXT: vmovss
+; AVX2: vpsrlvd
+; CHECK: ret
+
+
+define <4 x i32> @test4(<4 x i32> %a) {
+  %lshr = lshr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
+  ret <4 x i32> %lshr
+}
+; CHECK-LABEL: test4
+; SSE: psrld
+; SSE-NEXT: psrld
+; SSE-NEXT: movsd
+; AVX: vpsrld
+; AVX-NEXT: vpsrld
+; AVX-NEXT: vmovsd
+; AVX2: vpsrlvd
+; CHECK: ret
+
+
+define <8 x i16> @test5(<8 x i16> %a) {
+  %lshr = ashr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
+  ret <8 x i16> %lshr
+}
+
+define <8 x i16> @test6(<8 x i16> %a) {
+  %lshr = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
+  ret <8 x i16> %lshr
+}
+; CHECK-LABEL: test6
+; SSE: psraw
+; SSE-NEXT: psraw
+; SSE-NEXT: movsd
+; AVX: vpsraw
+; AVX-NEXT: vpsraw
+; AVX-NEXT: vmovsd
+; AVX2: vpsraw
+; AVX2-NEXT: vpsraw
+; AVX2-NEXT: vmovsd
+; CHECK: ret
+
+
+define <4 x i32> @test7(<4 x i32> %a) {
+  %lshr = ashr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
+  ret <4 x i32> %lshr
+}
+; CHECK-LABEL: test7
+; SSE: psrad
+; SSE-NEXT: psrad
+; SSE-NEXT: movss
+; AVX: vpsrad
+; AVX-NEXT: vpsrad
+; AVX-NEXT: vmovss
+; AVX2: vpsravd
+; CHECK: ret
+
+
+define <4 x i32> @test8(<4 x i32> %a) {
+  %lshr = ashr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
+  ret <4 x i32> %lshr
+}
+; CHECK-LABEL: test8
+; SSE: psrad
+; SSE-NEXT: psrad
+; SSE-NEXT: movsd
+; AVX: vpsrad
+; AVX-NEXT: vpsrad
+; AVX-NEXT: vmovsd
+; AVX2: vpsravd
+; CHECK: ret
+