From 003a272319d8871492edf9cecc25d9275b872f99 Mon Sep 17 00:00:00 2001
From: Nate Begeman
Date: Sat, 18 Feb 2006 02:43:25 +0000
Subject: [PATCH] Add a fold for add that exchanges it with a constant shift
 if possible, so that the shift may be more easily folded into other
 operations.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@26286 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/SelectionDAG/TargetLowering.cpp | 30 ++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index a48e9074909..533499cbc0a 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -459,6 +459,24 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask,
                                        CountTrailingZeros_64(~KnownZero2));
       KnownZero = (1ULL << KnownZeroOut) - 1;
       KnownOne = 0;
+
+      SDOperand SH = Op.getOperand(0);
+      // fold (add (shl x, c1), (shl c2, c1)) -> (shl (add x, c2), c1)
+      if (KnownZero && SH.getOpcode() == ISD::SHL && SH.Val->hasOneUse() &&
+          Op.Val->hasOneUse()) {
+        if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(SH.getOperand(1))) {
+          MVT::ValueType VT = Op.getValueType();
+          unsigned ShiftAmt = SA->getValue();
+          uint64_t AddAmt = AA->getValue();
+          uint64_t AddShr = AddAmt >> ShiftAmt;
+          if (AddAmt == (AddShr << ShiftAmt)) {
+            SDOperand ADD = TLO.DAG.getNode(ISD::ADD, VT, SH.getOperand(0),
+                                            TLO.DAG.getConstant(AddShr, VT));
+            SDOperand SHL = TLO.DAG.getNode(ISD::SHL, VT, ADD, SH.getOperand(1));
+            return TLO.CombineTo(Op, SHL);
+          }
+        }
+      }
     }
     break;
   case ISD::CTTZ:
@@ -577,7 +595,7 @@ void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
       KnownOne  <<= SA->getValue();
       KnownZero |= (1ULL << SA->getValue())-1;  // low bits known zero.
     }
-    break;
+    return;
   case ISD::SRL:
     // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
@@ -585,12 +603,12 @@ void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
       HighBits <<= MVT::getSizeInBits(Op.getValueType())-SA->getValue();
       Mask <<= SA->getValue();
       ComputeMaskedBits(Op.getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
-      assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
+      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
       KnownZero >>= SA->getValue();
       KnownOne  >>= SA->getValue();
       KnownZero |= HighBits;  // high bits known zero.
     }
-    break;
+    return;
   case ISD::SRA:
     if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
       uint64_t HighBits = (1ULL << SA->getValue())-1;
@@ -611,7 +629,7 @@ void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
         KnownOne |= HighBits;
       }
     }
-    break;
+    return;
   case ISD::CTTZ:
   case ISD::CTLZ:
   case ISD::CTPOP: {
@@ -666,12 +684,12 @@ void TargetLowering::ComputeMaskedBits(SDOperand Op, uint64_t Mask,
     // We know that the top bits of C-X are clear if X contains less bits
     // than C (i.e. no wrap-around can happen).  For example, 20-X is
     // positive if we can prove that X is >= 0 and < 16.
-    break;
+    return;
   default:
     // Allow the target to implement this method for its nodes.
     if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      computeMaskedBitsForTargetNode(Op, Mask, KnownZero, KnownOne);
-    break;
+    return;
   }
 }
-- 
2.34.1
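
For reference, the arithmetic identity behind the new fold is that a constant whose low
ShiftAmt bits are zero can be added either before or after the left shift:
(x << c1) + AddAmt == (x + (AddAmt >> c1)) << c1 whenever AddAmt == ((AddAmt >> c1) << c1),
which is exactly the guard the patch checks. The sketch below is a standalone plain-C++
illustration of that identity, not part of the patch and independent of the LLVM APIs above;
the helper name foldedAdd and the sample values are made up for the example.

    // Standalone check of the identity behind the fold:
    //   (x << c1) + AddAmt  ==  (x + (AddAmt >> c1)) << c1
    // valid whenever AddAmt's low c1 bits are zero, i.e. when
    // AddAmt == ((AddAmt >> c1) << c1) -- the same guard used in the patch.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static uint64_t foldedAdd(uint64_t x, unsigned c1, uint64_t addAmt) {
      uint64_t addShr = addAmt >> c1;
      assert(addAmt == (addShr << c1) && "low bits of the add constant must be zero");
      return (x + addShr) << c1;               // (shl (add x, c2), c1)
    }

    int main() {
      uint64_t x = 0x123;
      unsigned c1 = 4;
      uint64_t addAmt = 0x50;                  // low 4 bits are zero
      uint64_t before = (x << c1) + addAmt;    // (add (shl x, c1), AddAmt)
      uint64_t after  = foldedAdd(x, c1, addAmt);
      std::printf("before=0x%llx after=0x%llx\n",
                  (unsigned long long)before, (unsigned long long)after);
      return before == after ? 0 : 1;
    }

With the shift hoisted outward, the remaining (add x, c2) is exposed to later combines, which
is the motivation given in the commit message for making the shift easier to fold elsewhere.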