From 947c28935d179f486c8137323361923ba4f23886 Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Mon, 13 Mar 2006 06:51:27 +0000
Subject: [PATCH] Fold X+Y -> X|Y when safe. This implements:
 Regression/CodeGen/PowerPC/and_add.ll a case that occurs with dynamic
 allocas of constant size.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@26727 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 1d6d40eb9a9..33b046c3670 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -714,9 +714,27 @@ SDOperand DAGCombiner::visitADD(SDNode *N) {
   // fold (A+(B-A)) -> B
   if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
     return N1.getOperand(0);
-  //
+
   if (!MVT::isVector(VT) && SimplifyDemandedBits(SDOperand(N, 0)))
     return SDOperand();
+
+  // fold (a+b) -> (a|b) iff a and b share no bits.
+  if (MVT::isInteger(VT) && !MVT::isVector(VT)) {
+    uint64_t LHSZero, LHSOne;
+    uint64_t RHSZero, RHSOne;
+    uint64_t Mask = MVT::getIntVTBitMask(VT);
+    TLI.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
+    if (LHSZero) {
+      TLI.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
+
+      // If all possibly-set bits on the LHS are clear on the RHS, return an OR.
+      // If all possibly-set bits on the RHS are clear on the LHS, return an OR.
+      if ((RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
+          (LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
+        return DAG.getNode(ISD::OR, VT, N0, N1);
+    }
+  }
+
   return SDOperand();
 }
 
-- 
2.34.1
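
Note (not part of the patch): a minimal standalone C++ sketch of the check this
combine relies on, namely that a+b may be rewritten as a|b when no bit position
can be set in both operands, since the addition can then never carry. The
helper name knownZeroOfConstant and the sample values below (a left operand
whose low four bits are proven zero by an AND with ~15, plus the constant 12)
are illustrative assumptions, not taken from and_add.ll; the KnownZero masks
stand in for what TLI.ComputeMaskedBits would prove in the real combiner.

    // Illustration only: when does a + b equal a | b?
    #include <cstdint>
    #include <cstdio>

    // Stand-in for what ComputeMaskedBits proves about a constant: every bit
    // that is clear in the constant is known to be zero.
    static uint64_t knownZeroOfConstant(uint64_t C, uint64_t Mask) {
      return ~C & Mask;
    }

    // Same condition as in the patch: if every bit that might be set on one
    // side is known zero on the other side, a + b == a | b.
    static bool addFoldsToOr(uint64_t LHSZero, uint64_t RHSZero, uint64_t Mask) {
      return (RHSZero & (~LHSZero & Mask)) == (~LHSZero & Mask) ||
             (LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask);
    }

    int main() {
      const uint64_t Mask = 0xFFFFFFFFULL; // value mask for a 32-bit integer type
      // Assumed shape of the dynamic-alloca case: (x & ~15) + 12.  The AND
      // proves the low four bits of the LHS are zero, and the constant 12 only
      // occupies those low bits, so the operands share no bits.
      const uint64_t LHSZero = 0xFULL;                        // from the AND with ~15
      const uint64_t RHSZero = knownZeroOfConstant(12, Mask); // from the constant 12
      std::printf("a+b can become a|b: %s\n",
                  addFoldsToOr(LHSZero, RHSZero, Mask) ? "yes" : "no");
      return 0;
    }

On those inputs the predicate holds, so the rounded alloca size plus a small
offset can be emitted as an OR, which is what the new DAGCombiner code does.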