Fold nested ANDs early to avoid inefficiencies in MaskedValueIsZero. This
authorChris Lattner <sabre@nondot.org>
Wed, 26 Oct 2005 17:18:16 +0000 (17:18 +0000)
committerChris Lattner <sabre@nondot.org>
Wed, 26 Oct 2005 17:18:16 +0000 (17:18 +0000)
fixes a very slow compile in PR639.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@24011 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Transforms/Scalar/InstructionCombining.cpp

index 9ba0512a78b29b7a02c9e4e314c4bb4414f3805c..f3325096895ae3ceccfe72ed85a47768e0f40e66 100644 (file)
@@ -1725,6 +1725,15 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
     // and X, -1 == X
     if (AndRHS->isAllOnesValue())
       return ReplaceInstUsesWith(I, Op0);
+    
+    // and (and X, c1), c2 -> and (x, c1&c2).  Handle this case here, before
+    // calling MaskedValueIsZero, to avoid inefficient cases where we traipse
+    // through many levels of ands.
+    {
+      Value *X; ConstantInt *C1;
+      if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))))
+        return BinaryOperator::createAnd(X, ConstantExpr::getAnd(C1, AndRHS));
+    }
 
     if (MaskedValueIsZero(Op0, AndRHS))        // LHS & RHS == 0
       return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));