From: Dan Gohman
Date: Thu, 18 Jun 2009 20:23:18 +0000 (+0000)
Subject: Generalize LSR's OptimizeSMax to handle unsigned max tests as well
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=4658c9b4eaa89f00f682a7510b83e7d4895fe18f;p=oota-llvm.git

Generalize LSR's OptimizeSMax to handle unsigned max tests as well
as signed max tests. Along with r73717, this helps CodeGen avoid
emitting code for a maximum operation for this class of loop.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@73718 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 03966672d62..ad9235a18e7 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -143,10 +143,10 @@ namespace {
     /// inside the loop then try to eliminate the cast operation.
     void OptimizeShadowIV(Loop *L);

-    /// OptimizeSMax - Rewrite the loop's terminating condition
-    /// if it uses an smax computation.
-    ICmpInst *OptimizeSMax(Loop *L, ICmpInst *Cond,
-                           IVStrideUse* &CondUse);
+    /// OptimizeMax - Rewrite the loop's terminating condition
+    /// if it uses a max computation.
+    ICmpInst *OptimizeMax(Loop *L, ICmpInst *Cond,
+                          IVStrideUse* &CondUse);

     bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse,
                            const SCEVHandle *&CondStride);
@@ -2044,8 +2044,8 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
   return Cond;
 }

-/// OptimizeSMax - Rewrite the loop's terminating condition if it uses
-/// an smax computation.
+/// OptimizeMax - Rewrite the loop's terminating condition if it uses
+/// a max computation.
 ///
 /// This is a narrow solution to a specific, but acute, problem. For loops
 /// like this:
@@ -2055,10 +2055,10 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
 ///   p[i] = 0.0;
 /// } while (++i < n);
 ///
-/// where the comparison is signed, the trip count isn't just 'n', because
-/// 'n' could be negative. And unfortunately this can come up even for loops
-/// where the user didn't use a C do-while loop. For example, seemingly
-/// well-behaved top-test loops will commonly be lowered like this:
+/// the trip count isn't just 'n', because 'n' might not be positive. And
+/// unfortunately this can come up even for loops where the user didn't use
+/// a C do-while loop. For example, seemingly well-behaved top-test loops
+/// will commonly be lowered like this:
 //
 /// if (n > 0) {
 ///   i = 0;
@@ -2071,14 +2071,14 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
 /// test in such a way that indvars can't find it.
 ///
 /// When indvars can't find the if test in loops like this, it creates a
-/// signed-max expression, which allows it to give the loop a canonical
+/// max expression, which allows it to give the loop a canonical
 /// induction variable:
 ///
 ///   i = 0;
-///   smax = n < 1 ? 1 : n;
+///   max = n < 1 ? 1 : n;
 ///   do {
 ///     p[i] = 0.0;
-///   } while (++i != smax);
+///   } while (++i != max);
 ///
 /// Canonical induction variables are necessary because the loop passes
 /// are designed around them. The most obvious example of this is the
@@ -2094,8 +2094,8 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
 /// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
 /// the instructions for the maximum computation.
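 ///
 /// The unsigned case is analogous: for an unsigned trip count, indvars
 /// creates a umax instead. As a sketch, mirroring the signed example
 /// above rather than quoting existing source:
 ///
 ///   i = 0;
 ///   umax = n == 0 ? 1 : n;
 ///   do {
 ///     p[i] = 0.0;
 ///   } while (++i != umax);
 ///
 /// and the exit test is rewritten to ICMP_ULT (or ICMP_UGE) instead.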
 ///
-ICmpInst *LoopStrengthReduce::OptimizeSMax(Loop *L, ICmpInst *Cond,
-                                           IVStrideUse* &CondUse) {
+ICmpInst *LoopStrengthReduce::OptimizeMax(Loop *L, ICmpInst *Cond,
+                                          IVStrideUse* &CondUse) {
   // Check that the loop matches the pattern we're looking for.
   if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
       Cond->getPredicate() != CmpInst::ICMP_NE)
     return Cond;
@@ -2113,12 +2113,14 @@ ICmpInst *LoopStrengthReduce::OptimizeSMax(Loop *L, ICmpInst *Cond,
   SCEVHandle IterationCount = SE->getAddExpr(BackedgeTakenCount, One);

   // Check for a max calculation that matches the pattern.
-  const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(IterationCount);
-  if (!SMax || SMax != SE->getSCEV(Sel)) return Cond;
+  if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
+    return Cond;
+  const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount);
+  if (Max != SE->getSCEV(Sel)) return Cond;

-  SCEVHandle SMaxLHS = SMax->getOperand(0);
-  SCEVHandle SMaxRHS = SMax->getOperand(1);
-  if (!SMaxLHS || SMaxLHS != One) return Cond;
+  SCEVHandle MaxLHS = Max->getOperand(0);
+  SCEVHandle MaxRHS = Max->getOperand(1);
+  if (!MaxLHS || MaxLHS != One) return Cond;

   // Check the relevant induction variable for conformance to
   // the pattern.
@@ -2135,19 +2137,23 @@ ICmpInst *LoopStrengthReduce::OptimizeSMax(Loop *L, ICmpInst *Cond,
   // Check the right operand of the select, and remember it, as it will
   // be used in the new comparison instruction.
   Value *NewRHS = 0;
-  if (SE->getSCEV(Sel->getOperand(1)) == SMaxRHS)
+  if (SE->getSCEV(Sel->getOperand(1)) == MaxRHS)
     NewRHS = Sel->getOperand(1);
-  else if (SE->getSCEV(Sel->getOperand(2)) == SMaxRHS)
+  else if (SE->getSCEV(Sel->getOperand(2)) == MaxRHS)
     NewRHS = Sel->getOperand(2);
   if (!NewRHS) return Cond;

+  // Determine the new comparison opcode. It may be signed or unsigned,
+  // and the original comparison may be either equality or inequality.
+  CmpInst::Predicate Pred =
+    isa<SCEVSMaxExpr>(Max) ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT;
+  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
+    Pred = CmpInst::getInversePredicate(Pred);
+
   // Ok, everything looks ok to change the condition into an SLT/ULT or
   // SGE/UGE and delete the max calculation.
   ICmpInst *NewCond =
-    new ICmpInst(Cond->getPredicate() == CmpInst::ICMP_NE ?
-                   CmpInst::ICMP_SLT :
-                   CmpInst::ICMP_SGE,
-                 Cond->getOperand(0), NewRHS, "scmp", Cond);
+    new ICmpInst(Pred, Cond->getOperand(0), NewRHS, "scmp", Cond);

   // Delete the max calculation instructions.
   Cond->replaceAllUsesWith(NewCond);
@@ -2360,10 +2366,10 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
         StrideNoReuse.insert(*CondStride);
     }

-  // If the trip count is computed in terms of an smax (due to ScalarEvolution
+  // If the trip count is computed in terms of a max (due to ScalarEvolution
   // being unable to find a sufficient guard, for example), change the loop
-  // comparison to use SLT instead of NE.
-  Cond = OptimizeSMax(L, Cond, CondUse);
+  // comparison to use SLT or ULT instead of NE.
+  Cond = OptimizeMax(L, Cond, CondUse);

   // If possible, change stride and operands of the compare instruction to
   // eliminate one stride.
diff --git a/test/CodeGen/X86/optimize-max-0.ll b/test/CodeGen/X86/optimize-max-0.ll
new file mode 100644
index 00000000000..90c14565e9a
--- /dev/null
+++ b/test/CodeGen/X86/optimize-max-0.ll
@@ -0,0 +1,461 @@
+; RUN: llvm-as < %s | llc -march=x86 | not grep cmov

+; LSR should be able to eliminate the max computations by
+; making the loops use slt/ult comparisons instead of ne comparisons.
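+;
+; @foo is the signed variant (sdiv/srem, sgt/slt) and @bar is the unsigned
+; variant (udiv/urem, ugt/ult) of the same routine. Both guard their inner
+; loops with max-of-1 trip counts, for example (an excerpt from @foo below):
+;
+;   %tmp8 = icmp sgt i32 1, %w
+;   %smax9 = select i1 %tmp8, i32 1, i32 %w
+;
+; which LSR should remove by rewriting the corresponding exit conditions.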
+ +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" +target triple = "i386-apple-darwin9" + +define void @foo(i8* %r, i32 %s, i32 %w, i32 %x, i8* %j, i32 %d) nounwind { +entry: + %0 = mul i32 %x, %w ; [#uses=2] + %1 = mul i32 %x, %w ; [#uses=1] + %2 = sdiv i32 %1, 4 ; [#uses=1] + %.sum2 = add i32 %2, %0 ; [#uses=2] + %cond = icmp eq i32 %d, 1 ; [#uses=1] + br i1 %cond, label %bb29, label %bb10.preheader + +bb10.preheader: ; preds = %entry + %3 = icmp sgt i32 %x, 0 ; [#uses=1] + br i1 %3, label %bb.nph9, label %bb18.loopexit + +bb.nph7: ; preds = %bb7.preheader + %4 = mul i32 %y.08, %w ; [#uses=1] + %5 = mul i32 %y.08, %s ; [#uses=1] + %6 = add i32 %5, 1 ; [#uses=1] + %tmp8 = icmp sgt i32 1, %w ; [#uses=1] + %smax9 = select i1 %tmp8, i32 1, i32 %w ; [#uses=1] + br label %bb6 + +bb6: ; preds = %bb7, %bb.nph7 + %x.06 = phi i32 [ 0, %bb.nph7 ], [ %indvar.next7, %bb7 ] ; [#uses=3] + %7 = add i32 %x.06, %4 ; [#uses=1] + %8 = shl i32 %x.06, 1 ; [#uses=1] + %9 = add i32 %6, %8 ; [#uses=1] + %10 = getelementptr i8* %r, i32 %9 ; [#uses=1] + %11 = load i8* %10, align 1 ; [#uses=1] + %12 = getelementptr i8* %j, i32 %7 ; [#uses=1] + store i8 %11, i8* %12, align 1 + br label %bb7 + +bb7: ; preds = %bb6 + %indvar.next7 = add i32 %x.06, 1 ; [#uses=2] + %exitcond10 = icmp ne i32 %indvar.next7, %smax9 ; [#uses=1] + br i1 %exitcond10, label %bb6, label %bb7.bb9_crit_edge + +bb7.bb9_crit_edge: ; preds = %bb7 + br label %bb9 + +bb9: ; preds = %bb7.preheader, %bb7.bb9_crit_edge + br label %bb10 + +bb10: ; preds = %bb9 + %indvar.next11 = add i32 %y.08, 1 ; [#uses=2] + %exitcond12 = icmp ne i32 %indvar.next11, %x ; [#uses=1] + br i1 %exitcond12, label %bb7.preheader, label %bb10.bb18.loopexit_crit_edge + +bb10.bb18.loopexit_crit_edge: ; preds = %bb10 + br label %bb10.bb18.loopexit_crit_edge.split + +bb10.bb18.loopexit_crit_edge.split: ; preds = %bb.nph9, %bb10.bb18.loopexit_crit_edge + br label %bb18.loopexit + +bb.nph9: ; preds = %bb10.preheader + %13 = icmp sgt i32 %w, 0 ; [#uses=1] + br i1 %13, label %bb.nph9.split, label %bb10.bb18.loopexit_crit_edge.split + +bb.nph9.split: ; preds = %bb.nph9 + br label %bb7.preheader + +bb7.preheader: ; preds = %bb.nph9.split, %bb10 + %y.08 = phi i32 [ 0, %bb.nph9.split ], [ %indvar.next11, %bb10 ] ; [#uses=3] + br i1 true, label %bb.nph7, label %bb9 + +bb.nph5: ; preds = %bb18.loopexit + %14 = sdiv i32 %w, 2 ; [#uses=1] + %15 = icmp slt i32 %w, 2 ; [#uses=1] + %16 = sdiv i32 %x, 2 ; [#uses=2] + br i1 %15, label %bb18.bb20_crit_edge.split, label %bb.nph5.split + +bb.nph5.split: ; preds = %bb.nph5 + %tmp2 = icmp sgt i32 1, %16 ; [#uses=1] + %smax3 = select i1 %tmp2, i32 1, i32 %16 ; [#uses=1] + br label %bb13 + +bb13: ; preds = %bb18, %bb.nph5.split + %y.14 = phi i32 [ 0, %bb.nph5.split ], [ %indvar.next1, %bb18 ] ; [#uses=4] + %17 = mul i32 %14, %y.14 ; [#uses=2] + %18 = shl i32 %y.14, 1 ; [#uses=1] + %19 = srem i32 %y.14, 2 ; [#uses=1] + %20 = add i32 %19, %18 ; [#uses=1] + %21 = mul i32 %20, %s ; [#uses=2] + br i1 true, label %bb.nph3, label %bb17 + +bb.nph3: ; preds = %bb13 + %22 = add i32 %17, %0 ; [#uses=1] + %23 = add i32 %17, %.sum2 ; [#uses=1] + %24 = sdiv i32 %w, 2 ; [#uses=2] + %tmp = icmp sgt i32 1, %24 ; [#uses=1] + %smax = select i1 %tmp, i32 1, i32 %24 ; [#uses=1] + br label %bb14 + +bb14: ; preds = %bb15, %bb.nph3 + %x.12 = phi i32 [ 0, %bb.nph3 ], [ %indvar.next, %bb15 ] ; [#uses=5] + %25 = shl i32 %x.12, 2 ; [#uses=1] + %26 = add i32 %25, %21 ; [#uses=1] + %27 = 
getelementptr i8* %r, i32 %26 ; [#uses=1] + %28 = load i8* %27, align 1 ; [#uses=1] + %.sum = add i32 %22, %x.12 ; [#uses=1] + %29 = getelementptr i8* %j, i32 %.sum ; [#uses=1] + store i8 %28, i8* %29, align 1 + %30 = shl i32 %x.12, 2 ; [#uses=1] + %31 = or i32 %30, 2 ; [#uses=1] + %32 = add i32 %31, %21 ; [#uses=1] + %33 = getelementptr i8* %r, i32 %32 ; [#uses=1] + %34 = load i8* %33, align 1 ; [#uses=1] + %.sum6 = add i32 %23, %x.12 ; [#uses=1] + %35 = getelementptr i8* %j, i32 %.sum6 ; [#uses=1] + store i8 %34, i8* %35, align 1 + br label %bb15 + +bb15: ; preds = %bb14 + %indvar.next = add i32 %x.12, 1 ; [#uses=2] + %exitcond = icmp ne i32 %indvar.next, %smax ; [#uses=1] + br i1 %exitcond, label %bb14, label %bb15.bb17_crit_edge + +bb15.bb17_crit_edge: ; preds = %bb15 + br label %bb17 + +bb17: ; preds = %bb15.bb17_crit_edge, %bb13 + br label %bb18 + +bb18.loopexit: ; preds = %bb10.bb18.loopexit_crit_edge.split, %bb10.preheader + %36 = icmp slt i32 %x, 2 ; [#uses=1] + br i1 %36, label %bb20, label %bb.nph5 + +bb18: ; preds = %bb17 + %indvar.next1 = add i32 %y.14, 1 ; [#uses=2] + %exitcond4 = icmp ne i32 %indvar.next1, %smax3 ; [#uses=1] + br i1 %exitcond4, label %bb13, label %bb18.bb20_crit_edge + +bb18.bb20_crit_edge: ; preds = %bb18 + br label %bb18.bb20_crit_edge.split + +bb18.bb20_crit_edge.split: ; preds = %bb18.bb20_crit_edge, %bb.nph5 + br label %bb20 + +bb20: ; preds = %bb18.bb20_crit_edge.split, %bb18.loopexit + switch i32 %d, label %return [ + i32 3, label %bb22 + i32 1, label %bb29 + ] + +bb22: ; preds = %bb20 + %37 = mul i32 %x, %w ; [#uses=1] + %38 = sdiv i32 %37, 4 ; [#uses=1] + %.sum3 = add i32 %38, %.sum2 ; [#uses=2] + %39 = add i32 %x, 15 ; [#uses=1] + %40 = and i32 %39, -16 ; [#uses=1] + %41 = add i32 %w, 15 ; [#uses=1] + %42 = and i32 %41, -16 ; [#uses=1] + %43 = mul i32 %40, %s ; [#uses=1] + %44 = icmp sgt i32 %x, 0 ; [#uses=1] + br i1 %44, label %bb.nph, label %bb26 + +bb.nph: ; preds = %bb22 + br label %bb23 + +bb23: ; preds = %bb24, %bb.nph + %y.21 = phi i32 [ 0, %bb.nph ], [ %indvar.next5, %bb24 ] ; [#uses=3] + %45 = mul i32 %y.21, %42 ; [#uses=1] + %.sum1 = add i32 %45, %43 ; [#uses=1] + %46 = getelementptr i8* %r, i32 %.sum1 ; [#uses=1] + %47 = mul i32 %y.21, %w ; [#uses=1] + %.sum5 = add i32 %47, %.sum3 ; [#uses=1] + %48 = getelementptr i8* %j, i32 %.sum5 ; [#uses=1] + tail call void @llvm.memcpy.i32(i8* %48, i8* %46, i32 %w, i32 1) + br label %bb24 + +bb24: ; preds = %bb23 + %indvar.next5 = add i32 %y.21, 1 ; [#uses=2] + %exitcond6 = icmp ne i32 %indvar.next5, %x ; [#uses=1] + br i1 %exitcond6, label %bb23, label %bb24.bb26_crit_edge + +bb24.bb26_crit_edge: ; preds = %bb24 + br label %bb26 + +bb26: ; preds = %bb24.bb26_crit_edge, %bb22 + %49 = mul i32 %x, %w ; [#uses=1] + %.sum4 = add i32 %.sum3, %49 ; [#uses=1] + %50 = getelementptr i8* %j, i32 %.sum4 ; [#uses=1] + %51 = mul i32 %x, %w ; [#uses=1] + %52 = sdiv i32 %51, 2 ; [#uses=1] + tail call void @llvm.memset.i32(i8* %50, i8 -128, i32 %52, i32 1) + ret void + +bb29: ; preds = %bb20, %entry + %53 = add i32 %w, 15 ; [#uses=1] + %54 = and i32 %53, -16 ; [#uses=1] + %55 = icmp sgt i32 %x, 0 ; [#uses=1] + br i1 %55, label %bb.nph11, label %bb33 + +bb.nph11: ; preds = %bb29 + br label %bb30 + +bb30: ; preds = %bb31, %bb.nph11 + %y.310 = phi i32 [ 0, %bb.nph11 ], [ %indvar.next13, %bb31 ] ; [#uses=3] + %56 = mul i32 %y.310, %54 ; [#uses=1] + %57 = getelementptr i8* %r, i32 %56 ; [#uses=1] + %58 = mul i32 %y.310, %w ; [#uses=1] + %59 = getelementptr i8* %j, i32 %58 ; [#uses=1] + tail call void 
@llvm.memcpy.i32(i8* %59, i8* %57, i32 %w, i32 1) + br label %bb31 + +bb31: ; preds = %bb30 + %indvar.next13 = add i32 %y.310, 1 ; [#uses=2] + %exitcond14 = icmp ne i32 %indvar.next13, %x ; [#uses=1] + br i1 %exitcond14, label %bb30, label %bb31.bb33_crit_edge + +bb31.bb33_crit_edge: ; preds = %bb31 + br label %bb33 + +bb33: ; preds = %bb31.bb33_crit_edge, %bb29 + %60 = mul i32 %x, %w ; [#uses=1] + %61 = getelementptr i8* %j, i32 %60 ; [#uses=1] + %62 = mul i32 %x, %w ; [#uses=1] + %63 = sdiv i32 %62, 2 ; [#uses=1] + tail call void @llvm.memset.i32(i8* %61, i8 -128, i32 %63, i32 1) + ret void + +return: ; preds = %bb20 + ret void +} + +define void @bar(i8* %r, i32 %s, i32 %w, i32 %x, i8* %j, i32 %d) nounwind { +entry: + %0 = mul i32 %x, %w ; [#uses=2] + %1 = mul i32 %x, %w ; [#uses=1] + %2 = udiv i32 %1, 4 ; [#uses=1] + %.sum2 = add i32 %2, %0 ; [#uses=2] + %cond = icmp eq i32 %d, 1 ; [#uses=1] + br i1 %cond, label %bb29, label %bb10.preheader + +bb10.preheader: ; preds = %entry + %3 = icmp ne i32 %x, 0 ; [#uses=1] + br i1 %3, label %bb.nph9, label %bb18.loopexit + +bb.nph7: ; preds = %bb7.preheader + %4 = mul i32 %y.08, %w ; [#uses=1] + %5 = mul i32 %y.08, %s ; [#uses=1] + %6 = add i32 %5, 1 ; [#uses=1] + %tmp8 = icmp ugt i32 1, %w ; [#uses=1] + %smax9 = select i1 %tmp8, i32 1, i32 %w ; [#uses=1] + br label %bb6 + +bb6: ; preds = %bb7, %bb.nph7 + %x.06 = phi i32 [ 0, %bb.nph7 ], [ %indvar.next7, %bb7 ] ; [#uses=3] + %7 = add i32 %x.06, %4 ; [#uses=1] + %8 = shl i32 %x.06, 1 ; [#uses=1] + %9 = add i32 %6, %8 ; [#uses=1] + %10 = getelementptr i8* %r, i32 %9 ; [#uses=1] + %11 = load i8* %10, align 1 ; [#uses=1] + %12 = getelementptr i8* %j, i32 %7 ; [#uses=1] + store i8 %11, i8* %12, align 1 + br label %bb7 + +bb7: ; preds = %bb6 + %indvar.next7 = add i32 %x.06, 1 ; [#uses=2] + %exitcond10 = icmp ne i32 %indvar.next7, %smax9 ; [#uses=1] + br i1 %exitcond10, label %bb6, label %bb7.bb9_crit_edge + +bb7.bb9_crit_edge: ; preds = %bb7 + br label %bb9 + +bb9: ; preds = %bb7.preheader, %bb7.bb9_crit_edge + br label %bb10 + +bb10: ; preds = %bb9 + %indvar.next11 = add i32 %y.08, 1 ; [#uses=2] + %exitcond12 = icmp ne i32 %indvar.next11, %x ; [#uses=1] + br i1 %exitcond12, label %bb7.preheader, label %bb10.bb18.loopexit_crit_edge + +bb10.bb18.loopexit_crit_edge: ; preds = %bb10 + br label %bb10.bb18.loopexit_crit_edge.split + +bb10.bb18.loopexit_crit_edge.split: ; preds = %bb.nph9, %bb10.bb18.loopexit_crit_edge + br label %bb18.loopexit + +bb.nph9: ; preds = %bb10.preheader + %13 = icmp ugt i32 %w, 0 ; [#uses=1] + br i1 %13, label %bb.nph9.split, label %bb10.bb18.loopexit_crit_edge.split + +bb.nph9.split: ; preds = %bb.nph9 + br label %bb7.preheader + +bb7.preheader: ; preds = %bb.nph9.split, %bb10 + %y.08 = phi i32 [ 0, %bb.nph9.split ], [ %indvar.next11, %bb10 ] ; [#uses=3] + br i1 true, label %bb.nph7, label %bb9 + +bb.nph5: ; preds = %bb18.loopexit + %14 = udiv i32 %w, 2 ; [#uses=1] + %15 = icmp ult i32 %w, 2 ; [#uses=1] + %16 = udiv i32 %x, 2 ; [#uses=2] + br i1 %15, label %bb18.bb20_crit_edge.split, label %bb.nph5.split + +bb.nph5.split: ; preds = %bb.nph5 + %tmp2 = icmp ugt i32 1, %16 ; [#uses=1] + %smax3 = select i1 %tmp2, i32 1, i32 %16 ; [#uses=1] + br label %bb13 + +bb13: ; preds = %bb18, %bb.nph5.split + %y.14 = phi i32 [ 0, %bb.nph5.split ], [ %indvar.next1, %bb18 ] ; [#uses=4] + %17 = mul i32 %14, %y.14 ; [#uses=2] + %18 = shl i32 %y.14, 1 ; [#uses=1] + %19 = urem i32 %y.14, 2 ; [#uses=1] + %20 = add i32 %19, %18 ; [#uses=1] + %21 = mul i32 %20, %s ; [#uses=2] + br i1 true, label 
%bb.nph3, label %bb17 + +bb.nph3: ; preds = %bb13 + %22 = add i32 %17, %0 ; [#uses=1] + %23 = add i32 %17, %.sum2 ; [#uses=1] + %24 = udiv i32 %w, 2 ; [#uses=2] + %tmp = icmp ugt i32 1, %24 ; [#uses=1] + %smax = select i1 %tmp, i32 1, i32 %24 ; [#uses=1] + br label %bb14 + +bb14: ; preds = %bb15, %bb.nph3 + %x.12 = phi i32 [ 0, %bb.nph3 ], [ %indvar.next, %bb15 ] ; [#uses=5] + %25 = shl i32 %x.12, 2 ; [#uses=1] + %26 = add i32 %25, %21 ; [#uses=1] + %27 = getelementptr i8* %r, i32 %26 ; [#uses=1] + %28 = load i8* %27, align 1 ; [#uses=1] + %.sum = add i32 %22, %x.12 ; [#uses=1] + %29 = getelementptr i8* %j, i32 %.sum ; [#uses=1] + store i8 %28, i8* %29, align 1 + %30 = shl i32 %x.12, 2 ; [#uses=1] + %31 = or i32 %30, 2 ; [#uses=1] + %32 = add i32 %31, %21 ; [#uses=1] + %33 = getelementptr i8* %r, i32 %32 ; [#uses=1] + %34 = load i8* %33, align 1 ; [#uses=1] + %.sum6 = add i32 %23, %x.12 ; [#uses=1] + %35 = getelementptr i8* %j, i32 %.sum6 ; [#uses=1] + store i8 %34, i8* %35, align 1 + br label %bb15 + +bb15: ; preds = %bb14 + %indvar.next = add i32 %x.12, 1 ; [#uses=2] + %exitcond = icmp ne i32 %indvar.next, %smax ; [#uses=1] + br i1 %exitcond, label %bb14, label %bb15.bb17_crit_edge + +bb15.bb17_crit_edge: ; preds = %bb15 + br label %bb17 + +bb17: ; preds = %bb15.bb17_crit_edge, %bb13 + br label %bb18 + +bb18.loopexit: ; preds = %bb10.bb18.loopexit_crit_edge.split, %bb10.preheader + %36 = icmp ult i32 %x, 2 ; [#uses=1] + br i1 %36, label %bb20, label %bb.nph5 + +bb18: ; preds = %bb17 + %indvar.next1 = add i32 %y.14, 1 ; [#uses=2] + %exitcond4 = icmp ne i32 %indvar.next1, %smax3 ; [#uses=1] + br i1 %exitcond4, label %bb13, label %bb18.bb20_crit_edge + +bb18.bb20_crit_edge: ; preds = %bb18 + br label %bb18.bb20_crit_edge.split + +bb18.bb20_crit_edge.split: ; preds = %bb18.bb20_crit_edge, %bb.nph5 + br label %bb20 + +bb20: ; preds = %bb18.bb20_crit_edge.split, %bb18.loopexit + switch i32 %d, label %return [ + i32 3, label %bb22 + i32 1, label %bb29 + ] + +bb22: ; preds = %bb20 + %37 = mul i32 %x, %w ; [#uses=1] + %38 = udiv i32 %37, 4 ; [#uses=1] + %.sum3 = add i32 %38, %.sum2 ; [#uses=2] + %39 = add i32 %x, 15 ; [#uses=1] + %40 = and i32 %39, -16 ; [#uses=1] + %41 = add i32 %w, 15 ; [#uses=1] + %42 = and i32 %41, -16 ; [#uses=1] + %43 = mul i32 %40, %s ; [#uses=1] + %44 = icmp ugt i32 %x, 0 ; [#uses=1] + br i1 %44, label %bb.nph, label %bb26 + +bb.nph: ; preds = %bb22 + br label %bb23 + +bb23: ; preds = %bb24, %bb.nph + %y.21 = phi i32 [ 0, %bb.nph ], [ %indvar.next5, %bb24 ] ; [#uses=3] + %45 = mul i32 %y.21, %42 ; [#uses=1] + %.sum1 = add i32 %45, %43 ; [#uses=1] + %46 = getelementptr i8* %r, i32 %.sum1 ; [#uses=1] + %47 = mul i32 %y.21, %w ; [#uses=1] + %.sum5 = add i32 %47, %.sum3 ; [#uses=1] + %48 = getelementptr i8* %j, i32 %.sum5 ; [#uses=1] + tail call void @llvm.memcpy.i32(i8* %48, i8* %46, i32 %w, i32 1) + br label %bb24 + +bb24: ; preds = %bb23 + %indvar.next5 = add i32 %y.21, 1 ; [#uses=2] + %exitcond6 = icmp ne i32 %indvar.next5, %x ; [#uses=1] + br i1 %exitcond6, label %bb23, label %bb24.bb26_crit_edge + +bb24.bb26_crit_edge: ; preds = %bb24 + br label %bb26 + +bb26: ; preds = %bb24.bb26_crit_edge, %bb22 + %49 = mul i32 %x, %w ; [#uses=1] + %.sum4 = add i32 %.sum3, %49 ; [#uses=1] + %50 = getelementptr i8* %j, i32 %.sum4 ; [#uses=1] + %51 = mul i32 %x, %w ; [#uses=1] + %52 = udiv i32 %51, 2 ; [#uses=1] + tail call void @llvm.memset.i32(i8* %50, i8 -128, i32 %52, i32 1) + ret void + +bb29: ; preds = %bb20, %entry + %53 = add i32 %w, 15 ; [#uses=1] + %54 = and i32 %53, -16 ; 
[#uses=1] + %55 = icmp ugt i32 %x, 0 ; [#uses=1] + br i1 %55, label %bb.nph11, label %bb33 + +bb.nph11: ; preds = %bb29 + br label %bb30 + +bb30: ; preds = %bb31, %bb.nph11 + %y.310 = phi i32 [ 0, %bb.nph11 ], [ %indvar.next13, %bb31 ] ; [#uses=3] + %56 = mul i32 %y.310, %54 ; [#uses=1] + %57 = getelementptr i8* %r, i32 %56 ; [#uses=1] + %58 = mul i32 %y.310, %w ; [#uses=1] + %59 = getelementptr i8* %j, i32 %58 ; [#uses=1] + tail call void @llvm.memcpy.i32(i8* %59, i8* %57, i32 %w, i32 1) + br label %bb31 + +bb31: ; preds = %bb30 + %indvar.next13 = add i32 %y.310, 1 ; [#uses=2] + %exitcond14 = icmp ne i32 %indvar.next13, %x ; [#uses=1] + br i1 %exitcond14, label %bb30, label %bb31.bb33_crit_edge + +bb31.bb33_crit_edge: ; preds = %bb31 + br label %bb33 + +bb33: ; preds = %bb31.bb33_crit_edge, %bb29 + %60 = mul i32 %x, %w ; [#uses=1] + %61 = getelementptr i8* %j, i32 %60 ; [#uses=1] + %62 = mul i32 %x, %w ; [#uses=1] + %63 = udiv i32 %62, 2 ; [#uses=1] + tail call void @llvm.memset.i32(i8* %61, i8 -128, i32 %63, i32 1) + ret void + +return: ; preds = %bb20 + ret void +} + +declare void @llvm.memcpy.i32(i8*, i8*, i32, i32) nounwind + +declare void @llvm.memset.i32(i8*, i8, i32, i32) nounwind diff --git a/test/CodeGen/X86/optimize-max-1.ll b/test/CodeGen/X86/optimize-max-1.ll new file mode 100644 index 00000000000..084e1818f5d --- /dev/null +++ b/test/CodeGen/X86/optimize-max-1.ll @@ -0,0 +1,78 @@ +; RUN: llvm-as < %s | llc -march=x86-64 | not grep cmov + +; LSR should be able to eliminate both smax and umax expressions +; in loop trip counts. + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" + +define void @fs(double* nocapture %p, i64 %n) nounwind { +entry: + %tmp = icmp slt i64 %n, 1 ; [#uses=1] + %smax = select i1 %tmp, i64 1, i64 %n ; [#uses=1] + br label %bb + +bb: ; preds = %bb, %entry + %i.0 = phi i64 [ 0, %entry ], [ %0, %bb ] ; [#uses=2] + %scevgep = getelementptr double* %p, i64 %i.0 ; [#uses=1] + store double 0.000000e+00, double* %scevgep, align 8 + %0 = add i64 %i.0, 1 ; [#uses=2] + %exitcond = icmp eq i64 %0, %smax ; [#uses=1] + br i1 %exitcond, label %return, label %bb + +return: ; preds = %bb + ret void +} + +define void @bs(double* nocapture %p, i64 %n) nounwind { +entry: + %tmp = icmp sge i64 %n, 1 ; [#uses=1] + %smax = select i1 %tmp, i64 %n, i64 1 ; [#uses=1] + br label %bb + +bb: ; preds = %bb, %entry + %i.0 = phi i64 [ 0, %entry ], [ %0, %bb ] ; [#uses=2] + %scevgep = getelementptr double* %p, i64 %i.0 ; [#uses=1] + store double 0.000000e+00, double* %scevgep, align 8 + %0 = add i64 %i.0, 1 ; [#uses=2] + %exitcond = icmp eq i64 %0, %smax ; [#uses=1] + br i1 %exitcond, label %return, label %bb + +return: ; preds = %bb + ret void +} + +define void @fu(double* nocapture %p, i64 %n) nounwind { +entry: + %tmp = icmp eq i64 %n, 0 ; [#uses=1] + %umax = select i1 %tmp, i64 1, i64 %n ; [#uses=1] + br label %bb + +bb: ; preds = %bb, %entry + %i.0 = phi i64 [ 0, %entry ], [ %0, %bb ] ; [#uses=2] + %scevgep = getelementptr double* %p, i64 %i.0 ; [#uses=1] + store double 0.000000e+00, double* %scevgep, align 8 + %0 = add i64 %i.0, 1 ; [#uses=2] + %exitcond = icmp eq i64 %0, %umax ; [#uses=1] + br i1 %exitcond, label %return, label %bb + +return: ; preds = %bb + ret void +} + +define void @bu(double* nocapture %p, i64 %n) nounwind { +entry: + %tmp = icmp ne i64 %n, 0 ; [#uses=1] + %umax = select i1 %tmp, i64 %n, i64 1 ; [#uses=1] + br label %bb + +bb: ; preds = %bb, %entry + %i.0 = 
phi i64 [ 0, %entry ], [ %0, %bb ] ; [#uses=2] + %scevgep = getelementptr double* %p, i64 %i.0 ; [#uses=1] + store double 0.000000e+00, double* %scevgep, align 8 + %0 = add i64 %i.0, 1 ; [#uses=2] + %exitcond = icmp eq i64 %0, %umax ; [#uses=1] + br i1 %exitcond, label %return, label %bb + +return: ; preds = %bb + ret void +} diff --git a/test/CodeGen/X86/optimize-smax.ll b/test/CodeGen/X86/optimize-smax.ll deleted file mode 100644 index 0c3be31e293..00000000000 --- a/test/CodeGen/X86/optimize-smax.ll +++ /dev/null @@ -1,236 +0,0 @@ -; RUN: llvm-as < %s | llc -march=x86 | not grep cmov - -; LSR should be able to eliminate the smax computations by -; making the loops use slt comparisons instead of ne comparisons. - -target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" -target triple = "i386-apple-darwin9" - -define void @foo(i8* %r, i32 %s, i32 %w, i32 %x, i8* %j, i32 %d) nounwind { -entry: - %0 = mul i32 %x, %w ; [#uses=2] - %1 = mul i32 %x, %w ; [#uses=1] - %2 = sdiv i32 %1, 4 ; [#uses=1] - %.sum2 = add i32 %2, %0 ; [#uses=2] - %cond = icmp eq i32 %d, 1 ; [#uses=1] - br i1 %cond, label %bb29, label %bb10.preheader - -bb10.preheader: ; preds = %entry - %3 = icmp sgt i32 %x, 0 ; [#uses=1] - br i1 %3, label %bb.nph9, label %bb18.loopexit - -bb.nph7: ; preds = %bb7.preheader - %4 = mul i32 %y.08, %w ; [#uses=1] - %5 = mul i32 %y.08, %s ; [#uses=1] - %6 = add i32 %5, 1 ; [#uses=1] - %tmp8 = icmp sgt i32 1, %w ; [#uses=1] - %smax9 = select i1 %tmp8, i32 1, i32 %w ; [#uses=1] - br label %bb6 - -bb6: ; preds = %bb7, %bb.nph7 - %x.06 = phi i32 [ 0, %bb.nph7 ], [ %indvar.next7, %bb7 ] ; [#uses=3] - %7 = add i32 %x.06, %4 ; [#uses=1] - %8 = shl i32 %x.06, 1 ; [#uses=1] - %9 = add i32 %6, %8 ; [#uses=1] - %10 = getelementptr i8* %r, i32 %9 ; [#uses=1] - %11 = load i8* %10, align 1 ; [#uses=1] - %12 = getelementptr i8* %j, i32 %7 ; [#uses=1] - store i8 %11, i8* %12, align 1 - br label %bb7 - -bb7: ; preds = %bb6 - %indvar.next7 = add i32 %x.06, 1 ; [#uses=2] - %exitcond10 = icmp ne i32 %indvar.next7, %smax9 ; [#uses=1] - br i1 %exitcond10, label %bb6, label %bb7.bb9_crit_edge - -bb7.bb9_crit_edge: ; preds = %bb7 - br label %bb9 - -bb9: ; preds = %bb7.preheader, %bb7.bb9_crit_edge - br label %bb10 - -bb10: ; preds = %bb9 - %indvar.next11 = add i32 %y.08, 1 ; [#uses=2] - %exitcond12 = icmp ne i32 %indvar.next11, %x ; [#uses=1] - br i1 %exitcond12, label %bb7.preheader, label %bb10.bb18.loopexit_crit_edge - -bb10.bb18.loopexit_crit_edge: ; preds = %bb10 - br label %bb10.bb18.loopexit_crit_edge.split - -bb10.bb18.loopexit_crit_edge.split: ; preds = %bb.nph9, %bb10.bb18.loopexit_crit_edge - br label %bb18.loopexit - -bb.nph9: ; preds = %bb10.preheader - %13 = icmp sgt i32 %w, 0 ; [#uses=1] - br i1 %13, label %bb.nph9.split, label %bb10.bb18.loopexit_crit_edge.split - -bb.nph9.split: ; preds = %bb.nph9 - br label %bb7.preheader - -bb7.preheader: ; preds = %bb.nph9.split, %bb10 - %y.08 = phi i32 [ 0, %bb.nph9.split ], [ %indvar.next11, %bb10 ] ; [#uses=3] - br i1 true, label %bb.nph7, label %bb9 - -bb.nph5: ; preds = %bb18.loopexit - %14 = sdiv i32 %w, 2 ; [#uses=1] - %15 = icmp slt i32 %w, 2 ; [#uses=1] - %16 = sdiv i32 %x, 2 ; [#uses=2] - br i1 %15, label %bb18.bb20_crit_edge.split, label %bb.nph5.split - -bb.nph5.split: ; preds = %bb.nph5 - %tmp2 = icmp sgt i32 1, %16 ; [#uses=1] - %smax3 = select i1 %tmp2, i32 1, i32 %16 ; [#uses=1] - br label %bb13 - -bb13: ; preds = %bb18, %bb.nph5.split - %y.14 = phi i32 [ 0, 
%bb.nph5.split ], [ %indvar.next1, %bb18 ] ; [#uses=4] - %17 = mul i32 %14, %y.14 ; [#uses=2] - %18 = shl i32 %y.14, 1 ; [#uses=1] - %19 = srem i32 %y.14, 2 ; [#uses=1] - %20 = add i32 %19, %18 ; [#uses=1] - %21 = mul i32 %20, %s ; [#uses=2] - br i1 true, label %bb.nph3, label %bb17 - -bb.nph3: ; preds = %bb13 - %22 = add i32 %17, %0 ; [#uses=1] - %23 = add i32 %17, %.sum2 ; [#uses=1] - %24 = sdiv i32 %w, 2 ; [#uses=2] - %tmp = icmp sgt i32 1, %24 ; [#uses=1] - %smax = select i1 %tmp, i32 1, i32 %24 ; [#uses=1] - br label %bb14 - -bb14: ; preds = %bb15, %bb.nph3 - %x.12 = phi i32 [ 0, %bb.nph3 ], [ %indvar.next, %bb15 ] ; [#uses=5] - %25 = shl i32 %x.12, 2 ; [#uses=1] - %26 = add i32 %25, %21 ; [#uses=1] - %27 = getelementptr i8* %r, i32 %26 ; [#uses=1] - %28 = load i8* %27, align 1 ; [#uses=1] - %.sum = add i32 %22, %x.12 ; [#uses=1] - %29 = getelementptr i8* %j, i32 %.sum ; [#uses=1] - store i8 %28, i8* %29, align 1 - %30 = shl i32 %x.12, 2 ; [#uses=1] - %31 = or i32 %30, 2 ; [#uses=1] - %32 = add i32 %31, %21 ; [#uses=1] - %33 = getelementptr i8* %r, i32 %32 ; [#uses=1] - %34 = load i8* %33, align 1 ; [#uses=1] - %.sum6 = add i32 %23, %x.12 ; [#uses=1] - %35 = getelementptr i8* %j, i32 %.sum6 ; [#uses=1] - store i8 %34, i8* %35, align 1 - br label %bb15 - -bb15: ; preds = %bb14 - %indvar.next = add i32 %x.12, 1 ; [#uses=2] - %exitcond = icmp ne i32 %indvar.next, %smax ; [#uses=1] - br i1 %exitcond, label %bb14, label %bb15.bb17_crit_edge - -bb15.bb17_crit_edge: ; preds = %bb15 - br label %bb17 - -bb17: ; preds = %bb15.bb17_crit_edge, %bb13 - br label %bb18 - -bb18.loopexit: ; preds = %bb10.bb18.loopexit_crit_edge.split, %bb10.preheader - %36 = icmp slt i32 %x, 2 ; [#uses=1] - br i1 %36, label %bb20, label %bb.nph5 - -bb18: ; preds = %bb17 - %indvar.next1 = add i32 %y.14, 1 ; [#uses=2] - %exitcond4 = icmp ne i32 %indvar.next1, %smax3 ; [#uses=1] - br i1 %exitcond4, label %bb13, label %bb18.bb20_crit_edge - -bb18.bb20_crit_edge: ; preds = %bb18 - br label %bb18.bb20_crit_edge.split - -bb18.bb20_crit_edge.split: ; preds = %bb18.bb20_crit_edge, %bb.nph5 - br label %bb20 - -bb20: ; preds = %bb18.bb20_crit_edge.split, %bb18.loopexit - switch i32 %d, label %return [ - i32 3, label %bb22 - i32 1, label %bb29 - ] - -bb22: ; preds = %bb20 - %37 = mul i32 %x, %w ; [#uses=1] - %38 = sdiv i32 %37, 4 ; [#uses=1] - %.sum3 = add i32 %38, %.sum2 ; [#uses=2] - %39 = add i32 %x, 15 ; [#uses=1] - %40 = and i32 %39, -16 ; [#uses=1] - %41 = add i32 %w, 15 ; [#uses=1] - %42 = and i32 %41, -16 ; [#uses=1] - %43 = mul i32 %40, %s ; [#uses=1] - %44 = icmp sgt i32 %x, 0 ; [#uses=1] - br i1 %44, label %bb.nph, label %bb26 - -bb.nph: ; preds = %bb22 - br label %bb23 - -bb23: ; preds = %bb24, %bb.nph - %y.21 = phi i32 [ 0, %bb.nph ], [ %indvar.next5, %bb24 ] ; [#uses=3] - %45 = mul i32 %y.21, %42 ; [#uses=1] - %.sum1 = add i32 %45, %43 ; [#uses=1] - %46 = getelementptr i8* %r, i32 %.sum1 ; [#uses=1] - %47 = mul i32 %y.21, %w ; [#uses=1] - %.sum5 = add i32 %47, %.sum3 ; [#uses=1] - %48 = getelementptr i8* %j, i32 %.sum5 ; [#uses=1] - tail call void @llvm.memcpy.i32(i8* %48, i8* %46, i32 %w, i32 1) - br label %bb24 - -bb24: ; preds = %bb23 - %indvar.next5 = add i32 %y.21, 1 ; [#uses=2] - %exitcond6 = icmp ne i32 %indvar.next5, %x ; [#uses=1] - br i1 %exitcond6, label %bb23, label %bb24.bb26_crit_edge - -bb24.bb26_crit_edge: ; preds = %bb24 - br label %bb26 - -bb26: ; preds = %bb24.bb26_crit_edge, %bb22 - %49 = mul i32 %x, %w ; [#uses=1] - %.sum4 = add i32 %.sum3, %49 ; [#uses=1] - %50 = getelementptr i8* %j, i32 
%.sum4 ; [#uses=1] - %51 = mul i32 %x, %w ; [#uses=1] - %52 = sdiv i32 %51, 2 ; [#uses=1] - tail call void @llvm.memset.i32(i8* %50, i8 -128, i32 %52, i32 1) - ret void - -bb29: ; preds = %bb20, %entry - %53 = add i32 %w, 15 ; [#uses=1] - %54 = and i32 %53, -16 ; [#uses=1] - %55 = icmp sgt i32 %x, 0 ; [#uses=1] - br i1 %55, label %bb.nph11, label %bb33 - -bb.nph11: ; preds = %bb29 - br label %bb30 - -bb30: ; preds = %bb31, %bb.nph11 - %y.310 = phi i32 [ 0, %bb.nph11 ], [ %indvar.next13, %bb31 ] ; [#uses=3] - %56 = mul i32 %y.310, %54 ; [#uses=1] - %57 = getelementptr i8* %r, i32 %56 ; [#uses=1] - %58 = mul i32 %y.310, %w ; [#uses=1] - %59 = getelementptr i8* %j, i32 %58 ; [#uses=1] - tail call void @llvm.memcpy.i32(i8* %59, i8* %57, i32 %w, i32 1) - br label %bb31 - -bb31: ; preds = %bb30 - %indvar.next13 = add i32 %y.310, 1 ; [#uses=2] - %exitcond14 = icmp ne i32 %indvar.next13, %x ; [#uses=1] - br i1 %exitcond14, label %bb30, label %bb31.bb33_crit_edge - -bb31.bb33_crit_edge: ; preds = %bb31 - br label %bb33 - -bb33: ; preds = %bb31.bb33_crit_edge, %bb29 - %60 = mul i32 %x, %w ; [#uses=1] - %61 = getelementptr i8* %j, i32 %60 ; [#uses=1] - %62 = mul i32 %x, %w ; [#uses=1] - %63 = sdiv i32 %62, 2 ; [#uses=1] - tail call void @llvm.memset.i32(i8* %61, i8 -128, i32 %63, i32 1) - ret void - -return: ; preds = %bb20 - ret void -} - -declare void @llvm.memcpy.i32(i8*, i8*, i32, i32) nounwind - -declare void @llvm.memset.i32(i8*, i8, i32, i32) nounwind