-; RUN: llvm-as < %s | llc -march=x86-64 -f -o %t
-; RUN: grep inc %t | count 1
+; RUN: llc < %s -march=x86-64 -enable-lsr-nested -o %t
+; RUN: not grep inc %t
; RUN: grep dec %t | count 2
-; RUN: grep addq %t | count 8
-; RUN: grep addb %t | count 2
-; RUN: grep leaq %t | count 12
-; RUN: grep leal %t | count 2
-; RUN: grep movq %t | count 4
+; RUN: grep addq %t | count 12
+; RUN: not grep addb %t
+; RUN: not grep leal %t
+; RUN: not grep movq %t
; IV users in each of the loops from other loops shouldn't cause LSR
; to insert new induction variables. Previously it would create a
; flood of new induction variables.
; Also, the loop reversal should kick in once.
+;
+; In this example, performing LSR on the entire loop nest,
+; as opposed to only the inner loop, can further reduce the number of
+; induction variables, and their related instructions and registers.
; Standard x86-64 Linux data layout and triple; the RUN lines above rely on
; this target so that the grep'd instruction mnemonics (inc/dec/addq/...)
; refer to x86-64 output. NOTE(review): if this file is applied as a patch,
; confirm added comment lines do not disturb hunk context.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-unknown-linux-gnu"