; RUN: not grep sar %t
; RUN: not grep shl %t
; RUN: grep add %t | count 2
-; RUN: grep inc %t | count 3
+; RUN: grep inc %t | count 4
; RUN: grep dec %t | count 2
-; RUN: grep lea %t | count 3
+; RUN: grep lea %t | count 2
; Optimize away zext-inreg and sext-inreg on the loop induction
; variable using trip-count information.
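; For illustration (a sketch, not a RUN-tested part of this diff): on an
; i64 induction variable, zext-inreg and sext-inreg show up as the mask
; and shift-pair idioms used throughout this file, e.g.
;   %idx = and i64 %indvar, 16777215   ; zext-inreg from i24
;   %s   = shl i64 %indvar, 24
;   %idx = ashr i64 %s, 24             ; sext-inreg from i24
; When the known trip count proves %indvar always fits in the narrow
; width, both idioms are no-ops and can be deleted.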
@@ ... @@
ret void
}
-; TODO: If we could handle all the loads and stores as post-inc users, we could
-; use {-1,+,1} in the induction variable register, and we'd get another inc,
-; one fewer add, and a comparison with zero.
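; For reference, one possible shape of what the TODO above describes (a
; hypothetical sketch, not part of the test): with the induction variable
; register rewritten to count up toward zero, the loads and stores use the
; post-incremented value, and the exit test becomes a comparison with zero
; that folds into the flags already set by the increment on x86:
;   %iv = phi i64 [ %start, %entry ], [ %iv.next, %loop ]  ; %start < 0
;   %iv.next = add i64 %iv, 1                              ; inc
;   %p = getelementptr double* %d, i64 %iv.next            ; post-inc user
;   %done = icmp eq i64 %iv.next, 0                        ; compare with 0
;   br i1 %done, label %return, label %loop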
define void @another_count_up(double* %d, i64 %n) nounwind {
entry:
br label %loop
@@ ... @@
%indvar.i24 = and i64 %indvar, 16777215
%t3 = getelementptr double* %d, i64 %indvar.i24
%t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
+ %t5 = fdiv double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double* %d, i64 %indvar
%t7 = load double* %t6
@@ ... @@
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double* %d, i64 %indvar.i24
%t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
+ %t5 = fdiv double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double* %d, i64 %indvar
%t7 = load double* %t6
@@ ... @@
%indvar.i24 = ashr i64 %s1, 24
%t3 = getelementptr double* %d, i64 %indvar.i24
%t4 = load double* %t3
- %t5 = fmul double %t4, 2.3
+ %t5 = fdiv double %t4, 2.3
store double %t5, double* %t3
%t6 = getelementptr double* %d, i64 %indvar
%t7 = load double* %t6