; REQUIRES: asserts
; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mcpu=cortex-a53 -pre-RA-sched=source -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
; arm64 now has a separate copy of this test.
;
; The Cortex-A53 machine model will cause the MADD instruction to be scheduled
; much higher than the ADD instructions in order to hide latency. When not
; specifying a subtarget, the MADD will remain near the end of the block.
;
; CHECK: ********** MI Scheduling **********
; CHECK: *** Final schedule for BB#2 ***
; CHECK: ADDwwi_lsl0_s
; CHECK: ********** INTERVALS **********
@main.x = private unnamed_addr constant [8 x i32] [i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1], align 4
@main.y = private unnamed_addr constant [8 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2], align 4

; Function Attrs: nounwind
define i32 @main() #0 {
entry:
  %retval = alloca i32, align 4
  %x = alloca [8 x i32], align 4
  %y = alloca [8 x i32], align 4
  %i = alloca i32, align 4
  %xx = alloca i32, align 4
  %yy = alloca i32, align 4
  store i32 0, i32* %retval
  %0 = bitcast [8 x i32]* %x to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast ([8 x i32]* @main.x to i8*), i64 32, i32 4, i1 false)
  %1 = bitcast [8 x i32]* %y to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ([8 x i32]* @main.y to i8*), i64 32, i32 4, i1 false)
  store i32 0, i32* %xx, align 4
  store i32 0, i32* %yy, align 4
  store i32 0, i32* %i, align 4
  br label %for.cond

for.cond:                                         ; preds = %for.inc, %entry
  %2 = load i32* %i, align 4
  %cmp = icmp slt i32 %2, 8
  br i1 %cmp, label %for.body, label %for.end

for.body:                                         ; preds = %for.cond
  %3 = load i32* %i, align 4
  %idxprom = sext i32 %3 to i64
  %arrayidx = getelementptr inbounds [8 x i32]* %x, i32 0, i64 %idxprom
  %4 = load i32* %arrayidx, align 4
  %add = add nsw i32 %4, 1
  store i32 %add, i32* %xx, align 4
  %5 = load i32* %xx, align 4
  %add1 = add nsw i32 %5, 12
  store i32 %add1, i32* %xx, align 4
  %6 = load i32* %xx, align 4
  %add2 = add nsw i32 %6, 23
  store i32 %add2, i32* %xx, align 4
  %7 = load i32* %xx, align 4
  %add3 = add nsw i32 %7, 34
  store i32 %add3, i32* %xx, align 4
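  ; The multiply below reads only %y[i] and %yy and does not depend on the
  ; chain of adds through %xx above, so it is the independent work that the
  ; Cortex-A53 model is expected to hoist (the MADD mentioned in the header
  ; comment).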
  %8 = load i32* %i, align 4
  %idxprom4 = sext i32 %8 to i64
  %arrayidx5 = getelementptr inbounds [8 x i32]* %y, i32 0, i64 %idxprom4
  %9 = load i32* %arrayidx5, align 4
  %10 = load i32* %yy, align 4
  %mul = mul nsw i32 %10, %9
  store i32 %mul, i32* %yy, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %11 = load i32* %i, align 4
  %inc = add nsw i32 %11, 1
  store i32 %inc, i32* %i, align 4
  br label %for.cond

for.end:                                          ; preds = %for.cond
  %12 = load i32* %xx, align 4
  %13 = load i32* %yy, align 4
  %add6 = add nsw i32 %12, %13
  ret i32 %add6
}

; The Cortex-A53 machine model will cause the FDIVvvv_42 to be raised to
; hide latency. Whereas normally there would only be a single FADDvvv_4s
; after it, this test checks to make sure there are more than one.
;
; CHECK: ********** MI Scheduling **********
; CHECK: neon4xfloat:BB#0
; CHECK: *** Final schedule for BB#0 ***
; CHECK: ********** INTERVALS **********
define <4 x float> @neon4xfloat(<4 x float> %A, <4 x float> %B) {
  %tmp1 = fadd <4 x float> %A, %B
  %tmp2 = fadd <4 x float> %A, %tmp1
  %tmp3 = fadd <4 x float> %A, %tmp2
  %tmp4 = fadd <4 x float> %A, %tmp3
  %tmp5 = fadd <4 x float> %A, %tmp4
  %tmp6 = fadd <4 x float> %A, %tmp5
  %tmp7 = fadd <4 x float> %A, %tmp6
  %tmp8 = fadd <4 x float> %A, %tmp7
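  ; The fdiv below does not depend on the fadd chain above, so the Cortex-A53
  ; model is expected to raise it past several of the fadds to hide its long
  ; latency, as described in the comment before this function.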
  %tmp9 = fdiv <4 x float> %A, %B
  %tmp10 = fadd <4 x float> %tmp8, %tmp9

  ret <4 x float> %tmp10
}

; Function Attrs: nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1

attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind }