1 ; RUN: llc < %s -march=arm64 -verify-machineinstrs | FileCheck %s
2 ; RUN: llc < %s -march=arm64 -aarch64-unscaled-mem-op=true\
3 ; RUN: -verify-machineinstrs | FileCheck -check-prefix=LDUR_CHK %s
; ldp_int: two i32 loads from consecutive addresses (%p and %p+1) whose results
; feed one add; the load/store optimizer should pair them into a single ldp.
; NOTE(review): the CHECK lines, ret, and closing brace for this function are
; not visible in this chunk — confirm against the full test file.
7 define i32 @ldp_int(i32* %p) nounwind {
8 %tmp = load i32* %p, align 4
9 %add.ptr = getelementptr inbounds i32* %p, i64 1
10 %tmp1 = load i32* %add.ptr, align 4
11 %add = add nsw i32 %tmp1, %tmp
; ldp_long: same pattern as ldp_int but with i64 elements (8-byte stride),
; expecting a 64-bit register pair load.
; NOTE(review): CHECK lines and function tail are not visible in this chunk.
17 define i64 @ldp_long(i64* %p) nounwind {
18 %tmp = load i64* %p, align 8
19 %add.ptr = getelementptr inbounds i64* %p, i64 1
20 %tmp1 = load i64* %add.ptr, align 8
21 %add = add nsw i64 %tmp1, %tmp
; ldp_float: adjacent float loads feeding an fadd — checks pairing works for
; FP single-precision registers, not just GPRs.
; NOTE(review): CHECK lines and function tail are not visible in this chunk.
27 define float @ldp_float(float* %p) nounwind {
28 %tmp = load float* %p, align 4
29 %add.ptr = getelementptr inbounds float* %p, i64 1
30 %tmp1 = load float* %add.ptr, align 4
31 %add = fadd float %tmp, %tmp1
; ldp_double: adjacent double loads feeding an fadd — the double-precision
; counterpart of ldp_float.
; NOTE(review): CHECK lines and function tail are not visible in this chunk.
37 define double @ldp_double(double* %p) nounwind {
38 %tmp = load double* %p, align 8
39 %add.ptr = getelementptr inbounds double* %p, i64 1
40 %tmp1 = load double* %add.ptr, align 8
41 %add = fadd double %tmp, %tmp1
45 ; Test the load/store optimizer---combine ldurs into a ldp, if appropriate
; ldur_int: two i32 loads at negative offsets (-4 and -8 bytes from %a); the
; under-aligned `align 2` forces unscaled (ldur) loads, which the optimizer
; should still combine into a single ldp with a negative immediate.
46 define i32 @ldur_int(i32* %a) nounwind {
48 ; LDUR_CHK: ldp [[DST1:w[0-9]+]], [[DST2:w[0-9]+]], [x0, #-8]
49 ; LDUR_CHK-NEXT: add w{{[0-9]+}}, [[DST2]], [[DST1]]
; NOTE(review): ret and closing brace are not visible in this chunk.
51 %p1 = getelementptr inbounds i32* %a, i32 -1
52 %tmp1 = load i32* %p1, align 2
53 %p2 = getelementptr inbounds i32* %a, i32 -2
54 %tmp2 = load i32* %p2, align 2
55 %tmp3 = add i32 %tmp1, %tmp2
; ldur_long: i64 variant of ldur_int — under-aligned loads at offsets -8 and
; -16 bytes, expected to merge into one ldp x-reg pair at [x0, #-16].
59 define i64 @ldur_long(i64* %a) nounwind ssp {
61 ; LDUR_CHK: ldp [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-16]
62 ; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; NOTE(review): ret and closing brace are not visible in this chunk.
64 %p1 = getelementptr inbounds i64* %a, i64 -1
65 %tmp1 = load i64* %p1, align 2
66 %p2 = getelementptr inbounds i64* %a, i64 -2
67 %tmp2 = load i64* %p2, align 2
68 %tmp3 = add i64 %tmp1, %tmp2
; ldur_float: float variant — under-aligned loads at -4 and -8 bytes should
; pair into an ldp of s-registers at [x0, #-8].
72 define float @ldur_float(float* %a) {
73 ; LDUR_CHK: ldur_float
74 ; LDUR_CHK: ldp [[DST1:s[0-9]+]], [[DST2:s[0-9]+]], [x0, #-8]
75 ; LDUR_CHK-NEXT: add s{{[0-9]+}}, [[DST2]], [[DST1]]
; NOTE(review): ret and closing brace are not visible in this chunk.
77 %p1 = getelementptr inbounds float* %a, i64 -1
78 %tmp1 = load float* %p1, align 2
79 %p2 = getelementptr inbounds float* %a, i64 -2
80 %tmp2 = load float* %p2, align 2
81 %tmp3 = fadd float %tmp1, %tmp2
; ldur_double: double variant — under-aligned loads at -8 and -16 bytes should
; pair into an ldp of d-registers at [x0, #-16].
85 define double @ldur_double(double* %a) {
86 ; LDUR_CHK: ldur_double
87 ; LDUR_CHK: ldp [[DST1:d[0-9]+]], [[DST2:d[0-9]+]], [x0, #-16]
88 ; LDUR_CHK-NEXT: add d{{[0-9]+}}, [[DST2]], [[DST1]]
; NOTE(review): ret and closing brace are not visible in this chunk.
90 %p1 = getelementptr inbounds double* %a, i64 -1
91 %tmp1 = load double* %p1, align 2
92 %p2 = getelementptr inbounds double* %a, i64 -2
93 %tmp2 = load double* %p2, align 2
94 %tmp3 = fadd double %tmp1, %tmp2
98 ; Now check some boundary conditions
; pairUpBarelyIn: loads at element offsets -31/-32 (byte offset -256); per the
; test name this is just inside the ldp immediate range, so pairing is still
; expected — the check demands ldp with #-256.
99 define i64 @pairUpBarelyIn(i64* %a) nounwind ssp {
100 ; LDUR_CHK: pairUpBarelyIn
102 ; LDUR_CHK: ldp [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-256]
103 ; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; NOTE(review): ret and closing brace are not visible in this chunk.
105 %p1 = getelementptr inbounds i64* %a, i64 -31
106 %tmp1 = load i64* %p1, align 2
107 %p2 = getelementptr inbounds i64* %a, i64 -32
108 %tmp2 = load i64* %p2, align 2
109 %tmp3 = add i64 %tmp1, %tmp2
; pairUpBarelyOut: loads at element offsets -32/-33 (byte offset -264); per the
; test name this is just outside the pairable range, so NO ldp should be formed
; before the add (the negative checks themselves are not visible in this chunk).
113 define i64 @pairUpBarelyOut(i64* %a) nounwind ssp {
114 ; LDUR_CHK: pairUpBarelyOut
116 ; Don't be fragile about which loads or manipulations of the base register
117 ; are used---just check that there isn't an ldp before the add
; NOTE(review): the LDUR_CHK-NOT lines, ret, and closing brace are not visible
; in this chunk — confirm against the full test file.
120 %p1 = getelementptr inbounds i64* %a, i64 -32
121 %tmp1 = load i64* %p1, align 2
122 %p2 = getelementptr inbounds i64* %a, i64 -33
123 %tmp2 = load i64* %p2, align 2
124 %tmp3 = add i64 %tmp1, %tmp2
; pairUpNotAligned: each i64 load sits 1 byte past an 8-byte element boundary
; (bitcast to i8*, gep +1, bitcast back; align 1). The resulting unscaled loads
; are not pairable, so the check expects an ldur to remain rather than an ldp.
; NOTE(review): this function runs past the end of the visible chunk (the
; surrounding CHECK lines, ret, and closing brace are missing here).
128 define i64 @pairUpNotAligned(i64* %a) nounwind ssp {
129 ; LDUR_CHK: pairUpNotAligned
132 ; LDUR_CHK-NEXT: ldur
135 %p1 = getelementptr inbounds i64* %a, i64 -18
136 %bp1 = bitcast i64* %p1 to i8*
137 %bp1p1 = getelementptr inbounds i8* %bp1, i64 1
138 %dp1 = bitcast i8* %bp1p1 to i64*
139 %tmp1 = load i64* %dp1, align 1
141 %p2 = getelementptr inbounds i64* %a, i64 -17
142 %bp2 = bitcast i64* %p2 to i8*
143 %bp2p1 = getelementptr inbounds i8* %bp2, i64 1
144 %dp2 = bitcast i8* %bp2p1 to i64*
145 %tmp2 = load i64* %dp2, align 1
147 %tmp3 = add i64 %tmp1, %tmp2