; Test 64-bit square root.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare double @llvm.sqrt.f64(double %f)

; Check register square root.
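; SQDBR is the register-to-register (RRE) form of square root; the
; memory (RXE) form, SQDB, is exercised by the functions below.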
define double @f1(double %val) {
; CHECK: sqdbr %f0, %f0
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}

; Check the low end of the SQDB range.
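; A displacement of 0 is the smallest value that SQDB's unsigned
; displacement field can encode.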
define double @f2(double *%ptr) {
; CHECK: sqdb %f0, 0(%r2)
  %val = load double *%ptr
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}

; Check the high end of the aligned SQDB range.
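; The displacement field is 12 bits wide (0-4095), so 4088 = 511 * 8 is
; the largest doubleword-aligned offset that can be encoded directly.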
define double @f3(double *%base) {
; CHECK: sqdb %f0, 4088(%r2)
  %ptr = getelementptr double *%base, i64 511
  %val = load double *%ptr
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
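; 512 * 8 = 4096 does not fit in the 12-bit displacement field, so the
; base register must be adjusted first (here with AGHI).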
define double @f4(double *%base) {
; CHECK: aghi %r2, 4096
; CHECK: sqdb %f0, 0(%r2)
  %ptr = getelementptr double *%base, i64 512
  %val = load double *%ptr
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}

; Check negative displacements, which also need separate address logic.
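; The displacement field is unsigned, so an offset of -1 * 8 = -8 cannot
; be encoded directly and must be folded into the base register.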
define double @f5(double *%base) {
; CHECK: aghi %r2, -8
; CHECK: sqdb %f0, 0(%r2)
  %ptr = getelementptr double *%base, i64 -1
  %val = load double *%ptr
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}

; Check that SQDB allows indices.
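; The index is scaled by 8 (SLLG by 3), and the constant part of the
; offset, 100 * 8 = 800, is encoded as the displacement.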
define double @f6(double *%base, i64 %index) {
; CHECK: sllg %r1, %r3, 3
; CHECK: sqdb %f0, 800(%r1,%r2)
  %ptr1 = getelementptr double *%base, i64 %index
  %ptr2 = getelementptr double *%ptr1, i64 100
  %val = load double *%ptr2
  %res = call double @llvm.sqrt.f64(double %val)
  ret double %res
}

; Test a case where we spill the source of at least one SQDBR. We want
; to use SQDB if possible.
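; Seventeen values are live at once, more than the sixteen available
; FPRs, so at least one value must be spilled; 160(%r15) is the first
; spill slot above the 160-byte register save area in the s390x ELF
; frame layout.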
define void @f7(double *%ptr) {
; CHECK: sqdb {{%f[0-9]+}}, 160(%r15)
  %val0 = load volatile double *%ptr
  %val1 = load volatile double *%ptr
  %val2 = load volatile double *%ptr
  %val3 = load volatile double *%ptr
  %val4 = load volatile double *%ptr
  %val5 = load volatile double *%ptr
  %val6 = load volatile double *%ptr
  %val7 = load volatile double *%ptr
  %val8 = load volatile double *%ptr
  %val9 = load volatile double *%ptr
  %val10 = load volatile double *%ptr
  %val11 = load volatile double *%ptr
  %val12 = load volatile double *%ptr
  %val13 = load volatile double *%ptr
  %val14 = load volatile double *%ptr
  %val15 = load volatile double *%ptr
  %val16 = load volatile double *%ptr

  %sqrt0 = call double @llvm.sqrt.f64(double %val0)
  %sqrt1 = call double @llvm.sqrt.f64(double %val1)
  %sqrt2 = call double @llvm.sqrt.f64(double %val2)
  %sqrt3 = call double @llvm.sqrt.f64(double %val3)
  %sqrt4 = call double @llvm.sqrt.f64(double %val4)
  %sqrt5 = call double @llvm.sqrt.f64(double %val5)
  %sqrt6 = call double @llvm.sqrt.f64(double %val6)
  %sqrt7 = call double @llvm.sqrt.f64(double %val7)
  %sqrt8 = call double @llvm.sqrt.f64(double %val8)
  %sqrt9 = call double @llvm.sqrt.f64(double %val9)
  %sqrt10 = call double @llvm.sqrt.f64(double %val10)
  %sqrt11 = call double @llvm.sqrt.f64(double %val11)
  %sqrt12 = call double @llvm.sqrt.f64(double %val12)
  %sqrt13 = call double @llvm.sqrt.f64(double %val13)
  %sqrt14 = call double @llvm.sqrt.f64(double %val14)
  %sqrt15 = call double @llvm.sqrt.f64(double %val15)
  %sqrt16 = call double @llvm.sqrt.f64(double %val16)

  store volatile double %val0, double *%ptr
  store volatile double %val1, double *%ptr
  store volatile double %val2, double *%ptr
  store volatile double %val3, double *%ptr
  store volatile double %val4, double *%ptr
  store volatile double %val5, double *%ptr
  store volatile double %val6, double *%ptr
  store volatile double %val7, double *%ptr
  store volatile double %val8, double *%ptr
  store volatile double %val9, double *%ptr
  store volatile double %val10, double *%ptr
  store volatile double %val11, double *%ptr
  store volatile double %val12, double *%ptr
  store volatile double %val13, double *%ptr
  store volatile double %val14, double *%ptr
  store volatile double %val15, double *%ptr
  store volatile double %val16, double *%ptr

  store volatile double %sqrt0, double *%ptr
  store volatile double %sqrt1, double *%ptr
  store volatile double %sqrt2, double *%ptr
  store volatile double %sqrt3, double *%ptr
  store volatile double %sqrt4, double *%ptr
  store volatile double %sqrt5, double *%ptr
  store volatile double %sqrt6, double *%ptr
  store volatile double %sqrt7, double *%ptr
  store volatile double %sqrt8, double *%ptr
  store volatile double %sqrt9, double *%ptr
  store volatile double %sqrt10, double *%ptr
  store volatile double %sqrt11, double *%ptr
  store volatile double %sqrt12, double *%ptr
  store volatile double %sqrt13, double *%ptr
  store volatile double %sqrt14, double *%ptr
  store volatile double %sqrt15, double *%ptr
  store volatile double %sqrt16, double *%ptr

  ret void
}