1 ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
; fp128 operands shared by the arithmetic, comparison and conversion tests below.
3 @lhs = global fp128 zeroinitializer
4 @rhs = global fp128 zeroinitializer
; fp128 fadd: operands must be loaded into 128-bit q registers. AArch64 has no
; native fp128 arithmetic, so the fadd presumably lowers to a compiler-rt
; libcall (__addtf3) — the CHECK for the call, and the ret/'}', are not visible
; in this chunk; body appears truncated. TODO confirm against the full file.
6 define fp128 @test_add() {
7 ; CHECK-LABEL: test_add:
9 %lhs = load fp128* @lhs
10 %rhs = load fp128* @rhs
11 ; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
12 ; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
14 %val = fadd fp128 %lhs, %rhs
; fp128 fsub: same pattern as test_add — q-register loads of both globals,
; then the subtraction (presumably a __subtf3 libcall; CHECK line and
; ret/'}' are outside this view — body appears truncated).
19 define fp128 @test_sub() {
20 ; CHECK-LABEL: test_sub:
22 %lhs = load fp128* @lhs
23 %rhs = load fp128* @rhs
24 ; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
25 ; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
27 %val = fsub fp128 %lhs, %rhs
; fp128 fmul: q-register loads of both operands, then the multiply
; (presumably a __multf3 libcall; CHECK line and ret/'}' are outside
; this view — body appears truncated).
32 define fp128 @test_mul() {
33 ; CHECK-LABEL: test_mul:
35 %lhs = load fp128* @lhs
36 %rhs = load fp128* @rhs
37 ; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
38 ; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
40 %val = fmul fp128 %lhs, %rhs
; fp128 fdiv: q-register loads of both operands, then the division
; (presumably a __divtf3 libcall; CHECK line and ret/'}' are outside
; this view — body appears truncated).
45 define fp128 @test_div() {
46 ; CHECK-LABEL: test_div:
48 %lhs = load fp128* @lhs
49 %rhs = load fp128* @rhs
50 ; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
51 ; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
53 %val = fdiv fp128 %lhs, %rhs
; fp128 -> signed i32/i64 conversions. @var32/@var64 are declared in lines not
; visible in this chunk — verify against the full file. By analogy with the
; unsigned test below, these presumably lower to __fixtfsi/__fixtfdi libcalls
; (the CHECK lines, and the ret/'}', are also outside this view).
61 define void @test_fptosi() {
62 ; CHECK-LABEL: test_fptosi:
63 %val = load fp128* @lhs
65 %val32 = fptosi fp128 %val to i32
66 store i32 %val32, i32* @var32
69 %val64 = fptosi fp128 %val to i64
70 store i64 %val64, i64* @var64
; fp128 -> unsigned i32/i64 conversions lower to the compiler-rt libcalls
; __fixunstfsi (32-bit) and __fixunstfdi (64-bit). The closing ret/'}' is not
; visible in this chunk — body appears truncated.
76 define void @test_fptoui() {
77 ; CHECK-LABEL: test_fptoui:
78 %val = load fp128* @lhs
80 %val32 = fptoui fp128 %val to i32
81 store i32 %val32, i32* @var32
82 ; CHECK: bl __fixunstfsi
84 %val64 = fptoui fp128 %val to i64
85 store i64 %val64, i64* @var64
86 ; CHECK: bl __fixunstfdi
; signed i32/i64 -> fp128 conversions lower to __floatsitf/__floatditf.
; Stores are volatile so the two conversions can't be merged or elided.
; The closing ret/'}' is not visible in this chunk — body appears truncated.
91 define void @test_sitofp() {
92 ; CHECK-LABEL: test_sitofp:
94 %src32 = load i32* @var32
95 %val32 = sitofp i32 %src32 to fp128
96 store volatile fp128 %val32, fp128* @lhs
97 ; CHECK: bl __floatsitf
99 %src64 = load i64* @var64
100 %val64 = sitofp i64 %src64 to fp128
101 store volatile fp128 %val64, fp128* @lhs
102 ; CHECK: bl __floatditf
; unsigned i32/i64 -> fp128 conversions lower to __floatunsitf/__floatunditf.
; Stores are volatile so the two conversions can't be merged or elided.
; The closing ret/'}' is not visible in this chunk — body appears truncated.
107 define void @test_uitofp() {
108 ; CHECK-LABEL: test_uitofp:
110 %src32 = load i32* @var32
111 %val32 = uitofp i32 %src32 to fp128
112 store volatile fp128 %val32, fp128* @lhs
113 ; CHECK: bl __floatunsitf
115 %src64 = load i64* @var64
116 %val64 = uitofp i64 %src64 to fp128
117 store volatile fp128 %val64, fp128* @lhs
118 ; CHECK: bl __floatunditf
; fcmp ole on fp128: compared via the __letf2 libcall (per the in-test note),
; with the boolean materialized by csinc on the 'gt' condition. The bl CHECK
; and the ret/'}' are not visible in this chunk — body appears truncated.
123 define i1 @test_setcc1() {
124 ; CHECK-LABEL: test_setcc1:
126 %lhs = load fp128* @lhs
127 %rhs = load fp128* @rhs
128 ; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
129 ; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
131 ; Technically, everything after the call to __letf2 is redundant, but we'll let
132 ; LLVM have its fun for now.
133 %val = fcmp ole fp128 %lhs, %rhs
136 ; CHECK: csinc w0, wzr, wzr, gt
; fcmp ugt on fp128 (unordered-or-greater-than): the ordered compare result and
; a separate __unordtf2 unordered check are each materialized with csinc and
; then combined with orr. The closing ret/'}' is not visible in this chunk —
; body appears truncated.
142 define i1 @test_setcc2() {
143 ; CHECK-LABEL: test_setcc2:
145 %lhs = load fp128* @lhs
146 %rhs = load fp128* @rhs
147 ; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
148 ; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
150 ; Technically, everything after the call to __letf2 is redundant, but we'll let
151 ; LLVM have its fun for now.
152 %val = fcmp ugt fp128 %lhs, %rhs
155 ; CHECK: csinc [[GT:w[0-9]+]], wzr, wzr, le
157 ; CHECK: bl __unordtf2
159 ; CHECK: csinc [[UNORDERED:w[0-9]+]], wzr, wzr, eq
161 ; CHECK: orr w0, [[UNORDERED]], [[GT]]
; br_cc on an fp128 fcmp olt: LLVM rewrites olt as !uge (per the in-test note),
; so the lowered code combines the ordered compare with a __unordtf2 check (orr)
; and branches with cbnz on the inverted condition. The iftrue/iffalse blocks
; return the constants 42 and 29. Some lines (including the final ret/'}') are
; not visible in this chunk — body appears truncated.
167 define i32 @test_br_cc() {
168 ; CHECK-LABEL: test_br_cc:
170 %lhs = load fp128* @lhs
171 %rhs = load fp128* @rhs
172 ; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
173 ; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]
175 ; olt == !uge, which LLVM unfortunately "optimizes" this to.
176 %cond = fcmp olt fp128 %lhs, %rhs
179 ; CHECK: csinc [[OGE:w[0-9]+]], wzr, wzr, lt
181 ; CHECK: bl __unordtf2
183 ; CHECK: csinc [[UNORDERED:w[0-9]+]], wzr, wzr, eq
185 ; CHECK: orr [[UGE:w[0-9]+]], [[UNORDERED]], [[OGE]]
186 ; CHECK: cbnz [[UGE]], [[RET29:.LBB[0-9]+_[0-9]+]]
187 br i1 %cond, label %iftrue, label %iffalse
192 ; CHECK-NEXT: movz x0, #42
193 ; CHECK-NEXT: b [[REALRET:.LBB[0-9]+_[0-9]+]]
198 ; CHECK-NEXT: movz x0, #29
199 ; CHECK-NEXT: [[REALRET]]:
; select on fp128 values: lowered as a branch over a stack slot — the false
; value (q1) is stored first, then conditionally overwritten with the true
; value (q0), reloaded, and stored to @lhs. The closing ret/'}' is not visible
; in this chunk — body appears truncated.
203 define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
204 ; CHECK-LABEL: test_select:
206 %val = select i1 %cond, fp128 %lhs, fp128 %rhs
207 store fp128 %val, fp128* @lhs
209 ; CHECK: str q1, [sp]
210 ; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
212 ; CHECK-NEXT: str q0, [sp]
213 ; CHECK-NEXT: [[IFFALSE]]:
214 ; CHECK-NEXT: ldr q0, [sp]
215 ; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
; float/double globals used by the truncation (test_round) and extension
; (test_extend) tests below.
220 @varfloat = global float 0.0
221 @vardouble = global double 0.0
; fptrunc fp128 -> float/double lowers to __trunctfsf2/__trunctfdf2, with the
; narrowed results stored via s/d registers. The closing ret/'}' is not visible
; in this chunk — body appears truncated.
223 define void @test_round() {
224 ; CHECK-LABEL: test_round:
226 %val = load fp128* @lhs
228 %float = fptrunc fp128 %val to float
229 store float %float, float* @varfloat
230 ; CHECK: bl __trunctfsf2
231 ; CHECK: str s0, [{{x[0-9]+}}, #:lo12:varfloat]
233 %double = fptrunc fp128 %val to double
234 store double %double, double* @vardouble
235 ; CHECK: bl __trunctfdf2
236 ; CHECK: str d0, [{{x[0-9]+}}, #:lo12:vardouble]
; fpext float/double -> fp128 lowers to __extendsftf2/__extenddftf2, with the
; widened fp128 results stored from q0. Stores are volatile so neither store
; can be elided. The closing ret/'}' is not visible in this chunk — body
; appears truncated.
241 define void @test_extend() {
242 ; CHECK-LABEL: test_extend:
244 %val = load fp128* @lhs
246 %float = load float* @varfloat
247 %fromfloat = fpext float %float to fp128
248 store volatile fp128 %fromfloat, fp128* @lhs
249 ; CHECK: bl __extendsftf2
250 ; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
252 %double = load double* @vardouble
253 %fromdouble = fpext double %double to fp128
254 store volatile fp128 %fromdouble, fp128* @lhs
255 ; CHECK: bl __extenddftf2
256 ; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
262 define fp128 @test_neg(fp128 %in) {
263 ; CHECK: [[MINUS0:.LCPI[0-9]+_0]]:
264 ; Make sure the weird hex constant below *is* -0.0
265 ; CHECK-NEXT: fp128 -0
267 ; CHECK-LABEL: test_neg:
269 ; Could in principle be optimized to fneg which we can't select, this makes
270 ; sure that doesn't happen.
271 %ret = fsub fp128 0xL00000000000000008000000000000000, %in
272 ; CHECK: str q0, [sp, #-16]
273 ; CHECK-NEXT: ldr q1, [sp], #16
274 ; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:[[MINUS0]]]