1 ; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
8 define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
9 ; CHECK: test_atomic_load_add_i8:
10 %old = atomicrmw add i8* @var8, i8 %offset seq_cst
12 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
13 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
15 ; CHECK: .LBB{{[0-9]+}}_1:
16 ; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
17 ; w0 below is a reasonable guess but could change: it certainly comes into the
19 ; CHECK-NEXT: add [[NEW:w[0-9]+]], w[[OLD]], w0
20 ; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
21 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
24 ; CHECK: mov x0, x[[OLD]]
28 define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
29 ; CHECK: test_atomic_load_add_i16:
30 %old = atomicrmw add i16* @var16, i16 %offset acquire
32 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
33 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
35 ; CHECK: .LBB{{[0-9]+}}_1:
36 ; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
37 ; w0 below is a reasonable guess but could change: it certainly comes into the
39 ; CHECK-NEXT: add [[NEW:w[0-9]+]], w[[OLD]], w0
40 ; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
41 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
44 ; CHECK: mov x0, x[[OLD]]
48 define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
49 ; CHECK: test_atomic_load_add_i32:
50 %old = atomicrmw add i32* @var32, i32 %offset release
52 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
53 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
55 ; CHECK: .LBB{{[0-9]+}}_1:
56 ; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
57 ; w0 below is a reasonable guess but could change: it certainly comes into the
59 ; CHECK-NEXT: add [[NEW:w[0-9]+]], w[[OLD]], w0
60 ; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
61 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
64 ; CHECK: mov x0, x[[OLD]]
68 define i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
69 ; CHECK: test_atomic_load_add_i64:
70 %old = atomicrmw add i64* @var64, i64 %offset monotonic
72 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
73 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
75 ; CHECK: .LBB{{[0-9]+}}_1:
76 ; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
77 ; x0 below is a reasonable guess but could change: it certainly comes into the
79 ; CHECK-NEXT: add [[NEW:x[0-9]+]], x[[OLD]], x0
80 ; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
81 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
84 ; CHECK: mov x0, x[[OLD]]
88 define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
89 ; CHECK: test_atomic_load_sub_i8:
90 %old = atomicrmw sub i8* @var8, i8 %offset monotonic
92 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
93 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
95 ; CHECK: .LBB{{[0-9]+}}_1:
96 ; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
97 ; w0 below is a reasonable guess but could change: it certainly comes into the
99 ; CHECK-NEXT: sub [[NEW:w[0-9]+]], w[[OLD]], w0
100 ; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
101 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
104 ; CHECK: mov x0, x[[OLD]]
108 define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
109 ; CHECK: test_atomic_load_sub_i16:
110 %old = atomicrmw sub i16* @var16, i16 %offset release
112 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
113 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
115 ; CHECK: .LBB{{[0-9]+}}_1:
116 ; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
117 ; w0 below is a reasonable guess but could change: it certainly comes into the
119 ; CHECK-NEXT: sub [[NEW:w[0-9]+]], w[[OLD]], w0
120 ; CHECK-NEXT: stlxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
121 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
124 ; CHECK: mov x0, x[[OLD]]
128 define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
129 ; CHECK: test_atomic_load_sub_i32:
130 %old = atomicrmw sub i32* @var32, i32 %offset acquire
132 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
133 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
135 ; CHECK: .LBB{{[0-9]+}}_1:
136 ; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
137 ; w0 below is a reasonable guess but could change: it certainly comes into the
139 ; CHECK-NEXT: sub [[NEW:w[0-9]+]], w[[OLD]], w0
140 ; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
141 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
144 ; CHECK: mov x0, x[[OLD]]
148 define i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
149 ; CHECK: test_atomic_load_sub_i64:
150 %old = atomicrmw sub i64* @var64, i64 %offset seq_cst
152 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
153 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
155 ; CHECK: .LBB{{[0-9]+}}_1:
156 ; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
157 ; x0 below is a reasonable guess but could change: it certainly comes into the
159 ; CHECK-NEXT: sub [[NEW:x[0-9]+]], x[[OLD]], x0
160 ; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
161 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
164 ; CHECK: mov x0, x[[OLD]]
168 define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
169 ; CHECK: test_atomic_load_and_i8:
170 %old = atomicrmw and i8* @var8, i8 %offset release
172 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
173 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
175 ; CHECK: .LBB{{[0-9]+}}_1:
176 ; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
177 ; w0 below is a reasonable guess but could change: it certainly comes into the
179 ; CHECK-NEXT: and [[NEW:w[0-9]+]], w[[OLD]], w0
180 ; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
181 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
184 ; CHECK: mov x0, x[[OLD]]
188 define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
189 ; CHECK: test_atomic_load_and_i16:
190 %old = atomicrmw and i16* @var16, i16 %offset monotonic
192 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
193 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
195 ; CHECK: .LBB{{[0-9]+}}_1:
196 ; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
197 ; w0 below is a reasonable guess but could change: it certainly comes into the
199 ; CHECK-NEXT: and [[NEW:w[0-9]+]], w[[OLD]], w0
200 ; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
201 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
204 ; CHECK: mov x0, x[[OLD]]
208 define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
209 ; CHECK: test_atomic_load_and_i32:
210 %old = atomicrmw and i32* @var32, i32 %offset seq_cst
212 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
213 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
215 ; CHECK: .LBB{{[0-9]+}}_1:
216 ; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
217 ; w0 below is a reasonable guess but could change: it certainly comes into the
219 ; CHECK-NEXT: and [[NEW:w[0-9]+]], w[[OLD]], w0
220 ; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
221 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
224 ; CHECK: mov x0, x[[OLD]]
228 define i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
229 ; CHECK: test_atomic_load_and_i64:
230 %old = atomicrmw and i64* @var64, i64 %offset acquire
232 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
233 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
235 ; CHECK: .LBB{{[0-9]+}}_1:
236 ; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
237 ; x0 below is a reasonable guess but could change: it certainly comes into the
239 ; CHECK-NEXT: and [[NEW:x[0-9]+]], x[[OLD]], x0
240 ; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
241 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
244 ; CHECK: mov x0, x[[OLD]]
248 define i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
249 ; CHECK: test_atomic_load_or_i8:
250 %old = atomicrmw or i8* @var8, i8 %offset seq_cst
252 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
253 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
255 ; CHECK: .LBB{{[0-9]+}}_1:
256 ; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
257 ; w0 below is a reasonable guess but could change: it certainly comes into the
259 ; CHECK-NEXT: orr [[NEW:w[0-9]+]], w[[OLD]], w0
260 ; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
261 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
264 ; CHECK: mov x0, x[[OLD]]
268 define i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
269 ; CHECK: test_atomic_load_or_i16:
270 %old = atomicrmw or i16* @var16, i16 %offset monotonic
272 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
273 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
275 ; CHECK: .LBB{{[0-9]+}}_1:
276 ; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
277 ; w0 below is a reasonable guess but could change: it certainly comes into the
279 ; CHECK-NEXT: orr [[NEW:w[0-9]+]], w[[OLD]], w0
280 ; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
281 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
284 ; CHECK: mov x0, x[[OLD]]
288 define i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
289 ; CHECK: test_atomic_load_or_i32:
290 %old = atomicrmw or i32* @var32, i32 %offset acquire
292 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
293 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
295 ; CHECK: .LBB{{[0-9]+}}_1:
296 ; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
297 ; w0 below is a reasonable guess but could change: it certainly comes into the
299 ; CHECK-NEXT: orr [[NEW:w[0-9]+]], w[[OLD]], w0
300 ; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
301 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
304 ; CHECK: mov x0, x[[OLD]]
308 define i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
309 ; CHECK: test_atomic_load_or_i64:
310 %old = atomicrmw or i64* @var64, i64 %offset release
312 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
313 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
315 ; CHECK: .LBB{{[0-9]+}}_1:
316 ; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
317 ; x0 below is a reasonable guess but could change: it certainly comes into the
319 ; CHECK-NEXT: orr [[NEW:x[0-9]+]], x[[OLD]], x0
320 ; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
321 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
324 ; CHECK: mov x0, x[[OLD]]
328 define i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
329 ; CHECK: test_atomic_load_xor_i8:
330 %old = atomicrmw xor i8* @var8, i8 %offset acquire
332 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
333 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
335 ; CHECK: .LBB{{[0-9]+}}_1:
336 ; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
337 ; w0 below is a reasonable guess but could change: it certainly comes into the
339 ; CHECK-NEXT: eor [[NEW:w[0-9]+]], w[[OLD]], w0
340 ; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
341 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
344 ; CHECK: mov x0, x[[OLD]]
348 define i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
349 ; CHECK: test_atomic_load_xor_i16:
350 %old = atomicrmw xor i16* @var16, i16 %offset release
352 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
353 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
355 ; CHECK: .LBB{{[0-9]+}}_1:
356 ; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
357 ; w0 below is a reasonable guess but could change: it certainly comes into the
359 ; CHECK-NEXT: eor [[NEW:w[0-9]+]], w[[OLD]], w0
360 ; CHECK-NEXT: stlxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
361 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
364 ; CHECK: mov x0, x[[OLD]]
368 define i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
369 ; CHECK: test_atomic_load_xor_i32:
370 %old = atomicrmw xor i32* @var32, i32 %offset seq_cst
372 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
373 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
375 ; CHECK: .LBB{{[0-9]+}}_1:
376 ; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
377 ; w0 below is a reasonable guess but could change: it certainly comes into the
379 ; CHECK-NEXT: eor [[NEW:w[0-9]+]], w[[OLD]], w0
380 ; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
381 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
384 ; CHECK: mov x0, x[[OLD]]
388 define i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
389 ; CHECK: test_atomic_load_xor_i64:
390 %old = atomicrmw xor i64* @var64, i64 %offset monotonic
392 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
393 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
395 ; CHECK: .LBB{{[0-9]+}}_1:
396 ; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
397 ; x0 below is a reasonable guess but could change: it certainly comes into the
399 ; CHECK-NEXT: eor [[NEW:x[0-9]+]], x[[OLD]], x0
400 ; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
401 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
404 ; CHECK: mov x0, x[[OLD]]
408 define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
409 ; CHECK: test_atomic_load_xchg_i8:
410 %old = atomicrmw xchg i8* @var8, i8 %offset monotonic
412 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
413 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
415 ; CHECK: .LBB{{[0-9]+}}_1:
416 ; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
417 ; w0 below is a reasonable guess but could change: it certainly comes into the
419 ; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
420 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
423 ; CHECK: mov x0, x[[OLD]]
427 define i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
428 ; CHECK: test_atomic_load_xchg_i16:
429 %old = atomicrmw xchg i16* @var16, i16 %offset seq_cst
431 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
432 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
434 ; CHECK: .LBB{{[0-9]+}}_1:
435 ; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
436 ; w0 below is a reasonable guess but could change: it certainly comes into the
438 ; CHECK-NEXT: stlxrh [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
439 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
442 ; CHECK: mov x0, x[[OLD]]
446 define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
447 ; CHECK: test_atomic_load_xchg_i32:
448 %old = atomicrmw xchg i32* @var32, i32 %offset release
450 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
451 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
453 ; CHECK: .LBB{{[0-9]+}}_1:
454 ; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
455 ; w0 below is a reasonable guess but could change: it certainly comes into the
457 ; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], w0, [x[[ADDR]]]
458 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
461 ; CHECK: mov x0, x[[OLD]]
465 define i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
466 ; CHECK: test_atomic_load_xchg_i64:
467 %old = atomicrmw xchg i64* @var64, i64 %offset acquire
469 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
470 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
472 ; CHECK: .LBB{{[0-9]+}}_1:
473 ; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
474 ; x0 below is a reasonable guess but could change: it certainly comes into the
476 ; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], x0, [x[[ADDR]]]
477 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
480 ; CHECK: mov x0, x[[OLD]]
485 define i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
486 ; CHECK: test_atomic_load_min_i8:
487 %old = atomicrmw min i8* @var8, i8 %offset acquire
489 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
490 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
492 ; CHECK: .LBB{{[0-9]+}}_1:
493 ; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
494 ; w0 below is a reasonable guess but could change: it certainly comes into the
496 ; CHECK-NEXT: cmp w0, w[[OLD]], sxtb
497 ; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
498 ; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
499 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
502 ; CHECK: mov x0, x[[OLD]]
506 define i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
507 ; CHECK: test_atomic_load_min_i16:
508 %old = atomicrmw min i16* @var16, i16 %offset release
510 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
511 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
513 ; CHECK: .LBB{{[0-9]+}}_1:
514 ; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
515 ; w0 below is a reasonable guess but could change: it certainly comes into the
517 ; CHECK-NEXT: cmp w0, w[[OLD]], sxth
518 ; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
519 ; CHECK-NEXT: stlxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
520 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
523 ; CHECK: mov x0, x[[OLD]]
527 define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
528 ; CHECK: test_atomic_load_min_i32:
529 %old = atomicrmw min i32* @var32, i32 %offset monotonic
531 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
532 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
534 ; CHECK: .LBB{{[0-9]+}}_1:
535 ; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
536 ; w0 below is a reasonable guess but could change: it certainly comes into the
538 ; CHECK-NEXT: cmp w0, w[[OLD]]
539 ; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, gt
540 ; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
541 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
544 ; CHECK: mov x0, x[[OLD]]
548 define i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
549 ; CHECK: test_atomic_load_min_i64:
550 %old = atomicrmw min i64* @var64, i64 %offset seq_cst
552 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
553 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
555 ; CHECK: .LBB{{[0-9]+}}_1:
556 ; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
557 ; x0 below is a reasonable guess but could change: it certainly comes into the
559 ; CHECK-NEXT: cmp x0, x[[OLD]]
560 ; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, gt
561 ; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
562 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
565 ; CHECK: mov x0, x[[OLD]]
569 define i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
570 ; CHECK: test_atomic_load_max_i8:
571 %old = atomicrmw max i8* @var8, i8 %offset seq_cst
573 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
574 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
576 ; CHECK: .LBB{{[0-9]+}}_1:
577 ; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
578 ; w0 below is a reasonable guess but could change: it certainly comes into the
580 ; CHECK-NEXT: cmp w0, w[[OLD]], sxtb
581 ; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
582 ; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
583 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
586 ; CHECK: mov x0, x[[OLD]]
590 define i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
591 ; CHECK: test_atomic_load_max_i16:
592 %old = atomicrmw max i16* @var16, i16 %offset acquire
594 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
595 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
597 ; CHECK: .LBB{{[0-9]+}}_1:
598 ; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
599 ; w0 below is a reasonable guess but could change: it certainly comes into the
601 ; CHECK-NEXT: cmp w0, w[[OLD]], sxth
602 ; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
603 ; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
604 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
607 ; CHECK: mov x0, x[[OLD]]
611 define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
612 ; CHECK: test_atomic_load_max_i32:
613 %old = atomicrmw max i32* @var32, i32 %offset release
615 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
616 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
618 ; CHECK: .LBB{{[0-9]+}}_1:
619 ; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
620 ; w0 below is a reasonable guess but could change: it certainly comes into the
622 ; CHECK-NEXT: cmp w0, w[[OLD]]
623 ; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lt
624 ; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
625 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
628 ; CHECK: mov x0, x[[OLD]]
632 define i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
633 ; CHECK: test_atomic_load_max_i64:
634 %old = atomicrmw max i64* @var64, i64 %offset monotonic
636 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
637 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
639 ; CHECK: .LBB{{[0-9]+}}_1:
640 ; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
641 ; x0 below is a reasonable guess but could change: it certainly comes into the
643 ; CHECK-NEXT: cmp x0, x[[OLD]]
644 ; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, lt
645 ; CHECK-NEXT: stxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
646 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
649 ; CHECK: mov x0, x[[OLD]]
653 define i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
654 ; CHECK: test_atomic_load_umin_i8:
655 %old = atomicrmw umin i8* @var8, i8 %offset monotonic
657 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
658 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
660 ; CHECK: .LBB{{[0-9]+}}_1:
661 ; CHECK-NEXT: ldxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
662 ; w0 below is a reasonable guess but could change: it certainly comes into the
664 ; CHECK-NEXT: cmp w0, w[[OLD]], uxtb
665 ; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
666 ; CHECK-NEXT: stxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
667 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
670 ; CHECK: mov x0, x[[OLD]]
674 define i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
675 ; CHECK: test_atomic_load_umin_i16:
676 %old = atomicrmw umin i16* @var16, i16 %offset acquire
678 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
679 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
681 ; CHECK: .LBB{{[0-9]+}}_1:
682 ; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
683 ; w0 below is a reasonable guess but could change: it certainly comes into the
685 ; CHECK-NEXT: cmp w0, w[[OLD]], uxth
686 ; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
687 ; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
688 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
691 ; CHECK: mov x0, x[[OLD]]
695 define i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
696 ; CHECK: test_atomic_load_umin_i32:
697 %old = atomicrmw umin i32* @var32, i32 %offset seq_cst
699 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
700 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
702 ; CHECK: .LBB{{[0-9]+}}_1:
703 ; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
704 ; w0 below is a reasonable guess but could change: it certainly comes into the
706 ; CHECK-NEXT: cmp w0, w[[OLD]]
707 ; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, hi
708 ; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
709 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
712 ; CHECK: mov x0, x[[OLD]]
716 define i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
717 ; CHECK: test_atomic_load_umin_i64:
718 %old = atomicrmw umin i64* @var64, i64 %offset acq_rel
720 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
721 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
723 ; CHECK: .LBB{{[0-9]+}}_1:
724 ; CHECK-NEXT: ldaxr x[[OLD:[0-9]+]], [x[[ADDR]]]
725 ; x0 below is a reasonable guess but could change: it certainly comes into the
727 ; CHECK-NEXT: cmp x0, x[[OLD]]
728 ; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, hi
729 ; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
730 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
733 ; CHECK: mov x0, x[[OLD]]
737 define i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
738 ; CHECK: test_atomic_load_umax_i8:
739 %old = atomicrmw umax i8* @var8, i8 %offset acq_rel
741 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
742 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
744 ; CHECK: .LBB{{[0-9]+}}_1:
745 ; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
746 ; w0 below is a reasonable guess but could change: it certainly comes into the
748 ; CHECK-NEXT: cmp w0, w[[OLD]], uxtb
749 ; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
750 ; CHECK-NEXT: stlxrb [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
751 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
754 ; CHECK: mov x0, x[[OLD]]
758 define i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
759 ; CHECK: test_atomic_load_umax_i16:
760 %old = atomicrmw umax i16* @var16, i16 %offset monotonic
762 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
763 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
765 ; CHECK: .LBB{{[0-9]+}}_1:
766 ; CHECK-NEXT: ldxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
767 ; w0 below is a reasonable guess but could change: it certainly comes into the
769 ; CHECK-NEXT: cmp w0, w[[OLD]], uxth
770 ; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
771 ; CHECK-NEXT: stxrh [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
772 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
775 ; CHECK: mov x0, x[[OLD]]
779 define i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
780 ; CHECK: test_atomic_load_umax_i32:
781 %old = atomicrmw umax i32* @var32, i32 %offset seq_cst
783 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
784 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
786 ; CHECK: .LBB{{[0-9]+}}_1:
787 ; CHECK-NEXT: ldaxr w[[OLD:[0-9]+]], [x[[ADDR]]]
788 ; w0 below is a reasonable guess but could change: it certainly comes into the
790 ; CHECK-NEXT: cmp w0, w[[OLD]]
791 ; CHECK-NEXT: csel [[NEW:w[0-9]+]], w[[OLD]], w0, lo
792 ; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
793 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
796 ; CHECK: mov x0, x[[OLD]]
800 define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
801 ; CHECK: test_atomic_load_umax_i64:
802 %old = atomicrmw umax i64* @var64, i64 %offset release
804 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
805 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
807 ; CHECK: .LBB{{[0-9]+}}_1:
808 ; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
809 ; x0 below is a reasonable guess but could change: it certainly comes into the
811 ; CHECK-NEXT: cmp x0, x[[OLD]]
812 ; CHECK-NEXT: csel [[NEW:x[0-9]+]], x[[OLD]], x0, lo
813 ; CHECK-NEXT: stlxr [[STATUS:w[0-9]+]], [[NEW]], [x[[ADDR]]]
814 ; CHECK-NEXT: cbnz [[STATUS]], .LBB{{[0-9]+}}_1
817 ; CHECK: mov x0, x[[OLD]]
821 define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
822 ; CHECK: test_atomic_cmpxchg_i8:
823 %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire
825 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
826 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
828 ; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
829 ; CHECK-NEXT: ldaxrb w[[OLD:[0-9]+]], [x[[ADDR]]]
830 ; w0 below is a reasonable guess but could change: it certainly comes into the
832 ; CHECK-NEXT: cmp w[[OLD]], w0
833 ; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
834 ; As above, w1 is a reasonable guess.
835 ; CHECK: stxrb [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
836 ; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
839 ; CHECK: mov x0, x[[OLD]]
843 define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
844 ; CHECK: test_atomic_cmpxchg_i16:
845 %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst
847 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var16
848 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var16
850 ; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
851 ; CHECK-NEXT: ldaxrh w[[OLD:[0-9]+]], [x[[ADDR]]]
852 ; w0 below is a reasonable guess but could change: it certainly comes into the
854 ; CHECK-NEXT: cmp w[[OLD]], w0
855 ; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
856 ; As above, w1 is a reasonable guess.
857 ; CHECK: stlxrh [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
858 ; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
861 ; CHECK: mov x0, x[[OLD]]
865 define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
866 ; CHECK: test_atomic_cmpxchg_i32:
867 %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release
869 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
870 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var32
872 ; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
873 ; CHECK-NEXT: ldxr w[[OLD:[0-9]+]], [x[[ADDR]]]
874 ; w0 below is a reasonable guess but could change: it certainly comes into the
876 ; CHECK-NEXT: cmp w[[OLD]], w0
877 ; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
878 ; As above, w1 is a reasonable guess.
879 ; CHECK: stlxr [[STATUS:w[0-9]+]], w1, [x[[ADDR]]]
880 ; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
883 ; CHECK: mov x0, x[[OLD]]
887 define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
888 ; CHECK: test_atomic_cmpxchg_i64:
889 %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic
891 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
892 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var64
894 ; CHECK: [[STARTAGAIN:.LBB[0-9]+_[0-9]+]]:
895 ; CHECK-NEXT: ldxr x[[OLD:[0-9]+]], [x[[ADDR]]]
896 ; x0 below is a reasonable guess but could change: it certainly comes into the
898 ; CHECK-NEXT: cmp x[[OLD]], x0
899 ; CHECK-NEXT: b.ne [[GET_OUT:.LBB[0-9]+_[0-9]+]]
900 ; As above, x1 is a reasonable guess.
901 ; CHECK: stxr [[STATUS:w[0-9]+]], x1, [x[[ADDR]]]
902 ; CHECK-NEXT: cbnz [[STATUS]], [[STARTAGAIN]]
905 ; CHECK: mov x0, x[[OLD]]
909 define i8 @test_atomic_load_monotonic_i8() nounwind {
910 ; CHECK: test_atomic_load_monotonic_i8:
911 %val = load atomic i8* @var8 monotonic, align 1
913 ; CHECK: adrp x[[HIADDR:[0-9]+]], var8
914 ; CHECK: ldrb w0, [x[[HIADDR]], #:lo12:var8]
920 define i8 @test_atomic_load_monotonic_regoff_i8(i64 %base, i64 %off) nounwind {
921 ; CHECK: test_atomic_load_monotonic_regoff_i8:
922 %addr_int = add i64 %base, %off
923 %addr = inttoptr i64 %addr_int to i8*
925 %val = load atomic i8* %addr monotonic, align 1
927 ; CHECK: ldrb w0, [x0, x1]
933 define i8 @test_atomic_load_acquire_i8() nounwind {
934 ; CHECK: test_atomic_load_acquire_i8:
935 %val = load atomic i8* @var8 acquire, align 1
937 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var8
939 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], #:lo12:var8
941 ; CHECK: ldarb w0, [x[[ADDR]]]
946 define i8 @test_atomic_load_seq_cst_i8() nounwind {
947 ; CHECK: test_atomic_load_seq_cst_i8:
948 %val = load atomic i8* @var8 seq_cst, align 1
950 ; CHECK: adrp [[HIADDR:x[0-9]+]], var8
952 ; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var8
954 ; CHECK: ldarb w0, [x[[ADDR]]]
959 define i16 @test_atomic_load_monotonic_i16() nounwind {
960 ; CHECK: test_atomic_load_monotonic_i16:
961 %val = load atomic i16* @var16 monotonic, align 2
963 ; CHECK: adrp x[[HIADDR:[0-9]+]], var16
965 ; CHECK: ldrh w0, [x[[HIADDR]], #:lo12:var16]
971 define i32 @test_atomic_load_monotonic_regoff_i32(i64 %base, i64 %off) nounwind {
972 ; CHECK: test_atomic_load_monotonic_regoff_i32:
973 %addr_int = add i64 %base, %off
974 %addr = inttoptr i64 %addr_int to i32*
976 %val = load atomic i32* %addr monotonic, align 4
978 ; CHECK: ldr w0, [x0, x1]
984 define i64 @test_atomic_load_seq_cst_i64() nounwind {
985 ; CHECK: test_atomic_load_seq_cst_i64:
986 %val = load atomic i64* @var64 seq_cst, align 8
988 ; CHECK: adrp [[HIADDR:x[0-9]+]], var64
990 ; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var64
992 ; CHECK: ldar x0, [x[[ADDR]]]
997 define void @test_atomic_store_monotonic_i8(i8 %val) nounwind {
998 ; CHECK: test_atomic_store_monotonic_i8:
999 store atomic i8 %val, i8* @var8 monotonic, align 1
1000 ; CHECK: adrp x[[HIADDR:[0-9]+]], var8
1001 ; CHECK: strb w0, [x[[HIADDR]], #:lo12:var8]
1006 define void @test_atomic_store_monotonic_regoff_i8(i64 %base, i64 %off, i8 %val) nounwind {
1007 ; CHECK: test_atomic_store_monotonic_regoff_i8:
1009 %addr_int = add i64 %base, %off
1010 %addr = inttoptr i64 %addr_int to i8*
1012 store atomic i8 %val, i8* %addr monotonic, align 1
1013 ; CHECK: strb w2, [x0, x1]
1017 define void @test_atomic_store_release_i8(i8 %val) nounwind {
1018 ; CHECK: test_atomic_store_release_i8:
1019 store atomic i8 %val, i8* @var8 release, align 1
1021 ; CHECK: adrp [[HIADDR:x[0-9]+]], var8
1023 ; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var8
1025 ; CHECK: stlrb w0, [x[[ADDR]]]
1030 define void @test_atomic_store_seq_cst_i8(i8 %val) nounwind {
1031 ; CHECK: test_atomic_store_seq_cst_i8:
1032 store atomic i8 %val, i8* @var8 seq_cst, align 1
1034 ; CHECK: adrp [[HIADDR:x[0-9]+]], var8
1036 ; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var8
1038 ; CHECK: stlrb w0, [x[[ADDR]]]
1044 define void @test_atomic_store_monotonic_i16(i16 %val) nounwind {
1045 ; CHECK: test_atomic_store_monotonic_i16:
1046 store atomic i16 %val, i16* @var16 monotonic, align 2
1048 ; CHECK: adrp x[[HIADDR:[0-9]+]], var16
1050 ; CHECK: strh w0, [x[[HIADDR]], #:lo12:var16]
1055 define void @test_atomic_store_monotonic_regoff_i32(i64 %base, i64 %off, i32 %val) nounwind {
1056 ; CHECK: test_atomic_store_monotonic_regoff_i32:
1058 %addr_int = add i64 %base, %off
1059 %addr = inttoptr i64 %addr_int to i32*
1061 store atomic i32 %val, i32* %addr monotonic, align 4
1063 ; CHECK: str w2, [x0, x1]
1069 define void @test_atomic_store_release_i64(i64 %val) nounwind {
1070 ; CHECK: test_atomic_store_release_i64:
1071 store atomic i64 %val, i64* @var64 release, align 8
1073 ; CHECK: adrp [[HIADDR:x[0-9]+]], var64
1075 ; CHECK: add x[[ADDR:[0-9]+]], [[HIADDR]], #:lo12:var64
1077 ; CHECK: stlr x0, [x[[ADDR]]]