1 ; RUN: llc -mtriple=armv8-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
2 ; RUN: llc -mtriple=thumbv8-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s
; seq_cst add i8: expects an acquire-exclusive load (ldaexb) paired with a
; release-exclusive store (stlexb) in the retry loop.
9 define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
10 ; CHECK-LABEL: test_atomic_load_add_i8:
11 %old = atomicrmw add i8* @var8, i8 %offset seq_cst
14 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
15 ; CHECK: movt r[[ADDR]], :upper16:var8
17 ; CHECK: .LBB{{[0-9]+}}_1:
18 ; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
19 ; r0 below is a reasonable guess but could change: it certainly comes into the
21 ; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
22 ; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
23 ; CHECK-NEXT: cmp [[STATUS]], #0
24 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
28 ; CHECK: mov r0, r[[OLD]]
; acquire add i16: ldaexh supplies the acquire ordering; the store side is a
; plain strexh (no release semantics needed).
32 define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
33 ; CHECK-LABEL: test_atomic_load_add_i16:
34 %old = atomicrmw add i16* @var16, i16 %offset acquire
37 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
38 ; CHECK: movt r[[ADDR]], :upper16:var16
40 ; CHECK: .LBB{{[0-9]+}}_1:
41 ; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
42 ; r0 below is a reasonable guess but could change: it certainly comes into the
44 ; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
45 ; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
46 ; CHECK-NEXT: cmp [[STATUS]], #0
47 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
51 ; CHECK: mov r0, r[[OLD]]
; release add i32: plain ldrex load; stlex provides the release ordering.
55 define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
56 ; CHECK-LABEL: test_atomic_load_add_i32:
57 %old = atomicrmw add i32* @var32, i32 %offset release
60 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
61 ; CHECK: movt r[[ADDR]], :upper16:var32
63 ; CHECK: .LBB{{[0-9]+}}_1:
64 ; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
65 ; r0 below is a reasonable guess but could change: it certainly comes into the
67 ; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
68 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
69 ; CHECK-NEXT: cmp [[STATUS]], #0
70 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
74 ; CHECK: mov r0, r[[OLD]]
; monotonic add i64: plain ldrexd/strexd pair (no acquire/release); the 64-bit
; add is split into adds (low word) + adc (high word, consuming the carry).
78 define i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
79 ; CHECK-LABEL: test_atomic_load_add_i64:
80 %old = atomicrmw add i64* @var64, i64 %offset monotonic
83 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
84 ; CHECK: movt r[[ADDR]], :upper16:var64
86 ; CHECK: .LBB{{[0-9]+}}_1:
87 ; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
88 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
90 ; CHECK-NEXT: adds [[NEW1:r[0-9]+]], r[[OLD1]], r0
91 ; CHECK-NEXT: adc{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
92 ; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
93 ; CHECK-NEXT: cmp [[STATUS]], #0
94 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
98 ; CHECK: mov r0, r[[OLD1]]
99 ; CHECK-NEXT: mov r1, r[[OLD2]]
; monotonic sub i8: plain ldrexb/strexb exclusives, no ordering annotations.
103 define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
104 ; CHECK-LABEL: test_atomic_load_sub_i8:
105 %old = atomicrmw sub i8* @var8, i8 %offset monotonic
108 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
109 ; CHECK: movt r[[ADDR]], :upper16:var8
111 ; CHECK: .LBB{{[0-9]+}}_1:
112 ; CHECK-NEXT: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
113 ; r0 below is a reasonable guess but could change: it certainly comes into the
115 ; CHECK-NEXT: sub{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
116 ; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
117 ; CHECK-NEXT: cmp [[STATUS]], #0
118 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
122 ; CHECK: mov r0, r[[OLD]]
; release sub i16: plain ldrexh load; stlexh carries the release ordering.
126 define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
127 ; CHECK-LABEL: test_atomic_load_sub_i16:
128 %old = atomicrmw sub i16* @var16, i16 %offset release
131 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
132 ; CHECK: movt r[[ADDR]], :upper16:var16
134 ; CHECK: .LBB{{[0-9]+}}_1:
135 ; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
136 ; r0 below is a reasonable guess but could change: it certainly comes into the
138 ; CHECK-NEXT: sub{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
139 ; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
140 ; CHECK-NEXT: cmp [[STATUS]], #0
141 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
145 ; CHECK: mov r0, r[[OLD]]
; acquire sub i32: ldaex acquire load; plain strex store.
149 define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
150 ; CHECK-LABEL: test_atomic_load_sub_i32:
151 %old = atomicrmw sub i32* @var32, i32 %offset acquire
154 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
155 ; CHECK: movt r[[ADDR]], :upper16:var32
157 ; CHECK: .LBB{{[0-9]+}}_1:
158 ; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
159 ; r0 below is a reasonable guess but could change: it certainly comes into the
161 ; CHECK-NEXT: sub{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
162 ; CHECK-NEXT: strex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
163 ; CHECK-NEXT: cmp [[STATUS]], #0
164 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
168 ; CHECK: mov r0, r[[OLD]]
; seq_cst sub i64: ldaexd + stlexd (acquire load, release store); the 64-bit
; subtract is subs (low) + sbc (high, consuming the borrow).
172 define i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
173 ; CHECK-LABEL: test_atomic_load_sub_i64:
174 %old = atomicrmw sub i64* @var64, i64 %offset seq_cst
177 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
178 ; CHECK: movt r[[ADDR]], :upper16:var64
180 ; CHECK: .LBB{{[0-9]+}}_1:
181 ; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
182 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
184 ; CHECK-NEXT: subs [[NEW1:r[0-9]+]], r[[OLD1]], r0
185 ; CHECK-NEXT: sbc{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
186 ; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
187 ; CHECK-NEXT: cmp [[STATUS]], #0
188 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
192 ; CHECK: mov r0, r[[OLD1]]
193 ; CHECK-NEXT: mov r1, r[[OLD2]]
; release and i8: plain ldrexb load; stlexb carries the release ordering.
; The {{(\.w)?}} on the ALU op tolerates the Thumb-2 wide-encoding suffix.
197 define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
198 ; CHECK-LABEL: test_atomic_load_and_i8:
199 %old = atomicrmw and i8* @var8, i8 %offset release
202 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
203 ; CHECK: movt r[[ADDR]], :upper16:var8
205 ; CHECK: .LBB{{[0-9]+}}_1:
206 ; CHECK-NEXT: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
207 ; r0 below is a reasonable guess but could change: it certainly comes into the
209 ; CHECK-NEXT: and{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
210 ; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
211 ; CHECK-NEXT: cmp [[STATUS]], #0
212 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
216 ; CHECK: mov r0, r[[OLD]]
; monotonic and i16: plain ldrexh/strexh exclusives.
220 define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
221 ; CHECK-LABEL: test_atomic_load_and_i16:
222 %old = atomicrmw and i16* @var16, i16 %offset monotonic
225 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
226 ; CHECK: movt r[[ADDR]], :upper16:var16
228 ; CHECK: .LBB{{[0-9]+}}_1:
229 ; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
230 ; r0 below is a reasonable guess but could change: it certainly comes into the
232 ; CHECK-NEXT: and{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
233 ; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
234 ; CHECK-NEXT: cmp [[STATUS]], #0
235 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
239 ; CHECK: mov r0, r[[OLD]]
; seq_cst and i32: ldaex acquire load + stlex release store.
243 define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
244 ; CHECK-LABEL: test_atomic_load_and_i32:
245 %old = atomicrmw and i32* @var32, i32 %offset seq_cst
248 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
249 ; CHECK: movt r[[ADDR]], :upper16:var32
251 ; CHECK: .LBB{{[0-9]+}}_1:
252 ; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
253 ; r0 below is a reasonable guess but could change: it certainly comes into the
255 ; CHECK-NEXT: and{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
256 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
257 ; CHECK-NEXT: cmp [[STATUS]], #0
258 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
262 ; CHECK: mov r0, r[[OLD]]
; acquire and i64: ldaexd acquire load; plain strexd store; the 64-bit AND is
; two independent word ANDs (no carry needed).
266 define i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
267 ; CHECK-LABEL: test_atomic_load_and_i64:
268 %old = atomicrmw and i64* @var64, i64 %offset acquire
271 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
272 ; CHECK: movt r[[ADDR]], :upper16:var64
274 ; CHECK: .LBB{{[0-9]+}}_1:
275 ; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
276 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
278 ; CHECK-NEXT: and{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
279 ; CHECK-NEXT: and{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
280 ; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
281 ; CHECK-NEXT: cmp [[STATUS]], #0
282 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
286 ; CHECK: mov r0, r[[OLD1]]
287 ; CHECK-NEXT: mov r1, r[[OLD2]]
; seq_cst or i8: ldaexb acquire load + stlexb release store; orr for the op.
291 define i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
292 ; CHECK-LABEL: test_atomic_load_or_i8:
293 %old = atomicrmw or i8* @var8, i8 %offset seq_cst
296 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
297 ; CHECK: movt r[[ADDR]], :upper16:var8
299 ; CHECK: .LBB{{[0-9]+}}_1:
300 ; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
301 ; r0 below is a reasonable guess but could change: it certainly comes into the
303 ; CHECK-NEXT: orr{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
304 ; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
305 ; CHECK-NEXT: cmp [[STATUS]], #0
306 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
310 ; CHECK: mov r0, r[[OLD]]
; monotonic or i16: plain ldrexh/strexh exclusives.
314 define i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
315 ; CHECK-LABEL: test_atomic_load_or_i16:
316 %old = atomicrmw or i16* @var16, i16 %offset monotonic
319 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
320 ; CHECK: movt r[[ADDR]], :upper16:var16
322 ; CHECK: .LBB{{[0-9]+}}_1:
323 ; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
324 ; r0 below is a reasonable guess but could change: it certainly comes into the
326 ; CHECK-NEXT: orr{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
327 ; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
328 ; CHECK-NEXT: cmp [[STATUS]], #0
329 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
333 ; CHECK: mov r0, r[[OLD]]
; acquire or i32: ldaex acquire load; plain strex store.
337 define i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
338 ; CHECK-LABEL: test_atomic_load_or_i32:
339 %old = atomicrmw or i32* @var32, i32 %offset acquire
342 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
343 ; CHECK: movt r[[ADDR]], :upper16:var32
345 ; CHECK: .LBB{{[0-9]+}}_1:
346 ; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
347 ; r0 below is a reasonable guess but could change: it certainly comes into the
349 ; CHECK-NEXT: orr{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
350 ; CHECK-NEXT: strex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
351 ; CHECK-NEXT: cmp [[STATUS]], #0
352 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
356 ; CHECK: mov r0, r[[OLD]]
; release or i64: plain ldrexd load; stlexd release store; two word ORs.
360 define i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
361 ; CHECK-LABEL: test_atomic_load_or_i64:
362 %old = atomicrmw or i64* @var64, i64 %offset release
365 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
366 ; CHECK: movt r[[ADDR]], :upper16:var64
368 ; CHECK: .LBB{{[0-9]+}}_1:
369 ; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
370 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
372 ; CHECK-NEXT: orr{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
373 ; CHECK-NEXT: orr{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
374 ; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
375 ; CHECK-NEXT: cmp [[STATUS]], #0
376 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
380 ; CHECK: mov r0, r[[OLD1]]
381 ; CHECK-NEXT: mov r1, r[[OLD2]]
; acquire xor i8: ldaexb acquire load; plain strexb store; eor for the op.
385 define i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
386 ; CHECK-LABEL: test_atomic_load_xor_i8:
387 %old = atomicrmw xor i8* @var8, i8 %offset acquire
390 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
391 ; CHECK: movt r[[ADDR]], :upper16:var8
393 ; CHECK: .LBB{{[0-9]+}}_1:
394 ; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
395 ; r0 below is a reasonable guess but could change: it certainly comes into the
397 ; CHECK-NEXT: eor{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
398 ; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
399 ; CHECK-NEXT: cmp [[STATUS]], #0
400 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
404 ; CHECK: mov r0, r[[OLD]]
; release xor i16: plain ldrexh load; stlexh release store.
408 define i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
409 ; CHECK-LABEL: test_atomic_load_xor_i16:
410 %old = atomicrmw xor i16* @var16, i16 %offset release
413 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
414 ; CHECK: movt r[[ADDR]], :upper16:var16
416 ; CHECK: .LBB{{[0-9]+}}_1:
417 ; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
418 ; r0 below is a reasonable guess but could change: it certainly comes into the
420 ; CHECK-NEXT: eor{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
421 ; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
422 ; CHECK-NEXT: cmp [[STATUS]], #0
423 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
427 ; CHECK: mov r0, r[[OLD]]
; seq_cst xor i32: ldaex acquire load + stlex release store.
431 define i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
432 ; CHECK-LABEL: test_atomic_load_xor_i32:
433 %old = atomicrmw xor i32* @var32, i32 %offset seq_cst
436 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
437 ; CHECK: movt r[[ADDR]], :upper16:var32
439 ; CHECK: .LBB{{[0-9]+}}_1:
440 ; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
441 ; r0 below is a reasonable guess but could change: it certainly comes into the
443 ; CHECK-NEXT: eor{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
444 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
445 ; CHECK-NEXT: cmp [[STATUS]], #0
446 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
450 ; CHECK: mov r0, r[[OLD]]
; monotonic xor i64: plain ldrexd/strexd; two independent word EORs.
454 define i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
455 ; CHECK-LABEL: test_atomic_load_xor_i64:
456 %old = atomicrmw xor i64* @var64, i64 %offset monotonic
459 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
460 ; CHECK: movt r[[ADDR]], :upper16:var64
462 ; CHECK: .LBB{{[0-9]+}}_1:
463 ; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
464 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
466 ; CHECK-NEXT: eor{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
467 ; CHECK-NEXT: eor{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
468 ; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
469 ; CHECK-NEXT: cmp [[STATUS]], #0
470 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
474 ; CHECK: mov r0, r[[OLD1]]
475 ; CHECK-NEXT: mov r1, r[[OLD2]]
; xchg needs no ALU op in the loop: the incoming value (r0, or r0:r1 for i64)
; is stored directly by the exclusive store.
; monotonic xchg i8: plain ldrexb/strexb.
479 define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
480 ; CHECK-LABEL: test_atomic_load_xchg_i8:
481 %old = atomicrmw xchg i8* @var8, i8 %offset monotonic
484 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
485 ; CHECK: movt r[[ADDR]], :upper16:var8
487 ; CHECK: .LBB{{[0-9]+}}_1:
488 ; CHECK-NEXT: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
489 ; r0 below is a reasonable guess but could change: it certainly comes into the
491 ; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r0, [r[[ADDR]]]
492 ; CHECK-NEXT: cmp [[STATUS]], #0
493 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
497 ; CHECK: mov r0, r[[OLD]]
; seq_cst xchg i16: ldaexh acquire load + stlexh release store.
501 define i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
502 ; CHECK-LABEL: test_atomic_load_xchg_i16:
503 %old = atomicrmw xchg i16* @var16, i16 %offset seq_cst
506 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
507 ; CHECK: movt r[[ADDR]], :upper16:var16
509 ; CHECK: .LBB{{[0-9]+}}_1:
510 ; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
511 ; r0 below is a reasonable guess but could change: it certainly comes into the
513 ; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], r0, [r[[ADDR]]]
514 ; CHECK-NEXT: cmp [[STATUS]], #0
515 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
519 ; CHECK: mov r0, r[[OLD]]
; release xchg i32: plain ldrex load; stlex release store.
523 define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
524 ; CHECK-LABEL: test_atomic_load_xchg_i32:
525 %old = atomicrmw xchg i32* @var32, i32 %offset release
528 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
529 ; CHECK: movt r[[ADDR]], :upper16:var32
531 ; CHECK: .LBB{{[0-9]+}}_1:
532 ; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
533 ; r0 below is a reasonable guess but could change: it certainly comes into the
535 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r0, [r[[ADDR]]]
536 ; CHECK-NEXT: cmp [[STATUS]], #0
537 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
541 ; CHECK: mov r0, r[[OLD]]
; acquire xchg i64: ldaexd acquire load; plain strexd storing r0:r1 directly.
545 define i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
546 ; CHECK-LABEL: test_atomic_load_xchg_i64:
547 %old = atomicrmw xchg i64* @var64, i64 %offset acquire
550 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
551 ; CHECK: movt r[[ADDR]], :upper16:var64
553 ; CHECK: .LBB{{[0-9]+}}_1:
554 ; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
555 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
557 ; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
558 ; CHECK-NEXT: cmp [[STATUS]], #0
559 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
563 ; CHECK: mov r0, r[[OLD1]]
564 ; CHECK-NEXT: mov r1, r[[OLD2]]
; Signed min. Sub-word variants sign-extend the loaded value (sxtb/sxth)
; before the signed compare, then use movge to pick the offset when the old
; value is >= it (i.e. keep the minimum).
; acquire min i8: ldaexb acquire load; plain strexb store.
568 define i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
569 ; CHECK-LABEL: test_atomic_load_min_i8:
570 %old = atomicrmw min i8* @var8, i8 %offset acquire
573 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
574 ; CHECK: movt r[[ADDR]], :upper16:var8
576 ; CHECK: .LBB{{[0-9]+}}_1:
577 ; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
578 ; CHECK-NEXT: sxtb r[[OLDX:[0-9]+]], r[[OLD]]
579 ; r0 below is a reasonable guess but could change: it certainly comes into the
581 ; CHECK-NEXT: cmp r[[OLDX]], r0
583 ; CHECK: movge r[[OLDX]], r0
584 ; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
585 ; CHECK-NEXT: cmp [[STATUS]], #0
586 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
590 ; CHECK: mov r0, r[[OLD]]
; release min i16: plain ldrexh load; stlexh release store; sxth extend.
594 define i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
595 ; CHECK-LABEL: test_atomic_load_min_i16:
596 %old = atomicrmw min i16* @var16, i16 %offset release
599 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
600 ; CHECK: movt r[[ADDR]], :upper16:var16
602 ; CHECK: .LBB{{[0-9]+}}_1:
603 ; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
604 ; CHECK-NEXT: sxth r[[OLDX:[0-9]+]], r[[OLD]]
605 ; r0 below is a reasonable guess but could change: it certainly comes into the
607 ; CHECK-NEXT: cmp r[[OLDX]], r0
609 ; CHECK: movge r[[OLDX]], r0
610 ; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
611 ; CHECK-NEXT: cmp [[STATUS]], #0
612 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
616 ; CHECK: mov r0, r[[OLD]]
; monotonic min i32: no extension needed; movlt keeps the old value when it
; is already the smaller one.
620 define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
621 ; CHECK-LABEL: test_atomic_load_min_i32:
622 %old = atomicrmw min i32* @var32, i32 %offset monotonic
625 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
626 ; CHECK: movt r[[ADDR]], :upper16:var32
628 ; CHECK: .LBB{{[0-9]+}}_1:
629 ; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
630 ; r0 below is a reasonable guess but could change: it certainly comes into the
632 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
633 ; CHECK-NEXT: cmp r[[OLD]], r0
635 ; CHECK: movlt r[[NEW]], r[[OLD]]
636 ; CHECK-NEXT: strex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
637 ; CHECK-NEXT: cmp [[STATUS]], #0
638 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
642 ; CHECK: mov r0, r[[OLD]]
; seq_cst min i64: 64-bit signed compare via subs/sbcs setting flags only,
; then blt skips the store when the old value is already the minimum.
646 define i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
647 ; CHECK-LABEL: test_atomic_load_min_i64:
648 %old = atomicrmw min i64* @var64, i64 %offset seq_cst
651 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
652 ; CHECK: movt r[[ADDR]], :upper16:var64
654 ; CHECK: .LBB{{[0-9]+}}_1:
655 ; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
656 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
658 ; CHECK-NEXT: subs [[NEW:r[0-9]+]], r[[OLD1]], r0
659 ; CHECK-NEXT: sbcs{{(\.w)?}} [[NEW]], r[[OLD2]], r1
660 ; CHECK-NEXT: blt .LBB{{[0-9]+}}_3
662 ; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
663 ; CHECK-NEXT: cmp [[STATUS]], #0
664 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
668 ; CHECK: mov r0, r[[OLD1]]
669 ; CHECK-NEXT: mov r1, r[[OLD2]]
; Signed max: mirror image of min — movle replaces movge for sub-word forms,
; movgt keeps the larger old value for i32, and bge (not blt) skips the i64
; store when the old value is already the maximum.
; seq_cst max i8: ldaexb acquire load + stlexb release store; sxtb extend.
673 define i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
674 ; CHECK-LABEL: test_atomic_load_max_i8:
675 %old = atomicrmw max i8* @var8, i8 %offset seq_cst
678 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
679 ; CHECK: movt r[[ADDR]], :upper16:var8
681 ; CHECK: .LBB{{[0-9]+}}_1:
682 ; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
683 ; CHECK-NEXT: sxtb r[[OLDX:[0-9]+]], r[[OLD]]
684 ; r0 below is a reasonable guess but could change: it certainly comes into the
686 ; CHECK-NEXT: cmp r[[OLDX]], r0
688 ; CHECK: movle r[[OLDX]], r0
689 ; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
690 ; CHECK-NEXT: cmp [[STATUS]], #0
691 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
695 ; CHECK: mov r0, r[[OLD]]
; acquire max i16: ldaexh acquire load; plain strexh store; sxth extend.
699 define i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
700 ; CHECK-LABEL: test_atomic_load_max_i16:
701 %old = atomicrmw max i16* @var16, i16 %offset acquire
704 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
705 ; CHECK: movt r[[ADDR]], :upper16:var16
707 ; CHECK: .LBB{{[0-9]+}}_1:
708 ; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
709 ; CHECK-NEXT: sxth r[[OLDX:[0-9]+]], r[[OLD]]
710 ; r0 below is a reasonable guess but could change: it certainly comes into the
712 ; CHECK-NEXT: cmp r[[OLDX]], r0
714 ; CHECK: movle r[[OLDX]], r0
715 ; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
716 ; CHECK-NEXT: cmp [[STATUS]], #0
717 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
721 ; CHECK: mov r0, r[[OLD]]
; release max i32: plain ldrex load; stlex release store; movgt selects old.
725 define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
726 ; CHECK-LABEL: test_atomic_load_max_i32:
727 %old = atomicrmw max i32* @var32, i32 %offset release
730 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
731 ; CHECK: movt r[[ADDR]], :upper16:var32
733 ; CHECK: .LBB{{[0-9]+}}_1:
734 ; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
735 ; r0 below is a reasonable guess but could change: it certainly comes into the
737 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
738 ; CHECK-NEXT: cmp r[[OLD]], r0
740 ; CHECK: movgt r[[NEW]], r[[OLD]]
741 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
742 ; CHECK-NEXT: cmp [[STATUS]], #0
743 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
747 ; CHECK: mov r0, r[[OLD]]
; monotonic max i64: subs/sbcs 64-bit signed compare; bge skips the store.
751 define i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
752 ; CHECK-LABEL: test_atomic_load_max_i64:
753 %old = atomicrmw max i64* @var64, i64 %offset monotonic
756 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
757 ; CHECK: movt r[[ADDR]], :upper16:var64
759 ; CHECK: .LBB{{[0-9]+}}_1:
760 ; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
761 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
763 ; CHECK-NEXT: subs [[NEW:r[0-9]+]], r[[OLD1]], r0
764 ; CHECK-NEXT: sbcs{{(\.w)?}} [[NEW]], r[[OLD2]], r1
765 ; CHECK-NEXT: bge .LBB{{[0-9]+}}_3
767 ; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
768 ; CHECK-NEXT: cmp [[STATUS]], #0
769 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
773 ; CHECK: mov r0, r[[OLD1]]
774 ; CHECK-NEXT: mov r1, r[[OLD2]]
; Unsigned min: sub-word values need no sign-extension (the exclusive loads
; zero-fill), so the unsigned compare is done on the raw loaded value and
; movlo selects the old value when it is below the offset.
; monotonic umin i8: plain ldrexb/strexb.
778 define i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
779 ; CHECK-LABEL: test_atomic_load_umin_i8:
780 %old = atomicrmw umin i8* @var8, i8 %offset monotonic
783 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
784 ; CHECK: movt r[[ADDR]], :upper16:var8
786 ; CHECK: .LBB{{[0-9]+}}_1:
787 ; CHECK-NEXT: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
788 ; r0 below is a reasonable guess but could change: it certainly comes into the
790 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
791 ; CHECK-NEXT: cmp r[[OLD]], r0
793 ; CHECK: movlo r[[NEW]], r[[OLD]]
794 ; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
795 ; CHECK-NEXT: cmp [[STATUS]], #0
796 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
800 ; CHECK: mov r0, r[[OLD]]
; acquire umin i16: ldaexh acquire load; plain strexh store.
804 define i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
805 ; CHECK-LABEL: test_atomic_load_umin_i16:
806 %old = atomicrmw umin i16* @var16, i16 %offset acquire
809 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
810 ; CHECK: movt r[[ADDR]], :upper16:var16
812 ; CHECK: .LBB{{[0-9]+}}_1:
813 ; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
814 ; r0 below is a reasonable guess but could change: it certainly comes into the
816 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
817 ; CHECK-NEXT: cmp r[[OLD]], r0
819 ; CHECK: movlo r[[NEW]], r[[OLD]]
820 ; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
821 ; CHECK-NEXT: cmp [[STATUS]], #0
822 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
826 ; CHECK: mov r0, r[[OLD]]
; seq_cst umin i32: ldaex acquire load + stlex release store.
830 define i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
831 ; CHECK-LABEL: test_atomic_load_umin_i32:
832 %old = atomicrmw umin i32* @var32, i32 %offset seq_cst
835 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
836 ; CHECK: movt r[[ADDR]], :upper16:var32
838 ; CHECK: .LBB{{[0-9]+}}_1:
839 ; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
840 ; r0 below is a reasonable guess but could change: it certainly comes into the
842 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
843 ; CHECK-NEXT: cmp r[[OLD]], r0
845 ; CHECK: movlo r[[NEW]], r[[OLD]]
846 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
847 ; CHECK-NEXT: cmp [[STATUS]], #0
848 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
852 ; CHECK: mov r0, r[[OLD]]
; acq_rel umin i64: ldaexd acquire load + stlexd release store; unsigned
; 64-bit compare via subs/sbcs, blo skips the store when old is the minimum.
856 define i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
857 ; CHECK-LABEL: test_atomic_load_umin_i64:
858 %old = atomicrmw umin i64* @var64, i64 %offset acq_rel
861 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
862 ; CHECK: movt r[[ADDR]], :upper16:var64
864 ; CHECK: .LBB{{[0-9]+}}_1:
865 ; CHECK-NEXT: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
866 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
868 ; CHECK-NEXT: subs [[NEW:r[0-9]+]], r[[OLD1]], r0
869 ; CHECK-NEXT: sbcs{{(\.w)?}} [[NEW]], r[[OLD2]], r1
870 ; CHECK-NEXT: blo .LBB{{[0-9]+}}_3
872 ; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
873 ; CHECK-NEXT: cmp [[STATUS]], #0
874 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
878 ; CHECK: mov r0, r[[OLD1]]
879 ; CHECK-NEXT: mov r1, r[[OLD2]]
; Unsigned max: movhi keeps the old value when it is strictly above the
; offset; the i64 form uses bhs to skip the store when old >= offset.
; acq_rel umax i8: ldaexb acquire load + stlexb release store.
883 define i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
884 ; CHECK-LABEL: test_atomic_load_umax_i8:
885 %old = atomicrmw umax i8* @var8, i8 %offset acq_rel
888 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
889 ; CHECK: movt r[[ADDR]], :upper16:var8
891 ; CHECK: .LBB{{[0-9]+}}_1:
892 ; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
893 ; r0 below is a reasonable guess but could change: it certainly comes into the
895 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
896 ; CHECK-NEXT: cmp r[[OLD]], r0
898 ; CHECK: movhi r[[NEW]], r[[OLD]]
899 ; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
900 ; CHECK-NEXT: cmp [[STATUS]], #0
901 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
905 ; CHECK: mov r0, r[[OLD]]
; monotonic umax i16: plain ldrexh/strexh.
909 define i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
910 ; CHECK-LABEL: test_atomic_load_umax_i16:
911 %old = atomicrmw umax i16* @var16, i16 %offset monotonic
914 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
915 ; CHECK: movt r[[ADDR]], :upper16:var16
917 ; CHECK: .LBB{{[0-9]+}}_1:
918 ; CHECK-NEXT: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
919 ; r0 below is a reasonable guess but could change: it certainly comes into the
921 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
922 ; CHECK-NEXT: cmp r[[OLD]], r0
924 ; CHECK: movhi r[[NEW]], r[[OLD]]
925 ; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
926 ; CHECK-NEXT: cmp [[STATUS]], #0
927 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
931 ; CHECK: mov r0, r[[OLD]]
; seq_cst umax i32: ldaex acquire load + stlex release store.
935 define i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
936 ; CHECK-LABEL: test_atomic_load_umax_i32:
937 %old = atomicrmw umax i32* @var32, i32 %offset seq_cst
940 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
941 ; CHECK: movt r[[ADDR]], :upper16:var32
943 ; CHECK: .LBB{{[0-9]+}}_1:
944 ; CHECK-NEXT: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
945 ; r0 below is a reasonable guess but could change: it certainly comes into the
947 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
948 ; CHECK-NEXT: cmp r[[OLD]], r0
950 ; CHECK: movhi r[[NEW]], r[[OLD]]
951 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
952 ; CHECK-NEXT: cmp [[STATUS]], #0
953 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
957 ; CHECK: mov r0, r[[OLD]]
; release umax i64: plain ldrexd load; stlexd release store; bhs skips the
; store when the old value is already >= the offset (unsigned).
961 define i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
962 ; CHECK-LABEL: test_atomic_load_umax_i64:
963 %old = atomicrmw umax i64* @var64, i64 %offset release
966 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
967 ; CHECK: movt r[[ADDR]], :upper16:var64
969 ; CHECK: .LBB{{[0-9]+}}_1:
970 ; CHECK-NEXT: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
971 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
973 ; CHECK-NEXT: subs [[NEW:r[0-9]+]], r[[OLD1]], r0
974 ; CHECK-NEXT: sbcs{{(\.w)?}} [[NEW]], r[[OLD2]], r1
975 ; CHECK-NEXT: bhs .LBB{{[0-9]+}}_3
977 ; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
978 ; CHECK-NEXT: cmp [[STATUS]], #0
979 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
983 ; CHECK: mov r0, r[[OLD1]]
984 ; CHECK-NEXT: mov r1, r[[OLD2]]
; cmpxchg: load-exclusive, compare against %wanted, branch out on mismatch,
; otherwise store-exclusive %new and retry on reservation failure.
; acquire cmpxchg i8: ldaexb acquire load; plain strexb store.
988 define i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
989 ; CHECK-LABEL: test_atomic_cmpxchg_i8:
990 %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire
993 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
994 ; CHECK: movt r[[ADDR]], :upper16:var8
996 ; CHECK: .LBB{{[0-9]+}}_1:
997 ; CHECK-NEXT: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
998 ; r0 below is a reasonable guess but could change: it certainly comes into the
1000 ; CHECK-NEXT: cmp r[[OLD]], r0
1001 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
1003 ; As above, r1 is a reasonable guess.
1004 ; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
1005 ; CHECK-NEXT: cmp [[STATUS]], #0
1006 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
1010 ; CHECK: mov r0, r[[OLD]]
; seq_cst cmpxchg i16: ldaexh acquire load + stlexh release store.
1014 define i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
1015 ; CHECK-LABEL: test_atomic_cmpxchg_i16:
1016 %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst
1019 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
1020 ; CHECK: movt r[[ADDR]], :upper16:var16
1022 ; CHECK: .LBB{{[0-9]+}}_1:
1023 ; CHECK-NEXT: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
1024 ; r0 below is a reasonable guess but could change: it certainly comes into the
1026 ; CHECK-NEXT: cmp r[[OLD]], r0
1027 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
1029 ; As above, r1 is a reasonable guess.
1030 ; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
1031 ; CHECK-NEXT: cmp [[STATUS]], #0
1032 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
1036 ; CHECK: mov r0, r[[OLD]]
; release cmpxchg i32: plain ldrex load; stlex release store.
1040 define i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
1041 ; CHECK-LABEL: test_atomic_cmpxchg_i32:
1042 %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release
1045 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
1046 ; CHECK: movt r[[ADDR]], :upper16:var32
1048 ; CHECK: .LBB{{[0-9]+}}_1:
1049 ; CHECK-NEXT: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
1050 ; r0 below is a reasonable guess but could change: it certainly comes into the
1052 ; CHECK-NEXT: cmp r[[OLD]], r0
1053 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
1055 ; As above, r1 is a reasonable guess.
1056 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
1057 ; CHECK-NEXT: cmp [[STATUS]], #0
1058 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
1062 ; CHECK: mov r0, r[[OLD]]
; monotonic cmpxchg i64: plain ldrexd/strexd; the OLD1/OLD2 patterns also
; allow lr since the register pair may include it; the 64-bit equality test
; is cmp on the low words followed by cmpeq on the high words.
1066 define i64 @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
1067 ; CHECK-LABEL: test_atomic_cmpxchg_i64:
1068 %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic
1071 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
1072 ; CHECK: movt r[[ADDR]], :upper16:var64
1074 ; CHECK: .LBB{{[0-9]+}}_1:
1075 ; CHECK-NEXT: ldrexd [[OLD1:r[0-9]+|lr]], [[OLD2:r[0-9]+|lr]], [r[[ADDR]]]
1076 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
1078 ; CHECK-NEXT: cmp [[OLD1]], r0
1080 ; CHECK: cmpeq [[OLD2]], r1
1081 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
1083 ; As above, r2, r3 is a reasonable guess.
1084 ; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], r2, r3, [r[[ADDR]]]
1085 ; CHECK-NEXT: cmp [[STATUS]], #0
1086 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
1090 ; CHECK: mov r0, [[OLD1]]
1091 ; CHECK-NEXT: mov r1, [[OLD2]]
; Plain atomic loads (no rmw loop). Monotonic maps to an ordinary load;
; acquire/seq_cst map to the v8 load-acquire forms (ldab); a 64-bit seq_cst
; load uses ldaexd for single-copy atomicity of the pair.
; monotonic i8 load: ordinary ldrb.
1095 define i8 @test_atomic_load_monotonic_i8() nounwind {
1096 ; CHECK-LABEL: test_atomic_load_monotonic_i8:
1097 %val = load atomic i8* @var8 monotonic, align 1
1100 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1101 ; CHECK: movt r[[ADDR]], :upper16:var8
1102 ; CHECK: ldrb r0, [r[[ADDR]]]
; monotonic i8 load through a computed address: register-offset ldrb. Only
; the low word of each i64 (r0, r2) feeds the 32-bit address arithmetic.
1109 define i8 @test_atomic_load_monotonic_regoff_i8(i64 %base, i64 %off) nounwind {
1110 ; CHECK-LABEL: test_atomic_load_monotonic_regoff_i8:
1111 %addr_int = add i64 %base, %off
1112 %addr = inttoptr i64 %addr_int to i8*
1114 %val = load atomic i8* %addr monotonic, align 1
1117 ; CHECK: ldrb r0, [r0, r2]
; acquire i8 load: ldab (load-acquire byte).
1124 define i8 @test_atomic_load_acquire_i8() nounwind {
1125 ; CHECK-LABEL: test_atomic_load_acquire_i8:
1126 %val = load atomic i8* @var8 acquire, align 1
1129 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1132 ; CHECK: movt r[[ADDR]], :upper16:var8
1135 ; CHECK: ldab r0, [r[[ADDR]]]
; seq_cst i8 load: also ldab — acquire semantics suffice on v8 for loads.
1141 define i8 @test_atomic_load_seq_cst_i8() nounwind {
1142 ; CHECK-LABEL: test_atomic_load_seq_cst_i8:
1143 %val = load atomic i8* @var8 seq_cst, align 1
1146 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1149 ; CHECK: movt r[[ADDR]], :upper16:var8
1152 ; CHECK: ldab r0, [r[[ADDR]]]
; monotonic i16 load: ordinary ldrh.
1158 define i16 @test_atomic_load_monotonic_i16() nounwind {
1159 ; CHECK-LABEL: test_atomic_load_monotonic_i16:
1160 %val = load atomic i16* @var16 monotonic, align 2
1163 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
1166 ; CHECK: movt r[[ADDR]], :upper16:var16
1169 ; CHECK: ldrh r0, [r[[ADDR]]]
; monotonic i32 load through a computed address: register-offset ldr.
1176 define i32 @test_atomic_load_monotonic_regoff_i32(i64 %base, i64 %off) nounwind {
1177 ; CHECK-LABEL: test_atomic_load_monotonic_regoff_i32:
1178 %addr_int = add i64 %base, %off
1179 %addr = inttoptr i64 %addr_int to i32*
1181 %val = load atomic i32* %addr monotonic, align 4
1184 ; CHECK: ldr r0, [r0, r2]
; seq_cst i64 load: ldaexd into r0:r1 (exclusive pair load for atomicity).
1191 define i64 @test_atomic_load_seq_cst_i64() nounwind {
1192 ; CHECK-LABEL: test_atomic_load_seq_cst_i64:
1193 %val = load atomic i64* @var64 seq_cst, align 8
1196 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
1199 ; CHECK: movt r[[ADDR]], :upper16:var64
1202 ; CHECK: ldaexd r0, r1, [r[[ADDR]]]
1208 define void @test_atomic_store_monotonic_i8(i8 %val) nounwind {
1209 ; CHECK-LABEL: test_atomic_store_monotonic_i8:
1210 store atomic i8 %val, i8* @var8 monotonic, align 1
1211 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1212 ; CHECK: movt r[[ADDR]], :upper16:var8
1213 ; CHECK: strb r0, [r[[ADDR]]]
1218 define void @test_atomic_store_monotonic_regoff_i8(i64 %base, i64 %off, i8 %val) nounwind {
1219 ; CHECK-LABEL: test_atomic_store_monotonic_regoff_i8:
1221 %addr_int = add i64 %base, %off
1222 %addr = inttoptr i64 %addr_int to i8*
1224 store atomic i8 %val, i8* %addr monotonic, align 1
1225 ; CHECK: ldrb{{(\.w)?}} [[VAL:r[0-9]+]], [sp]
1226 ; CHECK: strb [[VAL]], [r0, r2]
1231 define void @test_atomic_store_release_i8(i8 %val) nounwind {
1232 ; CHECK-LABEL: test_atomic_store_release_i8:
1233 store atomic i8 %val, i8* @var8 release, align 1
1236 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1239 ; CHECK: movt r[[ADDR]], :upper16:var8
1242 ; CHECK: stlb r0, [r[[ADDR]]]
1248 define void @test_atomic_store_seq_cst_i8(i8 %val) nounwind {
1249 ; CHECK-LABEL: test_atomic_store_seq_cst_i8:
1250 store atomic i8 %val, i8* @var8 seq_cst, align 1
1253 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1256 ; CHECK: movt r[[ADDR]], :upper16:var8
1259 ; CHECK: stlb r0, [r[[ADDR]]]
1265 define void @test_atomic_store_monotonic_i16(i16 %val) nounwind {
1266 ; CHECK-LABEL: test_atomic_store_monotonic_i16:
1267 store atomic i16 %val, i16* @var16 monotonic, align 2
1270 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
1273 ; CHECK: movt r[[ADDR]], :upper16:var16
1276 ; CHECK: strh r0, [r[[ADDR]]]
1282 define void @test_atomic_store_monotonic_regoff_i32(i64 %base, i64 %off, i32 %val) nounwind {
1283 ; CHECK-LABEL: test_atomic_store_monotonic_regoff_i32:
1285 %addr_int = add i64 %base, %off
1286 %addr = inttoptr i64 %addr_int to i32*
1288 store atomic i32 %val, i32* %addr monotonic, align 4
1291 ; CHECK: ldr [[VAL:r[0-9]+]], [sp]
1294 ; CHECK: str [[VAL]], [r0, r2]
1301 define void @test_atomic_store_release_i64(i64 %val) nounwind {
1302 ; CHECK-LABEL: test_atomic_store_release_i64:
1303 store atomic i64 %val, i64* @var64 release, align 8
1306 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
1307 ; CHECK: movt r[[ADDR]], :upper16:var64
1309 ; CHECK: .LBB{{[0-9]+}}_1:
1310 ; r0, r1 below are a reasonable guess but could change: they certainly come into the
1312 ; CHECK: stlexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
1313 ; CHECK-NEXT: cmp [[STATUS]], #0
1314 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
1321 define i32 @not.barriers(i32* %var, i1 %cond) {
1322 ; CHECK-LABEL: not.barriers:
1323 br i1 %cond, label %atomic_ver, label %simple_ver
1325 %oldval = load i32* %var
1326 %newval = add nsw i32 %oldval, -1
1327 store i32 %newval, i32* %var
1331 %val = atomicrmw add i32* %var, i32 -1 monotonic
1337 ; The key point here is that the second dmb isn't immediately followed by the
1338 ; simple_ver basic block, which LLVM attempted to do when DMB had been marked
1339 ; with isBarrier. For now, look for something that looks like "somewhere".
1342 %combined = phi i32 [ %val, %atomic_ver ], [ %newval, %simple_ver]