1 ; RUN: llc -mtriple=armv8-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM
2 ; RUN: llc -mtriple=thumbv8-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-THUMB
9 define i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
10 ; CHECK-LABEL: test_atomic_load_add_i8:
11 %old = atomicrmw add i8* @var8, i8 %offset seq_cst
14 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
15 ; CHECK: movt r[[ADDR]], :upper16:var8
17 ; CHECK: .LBB{{[0-9]+}}_1:
18 ; CHECK: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
19 ; r0 below is a reasonable guess but could change: it certainly comes into the
21 ; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
22 ; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
23 ; CHECK-NEXT: cmp [[STATUS]], #0
24 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
28 ; CHECK: mov r0, r[[OLD]]
32 define i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
33 ; CHECK-LABEL: test_atomic_load_add_i16:
34 %old = atomicrmw add i16* @var16, i16 %offset acquire
37 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
38 ; CHECK: movt r[[ADDR]], :upper16:var16
40 ; CHECK: .LBB{{[0-9]+}}_1:
41 ; CHECK: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
42 ; r0 below is a reasonable guess but could change: it certainly comes into the
44 ; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
45 ; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
46 ; CHECK-NEXT: cmp [[STATUS]], #0
47 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
51 ; CHECK: mov r0, r[[OLD]]
55 define i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
56 ; CHECK-LABEL: test_atomic_load_add_i32:
57 %old = atomicrmw add i32* @var32, i32 %offset release
60 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
61 ; CHECK: movt r[[ADDR]], :upper16:var32
63 ; CHECK: .LBB{{[0-9]+}}_1:
64 ; CHECK: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
65 ; r0 below is a reasonable guess but could change: it certainly comes into the
67 ; CHECK-NEXT: add{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
68 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
69 ; CHECK-NEXT: cmp [[STATUS]], #0
70 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
74 ; CHECK: mov r0, r[[OLD]]
78 define void @test_atomic_load_add_i64(i64 %offset) nounwind {
79 ; CHECK-LABEL: test_atomic_load_add_i64:
80 %old = atomicrmw add i64* @var64, i64 %offset monotonic
83 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
84 ; CHECK: movt r[[ADDR]], :upper16:var64
86 ; CHECK: .LBB{{[0-9]+}}_1:
87 ; CHECK: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
88 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
90 ; CHECK-NEXT: adds{{(\.w)?}} [[NEW1:r[0-9]+|lr]], r[[OLD1]], r0
91 ; CHECK-NEXT: adc{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
92 ; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
93 ; CHECK-NEXT: cmp [[STATUS]], #0
94 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
98 ; CHECK: strd r[[OLD1]], r[[OLD2]], [r[[ADDR]]]
99 store i64 %old, i64* @var64
103 define i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
104 ; CHECK-LABEL: test_atomic_load_sub_i8:
105 %old = atomicrmw sub i8* @var8, i8 %offset monotonic
108 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
109 ; CHECK: movt r[[ADDR]], :upper16:var8
111 ; CHECK: .LBB{{[0-9]+}}_1:
112 ; CHECK: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
113 ; r0 below is a reasonable guess but could change: it certainly comes into the
115 ; CHECK-NEXT: sub{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
116 ; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
117 ; CHECK-NEXT: cmp [[STATUS]], #0
118 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
122 ; CHECK: mov r0, r[[OLD]]
126 define i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
127 ; CHECK-LABEL: test_atomic_load_sub_i16:
128 %old = atomicrmw sub i16* @var16, i16 %offset release
131 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
132 ; CHECK: movt r[[ADDR]], :upper16:var16
134 ; CHECK: .LBB{{[0-9]+}}_1:
135 ; CHECK: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
136 ; r0 below is a reasonable guess but could change: it certainly comes into the
138 ; CHECK-NEXT: sub{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
139 ; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
140 ; CHECK-NEXT: cmp [[STATUS]], #0
141 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
145 ; CHECK: mov r0, r[[OLD]]
149 define i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
150 ; CHECK-LABEL: test_atomic_load_sub_i32:
151 %old = atomicrmw sub i32* @var32, i32 %offset acquire
154 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
155 ; CHECK: movt r[[ADDR]], :upper16:var32
157 ; CHECK: .LBB{{[0-9]+}}_1:
158 ; CHECK: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
159 ; r0 below is a reasonable guess but could change: it certainly comes into the
161 ; CHECK-NEXT: sub{{s?}} [[NEW:r[0-9]+]], r[[OLD]], r0
162 ; CHECK-NEXT: strex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
163 ; CHECK-NEXT: cmp [[STATUS]], #0
164 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
168 ; CHECK: mov r0, r[[OLD]]
172 define void @test_atomic_load_sub_i64(i64 %offset) nounwind {
173 ; CHECK-LABEL: test_atomic_load_sub_i64:
174 %old = atomicrmw sub i64* @var64, i64 %offset seq_cst
177 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
178 ; CHECK: movt r[[ADDR]], :upper16:var64
180 ; CHECK: .LBB{{[0-9]+}}_1:
181 ; CHECK: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
182 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
184 ; CHECK-NEXT: subs{{(\.w)?}} [[NEW1:r[0-9]+|lr]], r[[OLD1]], r0
185 ; CHECK-NEXT: sbc{{(\.w)?}} [[NEW2:r[0-9]+]], r[[OLD2]], r1
186 ; CHECK-NEXT: stlexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
187 ; CHECK-NEXT: cmp [[STATUS]], #0
188 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
192 ; CHECK: strd r[[OLD1]], r[[OLD2]], [r[[ADDR]]]
193 store i64 %old, i64* @var64
197 define i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
198 ; CHECK-LABEL: test_atomic_load_and_i8:
199 %old = atomicrmw and i8* @var8, i8 %offset release
202 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
203 ; CHECK: movt r[[ADDR]], :upper16:var8
205 ; CHECK: .LBB{{[0-9]+}}_1:
206 ; CHECK: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
207 ; r0 below is a reasonable guess but could change: it certainly comes into the
209 ; CHECK-NEXT: and{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
210 ; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
211 ; CHECK-NEXT: cmp [[STATUS]], #0
212 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
216 ; CHECK: mov r0, r[[OLD]]
220 define i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
221 ; CHECK-LABEL: test_atomic_load_and_i16:
222 %old = atomicrmw and i16* @var16, i16 %offset monotonic
225 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
226 ; CHECK: movt r[[ADDR]], :upper16:var16
228 ; CHECK: .LBB{{[0-9]+}}_1:
229 ; CHECK: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
230 ; r0 below is a reasonable guess but could change: it certainly comes into the
232 ; CHECK-NEXT: and{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
233 ; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
234 ; CHECK-NEXT: cmp [[STATUS]], #0
235 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
239 ; CHECK: mov r0, r[[OLD]]
243 define i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
244 ; CHECK-LABEL: test_atomic_load_and_i32:
245 %old = atomicrmw and i32* @var32, i32 %offset seq_cst
248 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
249 ; CHECK: movt r[[ADDR]], :upper16:var32
251 ; CHECK: .LBB{{[0-9]+}}_1:
252 ; CHECK: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
253 ; r0 below is a reasonable guess but could change: it certainly comes into the
255 ; CHECK-NEXT: and{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
256 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
257 ; CHECK-NEXT: cmp [[STATUS]], #0
258 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
262 ; CHECK: mov r0, r[[OLD]]
266 define void @test_atomic_load_and_i64(i64 %offset) nounwind {
267 ; CHECK-LABEL: test_atomic_load_and_i64:
268 %old = atomicrmw and i64* @var64, i64 %offset acquire
271 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
272 ; CHECK: movt r[[ADDR]], :upper16:var64
274 ; CHECK: .LBB{{[0-9]+}}_1:
275 ; CHECK: ldaexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
276 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
278 ; CHECK-DAG: and{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
279 ; CHECK-DAG: and{{(\.w)?}} [[NEW2:r[0-9]+|lr]], r[[OLD2]], r1
280 ; CHECK: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
281 ; CHECK-NEXT: cmp [[STATUS]], #0
282 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
286 ; CHECK: strd r[[OLD1]], r[[OLD2]], [r[[ADDR]]]
287 store i64 %old, i64* @var64
291 define i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
292 ; CHECK-LABEL: test_atomic_load_or_i8:
293 %old = atomicrmw or i8* @var8, i8 %offset seq_cst
296 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
297 ; CHECK: movt r[[ADDR]], :upper16:var8
299 ; CHECK: .LBB{{[0-9]+}}_1:
300 ; CHECK: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
301 ; r0 below is a reasonable guess but could change: it certainly comes into the
303 ; CHECK-NEXT: orr{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
304 ; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
305 ; CHECK-NEXT: cmp [[STATUS]], #0
306 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
310 ; CHECK: mov r0, r[[OLD]]
314 define i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
315 ; CHECK-LABEL: test_atomic_load_or_i16:
316 %old = atomicrmw or i16* @var16, i16 %offset monotonic
319 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
320 ; CHECK: movt r[[ADDR]], :upper16:var16
322 ; CHECK: .LBB{{[0-9]+}}_1:
323 ; CHECK: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
324 ; r0 below is a reasonable guess but could change: it certainly comes into the
326 ; CHECK-NEXT: orr{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
327 ; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
328 ; CHECK-NEXT: cmp [[STATUS]], #0
329 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
333 ; CHECK: mov r0, r[[OLD]]
337 define i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
338 ; CHECK-LABEL: test_atomic_load_or_i32:
339 %old = atomicrmw or i32* @var32, i32 %offset acquire
342 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
343 ; CHECK: movt r[[ADDR]], :upper16:var32
345 ; CHECK: .LBB{{[0-9]+}}_1:
346 ; CHECK: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
347 ; r0 below is a reasonable guess but could change: it certainly comes into the
349 ; CHECK-NEXT: orr{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
350 ; CHECK-NEXT: strex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
351 ; CHECK-NEXT: cmp [[STATUS]], #0
352 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
356 ; CHECK: mov r0, r[[OLD]]
360 define void @test_atomic_load_or_i64(i64 %offset) nounwind {
361 ; CHECK-LABEL: test_atomic_load_or_i64:
362 %old = atomicrmw or i64* @var64, i64 %offset release
365 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
366 ; CHECK: movt r[[ADDR]], :upper16:var64
368 ; CHECK: .LBB{{[0-9]+}}_1:
369 ; CHECK: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
370 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
372 ; CHECK-DAG: orr{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
373 ; CHECK-DAG: orr{{(\.w)?}} [[NEW2:r[0-9]+|lr]], r[[OLD2]], r1
374 ; CHECK: stlexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
375 ; CHECK-NEXT: cmp [[STATUS]], #0
376 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
380 ; CHECK: strd r[[OLD1]], r[[OLD2]], [r[[ADDR]]]
381 store i64 %old, i64* @var64
385 define i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
386 ; CHECK-LABEL: test_atomic_load_xor_i8:
387 %old = atomicrmw xor i8* @var8, i8 %offset acquire
390 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
391 ; CHECK: movt r[[ADDR]], :upper16:var8
393 ; CHECK: .LBB{{[0-9]+}}_1:
394 ; CHECK: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
395 ; r0 below is a reasonable guess but could change: it certainly comes into the
397 ; CHECK-NEXT: eor{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
398 ; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
399 ; CHECK-NEXT: cmp [[STATUS]], #0
400 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
404 ; CHECK: mov r0, r[[OLD]]
408 define i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
409 ; CHECK-LABEL: test_atomic_load_xor_i16:
410 %old = atomicrmw xor i16* @var16, i16 %offset release
413 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
414 ; CHECK: movt r[[ADDR]], :upper16:var16
416 ; CHECK: .LBB{{[0-9]+}}_1:
417 ; CHECK: ldrexh r[[OLD:[0-9]+]], [r[[ADDR]]]
418 ; r0 below is a reasonable guess but could change: it certainly comes into the
420 ; CHECK-NEXT: eor{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
421 ; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
422 ; CHECK-NEXT: cmp [[STATUS]], #0
423 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
427 ; CHECK: mov r0, r[[OLD]]
431 define i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
432 ; CHECK-LABEL: test_atomic_load_xor_i32:
433 %old = atomicrmw xor i32* @var32, i32 %offset seq_cst
436 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
437 ; CHECK: movt r[[ADDR]], :upper16:var32
439 ; CHECK: .LBB{{[0-9]+}}_1:
440 ; CHECK: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
441 ; r0 below is a reasonable guess but could change: it certainly comes into the
443 ; CHECK-NEXT: eor{{(\.w)?}} [[NEW:r[0-9]+]], r[[OLD]], r0
444 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], [[NEW]], [r[[ADDR]]]
445 ; CHECK-NEXT: cmp [[STATUS]], #0
446 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
450 ; CHECK: mov r0, r[[OLD]]
454 define void @test_atomic_load_xor_i64(i64 %offset) nounwind {
455 ; CHECK-LABEL: test_atomic_load_xor_i64:
456 %old = atomicrmw xor i64* @var64, i64 %offset monotonic
459 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
460 ; CHECK: movt r[[ADDR]], :upper16:var64
462 ; CHECK: .LBB{{[0-9]+}}_1:
463 ; CHECK: ldrexd r[[OLD1:[0-9]+]], r[[OLD2:[0-9]+]], [r[[ADDR]]]
464 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
466 ; CHECK-DAG: eor{{(\.w)?}} [[NEW1:r[0-9]+]], r[[OLD1]], r0
467 ; CHECK-DAG: eor{{(\.w)?}} [[NEW2:r[0-9]+|lr]], r[[OLD2]], r1
468 ; CHECK: strexd [[STATUS:r[0-9]+]], [[NEW1]], [[NEW2]], [r[[ADDR]]]
469 ; CHECK-NEXT: cmp [[STATUS]], #0
470 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
474 ; CHECK: strd r[[OLD1]], r[[OLD2]], [r[[ADDR]]]
475 store i64 %old, i64* @var64
479 define i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
480 ; CHECK-LABEL: test_atomic_load_xchg_i8:
481 %old = atomicrmw xchg i8* @var8, i8 %offset monotonic
484 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
485 ; CHECK: movt r[[ADDR]], :upper16:var8
487 ; CHECK: .LBB{{[0-9]+}}_1:
488 ; CHECK: ldrexb r[[OLD:[0-9]+]], [r[[ADDR]]]
489 ; r0 below is a reasonable guess but could change: it certainly comes into the
491 ; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r0, [r[[ADDR]]]
492 ; CHECK-NEXT: cmp [[STATUS]], #0
493 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
497 ; CHECK: mov r0, r[[OLD]]
501 define i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
502 ; CHECK-LABEL: test_atomic_load_xchg_i16:
503 %old = atomicrmw xchg i16* @var16, i16 %offset seq_cst
506 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
507 ; CHECK: movt r[[ADDR]], :upper16:var16
509 ; CHECK: .LBB{{[0-9]+}}_1:
510 ; CHECK: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
511 ; r0 below is a reasonable guess but could change: it certainly comes into the
513 ; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], r0, [r[[ADDR]]]
514 ; CHECK-NEXT: cmp [[STATUS]], #0
515 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
519 ; CHECK: mov r0, r[[OLD]]
523 define i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
524 ; CHECK-LABEL: test_atomic_load_xchg_i32:
525 %old = atomicrmw xchg i32* @var32, i32 %offset release
528 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
529 ; CHECK: movt r[[ADDR]], :upper16:var32
531 ; CHECK: .LBB{{[0-9]+}}_1:
532 ; CHECK: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
533 ; r0 below is a reasonable guess but could change: it certainly comes into the
535 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r0, [r[[ADDR]]]
536 ; CHECK-NEXT: cmp [[STATUS]], #0
537 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
541 ; CHECK: mov r0, r[[OLD]]
545 define void @test_atomic_load_xchg_i64(i64 %offset) nounwind {
546 ; CHECK-LABEL: test_atomic_load_xchg_i64:
547 %old = atomicrmw xchg i64* @var64, i64 %offset acquire
550 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
551 ; CHECK: movt r[[ADDR]], :upper16:var64
553 ; CHECK: .LBB{{[0-9]+}}_1:
554 ; CHECK: ldaexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+|lr]], [r[[ADDR]]]
555 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
557 ; CHECK-NEXT: strexd [[STATUS:r[0-9]+]], r0, r1, [r[[ADDR]]]
558 ; CHECK-NEXT: cmp [[STATUS]], #0
559 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
563 ; CHECK: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
564 store i64 %old, i64* @var64
568 define i8 @test_atomic_load_min_i8(i8 signext %offset) nounwind {
569 ; CHECK-LABEL: test_atomic_load_min_i8:
570 %old = atomicrmw min i8* @var8, i8 %offset acquire
573 ; CHECK-DAG: movw [[ADDR:r[0-9]+|lr]], :lower16:var8
574 ; CHECK-DAG: movt [[ADDR]], :upper16:var8
576 ; CHECK: .LBB{{[0-9]+}}_1:
577 ; CHECK: ldaexb r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
578 ; CHECK-NEXT: sxtb r[[OLDX:[0-9]+]], r[[OLD]]
579 ; r0 below is a reasonable guess but could change: it certainly comes into the
581 ; CHECK-NEXT: cmp r[[OLDX]], r0
583 ; CHECK: movle r[[OLDX]], r[[OLD]]
584 ; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r[[OLDX]], {{.*}}[[ADDR]]
585 ; CHECK-NEXT: cmp [[STATUS]], #0
586 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
590 ; CHECK: mov r0, r[[OLD]]
594 define i16 @test_atomic_load_min_i16(i16 signext %offset) nounwind {
595 ; CHECK-LABEL: test_atomic_load_min_i16:
596 %old = atomicrmw min i16* @var16, i16 %offset release
599 ; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var16
600 ; CHECK: movt [[ADDR]], :upper16:var16
602 ; CHECK: .LBB{{[0-9]+}}_1:
603 ; CHECK: ldrexh r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
604 ; CHECK-NEXT: sxth r[[OLDX:[0-9]+]], r[[OLD]]
605 ; r0 below is a reasonable guess but could change: it certainly comes into the
607 ; CHECK-NEXT: cmp r[[OLDX]], r0
609 ; CHECK: movle r[[OLDX]], r[[OLD]]
610 ; CHECK-NEXT: stlexh [[STATUS:r[0-9]+]], r[[OLDX]], {{.*}}[[ADDR]]
611 ; CHECK-NEXT: cmp [[STATUS]], #0
612 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
616 ; CHECK: mov r0, r[[OLD]]
620 define i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
621 ; CHECK-LABEL: test_atomic_load_min_i32:
622 %old = atomicrmw min i32* @var32, i32 %offset monotonic
625 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
626 ; CHECK: movt r[[ADDR]], :upper16:var32
628 ; CHECK: .LBB{{[0-9]+}}_1:
629 ; CHECK: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
630 ; r0 below is a reasonable guess but could change: it certainly comes into the
632 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
633 ; CHECK-NEXT: cmp r[[OLD]], r0
635 ; CHECK: movle r[[NEW]], r[[OLD]]
636 ; CHECK-NEXT: strex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
637 ; CHECK-NEXT: cmp [[STATUS]], #0
638 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
642 ; CHECK: mov r0, r[[OLD]]
646 define void @test_atomic_load_min_i64(i64 %offset) nounwind {
647 ; CHECK-LABEL: test_atomic_load_min_i64:
648 %old = atomicrmw min i64* @var64, i64 %offset seq_cst
651 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
652 ; CHECK: movt r[[ADDR]], :upper16:var64
654 ; CHECK: .LBB{{[0-9]+}}_1:
655 ; CHECK: ldaexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+]], [r[[ADDR]]]
656 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
658 ; CHECK-ARM: mov [[LOCARRY:r[0-9]+|lr]], #0
659 ; CHECK-ARM: mov [[HICARRY:r[0-9]+|lr]], #0
660 ; CHECK-ARM: cmp [[OLD1]], r0
661 ; CHECK-ARM: movwls [[LOCARRY]], #1
662 ; CHECK-ARM: cmp [[OLD2]], r1
663 ; CHECK-ARM: movwle [[HICARRY]], #1
664 ; CHECK-ARM: moveq [[HICARRY]], [[LOCARRY]]
665 ; CHECK-ARM: cmp [[HICARRY]], #0
666 ; CHECK-ARM: mov [[MINHI:r[0-9]+]], r1
667 ; CHECK-ARM: movne [[MINHI]], [[OLD2]]
668 ; CHECK-ARM: mov [[MINLO:r[0-9]+]], r0
669 ; CHECK-ARM: movne [[MINLO]], [[OLD1]]
670 ; CHECK-ARM: stlexd [[STATUS:r[0-9]+]], [[MINLO]], [[MINHI]], [r[[ADDR]]]
671 ; CHECK-THUMB: stlexd [[STATUS:r[0-9]+]], {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
672 ; CHECK-NEXT: cmp [[STATUS]], #0
673 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
677 ; CHECK-ARM: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
678 store i64 %old, i64* @var64
682 define i8 @test_atomic_load_max_i8(i8 signext %offset) nounwind {
683 ; CHECK-LABEL: test_atomic_load_max_i8:
684 %old = atomicrmw max i8* @var8, i8 %offset seq_cst
687 ; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var8
688 ; CHECK: movt [[ADDR]], :upper16:var8
690 ; CHECK: .LBB{{[0-9]+}}_1:
691 ; CHECK: ldaexb r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
692 ; CHECK-NEXT: sxtb r[[OLDX:[0-9]+]], r[[OLD]]
693 ; r0 below is a reasonable guess but could change: it certainly comes into the
695 ; CHECK-NEXT: cmp r[[OLDX]], r0
697 ; CHECK: movgt r[[OLDX]], r[[OLD]]
698 ; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], r[[OLDX]], {{.*}}[[ADDR]]
699 ; CHECK-NEXT: cmp [[STATUS]], #0
700 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
704 ; CHECK: mov r0, r[[OLD]]
708 define i16 @test_atomic_load_max_i16(i16 signext %offset) nounwind {
709 ; CHECK-LABEL: test_atomic_load_max_i16:
710 %old = atomicrmw max i16* @var16, i16 %offset acquire
713 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
714 ; CHECK: movt r[[ADDR]], :upper16:var16
716 ; CHECK: .LBB{{[0-9]+}}_1:
717 ; CHECK: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
718 ; CHECK-NEXT: sxth r[[OLDX:[0-9]+]], r[[OLD]]
719 ; r0 below is a reasonable guess but could change: it certainly comes into the
721 ; CHECK-NEXT: cmp r[[OLDX]], r0
723 ; CHECK: movgt r[[OLDX]], r[[OLD]]
724 ; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[OLDX]], [r[[ADDR]]]
725 ; CHECK-NEXT: cmp [[STATUS]], #0
726 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
730 ; CHECK: mov r0, r[[OLD]]
734 define i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
735 ; CHECK-LABEL: test_atomic_load_max_i32:
736 %old = atomicrmw max i32* @var32, i32 %offset release
739 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
740 ; CHECK: movt r[[ADDR]], :upper16:var32
742 ; CHECK: .LBB{{[0-9]+}}_1:
743 ; CHECK: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
744 ; r0 below is a reasonable guess but could change: it certainly comes into the
746 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
747 ; CHECK-NEXT: cmp r[[OLD]], r0
749 ; CHECK: movgt r[[NEW]], r[[OLD]]
750 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
751 ; CHECK-NEXT: cmp [[STATUS]], #0
752 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
756 ; CHECK: mov r0, r[[OLD]]
760 define void @test_atomic_load_max_i64(i64 %offset) nounwind {
761 ; CHECK-LABEL: test_atomic_load_max_i64:
762 %old = atomicrmw max i64* @var64, i64 %offset monotonic
765 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
766 ; CHECK: movt r[[ADDR]], :upper16:var64
768 ; CHECK: .LBB{{[0-9]+}}_1:
769 ; CHECK: ldrexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+]], [r[[ADDR]]]
770 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
772 ; CHECK-ARM: mov [[LOCARRY:r[0-9]+|lr]], #0
773 ; CHECK-ARM: mov [[HICARRY:r[0-9]+|lr]], #0
774 ; CHECK-ARM: cmp [[OLD1]], r0
775 ; CHECK-ARM: movwhi [[LOCARRY]], #1
776 ; CHECK-ARM: cmp [[OLD2]], r1
777 ; CHECK-ARM: movwgt [[HICARRY]], #1
778 ; CHECK-ARM: moveq [[HICARRY]], [[LOCARRY]]
779 ; CHECK-ARM: cmp [[HICARRY]], #0
780 ; CHECK-ARM: mov [[MINHI:r[0-9]+]], r1
781 ; CHECK-ARM: movne [[MINHI]], [[OLD2]]
782 ; CHECK-ARM: mov [[MINLO:r[0-9]+]], r0
783 ; CHECK-ARM: movne [[MINLO]], [[OLD1]]
784 ; CHECK-ARM: strexd [[STATUS:r[0-9]+]], [[MINLO]], [[MINHI]], [r[[ADDR]]]
785 ; CHECK-THUMB: strexd [[STATUS:r[0-9]+]], {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
786 ; CHECK-NEXT: cmp [[STATUS]], #0
787 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
791 ; CHECK-ARM: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
792 store i64 %old, i64* @var64
796 define i8 @test_atomic_load_umin_i8(i8 zeroext %offset) nounwind {
797 ; CHECK-LABEL: test_atomic_load_umin_i8:
798 %old = atomicrmw umin i8* @var8, i8 %offset monotonic
801 ; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var8
802 ; CHECK: movt [[ADDR]], :upper16:var8
804 ; CHECK: .LBB{{[0-9]+}}_1:
805 ; CHECK: ldrexb r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
806 ; r0 below is a reasonable guess but could change: it certainly comes into the
808 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
809 ; CHECK-NEXT: cmp r[[OLD]], r0
811 ; CHECK: movls r[[NEW]], r[[OLD]]
812 ; CHECK-NEXT: strexb [[STATUS:r[0-9]+]], r[[NEW]], {{.*}}[[ADDR]]
813 ; CHECK-NEXT: cmp [[STATUS]], #0
814 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
818 ; CHECK: mov r0, r[[OLD]]
822 define i16 @test_atomic_load_umin_i16(i16 zeroext %offset) nounwind {
823 ; CHECK-LABEL: test_atomic_load_umin_i16:
824 %old = atomicrmw umin i16* @var16, i16 %offset acquire
827 ; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var16
828 ; CHECK: movt [[ADDR]], :upper16:var16
830 ; CHECK: .LBB{{[0-9]+}}_1:
831 ; CHECK: ldaexh r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
832 ; r0 below is a reasonable guess but could change: it certainly comes into the
834 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
835 ; CHECK-NEXT: cmp r[[OLD]], r0
837 ; CHECK: movls r[[NEW]], r[[OLD]]
838 ; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[NEW]], {{.*}}[[ADDR]]
839 ; CHECK-NEXT: cmp [[STATUS]], #0
840 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
844 ; CHECK: mov r0, r[[OLD]]
848 define i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
849 ; CHECK-LABEL: test_atomic_load_umin_i32:
850 %old = atomicrmw umin i32* @var32, i32 %offset seq_cst
853 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
854 ; CHECK: movt r[[ADDR]], :upper16:var32
856 ; CHECK: .LBB{{[0-9]+}}_1:
857 ; CHECK: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
858 ; r0 below is a reasonable guess but could change: it certainly comes into the
860 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
861 ; CHECK-NEXT: cmp r[[OLD]], r0
863 ; CHECK: movls r[[NEW]], r[[OLD]]
864 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
865 ; CHECK-NEXT: cmp [[STATUS]], #0
866 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
870 ; CHECK: mov r0, r[[OLD]]
874 define void @test_atomic_load_umin_i64(i64 %offset) nounwind {
875 ; CHECK-LABEL: test_atomic_load_umin_i64:
876 %old = atomicrmw umin i64* @var64, i64 %offset seq_cst
879 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
880 ; CHECK: movt r[[ADDR]], :upper16:var64
882 ; CHECK: .LBB{{[0-9]+}}_1:
883 ; CHECK: ldaexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+]], [r[[ADDR]]]
884 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
886 ; CHECK-ARM: mov [[LOCARRY:r[0-9]+|lr]], #0
887 ; CHECK-ARM: mov [[HICARRY:r[0-9]+|lr]], #0
888 ; CHECK-ARM: cmp [[OLD1]], r0
889 ; CHECK-ARM: movwls [[LOCARRY]], #1
890 ; CHECK-ARM: cmp [[OLD2]], r1
891 ; CHECK-ARM: movwls [[HICARRY]], #1
892 ; CHECK-ARM: moveq [[HICARRY]], [[LOCARRY]]
893 ; CHECK-ARM: cmp [[HICARRY]], #0
894 ; CHECK-ARM: mov [[MINHI:r[0-9]+]], r1
895 ; CHECK-ARM: movne [[MINHI]], [[OLD2]]
896 ; CHECK-ARM: mov [[MINLO:r[0-9]+]], r0
897 ; CHECK-ARM: movne [[MINLO]], [[OLD1]]
898 ; CHECK-ARM: stlexd [[STATUS:r[0-9]+]], [[MINLO]], [[MINHI]], [r[[ADDR]]]
899 ; CHECK-THUMB: stlexd [[STATUS:r[0-9]+]], {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
900 ; CHECK-NEXT: cmp [[STATUS]], #0
901 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
905 ; CHECK-ARM: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
906 store i64 %old, i64* @var64
910 define i8 @test_atomic_load_umax_i8(i8 zeroext %offset) nounwind {
911 ; CHECK-LABEL: test_atomic_load_umax_i8:
912 %old = atomicrmw umax i8* @var8, i8 %offset acq_rel
915 ; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var8
916 ; CHECK: movt [[ADDR]], :upper16:var8
918 ; CHECK: .LBB{{[0-9]+}}_1:
919 ; CHECK: ldaexb r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
920 ; r0 below is a reasonable guess but could change: it certainly comes into the
922 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
923 ; CHECK-NEXT: cmp r[[OLD]], r0
925 ; CHECK: movhi r[[NEW]], r[[OLD]]
926 ; CHECK-NEXT: stlexb [[STATUS:r[0-9]+]], r[[NEW]], {{.*}}[[ADDR]]
927 ; CHECK-NEXT: cmp [[STATUS]], #0
928 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
932 ; CHECK: mov r0, r[[OLD]]
936 define i16 @test_atomic_load_umax_i16(i16 zeroext %offset) nounwind {
937 ; CHECK-LABEL: test_atomic_load_umax_i16:
938 %old = atomicrmw umax i16* @var16, i16 %offset monotonic
941 ; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var16
942 ; CHECK: movt [[ADDR]], :upper16:var16
944 ; CHECK: .LBB{{[0-9]+}}_1:
945 ; CHECK: ldrexh r[[OLD:[0-9]+]], {{.*}}[[ADDR]]
946 ; r0 below is a reasonable guess but could change: it certainly comes into the
948 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
949 ; CHECK-NEXT: cmp r[[OLD]], r0
951 ; CHECK: movhi r[[NEW]], r[[OLD]]
952 ; CHECK-NEXT: strexh [[STATUS:r[0-9]+]], r[[NEW]], {{.*}}[[ADDR]]
953 ; CHECK-NEXT: cmp [[STATUS]], #0
954 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
958 ; CHECK: mov r0, r[[OLD]]
962 define i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
963 ; CHECK-LABEL: test_atomic_load_umax_i32:
964 %old = atomicrmw umax i32* @var32, i32 %offset seq_cst
967 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
968 ; CHECK: movt r[[ADDR]], :upper16:var32
970 ; CHECK: .LBB{{[0-9]+}}_1:
971 ; CHECK: ldaex r[[OLD:[0-9]+]], [r[[ADDR]]]
972 ; r0 below is a reasonable guess but could change: it certainly comes into the
974 ; CHECK-NEXT: mov r[[NEW:[0-9]+]], r0
975 ; CHECK-NEXT: cmp r[[OLD]], r0
977 ; CHECK: movhi r[[NEW]], r[[OLD]]
978 ; CHECK-NEXT: stlex [[STATUS:r[0-9]+]], r[[NEW]], [r[[ADDR]]]
979 ; CHECK-NEXT: cmp [[STATUS]], #0
980 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
984 ; CHECK: mov r0, r[[OLD]]
988 define void @test_atomic_load_umax_i64(i64 %offset) nounwind {
989 ; CHECK-LABEL: test_atomic_load_umax_i64:
990 %old = atomicrmw umax i64* @var64, i64 %offset seq_cst
993 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
994 ; CHECK: movt r[[ADDR]], :upper16:var64
996 ; CHECK: .LBB{{[0-9]+}}_1:
997 ; CHECK: ldaexd [[OLD1:r[0-9]+]], [[OLD2:r[0-9]+]], [r[[ADDR]]]
998 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
1000 ; CHECK-ARM: mov [[LOCARRY:r[0-9]+|lr]], #0
1001 ; CHECK-ARM: mov [[HICARRY:r[0-9]+|lr]], #0
1002 ; CHECK-ARM: cmp [[OLD1]], r0
1003 ; CHECK-ARM: movwhi [[LOCARRY]], #1
1004 ; CHECK-ARM: cmp [[OLD2]], r1
1005 ; CHECK-ARM: movwhi [[HICARRY]], #1
1006 ; CHECK-ARM: moveq [[HICARRY]], [[LOCARRY]]
1007 ; CHECK-ARM: cmp [[HICARRY]], #0
1008 ; CHECK-ARM: mov [[MINHI:r[0-9]+]], r1
1009 ; CHECK-ARM: movne [[MINHI]], [[OLD2]]
1010 ; CHECK-ARM: mov [[MINLO:r[0-9]+]], r0
1011 ; CHECK-ARM: movne [[MINLO]], [[OLD1]]
1012 ; CHECK-ARM: stlexd [[STATUS:r[0-9]+]], [[MINLO]], [[MINHI]], [r[[ADDR]]]
1013 ; CHECK-THUMB: stlexd [[STATUS:r[0-9]+]], {{r[0-9]+}}, {{r[0-9]+}}, [r[[ADDR]]]
1014 ; CHECK-NEXT: cmp [[STATUS]], #0
1015 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
1019 ; CHECK-ARM: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
1020 store i64 %old, i64* @var64
1024 define i8 @test_atomic_cmpxchg_i8(i8 zeroext %wanted, i8 zeroext %new) nounwind {
1025 ; CHECK-LABEL: test_atomic_cmpxchg_i8:
1026 %old = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
1029 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1030 ; CHECK: movt r[[ADDR]], :upper16:var8
1032 ; CHECK: .LBB{{[0-9]+}}_1:
1033 ; CHECK: ldaexb r[[OLD:[0-9]+]], [r[[ADDR]]]
1034 ; r0 below is a reasonable guess but could change: it certainly comes into the
1036 ; CHECK-NEXT: cmp r[[OLD]], r0
1037 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
1039 ; As above, r1 is a reasonable guess.
1040 ; CHECK: strexb [[STATUS:r[0-9]+]], r1, {{.*}}[[ADDR]]
1041 ; CHECK-NEXT: cmp [[STATUS]], #0
1042 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
1046 ; CHECK: mov r0, r[[OLD]]
1050 define i16 @test_atomic_cmpxchg_i16(i16 zeroext %wanted, i16 zeroext %new) nounwind {
1051 ; CHECK-LABEL: test_atomic_cmpxchg_i16:
1052 %old = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst seq_cst
1055 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
1056 ; CHECK: movt r[[ADDR]], :upper16:var16
1058 ; CHECK: .LBB{{[0-9]+}}_1:
1059 ; CHECK: ldaexh r[[OLD:[0-9]+]], [r[[ADDR]]]
1060 ; r0 below is a reasonable guess but could change: it certainly comes into the
1062 ; CHECK-NEXT: cmp r[[OLD]], r0
1063 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
1065 ; As above, r1 is a reasonable guess.
1066 ; CHECK: stlexh [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
1067 ; CHECK-NEXT: cmp [[STATUS]], #0
1068 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
1072 ; CHECK: mov r0, r[[OLD]]
; Release(success)/monotonic(failure) i32 cmpxchg: release ordering lives on
; the store side only, so a plain ldrex is paired with stlex. The result is
; stored back so the final str can be matched too.
1076 define void @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
1077 ; CHECK-LABEL: test_atomic_cmpxchg_i32:
1078 %old = cmpxchg i32* @var32, i32 %wanted, i32 %new release monotonic
1079 store i32 %old, i32* @var32
1082 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var32
1083 ; CHECK: movt r[[ADDR]], :upper16:var32
1085 ; CHECK: .LBB{{[0-9]+}}_1:
1086 ; CHECK: ldrex r[[OLD:[0-9]+]], [r[[ADDR]]]
1087 ; r0 below is a reasonable guess but could change: it certainly comes into the
1089 ; CHECK-NEXT: cmp r[[OLD]], r0
1090 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
1092 ; As above, r1 is a reasonable guess.
1093 ; CHECK: stlex [[STATUS:r[0-9]+]], r1, [r[[ADDR]]]
1094 ; CHECK-NEXT: cmp [[STATUS]], #0
1095 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
1099 ; CHECK: str{{(.w)?}} r[[OLD]],
; Monotonic i64 cmpxchg: no ordering, so plain ldrexd/strexd pair. The 64-bit
; equality test is done as two eors folded with a flag-setting orrs rather
; than a cmp chain; the register classes allow lr since the pair ties up many
; GPRs.
1103 define void @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
1104 ; CHECK-LABEL: test_atomic_cmpxchg_i64:
1105 %old = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic monotonic
1108 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
1109 ; CHECK: movt r[[ADDR]], :upper16:var64
1111 ; CHECK: .LBB{{[0-9]+}}_1:
1112 ; CHECK: ldrexd [[OLD1:r[0-9]+|lr]], [[OLD2:r[0-9]+|lr]], [r[[ADDR]]]
1113 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
1115 ; CHECK-DAG: eor{{(\.w)?}} [[MISMATCH_LO:r[0-9]+|lr]], [[OLD1]], r0
1116 ; CHECK-DAG: eor{{(\.w)?}} [[MISMATCH_HI:r[0-9]+|lr]], [[OLD2]], r1
1117 ; CHECK: orrs{{(\.w)?}} {{r[0-9]+}}, [[MISMATCH_LO]], [[MISMATCH_HI]]
1118 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_3
1120 ; As above, r2, r3 is a reasonable guess.
1121 ; CHECK: strexd [[STATUS:r[0-9]+]], r2, r3, [r[[ADDR]]]
1122 ; CHECK-NEXT: cmp [[STATUS]], #0
1123 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
1127 ; CHECK-ARM: strd [[OLD1]], [[OLD2]], [r[[ADDR]]]
1128 store i64 %old, i64* @var64
; Monotonic i8 load needs no barriers or acquire semantics: a plain ldrb.
1132 define i8 @test_atomic_load_monotonic_i8() nounwind {
1133 ; CHECK-LABEL: test_atomic_load_monotonic_i8:
1134 %val = load atomic i8* @var8 monotonic, align 1
1137 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1138 ; CHECK: movt r[[ADDR]], :upper16:var8
1139 ; CHECK: ldrb r0, [r[[ADDR]]]
; Monotonic i8 load through a computed address: the register-offset addressing
; mode should be used directly (low halves of the i64 base/off are r0 and r2).
1146 define i8 @test_atomic_load_monotonic_regoff_i8(i64 %base, i64 %off) nounwind {
1147 ; CHECK-LABEL: test_atomic_load_monotonic_regoff_i8:
1148 %addr_int = add i64 %base, %off
1149 %addr = inttoptr i64 %addr_int to i8*
1151 %val = load atomic i8* %addr monotonic, align 1
1154 ; CHECK: ldrb r0, [r0, r2]
; Acquire i8 load maps to the ARMv8 load-acquire instruction (ldab), no dmb.
1161 define i8 @test_atomic_load_acquire_i8() nounwind {
1162 ; CHECK-LABEL: test_atomic_load_acquire_i8:
1163 %val = load atomic i8* @var8 acquire, align 1
1166 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1169 ; CHECK: movt r[[ADDR]], :upper16:var8
1172 ; CHECK: ldab r0, [r[[ADDR]]]
; Seq_cst i8 load also lowers to ldab: load-acquire is sufficient on ARMv8.
1178 define i8 @test_atomic_load_seq_cst_i8() nounwind {
1179 ; CHECK-LABEL: test_atomic_load_seq_cst_i8:
1180 %val = load atomic i8* @var8 seq_cst, align 1
1183 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1186 ; CHECK: movt r[[ADDR]], :upper16:var8
1189 ; CHECK: ldab r0, [r[[ADDR]]]
; Monotonic i16 load: plain ldrh, no ordering instructions.
1195 define i16 @test_atomic_load_monotonic_i16() nounwind {
1196 ; CHECK-LABEL: test_atomic_load_monotonic_i16:
1197 %val = load atomic i16* @var16 monotonic, align 2
1200 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
1203 ; CHECK: movt r[[ADDR]], :upper16:var16
1206 ; CHECK: ldrh r0, [r[[ADDR]]]
; Monotonic i32 load through a computed address: register-offset ldr.
1213 define i32 @test_atomic_load_monotonic_regoff_i32(i64 %base, i64 %off) nounwind {
1214 ; CHECK-LABEL: test_atomic_load_monotonic_regoff_i32:
1215 %addr_int = add i64 %base, %off
1216 %addr = inttoptr i64 %addr_int to i32*
1218 %val = load atomic i32* %addr monotonic, align 4
1221 ; CHECK: ldr r0, [r0, r2]
; Seq_cst i64 load: a single-copy-atomic 64-bit read on 32-bit ARM needs the
; exclusive-pair form, here with acquire semantics (ldaexd).
1228 define i64 @test_atomic_load_seq_cst_i64() nounwind {
1229 ; CHECK-LABEL: test_atomic_load_seq_cst_i64:
1230 %val = load atomic i64* @var64 seq_cst, align 8
1233 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var64
1236 ; CHECK: movt r[[ADDR]], :upper16:var64
1239 ; CHECK: ldaexd r0, r1, [r[[ADDR]]]
; Monotonic i8 store: plain strb, no ordering instructions.
1245 define void @test_atomic_store_monotonic_i8(i8 %val) nounwind {
1246 ; CHECK-LABEL: test_atomic_store_monotonic_i8:
1247 store atomic i8 %val, i8* @var8 monotonic, align 1
1248 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1249 ; CHECK: movt r[[ADDR]], :upper16:var8
1250 ; CHECK: strb r0, [r[[ADDR]]]
; Monotonic i8 store via computed address. %val is the fifth argument slot, so
; it arrives on the stack and is reloaded from [sp] before the strb.
1255 define void @test_atomic_store_monotonic_regoff_i8(i64 %base, i64 %off, i8 %val) nounwind {
1256 ; CHECK-LABEL: test_atomic_store_monotonic_regoff_i8:
1258 %addr_int = add i64 %base, %off
1259 %addr = inttoptr i64 %addr_int to i8*
1261 store atomic i8 %val, i8* %addr monotonic, align 1
1262 ; CHECK: ldrb{{(\.w)?}} [[VAL:r[0-9]+]], [sp]
1263 ; CHECK: strb [[VAL]], [r0, r2]
; Release i8 store maps to the ARMv8 store-release instruction (stlb), no dmb.
1268 define void @test_atomic_store_release_i8(i8 %val) nounwind {
1269 ; CHECK-LABEL: test_atomic_store_release_i8:
1270 store atomic i8 %val, i8* @var8 release, align 1
1273 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1276 ; CHECK: movt r[[ADDR]], :upper16:var8
1279 ; CHECK: stlb r0, [r[[ADDR]]]
; Seq_cst i8 store also lowers to stlb: store-release suffices on ARMv8.
1285 define void @test_atomic_store_seq_cst_i8(i8 %val) nounwind {
1286 ; CHECK-LABEL: test_atomic_store_seq_cst_i8:
1287 store atomic i8 %val, i8* @var8 seq_cst, align 1
1290 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var8
1293 ; CHECK: movt r[[ADDR]], :upper16:var8
1296 ; CHECK: stlb r0, [r[[ADDR]]]
; Monotonic i16 store: plain strh, no ordering instructions.
1302 define void @test_atomic_store_monotonic_i16(i16 %val) nounwind {
1303 ; CHECK-LABEL: test_atomic_store_monotonic_i16:
1304 store atomic i16 %val, i16* @var16 monotonic, align 2
1307 ; CHECK: movw r[[ADDR:[0-9]+]], :lower16:var16
1310 ; CHECK: movt r[[ADDR]], :upper16:var16
1313 ; CHECK: strh r0, [r[[ADDR]]]
; Monotonic i32 store via computed address; %val spills to the stack (fifth
; argument slot) and is reloaded before the register-offset str.
1319 define void @test_atomic_store_monotonic_regoff_i32(i64 %base, i64 %off, i32 %val) nounwind {
1320 ; CHECK-LABEL: test_atomic_store_monotonic_regoff_i32:
1322 %addr_int = add i64 %base, %off
1323 %addr = inttoptr i64 %addr_int to i32*
1325 store atomic i32 %val, i32* %addr monotonic, align 4
1328 ; CHECK: ldr [[VAL:r[0-9]+]], [sp]
1331 ; CHECK: str [[VAL]], [r0, r2]
; Release i64 store: a 64-bit atomic store on 32-bit ARM needs an exclusive
; loop; the release ordering is carried by stlexd, retried until the
; status register reads back 0.
1338 define void @test_atomic_store_release_i64(i64 %val) nounwind {
1339 ; CHECK-LABEL: test_atomic_store_release_i64:
1340 store atomic i64 %val, i64* @var64 release, align 8
1343 ; CHECK: movw [[ADDR:r[0-9]+|lr]], :lower16:var64
1344 ; CHECK: movt [[ADDR]], :upper16:var64
1346 ; CHECK: .LBB{{[0-9]+}}_1:
1347 ; r0, r1 below is a reasonable guess but could change: it certainly comes into the
1349 ; CHECK: stlexd [[STATUS:r[0-9]+]], r0, r1, {{.*}}[[ADDR]]
1350 ; CHECK-NEXT: cmp [[STATUS]], #0
1351 ; CHECK-NEXT: bne .LBB{{[0-9]+}}_1
1358 define i32 @not.barriers(i32* %var, i1 %cond) {
1359 ; CHECK-LABEL: not.barriers:
1360 br i1 %cond, label %atomic_ver, label %simple_ver
1362 %oldval = load i32* %var
1363 %newval = add nsw i32 %oldval, -1
1364 store i32 %newval, i32* %var
1368 %val = atomicrmw add i32* %var, i32 -1 monotonic
1374 ; The key point here is that the second dmb isn't immediately followed by the
1375 ; simple_ver basic block, which LLVM attempted to do when DMB had been marked
1376 ; with isBarrier. For now, look for something that looks like "somewhere".
1377 ; CHECK-NEXT: {{mov|bx}}
1379 %combined = phi i32 [ %val, %atomic_ver ], [ %newval, %simple_ver]