; RUN: llc < %s -mtriple=armv7-apple-ios | FileCheck %s
; 64-bit atomicrmw add must lower to an ldrexd/strexd retry loop on ARMv7.
define i64 @test1(i64* %ptr, i64 %val) {
; CHECK-LABEL: test1:
; CHECK: ldrexd r2, r3
; CHECK: strexd {{[a-z0-9]+}}, r0, r1
  %r = atomicrmw add i64* %ptr, i64 %val seq_cst
  ret i64 %r
}
; 64-bit atomicrmw sub must lower to an ldrexd/strexd retry loop.
define i64 @test2(i64* %ptr, i64 %val) {
; CHECK-LABEL: test2:
; CHECK: ldrexd r2, r3
; CHECK: strexd {{[a-z0-9]+}}, r0, r1
  %r = atomicrmw sub i64* %ptr, i64 %val seq_cst
  ret i64 %r
}
; 64-bit atomicrmw and must lower to an ldrexd/strexd retry loop.
define i64 @test3(i64* %ptr, i64 %val) {
; CHECK-LABEL: test3:
; CHECK: ldrexd r2, r3
; CHECK: strexd {{[a-z0-9]+}}, r0, r1
  %r = atomicrmw and i64* %ptr, i64 %val seq_cst
  ret i64 %r
}
; 64-bit atomicrmw or must lower to an ldrexd/strexd retry loop.
define i64 @test4(i64* %ptr, i64 %val) {
; CHECK-LABEL: test4:
; CHECK: ldrexd r2, r3
; CHECK: strexd {{[a-z0-9]+}}, r0, r1
  %r = atomicrmw or i64* %ptr, i64 %val seq_cst
  ret i64 %r
}
; 64-bit atomicrmw xor must lower to an ldrexd/strexd retry loop.
define i64 @test5(i64* %ptr, i64 %val) {
; CHECK-LABEL: test5:
; CHECK: ldrexd r2, r3
; CHECK: strexd {{[a-z0-9]+}}, r0, r1
  %r = atomicrmw xor i64* %ptr, i64 %val seq_cst
  ret i64 %r
}
; 64-bit atomicrmw xchg must lower to an ldrexd/strexd retry loop.
define i64 @test6(i64* %ptr, i64 %val) {
; CHECK-LABEL: test6:
; CHECK: ldrexd r2, r3
; CHECK: strexd {{[a-z0-9]+}}, r0, r1
  %r = atomicrmw xchg i64* %ptr, i64 %val seq_cst
  ret i64 %r
}
; 64-bit cmpxchg must lower to an ldrexd/strexd retry loop.
; (Legacy single-ordering cmpxchg syntax, consistent with the typed-pointer
; IR used throughout this file.)
define i64 @test7(i64* %ptr, i64 %val1, i64 %val2) {
; CHECK-LABEL: test7:
; CHECK: ldrexd r2, r3
; CHECK: strexd {{[a-z0-9]+}}, r0, r1
  %r = cmpxchg i64* %ptr, i64 %val1, i64 %val2 seq_cst
  ret i64 %r
}
; Compiles down to cmpxchg
; FIXME: Should compile to a single ldrexd
define i64 @test8(i64* %ptr) {
; CHECK-LABEL: test8:
; CHECK: ldrexd r2, r3
; CHECK: strexd {{[a-z0-9]+}}, r0, r1
  %r = load atomic i64* %ptr seq_cst, align 8
  ret i64 %r
}
; Compiles down to atomicrmw xchg; there really isn't any more efficient
; alternative. (NOTE(review): this comment appears truncated by the
; extraction — confirm original wording.)
define void @test9(i64* %ptr, i64 %val) {
; CHECK-LABEL: test9:
; CHECK: ldrexd r2, r3
; CHECK: strexd {{[a-z0-9]+}}, r0, r1
  store atomic i64 %val, i64* %ptr seq_cst, align 8
  ret void
}