; RUN: opt < %s -tsan -S | FileCheck %s
; Check that atomic memory operations are converted to calls into ThreadSanitizer runtime.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; i8 atomic load/store tests: each ordering must lower to a call into the
; TSan runtime with the matching memory-order constant
; (1=relaxed/unordered, 4=acquire, 8=release, 32=seq_cst).
define i8 @atomic8_load_unordered(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8* %a unordered, align 1
  ret i8 %0
}
; CHECK: atomic8_load_unordered
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 1)

define i8 @atomic8_load_monotonic(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8* %a monotonic, align 1
  ret i8 %0
}
; CHECK: atomic8_load_monotonic
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 1)

define i8 @atomic8_load_acquire(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8* %a acquire, align 1
  ret i8 %0
}
; CHECK: atomic8_load_acquire
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 4)

define i8 @atomic8_load_seq_cst(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8* %a seq_cst, align 1
  ret i8 %0
}
; CHECK: atomic8_load_seq_cst
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 32)

define void @atomic8_store_unordered(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a unordered, align 1
  ret void
}
; CHECK: atomic8_store_unordered
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 1)

define void @atomic8_store_monotonic(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a monotonic, align 1
  ret void
}
; CHECK: atomic8_store_monotonic
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 1)

define void @atomic8_store_release(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a release, align 1
  ret void
}
; CHECK: atomic8_store_release
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 8)

define void @atomic8_store_seq_cst(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a seq_cst, align 1
  ret void
}
; CHECK: atomic8_store_seq_cst
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 32)
; i16 atomic load/store tests: verify TSan lowering to __tsan_atomic16_*
; with the memory-order constant matching each IR ordering.
define i16 @atomic16_load_unordered(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16* %a unordered, align 2
  ret i16 %0
}
; CHECK: atomic16_load_unordered
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 1)

define i16 @atomic16_load_monotonic(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16* %a monotonic, align 2
  ret i16 %0
}
; CHECK: atomic16_load_monotonic
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 1)

define i16 @atomic16_load_acquire(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16* %a acquire, align 2
  ret i16 %0
}
; CHECK: atomic16_load_acquire
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 4)

define i16 @atomic16_load_seq_cst(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16* %a seq_cst, align 2
  ret i16 %0
}
; CHECK: atomic16_load_seq_cst
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 32)

define void @atomic16_store_unordered(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a unordered, align 2
  ret void
}
; CHECK: atomic16_store_unordered
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 1)

define void @atomic16_store_monotonic(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a monotonic, align 2
  ret void
}
; CHECK: atomic16_store_monotonic
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 1)

define void @atomic16_store_release(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a release, align 2
  ret void
}
; CHECK: atomic16_store_release
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 8)

define void @atomic16_store_seq_cst(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a seq_cst, align 2
  ret void
}
; CHECK: atomic16_store_seq_cst
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 32)
; i32 atomic load/store tests: verify TSan lowering to __tsan_atomic32_*
; with the memory-order constant matching each IR ordering.
define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32* %a unordered, align 4
  ret i32 %0
}
; CHECK: atomic32_load_unordered
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 1)

define i32 @atomic32_load_monotonic(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32* %a monotonic, align 4
  ret i32 %0
}
; CHECK: atomic32_load_monotonic
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 1)

define i32 @atomic32_load_acquire(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32* %a acquire, align 4
  ret i32 %0
}
; CHECK: atomic32_load_acquire
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 4)

define i32 @atomic32_load_seq_cst(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32* %a seq_cst, align 4
  ret i32 %0
}
; CHECK: atomic32_load_seq_cst
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 32)

define void @atomic32_store_unordered(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a unordered, align 4
  ret void
}
; CHECK: atomic32_store_unordered
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 1)

define void @atomic32_store_monotonic(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a monotonic, align 4
  ret void
}
; CHECK: atomic32_store_monotonic
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 1)

define void @atomic32_store_release(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a release, align 4
  ret void
}
; CHECK: atomic32_store_release
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 8)

define void @atomic32_store_seq_cst(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a seq_cst, align 4
  ret void
}
; CHECK: atomic32_store_seq_cst
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 32)
; i64 atomic load/store tests: verify TSan lowering to __tsan_atomic64_*
; with the memory-order constant matching each IR ordering.
define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64* %a unordered, align 8
  ret i64 %0
}
; CHECK: atomic64_load_unordered
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 1)

define i64 @atomic64_load_monotonic(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64* %a monotonic, align 8
  ret i64 %0
}
; CHECK: atomic64_load_monotonic
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 1)

define i64 @atomic64_load_acquire(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64* %a acquire, align 8
  ret i64 %0
}
; CHECK: atomic64_load_acquire
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 4)

define i64 @atomic64_load_seq_cst(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64* %a seq_cst, align 8
  ret i64 %0
}
; CHECK: atomic64_load_seq_cst
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 32)

define void @atomic64_store_unordered(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a unordered, align 8
  ret void
}
; CHECK: atomic64_store_unordered
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 1)

define void @atomic64_store_monotonic(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a monotonic, align 8
  ret void
}
; CHECK: atomic64_store_monotonic
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 1)

define void @atomic64_store_release(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a release, align 8
  ret void
}
; CHECK: atomic64_store_release
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 8)

define void @atomic64_store_seq_cst(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a seq_cst, align 8
  ret void
}
; CHECK: atomic64_store_seq_cst
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 32)
; i128 atomic load/store tests: verify TSan lowering to __tsan_atomic128_*
; with the memory-order constant matching each IR ordering.
define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128* %a unordered, align 16
  ret i128 %0
}
; CHECK: atomic128_load_unordered
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 1)

define i128 @atomic128_load_monotonic(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128* %a monotonic, align 16
  ret i128 %0
}
; CHECK: atomic128_load_monotonic
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 1)

define i128 @atomic128_load_acquire(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128* %a acquire, align 16
  ret i128 %0
}
; CHECK: atomic128_load_acquire
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 4)

define i128 @atomic128_load_seq_cst(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128* %a seq_cst, align 16
  ret i128 %0
}
; CHECK: atomic128_load_seq_cst
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 32)

define void @atomic128_store_unordered(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a unordered, align 16
  ret void
}
; CHECK: atomic128_store_unordered
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 1)

define void @atomic128_store_monotonic(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a monotonic, align 16
  ret void
}
; CHECK: atomic128_store_monotonic
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 1)

define void @atomic128_store_release(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a release, align 16
  ret void
}
; CHECK: atomic128_store_release
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 8)

define void @atomic128_store_seq_cst(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a seq_cst, align 16
  ret void
}
; CHECK: atomic128_store_seq_cst
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 32)