; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; atomicrmw xchg: store clean shadow, return clean shadow

define i32 @AtomicRmwXchg(i32* %p, i32 %x) sanitize_memory {
entry:
  %0 = atomicrmw xchg i32* %p, i32 %x seq_cst
  ret i32 %0
}

; CHECK: @AtomicRmwXchg
; CHECK: store i32 0,
; CHECK: atomicrmw xchg {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomicrmw max: exactly the same as above

define i32 @AtomicRmwMax(i32* %p, i32 %x) sanitize_memory {
entry:
  %0 = atomicrmw max i32* %p, i32 %x seq_cst
  ret i32 %0
}

; CHECK: @AtomicRmwMax
; CHECK: store i32 0,
; CHECK: atomicrmw max {{.*}} seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; cmpxchg: the same as above, but also check %a shadow

define i32 @Cmpxchg(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
  %pair = cmpxchg i32* %p, i32 %a, i32 %b seq_cst seq_cst
  %0 = extractvalue { i32, i1 } %pair, 0
  ret i32 %0
}

; CHECK: @Cmpxchg
; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} seq_cst seq_cst
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; relaxed cmpxchg: bump up to "release monotonic"

define i32 @CmpxchgMonotonic(i32* %p, i32 %a, i32 %b) sanitize_memory {
entry:
  %pair = cmpxchg i32* %p, i32 %a, i32 %b monotonic monotonic
  %0 = extractvalue { i32, i1 } %pair, 0
  ret i32 %0
}

; CHECK: @CmpxchgMonotonic
; CHECK: store { i32, i1 } zeroinitializer,
; CHECK: icmp
; CHECK: br
; CHECK: @__msan_warning
; CHECK: cmpxchg {{.*}} release monotonic
; CHECK: store i32 0, {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load: preserve alignment, load shadow value after app value

define i32 @AtomicLoad(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p seq_cst, align 16
  ret i32 %0
}

; CHECK: @AtomicLoad
; CHECK: load atomic i32* {{.*}} seq_cst, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load: preserve alignment, load shadow value after app value

define i32 @AtomicLoadAcquire(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p acquire, align 16
  ret i32 %0
}

; CHECK: @AtomicLoadAcquire
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load monotonic: bump up to load acquire

define i32 @AtomicLoadMonotonic(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p monotonic, align 16
  ret i32 %0
}

; CHECK: @AtomicLoadMonotonic
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic load unordered: bump up to load acquire

define i32 @AtomicLoadUnordered(i32* %p) sanitize_memory {
entry:
  %0 = load atomic i32* %p unordered, align 16
  ret i32 %0
}

; CHECK: @AtomicLoadUnordered
; CHECK: load atomic i32* {{.*}} acquire, align 16
; CHECK: [[SHADOW:%[01-9a-z_]+]] = load i32* {{.*}}, align 16
; CHECK: store i32 {{.*}}[[SHADOW]], {{.*}} @__msan_retval_tls
; CHECK: ret i32
; atomic store: preserve alignment, store clean shadow value before app value

define void @AtomicStore(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p seq_cst, align 16
  ret void
}

; CHECK: @AtomicStore
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p seq_cst, align 16
; CHECK: ret void
; atomic store: preserve alignment, store clean shadow value before app value

define void @AtomicStoreRelease(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p release, align 16
  ret void
}

; CHECK: @AtomicStoreRelease
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
; atomic store monotonic: bumped up to store release

define void @AtomicStoreMonotonic(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p monotonic, align 16
  ret void
}

; CHECK: @AtomicStoreMonotonic
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void
; atomic store unordered: bumped up to store release

define void @AtomicStoreUnordered(i32* %p, i32 %x) sanitize_memory {
entry:
  store atomic i32 %x, i32* %p unordered, align 16
  ret void
}

; CHECK: @AtomicStoreUnordered
; CHECK-NOT: @__msan_param_tls
; CHECK: store i32 0, i32* {{.*}}, align 16
; CHECK: store atomic i32 %x, i32* %p release, align 16
; CHECK: ret void