; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 | FileCheck %s --check-prefix X32
; XFAIL: *

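; A 32-bit target has no native 64-bit atomic read-modify-write instructions,
; so every operation below is expected to be expanded into a lock cmpxchg8b
; loop at -O0. The checks only look for the key mnemonics of that expansion.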
@sc64 = external global i64

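; 64-bit add is split across the two halves: addl for the low word and adcl to
; carry into the high word, with the result published via lock cmpxchg8b.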
define void @atomic_fetch_add64() nounwind {
; X32:   atomic_fetch_add64
entry:
  %t1 = atomicrmw add  i64* @sc64, i64 1 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw add  i64* @sc64, i64 3 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw add  i64* @sc64, i64 5 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  %t4 = atomicrmw add  i64* @sc64, i64 %t3 acquire
; X32:       addl
; X32:       adcl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

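; 64-bit subtraction uses the borrow analog: subl on the low word, sbbl on the
; high word, again committed with lock cmpxchg8b.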
define void @atomic_fetch_sub64() nounwind {
; X32:   atomic_fetch_sub64
  %t1 = atomicrmw sub  i64* @sc64, i64 1 acquire
; X32:       subl
; X32:       sbbl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw sub  i64* @sc64, i64 3 acquire
; X32:       subl
; X32:       sbbl
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw sub  i64* @sc64, i64 5 acquire
; X32:       subl
; X32:       sbbl
; X32:       lock
; X32:       cmpxchg8b
  %t4 = atomicrmw sub  i64* @sc64, i64 %t3 acquire
; X32:       subl
; X32:       sbbl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

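; Bitwise ops have no cross-word carry, so each half is combined independently:
; two andl instructions feeding the cmpxchg8b loop.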
define void @atomic_fetch_and64() nounwind {
; X32:   atomic_fetch_and64
  %t1 = atomicrmw and  i64* @sc64, i64 3 acquire
; X32:       andl
; X32:       andl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw and  i64* @sc64, i64 5 acquire
; X32:       andl
; X32:       andl
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw and  i64* @sc64, i64 %t2 acquire
; X32:       andl
; X32:       andl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

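; Same structure as the and case, with orl on each half.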
define void @atomic_fetch_or64() nounwind {
; X32:   atomic_fetch_or64
  %t1 = atomicrmw or   i64* @sc64, i64 3 acquire
; X32:       orl
; X32:       orl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw or   i64* @sc64, i64 5 acquire
; X32:       orl
; X32:       orl
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw or   i64* @sc64, i64 %t2 acquire
; X32:       orl
; X32:       orl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

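; Same structure again, with xorl on each half.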
define void @atomic_fetch_xor64() nounwind {
; X32:   atomic_fetch_xor64
  %t1 = atomicrmw xor  i64* @sc64, i64 3 acquire
; X32:       xorl
; X32:       xorl
; X32:       lock
; X32:       cmpxchg8b
  %t2 = atomicrmw xor  i64* @sc64, i64 5 acquire
; X32:       xorl
; X32:       xorl
; X32:       lock
; X32:       cmpxchg8b
  %t3 = atomicrmw xor  i64* @sc64, i64 %t2 acquire
; X32:       xorl
; X32:       xorl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

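; nand is expanded as and on both halves followed by notl on each half before
; the compare-and-swap.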
define void @atomic_fetch_nand64(i64 %x) nounwind {
; X32:   atomic_fetch_nand64
  %t1 = atomicrmw nand i64* @sc64, i64 %x acquire
; X32:       andl
; X32:       andl
; X32:       notl
; X32:       notl
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

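; Signed 64-bit max needs a two-word comparison (cmpl on both halves) and
; conditional moves to select the larger value inside the cmpxchg8b loop.
; The min/umax/umin variants below follow the same pattern.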
define void @atomic_fetch_max64(i64 %x) nounwind {
  %t1 = atomicrmw max  i64* @sc64, i64 %x acquire
; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

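; Signed minimum: same compare/cmov/cmpxchg8b structure as max above.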
define void @atomic_fetch_min64(i64 %x) nounwind {
  %t1 = atomicrmw min  i64* @sc64, i64 %x acquire
; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

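; Unsigned maximum variant of the same expansion.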
define void @atomic_fetch_umax64(i64 %x) nounwind {
  %t1 = atomicrmw umax i64* @sc64, i64 %x acquire
; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

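; Unsigned minimum variant of the same expansion.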
define void @atomic_fetch_umin64(i64 %x) nounwind {
  %t1 = atomicrmw umin i64* @sc64, i64 %x acquire
; X32:       cmpl
; X32:       cmpl
; X32:       cmov
; X32:       cmov
; X32:       cmov
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

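; A 64-bit cmpxchg maps directly onto a single lock cmpxchg8b, with no
; surrounding loop needed.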
define void @atomic_fetch_cmpxchg64() nounwind {
  %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

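; A 64-bit atomic store cannot be done as a pair of 32-bit moves; at -O0 it is
; expected to go through the lock cmpxchg8b loop as well.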
define void @atomic_fetch_store64(i64 %x) nounwind {
  store atomic i64 %x, i64* @sc64 release, align 8
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}

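; xchg has no 8-byte form on a 32-bit target, so the swap is also implemented
; with a lock cmpxchg8b loop.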
define void @atomic_fetch_swap64(i64 %x) nounwind {
  %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire
; X32:       lock
; X32:       cmpxchg8b
  ret void
; X32:       ret
}