/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */

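/*
 * Illustrative example (a sketch, not part of this header; "struct obj"
 * with an atomic_t "ready" flag is made up): a producer publishes data
 * with RELEASE so its earlier stores are visible, and a consumer reads
 * the flag with ACQUIRE before touching the data:
 *
 *	// producer				// consumer
 *	obj->data = compute();			if (atomic_read_acquire(&obj->ready))
 *	atomic_set_release(&obj->ready, 1);		use(obj->data);
 *
 * Using the _relaxed variants here would order neither side, so the
 * consumer could observe ready == 1 but still see stale ->data.
 */
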
#ifndef atomic_read_acquire
#define atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

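/*
 * For example, on an architecture that provides only
 * atomic_add_return_relaxed(), the definitions below make
 * atomic_add_return_acquire(i, v) expand to roughly:
 *
 *	({
 *		int __ret = atomic_add_return_relaxed(i, v);
 *		smp_mb__after_atomic();
 *		__ret;
 *	})
 *
 * i.e. the relaxed primitive followed by a full barrier, which is a
 * correct (if possibly stronger than strictly required) ACQUIRE.
 */
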
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed	atomic_add_return
#define atomic_add_return_acquire	atomic_add_return
#define atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

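/*
 * The block above (and its repetitions below) resolves one of two ways.
 * An architecture whose atomic_add_return is already fully ordered
 * (e.g. x86) defines no _relaxed variant, so all four names end up
 * aliasing the one fully ordered operation. An architecture that does
 * define atomic_add_return_relaxed gets any missing _acquire, _release
 * and fully ordered forms generated via the __atomic_op_*() helpers,
 * while remaining free to override each one individually.
 */
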
/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return
#define atomic_sub_return_acquire	atomic_sub_return
#define atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...)					\
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...)					\
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define atomic_sub_return(...)						\
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed		atomic_xchg
#define atomic_xchg_acquire		atomic_xchg
#define atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...)					\
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define atomic_xchg_release(...)					\
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define atomic_xchg(...)						\
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed		atomic_cmpxchg
#define atomic_cmpxchg_acquire		atomic_cmpxchg
#define atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...)					\
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...)						\
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */

#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed	atomic64_add_return
#define atomic64_add_return_acquire	atomic64_add_return
#define atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...)				\
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...)				\
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define atomic64_add_return(...)					\
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return
#define atomic64_sub_return_acquire	atomic64_sub_return
#define atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...)				\
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...)				\
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define atomic64_sub_return(...)					\
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed		atomic64_xchg
#define atomic64_xchg_acquire		atomic64_xchg
#define atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...)					\
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...)					\
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define atomic64_xchg(...)						\
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...)					\
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...)						\
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed			cmpxchg
#define cmpxchg_acquire			cmpxchg
#define cmpxchg_release			cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...)						\
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define cmpxchg_release(...)						\
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define cmpxchg(...)							\
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed		cmpxchg64
#define cmpxchg64_acquire		cmpxchg64
#define cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...)						\
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define cmpxchg64_release(...)						\
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define cmpxchg64(...)							\
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed			xchg
#define xchg_acquire			xchg
#define xchg_release			xchg
#else /* xchg_relaxed */
#ifndef xchg_acquire
#define xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif
#ifndef xchg_release
#define xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif
#ifndef xchg
#define xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

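/*
 * Illustrative example (a sketch, not part of this header; "ctr" is a
 * made-up atomic_t): a counter using the sentinel value -1 to mean
 * "disabled", which must never be moved off that sentinel:
 *
 *	if (!atomic_add_unless(&ctr, 1, -1))
 *		return;		// ctr was -1 (disabled), left unchanged
 */
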
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif

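/*
 * Illustrative example (a sketch; "obj", "tree" and "key" are made up):
 * the classic RCU lookup pattern, where a refcount that has already
 * reached zero means the object is being freed and must not be
 * resurrected:
 *
 *	rcu_read_lock();
 *	obj = radix_tree_lookup(&tree, key);
 *	if (obj && !atomic_inc_not_zero(&obj->refcnt))
 *		obj = NULL;	// lost the race with the final put
 *	rcu_read_unlock();
 */
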
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif

static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}

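/*
 * The two __deprecated wrappers above exist only for old callers; new
 * code should use atomic_andnot() and atomic_or() directly, e.g. (with
 * a hypothetical MY_FLAG bit in a made-up atomic_t "flags" field):
 *
 *	atomic_andnot(MY_FLAG, &obj->flags);	// was atomic_clear_mask()
 *	atomic_or(MY_FLAG, &obj->flags);	// was atomic_set_mask()
 */
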
/**
 * atomic_inc_not_zero_hint - increment if not zero
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of the probable
 * value of the atomic. This helps the processor avoid reading the
 * memory before doing the atomic read/modify/write cycle, lowering the
 * number of bus transactions on some arches.
 *
 * Returns: 0 if increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	/* sanity test, should be removed by compiler if hint is a constant */
	if (!hint)
		return atomic_inc_not_zero(v);

	do {
		/* on failure, cmpxchg returns the current value of *v */
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;
		c = val;
	} while (c);

	return 0;
}
#endif

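/*
 * Illustrative example (a sketch, not part of this header): if callers
 * know most objects they touch hold exactly one reference, passing 1
 * lets the first cmpxchg be attempted without a prior load:
 *
 *	if (!atomic_inc_not_zero_hint(&obj->refcnt, 1))
 *		return NULL;	// refcount had already hit zero
 */
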
#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
	int v, v1;
	for (v = 0; v >= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v + 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
	int v, v1;
	for (v = 0; v <= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v - 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

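/*
 * Both helpers above use the usual cmpxchg retry loop, seeded with the
 * optimistic guess 0: attempt the transition, and on failure reuse the
 * value returned by the failed cmpxchg as the next expected value
 * rather than issuing a separate atomic_read().
 */
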
/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;
	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
#endif

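/*
 * Illustrative example (a sketch, not part of this header; "sem" is a
 * made-up structure): a semaphore-style trylock, where a negative
 * return value means no resource was available and the count was left
 * untouched:
 *
 *	if (atomic_dec_if_positive(&sem->count) < 0)
 *		return -EAGAIN;	// nothing available
 */
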
#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif

#endif /* _LINUX_ATOMIC_H */