1 /* Atomic operations usable in machine independent code */
2 #ifndef _LINUX_ATOMIC_H
3 #define _LINUX_ATOMIC_H
4 #include <asm/atomic.h>
5 #include <asm/barrier.h>
/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */
/* Acquire load of an atomic_t; architectures may provide their own. */
#ifndef atomic_read_acquire
#define atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

/* Release store to an atomic_t; architectures may provide their own. */
#ifndef atomic_set_release
#define atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif
/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 */

/* Relaxed op followed by a full barrier: ACQUIRE (and stronger) ordering. */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})

/* Full barrier before the relaxed op: RELEASE (and stronger) ordering. */
#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})

/* Barriers on both sides of the relaxed op: fully ordered. */
#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})
/*
 * atomic_add_return_relaxed: if the architecture provides only the
 * fully-ordered atomic_add_return, reuse it for every ordering;
 * otherwise build the stronger variants from the relaxed one.
 */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed	atomic_add_return
#define atomic_add_return_acquire	atomic_add_return
#define atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */
/* atomic_inc_return_relaxed: derive ordered variants as for atomic_add_return. */
#ifndef atomic_inc_return_relaxed
#define atomic_inc_return_relaxed	atomic_inc_return
#define atomic_inc_return_acquire	atomic_inc_return
#define atomic_inc_return_release	atomic_inc_return

#else /* atomic_inc_return_relaxed */

#ifndef atomic_inc_return_acquire
#define atomic_inc_return_acquire(...)					\
	__atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return_release
#define atomic_inc_return_release(...)					\
	__atomic_op_release(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return
#define atomic_inc_return(...)						\
	__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
#endif /* atomic_inc_return_relaxed */
/* atomic_sub_return_relaxed: derive ordered variants as for atomic_add_return. */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return
#define atomic_sub_return_acquire	atomic_sub_return
#define atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...)					\
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...)					\
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define atomic_sub_return(...)						\
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */
/* atomic_dec_return_relaxed: derive ordered variants as for atomic_add_return. */
#ifndef atomic_dec_return_relaxed
#define atomic_dec_return_relaxed	atomic_dec_return
#define atomic_dec_return_acquire	atomic_dec_return
#define atomic_dec_return_release	atomic_dec_return

#else /* atomic_dec_return_relaxed */

#ifndef atomic_dec_return_acquire
#define atomic_dec_return_acquire(...)					\
	__atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return_release
#define atomic_dec_return_release(...)					\
	__atomic_op_release(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return
#define atomic_dec_return(...)						\
	__atomic_op_fence(atomic_dec_return, __VA_ARGS__)
#endif
#endif /* atomic_dec_return_relaxed */
/* atomic_xchg_relaxed: derive ordered variants as for atomic_add_return. */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed		atomic_xchg
#define atomic_xchg_acquire		atomic_xchg
#define atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...)					\
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define atomic_xchg_release(...)					\
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define atomic_xchg(...)						\
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */
/* atomic_cmpxchg_relaxed: derive ordered variants as for atomic_add_return. */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed		atomic_cmpxchg
#define atomic_cmpxchg_acquire		atomic_cmpxchg
#define atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...)					\
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...)						\
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */
/* 64-bit counterparts of atomic_read_acquire()/atomic_set_release(). */
#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif
/* atomic64_add_return_relaxed: derive ordered variants as for atomic_add_return. */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed	atomic64_add_return
#define atomic64_add_return_acquire	atomic64_add_return
#define atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...)				\
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...)				\
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define atomic64_add_return(...)					\
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */
/* atomic64_inc_return_relaxed: derive ordered variants as for atomic_add_return. */
#ifndef atomic64_inc_return_relaxed
#define atomic64_inc_return_relaxed	atomic64_inc_return
#define atomic64_inc_return_acquire	atomic64_inc_return
#define atomic64_inc_return_release	atomic64_inc_return

#else /* atomic64_inc_return_relaxed */

#ifndef atomic64_inc_return_acquire
#define atomic64_inc_return_acquire(...)				\
	__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return_release
#define atomic64_inc_return_release(...)				\
	__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return
#define atomic64_inc_return(...)					\
	__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */
/* atomic64_sub_return_relaxed: derive ordered variants as for atomic_add_return. */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return
#define atomic64_sub_return_acquire	atomic64_sub_return
#define atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...)				\
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...)				\
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define atomic64_sub_return(...)					\
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */
/* atomic64_dec_return_relaxed: derive ordered variants as for atomic_add_return. */
#ifndef atomic64_dec_return_relaxed
#define atomic64_dec_return_relaxed	atomic64_dec_return
#define atomic64_dec_return_acquire	atomic64_dec_return
#define atomic64_dec_return_release	atomic64_dec_return

#else /* atomic64_dec_return_relaxed */

#ifndef atomic64_dec_return_acquire
#define atomic64_dec_return_acquire(...)				\
	__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return_release
#define atomic64_dec_return_release(...)				\
	__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return
#define atomic64_dec_return(...)					\
	__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */
/* atomic64_xchg_relaxed: derive ordered variants as for atomic_add_return. */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed		atomic64_xchg
#define atomic64_xchg_acquire		atomic64_xchg
#define atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...)					\
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...)					\
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define atomic64_xchg(...)						\
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */
/* atomic64_cmpxchg_relaxed: derive ordered variants as for atomic_add_return. */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...)					\
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...)						\
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */
/* cmpxchg_relaxed: derive ordered variants of the generic cmpxchg(). */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed			cmpxchg
#define cmpxchg_acquire			cmpxchg
#define cmpxchg_release			cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...)						\
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define cmpxchg_release(...)						\
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define cmpxchg(...)							\
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */
/* cmpxchg64_relaxed: derive ordered variants of the generic cmpxchg64(). */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed		cmpxchg64
#define cmpxchg64_acquire		cmpxchg64
#define cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...)						\
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define cmpxchg64_release(...)						\
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define cmpxchg64(...)							\
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */
/* xchg_relaxed: derive ordered variants of the generic xchg(). */
#ifndef xchg_relaxed
#define xchg_relaxed			xchg
#define xchg_acquire			xchg
#define xchg_release			xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */
427 * atomic_add_unless - add unless the number is already a given value
428 * @v: pointer of type atomic_t
429 * @a: the amount to add to v...
430 * @u: ...unless v is equal to u.
432 * Atomically adds @a to @v, so long as @v was not already @u.
433 * Returns non-zero if @v was not @u, and zero otherwise.
435 static inline int atomic_add_unless(atomic_t *v, int a, int u)
437 return __atomic_add_unless(v, a, u) != u;
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif
451 #ifndef atomic_andnot
452 static inline void atomic_andnot(int i, atomic_t *v)
458 static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
460 atomic_andnot(mask, v);
463 static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
469 * atomic_inc_not_zero_hint - increment if not null
470 * @v: pointer of type atomic_t
471 * @hint: probable value of the atomic before the increment
473 * This version of atomic_inc_not_zero() gives a hint of probable
474 * value of the atomic. This helps processor to not read the memory
475 * before doing the atomic read/modify/write cycle, lowering
476 * number of bus transactions on some arches.
478 * Returns: 0 if increment was not done, 1 otherwise.
480 #ifndef atomic_inc_not_zero_hint
481 static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
485 /* sanity test, should be removed by compiler if hint is a constant */
487 return atomic_inc_not_zero(v);
490 val = atomic_cmpxchg(v, c, c + 1);
500 #ifndef atomic_inc_unless_negative
501 static inline int atomic_inc_unless_negative(atomic_t *p)
504 for (v = 0; v >= 0; v = v1) {
505 v1 = atomic_cmpxchg(p, v, v + 1);
513 #ifndef atomic_dec_unless_positive
514 static inline int atomic_dec_unless_positive(atomic_t *p)
517 for (v = 0; v <= 0; v = v1) {
518 v1 = atomic_cmpxchg(p, v, v - 1);
527 * atomic_dec_if_positive - decrement by 1 if old value positive
528 * @v: pointer of type atomic_t
530 * The function returns the old value of *v minus 1, even if
531 * the atomic variable, v, was not decremented.
533 #ifndef atomic_dec_if_positive
534 static inline int atomic_dec_if_positive(atomic_t *v)
540 if (unlikely(dec < 0))
542 old = atomic_cmpxchg((v), c, dec);
543 if (likely(old == c))
551 #ifdef CONFIG_GENERIC_ATOMIC64
552 #include <asm-generic/atomic64.h>
555 #ifndef atomic64_andnot
556 static inline void atomic64_andnot(long long i, atomic64_t *v)
562 #include <asm-generic/atomic-long.h>
564 #endif /* _LINUX_ATOMIC_H */