/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>	/* atomic_ops_lock()/unlock() for the !LLSC path */

#define atomic_read(v)	((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) (((v)->counter) = (i))
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int temp;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n" /* retry if scond failed */	\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
}
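/*
 * Expansion sketch (illustrative only, not part of this header): the
 * LLOCK/SCOND loop above is equivalent in effect to a CAS retry loop.
 * For ATOMIC_OP(add, +=, add), the generated atomic_add() behaves like:
 *
 *	void atomic_add(int i, atomic_t *v)
 *	{
 *		int old;
 *
 *		do {
 *			old = v->counter;	// llock
 *		} while (atomic_cmpxchg(v, old, old + i) != old);
 *						// scond + bnz 1b
 *	}
 */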
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)							\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
									\
	smp_mb();							\
									\
	return temp;							\
}
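/*
 * Ordering sketch (x, y and cnt are hypothetical): the smp_mb() pair
 * makes the value-returning ops fully ordered, so neither plain access
 * below can be reordered across the atomic op:
 *
 *	WRITE_ONCE(x, 1);
 *	t = atomic_add_return(1, &cnt);	// full barrier before and after
 *	r = READ_ONCE(y);
 */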
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) (((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter = i;
	atomic_ops_unlock(flags);
}

#endif
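/*
 * Illustrative race (exposition only): if atomic_set() skipped the lock
 * while another CPU ran a lock-based R-M-W, the set could be lost:
 *
 *	CPU0 (emulated atomic_add)	CPU1 (lock-less atomic_set)
 *	lock; tmp = v->counter;
 *					v->counter = 0;
 *	v->counter = tmp + i; unlock;	// CPU1's store is overwritten
 */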
/*
 * Non-hardware-assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;	/* e.g. v->counter += i */		\
	atomic_ops_unlock(flags);					\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */
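/*
 * A minimal sketch of the locking helpers assumed above; they come from
 * <asm/smp.h>, and the exact SMP form may differ:
 *
 *	// UP: masking interrupts makes the R-M-W sequence atomic
 *	#define atomic_ops_lock(flags)		local_irq_save(flags)
 *	#define atomic_ops_unlock(flags)	local_irq_restore(flags)
 *
 * On SMP the same hooks additionally take an arch spinlock to serialize
 * against other CPUs.
 */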
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)

#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
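/*
 * For reference, an expansion sketch: ATOMIC_OPS(add, +=, add) above
 * generates both flavours,
 *
 *	static inline void atomic_add(int i, atomic_t *v);
 *	static inline int  atomic_add_return(int i, atomic_t *v);
 *
 * while ATOMIC_OP(and, &=, and) generates only atomic_and(), the helper
 * that atomic_clear_mask() builds on.
 */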
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
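/*
 * Usage sketch (illustrative; obj and its refcnt field are hypothetical):
 * the atomic_inc_not_zero() wrapper below is the canonical way to take a
 * reference only while an object is still live:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	// object already on its way out
 */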
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }
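/*
 * Usage sketch (hypothetical names): a statically initialized counter
 * built from the helpers above.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	void event_seen(void)
 *	{
 *		if (atomic_inc_return(&nr_events) == 1)
 *			pr_info("first event\n");
 *	}
 */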
#include <asm-generic/atomic64.h>

#endif /* _ASM_ARC_ATOMIC_H */