locking,arch,m68k: Fold atomic_ops
author     Peter Zijlstra <peterz@infradead.org>
           Sun, 23 Mar 2014 18:06:34 +0000 (19:06 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Thu, 14 Aug 2014 10:48:08 +0000 (12:48 +0200)
Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Requires a separate asm_op argument because of eor: the m68k mnemonic
for exclusive-or is eor, so the asm mnemonic cannot always be derived
from the op name.
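
To illustrate, a sketch of the expansion (the xor op is not added by
this patch; it is shown only as the motivating example for the
asm_op argument):

    ATOMIC_OP(add, +=, add) expands to

	static inline void atomic_add(int i, atomic_t *v)
	{
		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
	}

    whereas a future ATOMIC_OP(xor, ^=, eor) would emit "eorl %1,%0":
    the mnemonic "eor" cannot be derived from the op name "xor".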

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: linux-m68k@lists.linux-m68k.org
Link: http://lkml.kernel.org/r/20140509091646.GO30445@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/m68k/include/asm/atomic.h

index 55695212a2ae98ec9f4e6c6e5c7289a6cd36651b..663d4ba2462c858fb6645909f8583190724d65c4 100644
 #define        ASM_DI  "di"
 #endif
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-       __asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
+#define ATOMIC_OP(op, c_op, asm_op)                                    \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+       __asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
+}                                                                      \
+
+#ifdef CONFIG_RMW_INSNS
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
+static inline int atomic_##op##_return(int i, atomic_t *v)             \
+{                                                                      \
+       int t, tmp;                                                     \
+                                                                       \
+       __asm__ __volatile__(                                           \
+                       "1:     movel %2,%1\n"                          \
+                       "       " #asm_op "l %3,%1\n"                   \
+                       "       casl %2,%1,%0\n"                        \
+                       "       jne 1b"                                 \
+                       : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
+                       : "g" (i), "2" (atomic_read(v)));               \
+       return t;                                                       \
 }
 
-static inline void atomic_sub(int i, atomic_t *v)
-{
-       __asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
+#else
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
+static inline int atomic_##op##_return(int i, atomic_t * v)            \
+{                                                                      \
+       unsigned long flags;                                            \
+       int t;                                                          \
+                                                                       \
+       local_irq_save(flags);                                          \
+       t = (v->counter c_op i);                                        \
+       local_irq_restore(flags);                                       \
+                                                                       \
+       return t;                                                       \
 }
 
+#endif /* CONFIG_RMW_INSNS */
+
+#define ATOMIC_OPS(op, c_op, asm_op)                                   \
+       ATOMIC_OP(op, c_op, asm_op)                                     \
+       ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 static inline void atomic_inc(atomic_t *v)
 {
        __asm__ __volatile__("addql #1,%0" : "+m" (*v));
@@ -76,67 +117,11 @@ static inline int atomic_inc_and_test(atomic_t *v)
 
 #ifdef CONFIG_RMW_INSNS
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-       int t, tmp;
-
-       __asm__ __volatile__(
-                       "1:     movel %2,%1\n"
-                       "       addl %3,%1\n"
-                       "       casl %2,%1,%0\n"
-                       "       jne 1b"
-                       : "+m" (*v), "=&d" (t), "=&d" (tmp)
-                       : "g" (i), "2" (atomic_read(v)));
-       return t;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-       int t, tmp;
-
-       __asm__ __volatile__(
-                       "1:     movel %2,%1\n"
-                       "       subl %3,%1\n"
-                       "       casl %2,%1,%0\n"
-                       "       jne 1b"
-                       : "+m" (*v), "=&d" (t), "=&d" (tmp)
-                       : "g" (i), "2" (atomic_read(v)));
-       return t;
-}
-
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #else /* !CONFIG_RMW_INSNS */
 
-static inline int atomic_add_return(int i, atomic_t * v)
-{
-       unsigned long flags;
-       int t;
-
-       local_irq_save(flags);
-       t = atomic_read(v);
-       t += i;
-       atomic_set(v, t);
-       local_irq_restore(flags);
-
-       return t;
-}
-
-static inline int atomic_sub_return(int i, atomic_t * v)
-{
-       unsigned long flags;
-       int t;
-
-       local_irq_save(flags);
-       t = atomic_read(v);
-       t -= i;
-       atomic_set(v, t);
-       local_irq_restore(flags);
-
-       return t;
-}
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        unsigned long flags;