locking,arch,ia64: Fold atomic_ops
author		Peter Zijlstra <peterz@infradead.org>
		Sun, 23 Mar 2014 17:20:30 +0000 (18:20 +0100)
committer	Ingo Molnar <mingo@kernel.org>
		Thu, 14 Aug 2014 10:48:07 +0000 (12:48 +0200)
Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-ia64@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.245224472@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
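
The folding pattern itself is simple: one CPP macro stamps out a
cmpxchg-loop function per C operator, so add and sub (and any future
op) differ only in the operator token passed in. As a rough
illustration outside the kernel, here is a minimal C11 sketch of the
same technique; the my_atomic_* names are made up for this sketch, and
the kernel versions in the diff below use ia64_cmpxchg() plus the
CMPXCHG_BUGCHECK machinery rather than <stdatomic.h>:

	#include <stdatomic.h>

	#define MY_ATOMIC_OP(op, c_op)						\
	static inline int my_atomic_##op(int i, _Atomic int *v)		\
	{									\
		int old = atomic_load(v);					\
		/* on failure, 'old' is reloaded with the current value */	\
		while (!atomic_compare_exchange_weak(v, &old, old c_op i))	\
			;							\
		return old c_op i;	/* return the new value */		\
	}

	MY_ATOMIC_OP(add, +)	/* emits my_atomic_add() */
	MY_ATOMIC_OP(sub, -)	/* emits my_atomic_sub() */

	#undef MY_ATOMIC_OP
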
arch/ia64/include/asm/atomic.h

index 0f8bf48dadf3cee5c20c518a0bd3aa5e44d7055f..42919a831c6c2a76ae9089261f6dd665143e383f 100644
 #define atomic_set(v,i)                (((v)->counter) = (i))
 #define atomic64_set(v,i)      (((v)->counter) = (i))
 
-static __inline__ int
-ia64_atomic_add (int i, atomic_t *v)
-{
-       __s32 old, new;
-       CMPXCHG_BUGCHECK_DECL
-
-       do {
-               CMPXCHG_BUGCHECK(v);
-               old = atomic_read(v);
-               new = old + i;
-       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-       return new;
+#define ATOMIC_OP(op, c_op)                                            \
+static __inline__ int                                                  \
+ia64_atomic_##op (int i, atomic_t *v)                                  \
+{                                                                      \
+       __s32 old, new;                                                 \
+       CMPXCHG_BUGCHECK_DECL                                           \
+                                                                       \
+       do {                                                            \
+               CMPXCHG_BUGCHECK(v);                                    \
+               old = atomic_read(v);                                   \
+               new = old c_op i;                                       \
+       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+       return new;                                                     \
 }
 
-static __inline__ long
-ia64_atomic64_add (__s64 i, atomic64_t *v)
-{
-       __s64 old, new;
-       CMPXCHG_BUGCHECK_DECL
-
-       do {
-               CMPXCHG_BUGCHECK(v);
-               old = atomic64_read(v);
-               new = old + i;
-       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-       return new;
-}
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
 
-static __inline__ int
-ia64_atomic_sub (int i, atomic_t *v)
-{
-       __s32 old, new;
-       CMPXCHG_BUGCHECK_DECL
-
-       do {
-               CMPXCHG_BUGCHECK(v);
-               old = atomic_read(v);
-               new = old - i;
-       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-       return new;
-}
+#undef ATOMIC_OP
 
-static __inline__ long
-ia64_atomic64_sub (__s64 i, atomic64_t *v)
-{
-       __s64 old, new;
-       CMPXCHG_BUGCHECK_DECL
-
-       do {
-               CMPXCHG_BUGCHECK(v);
-               old = atomic64_read(v);
-               new = old - i;
-       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-       return new;
+#define atomic_add_return(i,v)                                         \
+({                                                                     \
+       int __ia64_aar_i = (i);                                         \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
+            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
+            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
+            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
+               : ia64_atomic_add(__ia64_aar_i, v);                     \
+})
+
+#define atomic_sub_return(i,v)                                         \
+({                                                                     \
+       int __ia64_asr_i = (i);                                         \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
+            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
+            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
+            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
+               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
+               : ia64_atomic_sub(__ia64_asr_i, v);                     \
+})
+
+#define ATOMIC64_OP(op, c_op)                                          \
+static __inline__ long                                                 \
+ia64_atomic64_##op (__s64 i, atomic64_t *v)                            \
+{                                                                      \
+       __s64 old, new;                                                 \
+       CMPXCHG_BUGCHECK_DECL                                           \
+                                                                       \
+       do {                                                            \
+               CMPXCHG_BUGCHECK(v);                                    \
+               old = atomic64_read(v);                                 \
+               new = old c_op i;                                       \
+       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+       return new;                                                     \
 }
 
+ATOMIC64_OP(add, +)
+ATOMIC64_OP(sub, -)
+
+#undef ATOMIC64_OP
+
+#define atomic64_add_return(i,v)                                       \
+({                                                                     \
+       long __ia64_aar_i = (i);                                        \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
+            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
+            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
+            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
+               : ia64_atomic64_add(__ia64_aar_i, v);                   \
+})
+
+#define atomic64_sub_return(i,v)                                       \
+({                                                                     \
+       long __ia64_asr_i = (i);                                        \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
+            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
+            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
+            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
+               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
+               : ia64_atomic64_sub(__ia64_asr_i, v);                   \
+})
+
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
@@ -123,30 +155,6 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-#define atomic_add_return(i,v)                                         \
-({                                                                     \
-       int __ia64_aar_i = (i);                                         \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
-            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
-            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
-            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
-               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
-               : ia64_atomic_add(__ia64_aar_i, v);                     \
-})
-
-#define atomic64_add_return(i,v)                                       \
-({                                                                     \
-       long __ia64_aar_i = (i);                                        \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
-            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
-            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
-            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
-               ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
-               : ia64_atomic64_add(__ia64_aar_i, v);                   \
-})
-
 /*
  * Atomically add I to V and return TRUE if the resulting value is
  * negative.
@@ -163,30 +171,6 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
        return atomic64_add_return(i, v) < 0;
 }
 
-#define atomic_sub_return(i,v)                                         \
-({                                                                     \
-       int __ia64_asr_i = (i);                                         \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
-            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
-            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
-            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
-               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
-               : ia64_atomic_sub(__ia64_asr_i, v);                     \
-})
-
-#define atomic64_sub_return(i,v)                                       \
-({                                                                     \
-       long __ia64_asr_i = (i);                                        \
-       (__builtin_constant_p(i)                                        \
-        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
-            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
-            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
-            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
-               ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
-               : ia64_atomic64_sub(__ia64_asr_i, v);                   \
-})
-
 #define atomic_dec_return(v)           atomic_sub_return(1, (v))
 #define atomic_inc_return(v)           atomic_add_return(1, (v))
 #define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
@@ -199,13 +183,13 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_dec_and_test(v)       (atomic64_sub_return(1, (v)) == 0)
 #define atomic64_inc_and_test(v)       (atomic64_add_return(1, (v)) == 0)
 
-#define atomic_add(i,v)                        atomic_add_return((i), (v))
-#define atomic_sub(i,v)                        atomic_sub_return((i), (v))
+#define atomic_add(i,v)                        (void)atomic_add_return((i), (v))
+#define atomic_sub(i,v)                        (void)atomic_sub_return((i), (v))
 #define atomic_inc(v)                  atomic_add(1, (v))
 #define atomic_dec(v)                  atomic_sub(1, (v))
 
-#define atomic64_add(i,v)              atomic64_add_return((i), (v))
-#define atomic64_sub(i,v)              atomic64_sub_return((i), (v))
+#define atomic64_add(i,v)              (void)atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)              (void)atomic64_sub_return((i), (v))
 #define atomic64_inc(v)                        atomic64_add(1, (v))
 #define atomic64_dec(v)                        atomic64_sub(1, (v))
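
For context on the add_return/sub_return wrappers moved around above:
ia64's fetchadd instruction only accepts the immediates +/-1, +/-4,
+/-8 and +/-16, so the macros use __builtin_constant_p() to route
fetchadd-capable constants to ia64_fetch_and_add() and everything else
to the cmpxchg loop. A minimal, compilable sketch of that dispatch
pattern (GNU C statement expressions, as in the kernel macros;
fast_path()/slow_path() are hypothetical stand-ins, not kernel
functions):

	#include <stdio.h>

	/* Stand-ins for ia64_fetch_and_add() and ia64_atomic_add();
	 * both just add, since the point here is the compile-time
	 * dispatch, not the ISA. */
	static int fast_path(int i, int *v) { return *v += i; }
	static int slow_path(int i, int *v) { return *v += i; }

	#define my_add_return(i, v)					\
	({								\
		int __i = (i);						\
		(__builtin_constant_p(i)				\
		 && (__i ==  1 || __i ==  4 || __i ==  8 || __i ==  16	\
		  || __i == -1 || __i == -4 || __i == -8 || __i == -16))\
			? fast_path(__i, (v))				\
			: slow_path(__i, (v));				\
	})

	int main(void)
	{
		int v = 0, n = 3;
		printf("%d\n", my_add_return(4, &v));	/* constant: fast path */
		printf("%d\n", my_add_return(n, &v));	/* runtime value: slow path */
		return 0;
	}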