sh: Fix sh4a llsc-based cmpxchg()
author	Aoi Shinkai <shinkoi2005@gmail.com>
Wed, 10 Jun 2009 16:15:42 +0000 (16:15 +0000)
committer	Paul Mundt <lethal@linux-sh.org>
Thu, 11 Jun 2009 06:31:55 +0000 (09:31 +0300)
This fixes up a typo in the ll/sc based cmpxchg code which apparently
wasn't getting a lot of testing due to the swapped old/new pair. With
that fixed up, the ll/sc code also starts using it and provides its own
atomic_add_unless().

Signed-off-by: Aoi Shinkai <shinkoi2005@gmail.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/include/asm/atomic-llsc.h
arch/sh/include/asm/atomic.h
arch/sh/include/asm/cmpxchg-llsc.h
arch/sh/include/asm/spinlock.h
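
For reference, cmpxchg(ptr, old, new) stores new only when *ptr still equals old, and returns the previously observed value either way. A minimal, non-atomic sketch of those semantics (illustration only, not part of the patch; the real SH-4A version is the movli.l/movco.l sequence patched below):

static inline unsigned long
cmpxchg_u32_sketch(volatile int *m, unsigned long old, unsigned long new)
{
        unsigned long prev = *m;        /* not atomic: semantics sketch only */

        if (prev == old)
                *m = new;               /* with the typo, the asm re-stored 'old' here,
                                           so the exchange silently never took effect */

        return prev;
}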

diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 4b00b78e3f4f3a6a5be4a70a924123c84f32c895..b040e1e086108c606c6da8ecbd712b0dca2b1734 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -104,4 +104,31 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
        : "t");
 }
 
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+
+       return c != (u);
+}
+
 #endif /* __ASM_SH_ATOMIC_LLSC_H */
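
The atomic_add_unless() added above loops on atomic_cmpxchg() until either the add wins the race or the value already equals @u. Its usual consumer is atomic_inc_not_zero() (see atomic.h below); a hedged usage sketch, where try_get_object() is a hypothetical caller:

/* Hypothetical example: take a reference only while the object is still live. */
static inline int try_get_object(atomic_t *refcount)
{
        /* Non-zero if the count was bumped, zero if it had already hit 0. */
        return atomic_inc_not_zero(refcount);
}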
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 6327ffbb19928e51305a2d472cbdcd31863a0823..978b58efb1e92e18464010bfeeeae5a5296de3c0 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -45,7 +45,7 @@
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
 
-#ifndef CONFIG_GUSA_RB
+#if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A)
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        int ret;
@@ -73,7 +73,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
        return ret != u;
 }
-#endif
+#endif /* !CONFIG_GUSA_RB && !CONFIG_CPU_SH4A */
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
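
With this change the generic atomic_cmpxchg()/atomic_add_unless() in this file are compiled out for SH-4A as well, which now picks up the ll/sc versions instead. For parts with neither gUSA nor ll/sc, the fallback has to make the read-compare-store step atomic some other way; a sketch under the assumption of the usual disable-interrupts pattern (not a quote of the file):

static inline int atomic_cmpxchg_irq_sketch(atomic_t *v, int old, int new)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);          /* no ll/sc available: exclude interrupts instead */
        ret = v->counter;
        if (ret == old)
                v->counter = new;
        local_irq_restore(flags);

        return ret;
}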
diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
index 0fac3da536ca025797385130a9972296c578276c..47136661a203a96ad2c5d676fff377bf789cbafb 100644
--- a/arch/sh/include/asm/cmpxchg-llsc.h
+++ b/arch/sh/include/asm/cmpxchg-llsc.h
@@ -55,7 +55,7 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
                "mov            %0, %1                          \n\t"
                "cmp/eq         %1, %3                          \n\t"
                "bf             2f                              \n\t"
-               "mov            %3, %0                          \n\t"
+               "mov            %4, %0                          \n\t"
                "2:                                             \n\t"
                "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h
index 60283565f89b56a83582053a8624cf49fec801b3..a28c9f0053fd4663f52d8c9fc1fb5a78feb31f48 100644
--- a/arch/sh/include/asm/spinlock.h
+++ b/arch/sh/include/asm/spinlock.h
@@ -26,7 +26,7 @@
 #define __raw_spin_is_locked(x)                ((x)->lock <= 0)
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 #define __raw_spin_unlock_wait(x) \
-       do { cpu_relax(); } while ((x)->lock)
+       do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0)
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
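
The reworked __raw_spin_unlock_wait() now spins on __raw_spin_is_locked() instead of testing the raw ->lock word, whose sense is the opposite on SH (per the definition above, a positive ->lock means unlocked), and it no longer relaxes the CPU before the first check. Expanded, the new macro behaves like this sketch (illustration only; spin_wait_sketch() is a hypothetical wrapper and raw_spinlock_t is assumed to be the arch lock type of this era):

static inline void spin_wait_sketch(raw_spinlock_t *x)
{
        /* Busy-wait until the lock is observed free, relaxing the CPU each spin. */
        while (__raw_spin_is_locked(x))
                cpu_relax();
}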