[IA64] SMT friendly version of spin_unlock_wait()
author Tony Luck <tony.luck@intel.com>
Mon, 12 Oct 2009 16:51:41 +0000 (09:51 -0700)
committer Tony Luck <tony.luck@intel.com>
Tue, 13 Oct 2009 21:28:31 +0000 (14:28 -0700)
We can be kinder to SMT systems in spin_unlock_wait.

Signed-off-by: Tony Luck <tony.luck@intel.com>
arch/ia64/include/asm/spinlock.h

index 4fa502739d64397433936240f4630a87ce27f69d..239ecdc9516d042e69fc5b1773cfa2fb29fcad4a 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -75,6 +75,20 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
        ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
+static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+{
+       int     *p = (int *)&lock->lock, ticket;
+
+       ia64_invala();
+
+       for (;;) {
+               asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
+               if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+                       return;
+               cpu_relax();
+       }
+}
+
 static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
        long tmp = ACCESS_ONCE(lock->lock);
@@ -123,8 +137,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
-       while (__raw_spin_is_locked(lock))
-               cpu_relax();
+       __ticket_spin_unlock_wait(lock);
 }
 
 #define __raw_read_can_lock(rw)                (*(volatile int *)(rw) >= 0)
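
Note: the wait loop added above exits once the lock word's "now serving" field (the
bits at and above TICKET_SHIFT) matches its "next ticket" field (the low bits covered
by TICKET_MASK), i.e. once no CPU holds the lock or is queued ahead of it. The
stand-alone sketch below models only that predicate; the TICKET_SHIFT/TICKET_MASK
values (17, and a 15-bit mask) are assumed from the spinlock.h of this era rather
than quoted from the excerpt, and a plain function argument stands in for the
ld4.c.nc speculative check load and ia64_invala(), which have no user-space
equivalent.

/*
 * Minimal user-space sketch of the ticket-lock "is free" test used by
 * __ticket_spin_unlock_wait() above.  Constants are assumed to match
 * arch/ia64/include/asm/spinlock.h of this era; the speculative load is
 * replaced by an ordinary argument read.
 */
#include <stdio.h>

#define TICKET_SHIFT	17			/* assumed: "now serving" field starts here */
#define TICKET_MASK	((1 << 15) - 1)		/* assumed: 15-bit ticket fields */

/* Free when "now serving" (upper field) equals "next ticket" (low field). */
static int ticket_lock_is_free(int word)
{
	return !(((word >> TICKET_SHIFT) ^ word) & TICKET_MASK);
}

int main(void)
{
	int free_word   = (3 << TICKET_SHIFT) | 3;	/* serving 3, next 3 -> free */
	int locked_word = (3 << TICKET_SHIFT) | 4;	/* serving 3, next 4 -> held */

	printf("%d %d\n", ticket_lock_is_free(free_word),
			  ticket_lock_is_free(locked_word));	/* prints "1 0" */
	return 0;
}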