s390/spinlock: refactor arch_spin_lock_wait[_flags]
author	Martin Schwidefsky <schwidefsky@de.ibm.com>
Fri, 16 May 2014 13:11:12 +0000 (15:11 +0200)
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>
Tue, 20 May 2014 06:58:55 +0000 (08:58 +0200)
Reorder the spinlock wait code to make it more readable: first try to take a free lock, then check whether the lock owner is running, then spin on the lock value for a while, and only yield the CPU as a last resort.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
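
In condensed form, the refactored wait loop reads as below. This is a sketch assembled from the added lines of the first hunk; the declarations above the hunk context are not shown in the diff, so the initialization of cpu (this CPU's lock value, taken here as SPINLOCK_LOCKVAL) is an assumption.

	void arch_spin_lock_wait(arch_spinlock_t *lp)
	{
		unsigned int cpu = SPINLOCK_LOCKVAL;	/* assumption: declared above the hunk */
		unsigned int owner;
		int count;

		while (1) {
			owner = ACCESS_ONCE(lp->lock);
			/* 1. Lock is free: try to take it. */
			if (!owner) {
				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
					return;
				continue;
			}
			/* 2. Lock owner is not running: yield to it and retry. */
			if (!smp_vcpu_scheduled(~owner)) {
				smp_yield_cpu(~owner);
				continue;
			}
			/* 3. Owner is running: spin on the lock value for a while. */
			count = spin_retry;
			do {
				owner = ACCESS_ONCE(lp->lock);
			} while (owner && count-- > 0);
			if (!owner)
				continue;
			/* 4. Still held and not on LPAR (e.g. under z/VM): yield anyway. */
			if (!MACHINE_IS_LPAR)
				smp_yield_cpu(~owner);
		}
	}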
arch/s390/lib/spinlock.c

index 1dd282c742b5e2e3c103e3f7d61c7cc7ae02e7c8..5b0e445bc3f39930317bafc745c76c674874bf0e 100644
@@ -31,23 +31,31 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
        int count;
 
        while (1) {
-               owner = lp->lock;
-               if (!owner || smp_vcpu_scheduled(~owner)) {
-                       count = spin_retry;
-                       do {
-                               if (arch_spin_is_locked(lp))
-                                       continue;
-                               if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-                                       return;
-                       } while (count-- > 0);
-                       if (MACHINE_IS_LPAR)
-                               continue;
+               owner = ACCESS_ONCE(lp->lock);
+               /* Try to get the lock if it is free. */
+               if (!owner) {
+                       if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+                               return;
+                       continue;
                }
-               owner = lp->lock;
-               if (owner)
+               /* Check if the lock owner is running. */
+               if (!smp_vcpu_scheduled(~owner)) {
+                       smp_yield_cpu(~owner);
+                       continue;
+               }
+               /* Loop for a while on the lock value. */
+               count = spin_retry;
+               do {
+                       owner = ACCESS_ONCE(lp->lock);
+               } while (owner && count-- > 0);
+               if (!owner)
+                       continue;
+               /*
+                * For multiple layers of hypervisors, e.g. z/VM + LPAR
+                * yield the CPU if the lock is still unavailable.
+                */
+               if (!MACHINE_IS_LPAR)
                        smp_yield_cpu(~owner);
-               if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-                       return;
        }
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
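
The key structural change in this hunk: the old retry loop mixed arch_spin_is_locked() checks with compare-and-swap attempts, while the new inner wait merely reads the lock word and leaves the single CAS attempt to the top of the outer loop. A minimal sketch of the new inner wait, copied from the added lines above:

	count = spin_retry;
	do {
		owner = ACCESS_ONCE(lp->lock);	/* plain load, no CAS traffic */
	} while (owner && count-- > 0);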
@@ -60,27 +68,32 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 
        local_irq_restore(flags);
        while (1) {
-               owner = lp->lock;
-               if (!owner || smp_vcpu_scheduled(~owner)) {
-                       count = spin_retry;
-                       do {
-                               if (arch_spin_is_locked(lp))
-                                       continue;
-                               local_irq_disable();
-                               if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-                                       return;
-                               local_irq_restore(flags);
-                       } while (count-- > 0);
-                       if (MACHINE_IS_LPAR)
-                               continue;
+               owner = ACCESS_ONCE(lp->lock);
+               /* Try to get the lock if it is free. */
+               if (!owner) {
+                       local_irq_disable();
+                       if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+                               return;
+                       local_irq_restore(flags);
                }
-               owner = lp->lock;
-               if (owner)
+               /* Check if the lock owner is running. */
+               if (!smp_vcpu_scheduled(~owner)) {
+                       smp_yield_cpu(~owner);
+                       continue;
+               }
+               /* Loop for a while on the lock value. */
+               count = spin_retry;
+               do {
+                       owner = ACCESS_ONCE(lp->lock);
+               } while (owner && count-- > 0);
+               if (!owner)
+                       continue;
+               /*
+                * For multiple layers of hypervisors, e.g. z/VM + LPAR
+                * yield the CPU if the lock is still unavailable.
+                */
+               if (!MACHINE_IS_LPAR)
                        smp_yield_cpu(~owner);
-               local_irq_disable();
-               if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-                       return;
-               local_irq_restore(flags);
        }
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
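
A design note on the _flags variant as restructured above: the caller's interrupt state (flags) is restored while waiting, so interrupts stay enabled during the spin, and they are disabled only around the compare-and-swap; on success the function returns with interrupts off, as the spin_lock_irqsave path expects. A condensed sketch of that pattern, assembled from the second hunk:

	local_irq_restore(flags);		/* spin with interrupts enabled */
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		if (!owner) {
			local_irq_disable();	/* acquire with interrupts off */
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;		/* locked; irqs stay disabled */
			local_irq_restore(flags);
		}
		/* ... same owner/spin/yield steps as arch_spin_lock_wait ... */
	}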