Merge tag 'v4.3-rc1' into locking/core, to refresh the tree
author Ingo Molnar <mingo@kernel.org>
Sun, 13 Sep 2015 08:01:24 +0000 (10:01 +0200)
committer Ingo Molnar <mingo@kernel.org>
Sun, 13 Sep 2015 08:01:24 +0000 (10:01 +0200)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Documentation/static-keys.txt
arch/x86/include/asm/qspinlock.h
include/asm-generic/qspinlock.h
include/linux/jump_label.h
kernel/locking/qspinlock.c

diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index f4cb0b2d5cd79048c51cf7b89f803561c2070e28..ec911583f6c56c9a23721f301b6210458657eccf 100644
@@ -16,7 +16,7 @@ The updated API replacements are:
 DEFINE_STATIC_KEY_TRUE(key);
 DEFINE_STATIC_KEY_FALSE(key);
 static_key_likely()
-statick_key_unlikely()
+static_key_unlikely()
 
 0) Abstract
 
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 9d51fae1cba345e5cf01387a6c16207a6b5be872..eaba0807603009e6ed1612a7049bb35bafeaf31e 100644
@@ -39,18 +39,27 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-#define virt_queued_spin_lock virt_queued_spin_lock
-
-static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifdef CONFIG_PARAVIRT
+#define virt_spin_lock virt_spin_lock
+static inline bool virt_spin_lock(struct qspinlock *lock)
 {
        if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
                return false;
 
-       while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
-               cpu_relax();
+       /*
+        * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+        * back to a Test-and-Set spinlock, because fair locks have
+        * horrible lock 'holder' preemption issues.
+        */
+
+       do {
+               while (atomic_read(&lock->val) != 0)
+                       cpu_relax();
+       } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 
        return true;
 }
+#endif /* CONFIG_PARAVIRT */
 
 #include <asm-generic/qspinlock.h>
 
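The new locking loop above is a test-and-test-and-set (TTAS) spinlock: waiters spin on a plain atomic read and only attempt the cmpxchg once the lock looks free, instead of hammering cmpxchg directly as the old code did. That keeps the contended cache line in shared state across all waiters rather than bouncing it in exclusive state on every failed cmpxchg. A minimal standalone sketch of the same pattern, using C11 atomics rather than the kernel's atomic_t API (all names here are illustrative, not kernel code):

#include <stdatomic.h>

#define TTAS_LOCKED 1	/* stand-in for _Q_LOCKED_VAL */

struct ttas_lock {
	atomic_int val;	/* must start at 0 (unlocked) */
};

static void ttas_lock_acquire(struct ttas_lock *lock)
{
	int expected;

	do {
		/* Read-only spin: the cache line stays shared among waiters. */
		while (atomic_load_explicit(&lock->val,
					    memory_order_relaxed) != 0)
			;	/* a cpu_relax() equivalent belongs here */

		expected = 0;
		/* Only now pay for the read-modify-write. */
	} while (!atomic_compare_exchange_weak_explicit(&lock->val,
			&expected, TTAS_LOCKED,
			memory_order_acquire, memory_order_relaxed));
}

static void ttas_lock_release(struct ttas_lock *lock)
{
	atomic_store_explicit(&lock->val, 0, memory_order_release);
}

Note that, unlike the queued slowpath, this lock is unfair, which is exactly why it is only used when running under a hypervisor: an unfair lock lets whichever vCPU happens to be running take the lock, instead of making everyone wait behind a queue head whose vCPU may have been preempted.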
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 83bfb87f5bf18e92ea794dd3ca3afec1b1ba6f11..e2aadbc7151f4cd69b8745e80a0af403257f1678 100644
@@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
                cpu_relax();
 }
 
-#ifndef virt_queued_spin_lock
-static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
        return false;
 }
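
The generic header above follows the usual asm-generic override idiom: an architecture that wants its own virt_spin_lock() defines both the function and a same-named macro (as the x86 hunk does under CONFIG_PARAVIRT), and the #ifndef then compiles the no-op fallback out. A hedged, self-contained sketch of that idiom with hypothetical names (arch_virt_lock, ARCH_HAS_VIRT_LOCK):

#include <stdbool.h>
#include <stdio.h>

/* "Arch header": provides an override plus a same-named macro so the
 * generic header can detect it, mirroring #define virt_spin_lock above. */
#ifdef ARCH_HAS_VIRT_LOCK
#define arch_virt_lock arch_virt_lock
static inline bool arch_virt_lock(void)
{
	return true;	/* pretend the hypervisor fast path succeeded */
}
#endif

/* "Generic header": stub used only when no override was defined. */
#ifndef arch_virt_lock
static inline bool arch_virt_lock(void)
{
	return false;	/* no virtualization fast path, take the slowpath */
}
#endif

int main(void)
{
	printf("virt fast path taken: %d\n", arch_virt_lock());
	return 0;
}

Building with -DARCH_HAS_VIRT_LOCK selects the override; without it, the stub wins. Because the choice is made by the preprocessor, the unused variant costs nothing at runtime.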
index 7f653e8f66900049c358ed4907fab8121047dd91..0684bd3a48fc66624f43751fcdd2455c67349d92 100644 (file)
@@ -22,7 +22,7 @@
  * DEFINE_STATIC_KEY_TRUE(key);
  * DEFINE_STATIC_KEY_FALSE(key);
  * static_key_likely()
- * statick_key_unlikely()
+ * static_key_unlikely()
  *
  * Jump labels provide an interface to generate dynamic branches using
  * self-modifying code. Assuming toolchain and architecture support, if we
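
For context on the API this hunk documents: a static key turns a rarely-toggled condition into a runtime-patched branch (a NOP on the fast path) instead of a load-and-test. A kernel-style usage sketch, not a standalone program; the hunks above spell the branch helpers static_key_likely()/static_key_unlikely(), while mainline settled on static_branch_likely()/static_branch_unlikely(), which is the spelling used here, and debug_stats_key/do_debug_accounting are hypothetical:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(debug_stats_key);

static void do_debug_accounting(void);	/* hypothetical slow-path helper */

void hot_path(void)
{
	/* Compiles to straight-line code with a NOP; the NOP is patched
	 * into a jump at runtime if the key is ever enabled. */
	if (static_branch_unlikely(&debug_stats_key))
		do_debug_accounting();

	/* ... fast path continues ... */
}

void enable_debug_stats(void)
{
	static_branch_enable(&debug_stats_key);	/* patch NOP -> jump */
}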
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 337c8818541d339aac3fd1e3e6af32dac6dff4c9..87e9ce6a63c5d0e78a17977e2e9271ffaf0bb946 100644
@@ -289,7 +289,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
        if (pv_enabled())
                goto queue;
 
-       if (virt_queued_spin_lock(lock))
+       if (virt_spin_lock(lock))
                return;
 
        /*