locking/pvqspinlock: Rename QUEUED_SPINLOCK to QUEUED_SPINLOCKS
Author:     Ingo Molnar <mingo@kernel.org>
AuthorDate: Mon, 11 May 2015 07:47:23 +0000 (09:47 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Mon, 11 May 2015 07:52:09 +0000 (09:52 +0200)
Valentin Rothberg reported that we use CONFIG_QUEUED_SPINLOCKS
in arch/x86/kernel/paravirt_patch_32.c, while the symbol is
called CONFIG_QUEUED_SPINLOCK. (Note the extra 'S'.)

But the typo was natural: the proper English term for such
a generic object is the plural 'queued spinlocks' - so rename
this and the related symbols to the plural form. (This also
makes the pre-existing plural usage in paravirt_patch_32.c
correct as-is, which is why that file needs no change below.)
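
A mismatch like this is the kind of inconsistency a stale-symbol
scan catches mechanically. As a minimal sketch - a simplified,
hypothetical cousin of what tools like
scripts/checkkconfigsymbols.py automate; a real tool also scans
Makefiles and filters false positives such as CONFIG_*_MODULE -
the following lists CONFIG_* symbols referenced in C sources but
never defined in any Kconfig file:

  #!/usr/bin/env python3
  # Sketch only: report CONFIG_* symbols referenced in C sources
  # but not defined by any Kconfig file.  Deliberately simplified:
  # it ignores Makefiles, defconfigs and _MODULE suffixes, so some
  # false positives are expected.
  import re
  from pathlib import Path

  defined = set()
  for kconfig in Path('.').rglob('Kconfig*'):
      for line in kconfig.read_text(errors='ignore').splitlines():
          m = re.match(r'\s*(?:menu)?config\s+([A-Za-z0-9_]+)', line)
          if m:
              defined.add('CONFIG_' + m.group(1))

  referenced = set()
  for src in Path('.').rglob('*.[chS]'):
      referenced.update(re.findall(r'CONFIG_[A-Za-z0-9_]+',
                                   src.read_text(errors='ignore')))

  for sym in sorted(referenced - defined):
      print(sym)   # e.g. CONFIG_QUEUED_SPINLOCKS, pre-rename

Run from the tree root before this rename, such a scan would have
flagged CONFIG_QUEUED_SPINLOCKS as used but nowhere defined.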

Reported-by: Valentin Rothberg <valentinrothberg@gmail.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <Waiman.Long@hp.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/Kconfig
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/spinlock.h
arch/x86/include/asm/spinlock_types.h
arch/x86/kernel/kvm.c
arch/x86/kernel/paravirt-spinlocks.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/xen/spinlock.c
kernel/Kconfig.locks
kernel/locking/Makefile

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 50ec043a920da54aa8bab0b6e5e9db2a6bec5208..f8dc6abbe6aefa3fedaa984879332d361f3549e7 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -127,7 +127,7 @@ config X86
        select MODULES_USE_ELF_RELA if X86_64
        select CLONE_BACKWARDS if X86_32
        select ARCH_USE_BUILTIN_BSWAP
-       select ARCH_USE_QUEUED_SPINLOCK
+       select ARCH_USE_QUEUED_SPINLOCKS
        select ARCH_USE_QUEUE_RWLOCK
        select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
        select OLD_SIGACTION if X86_32
@@ -667,7 +667,7 @@ config PARAVIRT_DEBUG
 config PARAVIRT_SPINLOCKS
        bool "Paravirtualization layer for spinlocks"
        depends on PARAVIRT && SMP
-       select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCK
+       select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCKS
        ---help---
          Paravirtualized spinlocks allow a pvops backend to replace the
          spinlock implementation with something virtualization-friendly
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 266c35381b624933c6efa48f98fa9450710ea695..d143bfad45d70f98e541c14f0e4f94a312d7b2ad 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -712,7 +712,7 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 
 static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
                                                        u32 val)
@@ -735,7 +735,7 @@ static __always_inline void pv_kick(int cpu)
        PVOP_VCALL1(pv_lock_ops.kick, cpu);
 }
 
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 
 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
                                                        __ticket_t ticket)
@@ -749,7 +749,7 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
        PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
-#endif /* CONFIG_QUEUED_SPINLOCK */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 #endif /* SMP && PARAVIRT_SPINLOCKS */
 
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 76cd68426af8f2b26c0458324a68550ada47c2e8..8766c7c395c27b961096af406dd7c0da6c0dcac5 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -336,16 +336,16 @@ typedef u16 __ticket_t;
 struct qspinlock;
 
 struct pv_lock_ops {
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
        void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
        struct paravirt_callee_save queued_spin_unlock;
 
        void (*wait)(u8 *ptr, u8 val);
        void (*kick)(int cpu);
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
        struct paravirt_callee_save lock_spinning;
        void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
-#endif /* !CONFIG_QUEUED_SPINLOCK */
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 4ec5413156ca5a6357d52a1a3afe894d767327b6..be0a05913b9105b62122afbbbef99c6146b2dbf6 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -42,7 +42,7 @@
 extern struct static_key paravirt_ticketlocks_enabled;
 static __always_inline bool static_key_false(struct static_key *key);
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 #include <asm/qspinlock.h>
 #else
 
@@ -200,7 +200,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
                cpu_relax();
        }
 }
-#endif /* CONFIG_QUEUED_SPINLOCK */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 /*
  * Read-write spinlocks, allowing multiple readers
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index 5df1f1b9a4b0df8a9a81c435c8df12270a9457b3..65c3e37f879aced6501861eff95d56e08735236a 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -23,7 +23,7 @@ typedef u32 __ticketpair_t;
 
 #define TICKET_SHIFT   (sizeof(__ticket_t) * 8)
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 #include <asm-generic/qspinlock_types.h>
 #else
 typedef struct arch_spinlock {
@@ -36,7 +36,7 @@ typedef struct arch_spinlock {
 } arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED      { { 0 } }
-#endif /* CONFIG_QUEUED_SPINLOCK */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 #include <asm-generic/qrwlock_types.h>
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 6c21d931bd248858503882ef368c3cdc947b3866..1681504e44a4c3479d26fd1c9d10f1fbee264d1f 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -585,7 +585,7 @@ static void kvm_kick_cpu(int cpu)
 }
 
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 
 #include <asm/qspinlock.h>
 
@@ -615,7 +615,7 @@ out:
        local_irq_restore(flags);
 }
 
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
 
 enum kvm_contention_stat {
        TAKEN_SLOW,
@@ -850,7 +850,7 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
        }
 }
 
-#endif /* !CONFIG_QUEUED_SPINLOCK */
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
@@ -863,13 +863,13 @@ void __init kvm_spinlock_init(void)
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
        pv_lock_ops.unlock_kick = kvm_unlock_kick;
 #endif
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index a33f1eb15003f0e355551aa69c72b00aed8c1889..33ee3e0efd65bccc9ca049b07a97e81428ead282 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -8,7 +8,7 @@
 
 #include <asm/paravirt.h>
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 __visible void __native_queued_spin_unlock(struct qspinlock *lock)
 {
        native_queued_spin_unlock(lock);
@@ -25,15 +25,15 @@ bool pv_is_native_spin_unlock(void)
 
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
        .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
        .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
        .wait = paravirt_nop,
        .kick = paravirt_nop,
-#else /* !CONFIG_QUEUED_SPINLOCK */
+#else /* !CONFIG_QUEUED_SPINLOCKS */
        .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
        .unlock_kick = paravirt_nop,
-#endif /* !CONFIG_QUEUED_SPINLOCK */
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index e0fb41c8255bcd2f9ffbaa8566bcab630b2237fd..a1fa867821866d31736e635a6868898ef302b75b 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -21,7 +21,7 @@ DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
 DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
-#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
 DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
 #endif
 
@@ -65,7 +65,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_cpu_ops, clts);
                PATCH_SITE(pv_mmu_ops, flush_tlb_single);
                PATCH_SITE(pv_cpu_ops, wbinvd);
-#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCK)
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
                case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
                        if (pv_is_native_spin_unlock()) {
                                start = start_pv_lock_ops_queued_spin_unlock;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index af907a90fb19ffb3a4990d8554a109ddd15f5e0d..9e2ba5c6e1dd7be4a0b10a70b315cf5f0f20c081 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
 static bool xen_pvspin = true;
 
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
 
 #include <asm/qspinlock.h>
 
@@ -65,7 +65,7 @@ static void xen_qlock_wait(u8 *byte, u8 val)
        xen_poll_irq(irq);
 }
 
-#else /* CONFIG_QUEUED_SPINLOCK */
+#else /* CONFIG_QUEUED_SPINLOCKS */
 
 enum xen_contention_stat {
        TAKEN_SLOW,
@@ -264,7 +264,7 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
                }
        }
 }
-#endif /* CONFIG_QUEUED_SPINLOCK */
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
 {
@@ -328,7 +328,7 @@ void __init xen_init_spinlocks(void)
                return;
        }
        printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
-#ifdef CONFIG_QUEUED_SPINLOCK
+#ifdef CONFIG_QUEUED_SPINLOCKS
        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
@@ -366,7 +366,7 @@ static __init int xen_parse_nopvspin(char *arg)
 }
 early_param("xen_nopvspin", xen_parse_nopvspin);
 
-#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCK)
+#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCKS)
 
 static struct dentry *d_spin_debug;
 
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 95dd7587ec342e375ad66bcc8ed0b262edbaae8f..65d755b6a663818d756d05b2fc81c591d6254818 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -235,11 +235,11 @@ config LOCK_SPIN_ON_OWNER
        def_bool y
        depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER
 
-config ARCH_USE_QUEUED_SPINLOCK
+config ARCH_USE_QUEUED_SPINLOCKS
        bool
 
-config QUEUED_SPINLOCK
-       def_bool y if ARCH_USE_QUEUED_SPINLOCK
+config QUEUED_SPINLOCKS
+       def_bool y if ARCH_USE_QUEUED_SPINLOCKS
        depends on SMP
 
 config ARCH_USE_QUEUE_RWLOCK
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index abfcef3c1ef9830bc6dfc8e4ac4f07d8818cb0a5..132aff9d3fbe577209c4515994dff91c570a5204 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_SMP) += lglock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
-obj-$(CONFIG_QUEUED_SPINLOCK) += qspinlock.o
+obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o