s390/uaccess, locking/static_keys: employ static_branch_likely()
author Heiko Carstens <heiko.carstens@de.ibm.com>
Wed, 29 Jul 2015 06:31:24 +0000 (08:31 +0200)
committer Ingo Molnar <mingo@kernel.org>
Mon, 3 Aug 2015 09:34:17 +0000 (11:34 +0200)
Use the new static_branch_likely() primitive to make sure that the
most likely case is executed without taking an unconditional branch.
This wasn't possible with the old jump label primitives.
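
For reference, the new API pairs DEFINE_STATIC_KEY_FALSE() with
static_branch_likely() at the call site and static_branch_enable() to
flip the key at runtime. A minimal sketch of the pattern, with
hypothetical helpers fast_op(), slow_op() and feature_present()
standing in for the mvcos/mvcp routines and test_facility(27):

  #include <linux/init.h>
  #include <linux/jump_label.h>
  #include <linux/types.h>

  extern unsigned long fast_op(void);	/* hypothetical fast path */
  extern unsigned long slow_op(void);	/* hypothetical fallback */
  extern bool feature_present(void);	/* hypothetical feature probe */

  static DEFINE_STATIC_KEY_FALSE(hypothetical_key);

  unsigned long hot_path(void)
  {
  	/*
  	 * Emitted as a nop once the key is enabled, so the likely
  	 * path falls straight through; until then it is a jump to
  	 * the out-of-line slow path.
  	 */
  	if (static_branch_likely(&hypothetical_key))
  		return fast_op();
  	return slow_op();
  }

  static int __init key_init(void)
  {
  	if (feature_present())
  		static_branch_enable(&hypothetical_key);
  	return 0;
  }
  early_initcall(key_init);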

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20150729064600.GB3953@osiris
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 4614d415bb58c96fdbbbdccd062074442ee5d681..93cb1d09493dd68729ffdb3fa12be34ab1cd776b 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -15,7 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/facility.h>
 
-static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
+static DEFINE_STATIC_KEY_FALSE(have_mvcos);
 
 static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
                                                 unsigned long size)
@@ -104,7 +104,7 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
 
 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (static_key_false(&have_mvcos))
+       if (static_branch_likely(&have_mvcos))
                return copy_from_user_mvcos(to, from, n);
        return copy_from_user_mvcp(to, from, n);
 }
@@ -177,7 +177,7 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
 
 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       if (static_key_false(&have_mvcos))
+       if (static_branch_likely(&have_mvcos))
                return copy_to_user_mvcos(to, from, n);
        return copy_to_user_mvcs(to, from, n);
 }
@@ -240,7 +240,7 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
 
 unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-       if (static_key_false(&have_mvcos))
+       if (static_branch_likely(&have_mvcos))
                return copy_in_user_mvcos(to, from, n);
        return copy_in_user_mvc(to, from, n);
 }
@@ -312,7 +312,7 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
 
 unsigned long __clear_user(void __user *to, unsigned long size)
 {
-       if (static_key_false(&have_mvcos))
+       if (static_branch_likely(&have_mvcos))
                return clear_user_mvcos(to, size);
        return clear_user_xc(to, size);
 }
@@ -386,7 +386,7 @@ early_param("uaccess_primary", parse_uaccess_pt);
 static int __init uaccess_init(void)
 {
        if (!uaccess_primary && test_facility(27))
-               static_key_slow_inc(&have_mvcos);
+               static_branch_enable(&have_mvcos);
        return 0;
 }
 early_initcall(uaccess_init);