context_tracking: Optimize guest APIs off case with static key
author Frederic Weisbecker <fweisbec@gmail.com>
Wed, 10 Jul 2013 00:44:35 +0000 (02:44 +0200)
committer Frederic Weisbecker <fweisbec@gmail.com>
Wed, 14 Aug 2013 15:14:46 +0000 (17:14 +0200)
Optimize the guest entry/exit APIs with static keys. This minimizes
the overhead for those who enable CONFIG_NO_HZ_FULL without
always using it. When no range is passed to nohz_full=, the
probes' overhead should be reduced to a minimum.
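
For reference, a minimal sketch of how the key gets flipped (simplified
from kernel/context_tracking.c of this era; the exact function body may
differ from the tree):

	/*
	 * The key starts false, so the static_key_false() sites in
	 * guest_enter()/guest_exit() compile to a patched-out branch.
	 * It is only incremented when a CPU is actually set for context
	 * tracking, i.e. when a range is passed to nohz_full=.
	 */
	struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;

	void context_tracking_cpu_set(int cpu)
	{
		if (!per_cpu(context_tracking.active, cpu)) {
			per_cpu(context_tracking.active, cpu) = true;
			static_key_slow_inc(&context_tracking_enabled);
		}
	}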

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Kevin Hilman <khilman@linaro.org>
include/linux/context_tracking.h
kernel/context_tracking.c
kernel/sched/cputime.c

index 38ab60b3f3a640656f912f0ad33454db1dd675b2..8854eadb2142737a7cb332664bf26f0645a27a76 100644 (file)
@@ -95,8 +95,23 @@ static inline void context_tracking_init(void) { }
 
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void guest_enter(void);
-extern void guest_exit(void);
+static inline void guest_enter(void)
+{
+       if (static_key_false(&context_tracking_enabled) &&
+           vtime_accounting_enabled())
+               vtime_guest_enter(current);
+       else
+               current->flags |= PF_VCPU;
+}
+
+static inline void guest_exit(void)
+{
+       if (static_key_false(&context_tracking_enabled) &&
+           vtime_accounting_enabled())
+               vtime_guest_exit(current);
+       else
+               current->flags &= ~PF_VCPU;
+}
 #else
 static inline void guest_enter(void)
 {
index 6e89e094c80e5835799ab8c251f89a02548ff329..b6a186c4b886c3fcad3270351ada706e8ea95cc2 100644 (file)
 #include <linux/export.h>
 
 struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(context_tracking_enabled);
 
 DEFINE_PER_CPU(struct context_tracking, context_tracking);
+EXPORT_SYMBOL_GPL(context_tracking);
 
 void context_tracking_cpu_set(int cpu)
 {
@@ -163,27 +165,6 @@ void context_tracking_user_exit(void)
        local_irq_restore(flags);
 }
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-void guest_enter(void)
-{
-       if (vtime_accounting_enabled())
-               vtime_guest_enter(current);
-       else
-               current->flags |= PF_VCPU;
-}
-EXPORT_SYMBOL_GPL(guest_enter);
-
-void guest_exit(void)
-{
-       if (vtime_accounting_enabled())
-               vtime_guest_exit(current);
-       else
-               current->flags &= ~PF_VCPU;
-}
-EXPORT_SYMBOL_GPL(guest_exit);
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-
-
 /**
  * context_tracking_task_switch - context switch the syscall callbacks
  * @prev: the task that is being switched out
index 223a35efa0a6bb4abbcc55ff627a5257716d1e75..bb6b29a3067cf2a9069a458e5ee5bfd0c9347126 100644 (file)
@@ -724,6 +724,7 @@ void vtime_guest_enter(struct task_struct *tsk)
        current->flags |= PF_VCPU;
        write_sequnlock(&tsk->vtime_seqlock);
 }
+EXPORT_SYMBOL_GPL(vtime_guest_enter);
 
 void vtime_guest_exit(struct task_struct *tsk)
 {
@@ -732,6 +733,7 @@ void vtime_guest_exit(struct task_struct *tsk)
        current->flags &= ~PF_VCPU;
        write_sequnlock(&tsk->vtime_seqlock);
 }
+EXPORT_SYMBOL_GPL(vtime_guest_exit);
 
 void vtime_account_idle(struct task_struct *tsk)
 {