rcu: Switch task's syscall hooks on context switch
author Frederic Weisbecker <fweisbec@gmail.com>
Mon, 16 Jul 2012 22:06:40 +0000 (15:06 -0700)
committer Frederic Weisbecker <fweisbec@gmail.com>
Wed, 26 Sep 2012 13:47:02 +0000 (15:47 +0200)
Clear a task's syscall hooks when it is scheduled out, so that if the
task later migrates it doesn't run the syscall slow path on a CPU
that might not need it.

Also set the syscall hooks on the next task if the current CPU needs them.
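
In short (a rough summary of the hunks below, not additional code): on
every context switch the TIF_NOHZ hook is moved from the outgoing task
to the incoming one, but only on CPUs that track userspace quiescent
states:

	rcu_switch(prev, next);		/* called right before switch_to() */

	/* in rcu_user_hooks_switch(), with interrupts disabled: */
	if (!rdtp->ignore_user_qs) {
		clear_tsk_thread_flag(prev, TIF_NOHZ);	/* prev may migrate elsewhere */
		set_tsk_thread_flag(next, TIF_NOHZ);	/* next runs here, needs the hook */
	}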

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Alessio Igor Bogani <abogani@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Avi Kivity <avi@redhat.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Kevin Hilman <khilman@ti.com>
Cc: Max Krasnyansky <maxk@qualcomm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Sven-Thorsten Dietrich <thebigcorporation@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
arch/um/drivers/mconsole_kern.c
include/linux/rcupdate.h
include/linux/sched.h
kernel/rcutree.c
kernel/sched/core.c

index 664a60e8dfb442fe2cb75c1ed5ba388a7bdb42b0..c17de0db6736e1bcef0fca9ab1a3d624cd271580 100644 (file)
@@ -705,6 +705,7 @@ static void stack_proc(void *arg)
        struct task_struct *from = current, *to = arg;
 
        to->thread.saved_task = from;
+       rcu_switch(from, to);
        switch_to(from, to, from);
 }
 
index f5034f22e94bffc8bdc53148dc9c72bb17591362..7c968e4f929ea49806c5e17e53bdfdfb997e4c79 100644 (file)
@@ -197,6 +197,8 @@ extern void rcu_user_enter(void);
 extern void rcu_user_exit(void);
 extern void rcu_user_enter_after_irq(void);
 extern void rcu_user_exit_after_irq(void);
+extern void rcu_user_hooks_switch(struct task_struct *prev,
+                                 struct task_struct *next);
 #else
 static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
index 23bddac4bad8d08f3781d1e8a453aa41edb28632..335720a1fc33362bfa62e238eaea609a01a04246 100644 (file)
@@ -1885,6 +1885,14 @@ static inline void rcu_copy_process(struct task_struct *p)
 
 #endif
 
+static inline void rcu_switch(struct task_struct *prev,
+                             struct task_struct *next)
+{
+#ifdef CONFIG_RCU_USER_QS
+       rcu_user_hooks_switch(prev, next);
+#endif
+}
+
 static inline void tsk_restore_flags(struct task_struct *task,
                                unsigned long orig_flags, unsigned long flags)
 {
index 6b82a956514940a3a4da17338b8c5bb02d2176fb..d2e74c8d4b0e5330d1f4f422cf876041f70688bb 100644 (file)
@@ -717,6 +717,21 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
+#ifdef CONFIG_RCU_USER_QS
+void rcu_user_hooks_switch(struct task_struct *prev,
+                          struct task_struct *next)
+{
+       struct rcu_dynticks *rdtp;
+
+       /* Interrupts are disabled in context switch */
+       rdtp = &__get_cpu_var(rcu_dynticks);
+       if (!rdtp->ignore_user_qs) {
+               clear_tsk_thread_flag(prev, TIF_NOHZ);
+               set_tsk_thread_flag(next, TIF_NOHZ);
+       }
+}
+#endif /* #ifdef CONFIG_RCU_USER_QS */
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
index 1a48cdbc86314624696d9c9f554f588a938e65b8..ea2213b07d9d84e4f52db7bc3a6256847840e27f 100644 (file)
@@ -2081,6 +2081,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 #endif
 
        /* Here we just switch the register state and the stack. */
+       rcu_switch(prev, next);
        switch_to(prev, next, prev);
 
        barrier();