idle: Implement generic idle function
author Thomas Gleixner <tglx@linutronix.de>
Thu, 21 Mar 2013 21:49:35 +0000 (22:49 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
Mon, 8 Apr 2013 15:39:23 +0000 (17:39 +0200)
All idle functions in arch/* are more or less the same, plus or minus
a few bugs and extra instrumentation, tickless support and other
optional items.

Implement a generic idle function which resembles the functionality
found in arch/. Provide weak arch_cpu_idle_* functions which can be
overridden by the architecture code if needed.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130321215233.646635455@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/Kconfig
include/linux/cpu.h
kernel/cpu/idle.c

index 1455579791ec02514a5d616a4b6749cbec3185b0..a699f3767be4d83ef0efbfac64a7245f75ca1ce0 100644 (file)
@@ -216,6 +216,9 @@ config USE_GENERIC_SMP_HELPERS
 config GENERIC_SMP_IDLE_THREAD
        bool
 
+config GENERIC_IDLE_LOOP
+       bool
+
 # Select if arch init_task initializer is different to init/init_task.c
 config ARCH_INIT_TASK
        bool
index 7419e30c55fb305465c0edd7e68f81068b7c0aa3..c6f6e0839b618611723507d4a5dcab33648a9d02 100644 (file)
@@ -220,4 +220,12 @@ enum cpuhp_state {
 void cpu_startup_entry(enum cpuhp_state state);
 void cpu_idle(void);
 
+void cpu_idle_poll_ctrl(bool enable);
+
+void arch_cpu_idle(void);
+void arch_cpu_idle_prepare(void);
+void arch_cpu_idle_enter(void);
+void arch_cpu_idle_exit(void);
+void arch_cpu_idle_dead(void);
+
 #endif /* _LINUX_CPU_H_ */
index 1908f00e0e98677d122817356249e3596f03f4e0..54c3203839342f7d4ead28f3051692979ef629b4 100644 (file)
@@ -3,8 +3,113 @@
  */
 #include <linux/sched.h>
 #include <linux/cpu.h>
+#include <linux/tick.h>
+#include <linux/mm.h>
 
+#include <asm/tlb.h>
+
+#include <trace/events/power.h>
+
+#ifndef CONFIG_GENERIC_IDLE_LOOP
 void cpu_startup_entry(enum cpuhp_state state)
 {
        cpu_idle();
 }
+#else
+
+static int __read_mostly cpu_idle_force_poll;
+
+void cpu_idle_poll_ctrl(bool enable)
+{
+       if (enable) {
+               cpu_idle_force_poll++;
+       } else {
+               cpu_idle_force_poll--;
+               WARN_ON_ONCE(cpu_idle_force_poll < 0);
+       }
+}
+
+#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
+static int __init cpu_idle_poll_setup(char *__unused)
+{
+       cpu_idle_force_poll = 1;
+       return 1;
+}
+__setup("nohlt", cpu_idle_poll_setup);
+
+static int __init cpu_idle_nopoll_setup(char *__unused)
+{
+       cpu_idle_force_poll = 0;
+       return 1;
+}
+__setup("hlt", cpu_idle_nopoll_setup);
+#endif
+
+static inline int cpu_idle_poll(void)
+{
+       trace_cpu_idle_rcuidle(0, smp_processor_id());
+       local_irq_enable();
+       while (!need_resched())
+               cpu_relax();
+       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+       return 1;
+}
+
+/* Weak implementations for optional arch specific functions */
+void __weak arch_cpu_idle_prepare(void) { }
+void __weak arch_cpu_idle_enter(void) { }
+void __weak arch_cpu_idle_exit(void) { }
+void __weak arch_cpu_idle_dead(void) { }
+void __weak arch_cpu_idle(void)
+{
+       cpu_idle_force_poll = 1;
+}
+
+/*
+ * Generic idle loop implementation
+ */
+static void cpu_idle_loop(void)
+{
+       while (1) {
+               tick_nohz_idle_enter();
+
+               while (!need_resched()) {
+                       check_pgt_cache();
+                       rmb();
+
+                       if (cpu_is_offline(smp_processor_id()))
+                               arch_cpu_idle_dead();
+
+                       local_irq_disable();
+                       arch_cpu_idle_enter();
+
+                       if (cpu_idle_force_poll) {
+                               cpu_idle_poll();
+                       } else {
+                               current_clr_polling();
+                               if (!need_resched()) {
+                                       stop_critical_timings();
+                                       rcu_idle_enter();
+                                       arch_cpu_idle();
+                                       WARN_ON_ONCE(irqs_disabled());
+                                       rcu_idle_exit();
+                                       start_critical_timings();
+                               } else {
+                                       local_irq_enable();
+                               }
+                               current_set_polling();
+                       }
+                       arch_cpu_idle_exit();
+               }
+               tick_nohz_idle_exit();
+               schedule_preempt_disabled();
+       }
+}
+
+void cpu_startup_entry(enum cpuhp_state state)
+{
+       current_set_polling();
+       arch_cpu_idle_prepare();
+       cpu_idle_loop();
+}
+#endif