/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

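/*
 * Illustrative usage (not part of this file): callers pair the
 * enable/disable calls so the reference count stays balanced:
 *
 *	cpu_idle_poll_ctrl(true);
 *	... window where deep idle states must be avoided ...
 *	cpu_idle_poll_ctrl(false);
 *
 * A counter rather than a bool keeps nested users safe; the
 * WARN_ON_ONCE() above catches unbalanced disables.
 */
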
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!tif_need_resched())
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();

	return 1;
}

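/*
 * (cpu_relax() is the architecture's busy-wait hint, e.g. the PAUSE
 * instruction on x86; the poll loop exits as soon as a wakeup sets
 * TIF_NEED_RESCHED on this task.)
 */
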
/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

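/*
 * Architectures are expected to override these weak stubs where it
 * matters; x86, for instance, implements arch_cpu_idle() on top of
 * its chosen idle routine (HLT or MWAIT based). The generic fallback
 * above cannot halt the CPU, so it just forces the polling loop and
 * re-enables interrupts.
 */
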
/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 * return non-zero on failure
 */
static int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;
	bool broadcast;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	/*
	 * During the idle period, stop measuring the latencies of
	 * irqs-disabled critical sections
	 */
	stop_critical_timings();

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more rcu read side critical sections and one more
	 * step to the grace period
	 */
	rcu_idle_enter();

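	/*
	 * (Once rcu_idle_enter() has been called, RCU is no longer
	 * watching this CPU, which is why the tracepoints used below
	 * are the _rcuidle() variants.)
	 */
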
	/*
	 * Check if the cpuidle framework is ready, otherwise fall back
	 * to the default arch specific idle method
	 */
	if (cpuidle_enabled(drv, dev)) {
use_default:
		/*
		 * We can't use the cpuidle framework, let's use the default
		 * idle routine.
		 */
		if (current_clr_polling_and_test())
			local_irq_enable();
		else
			arch_cpu_idle();

		goto exit_idle;
	}

	/*
	 * Ask the governor to choose an idle state it thinks
	 * it is convenient to go to. There is *always* a
	 * convenient idle state
	 */
	next_state = cpuidle_select(drv, dev);

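	/*
	 * (cpuidle_select() dispatches to the registered cpuidle
	 * governor, typically "menu" on tickless kernels or "ladder"
	 * otherwise, which picks an entry of drv->states[] based on
	 * predicted sleep length and latency constraints.)
	 */
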
	/*
	 * The idle task must be scheduled, it is pointless to
	 * go to idle, just update no idle residency and get
	 * out of this function
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		entered_state = next_state;
		local_irq_enable();
		goto exit_idle;
	}

	broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);

	/*
	 * Tell the time framework to switch to a broadcast timer
	 * because our local timer will be shut down. If a local timer
	 * is used from another cpu as a broadcast timer, this call may
	 * fail if it is not available
	 */
	if (broadcast &&
	    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
		goto use_default;

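	/*
	 * (CPUIDLE_FLAG_TIMER_STOP marks states in which the per-cpu
	 * local timer stops, hence the hand-over to the broadcast
	 * device above before entering such a state.)
	 */
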
	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts
	 */
	entered_state = cpuidle_enter(drv, dev, next_state);

	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	if (broadcast)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);

	/*
	 * Give the governor an opportunity to reflect on the outcome
	 */
	cpuidle_reflect(dev, entered_state);

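	/*
	 * (cpuidle_reflect() feeds the state actually entered, together
	 * with the measured residency in dev->last_residency, back to
	 * the governor so it can refine future predictions.)
	 */
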
exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
	start_critical_timings();

	return 0;
}

/*
 * Generic idle loop implementation
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we re-enable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away.
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

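/*
 * (The idle tasks run with preemption disabled, hence the
 * schedule_preempt_disabled() above: it re-enables preemption just
 * around the call into the scheduler and disables it again before
 * control returns to the loop.)
 */
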
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	__current_set_polling();
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}

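/*
 * (cpu_startup_entry() never returns; it is invoked once per CPU at
 * the end of boot or secondary bring-up, e.g. from rest_init() on the
 * boot CPU and from the arch's start_secondary() path on the others,
 * leaving each CPU spinning in cpu_idle_loop().)
 */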