/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

static int __read_mostly cpu_idle_force_poll;
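
/*
 * cpu_idle_poll_ctrl - enable/disable forced idle polling
 * @enable: true to force the polling idle loop, false to undo that
 *
 * The force-poll state nests: each enable must be balanced by a
 * disable, and polling stays in effect while the count is positive.
 */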
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
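
/*
 * cpu_idle_poll - idle by spinning with interrupts enabled until a
 * reschedule is needed or the forced-poll/broadcast condition clears.
 */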
static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!tif_need_resched() &&
		(cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
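
/*
 * Generic fallback for arch_cpu_idle(): it has no way to actually
 * halt the CPU, so it forces the polling loop and re-enables local
 * interrupts before returning.
 */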
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set. If it ever
 * stops polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;
	unsigned int broadcast;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * During the idle period, stop measuring the latencies of the
	 * irqs-disabled critical sections.
	 */
	stop_critical_timings();

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more rcu read side critical sections and one more
	 * step to the grace period.
	 */
	rcu_idle_enter();

	/*
	 * Ask the cpuidle framework to choose a convenient idle state.
	 * Fall back to the default arch idle method on errors.
	 */
	next_state = cpuidle_select(drv, dev);
	if (next_state < 0) {
use_default:
		/*
		 * We can't use the cpuidle framework, let's use the
		 * default idle routine.
		 */
		if (current_clr_polling_and_test())
			local_irq_enable();
		else
			arch_cpu_idle();

		goto exit_idle;
	}

	/*
	 * The idle task must be scheduled, it is pointless to
	 * go to idle, just record a zero idle residency and get
	 * out of this function.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		entered_state = next_state;
		local_irq_enable();
		goto exit_idle;
	}
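
	/* Check whether the target state stops the local timer. */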
	broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;

	/*
	 * Tell the time framework to switch to a broadcast timer
	 * because our local timer will be shut down. If a local timer
	 * is used from another cpu as a broadcast timer, this call may
	 * fail if it is not available.
	 */
	if (broadcast &&
	    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
		goto use_default;

	/* Take note of the planned idle state. */
	idle_set_state(this_rq(), &drv->states[next_state]);

	/*
	 * Enter the idle state previously returned by the governor
	 * decision. This function will block until an interrupt
	 * occurs and will take care of re-enabling the local
	 * interrupts.
	 */
	entered_state = cpuidle_enter(drv, dev, next_state);

	/* The cpu is no longer idle or about to enter idle. */
	idle_set_state(this_rq(), NULL);

	if (broadcast)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);

	/*
	 * Give the governor an opportunity to reflect on the outcome.
	 */
	cpuidle_reflect(dev, entered_state);

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable the local
	 * interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
	start_critical_timings();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle). This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we re-enable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away.
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending() and reschedule
		 * if need_resched is set while polling is set. That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb__after_atomic();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}
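
/*
 * cpu_startup_entry - arch code jumps here once a CPU is brought up.
 * It never returns: after the one-time setup below it enters
 * cpu_idle_loop().
 */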
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}