2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * Internal non-public definitions that provide either classic
4 * or preemptible semantics.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 * Copyright Red Hat, 2009
21 * Copyright IBM Corporation, 2009
23 * Author: Ingo Molnar <mingo@elte.hu>
24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
27 #include <linux/delay.h>
29 #define RCU_KTHREAD_PRIO 1
31 #ifdef CONFIG_RCU_BOOST
32 #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
34 #define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
38 * Check the RCU kernel configuration parameters and print informative
39 * messages about anything out of the ordinary. If you like #ifdef, you
40 * will love this function.
42 static void __init rcu_bootup_announce_oddness(void)
44 #ifdef CONFIG_RCU_TRACE
45 printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
47 #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
48 printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
51 #ifdef CONFIG_RCU_FANOUT_EXACT
52 printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
54 #ifdef CONFIG_RCU_FAST_NO_HZ
56 "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
58 #ifdef CONFIG_PROVE_RCU
59 printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
61 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
62 printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
64 #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
65 printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
67 #if defined(CONFIG_RCU_CPU_STALL_INFO)
68 printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
70 #if NUM_RCU_LVL_4 != 0
71 printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
73 if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
74 printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
77 #ifdef CONFIG_TREE_PREEMPT_RCU
79 struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
80 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
81 static struct rcu_state *rcu_state = &rcu_preempt_state;
83 static void rcu_read_unlock_special(struct task_struct *t);
84 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
87 * Tell them what RCU they are running.
89 static void __init rcu_bootup_announce(void)
91 printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
92 rcu_bootup_announce_oddness();
96 * Return the number of RCU-preempt batches processed thus far
97 * for debug and statistics.
99 long rcu_batches_completed_preempt(void)
101 return rcu_preempt_state.completed;
103 EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
106 * Return the number of RCU batches processed thus far for debug & stats.
108 long rcu_batches_completed(void)
110 return rcu_batches_completed_preempt();
112 EXPORT_SYMBOL_GPL(rcu_batches_completed);
115 * Force a quiescent state for preemptible RCU.
117 void rcu_force_quiescent_state(void)
119 force_quiescent_state(&rcu_preempt_state, 0);
121 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
124 * Record a preemptible-RCU quiescent state for the specified CPU. Note
125 * that this just means that the task currently running on the CPU is
126 * not in a quiescent state. There might be any number of tasks blocked
127 * while in an RCU read-side critical section.
129 * Unlike the other rcu_*_qs() functions, callers to this function
130 * must disable irqs in order to protect the assignment to
131 * ->rcu_read_unlock_special.
133 static void rcu_preempt_qs(int cpu)
135 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
137 rdp->passed_quiesce_gpnum = rdp->gpnum;
139 if (rdp->passed_quiesce == 0)
140 trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
141 rdp->passed_quiesce = 1;
142 current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
146 * We have entered the scheduler, and the current task might soon be
147 * context-switched away from. If this task is in an RCU read-side
148 * critical section, we will no longer be able to rely on the CPU to
149 * record that fact, so we enqueue the task on the blkd_tasks list.
150 * The task will dequeue itself when it exits the outermost enclosing
151 * RCU read-side critical section. Therefore, the current grace period
152 * cannot be permitted to complete until the blkd_tasks list entries
153 * predating the current grace period drain, in other words, until
154 * rnp->gp_tasks becomes NULL.
156 * Caller must disable preemption.
158 static void rcu_preempt_note_context_switch(int cpu)
160 struct task_struct *t = current;
162 struct rcu_data *rdp;
163 struct rcu_node *rnp;
165 if (t->rcu_read_lock_nesting > 0 &&
166 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
168 /* Possibly blocking in an RCU read-side critical section. */
169 rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
171 raw_spin_lock_irqsave(&rnp->lock, flags);
172 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
173 t->rcu_blocked_node = rnp;
176 * If this CPU has already checked in, then this task
177 * will hold up the next grace period rather than the
178 * current grace period. Queue the task accordingly.
179 * If the task is queued for the current grace period
180 * (i.e., this CPU has not yet passed through a quiescent
181 * state for the current grace period), then as long
182 * as that task remains queued, the current grace period
183 * cannot end. Note that there is some uncertainty as
184 * to exactly when the current grace period started.
185 * We take a conservative approach, which can result
186 * in unnecessarily waiting on tasks that started very
187 * slightly after the current grace period began. C'est la vie!!!
190 * But first, note that the current CPU must still be online!
193 WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
194 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
195 if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
196 list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
197 rnp->gp_tasks = &t->rcu_node_entry;
198 #ifdef CONFIG_RCU_BOOST
199 if (rnp->boost_tasks != NULL)
200 rnp->boost_tasks = rnp->gp_tasks;
201 #endif /* #ifdef CONFIG_RCU_BOOST */
203 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
204 if (rnp->qsmask & rdp->grpmask)
205 rnp->gp_tasks = &t->rcu_node_entry;
207 trace_rcu_preempt_task(rdp->rsp->name,
209 (rnp->qsmask & rdp->grpmask)
212 raw_spin_unlock_irqrestore(&rnp->lock, flags);
213 } else if (t->rcu_read_lock_nesting < 0 &&
214 t->rcu_read_unlock_special) {
217 * Complete exit from RCU read-side critical section on
218 * behalf of preempted instance of __rcu_read_unlock().
220 rcu_read_unlock_special(t);
224 * Either we were not in an RCU read-side critical section to
225 * begin with, or we have now recorded that critical section
226 * globally. Either way, we can now note a quiescent state
227 * for this CPU. Again, if we were in an RCU read-side critical
228 * section, and if that critical section was blocking the current
229 * grace period, then the fact that the task has been enqueued
230 * means that we continue to block the current grace period.
232 local_irq_save(flags);
234 local_irq_restore(flags);
238 * Tree-preemptible RCU implementation for rcu_read_lock().
239 * Just increment ->rcu_read_lock_nesting; shared state will be updated if we block.
242 void __rcu_read_lock(void)
244 current->rcu_read_lock_nesting++;
245 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
247 EXPORT_SYMBOL_GPL(__rcu_read_lock);
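/*
 * Illustrative reader-side sketch (hypothetical names, not part of this
 * file): a typical preemptible-RCU read-side critical section reaches
 * __rcu_read_lock()/__rcu_read_unlock() via the rcu_read_lock() and
 * rcu_read_unlock() wrappers:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gbl_foo);		// hypothetical global pointer
 *	if (p != NULL)
 *		do_something_with(p);		// hypothetical accessor
 *	rcu_read_unlock();
 *
 * If the task is preempted between lock and unlock, the context-switch
 * path above queues it on the relevant rcu_node's ->blkd_tasks list.
 */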
250 * Check for preempted RCU readers blocking the current grace period
251 * for the specified rcu_node structure. If the caller needs a reliable
252 * answer, it must hold the rcu_node's ->lock.
254 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
256 return rnp->gp_tasks != NULL;
260 * Record a quiescent state for all tasks that were previously queued
261 * on the specified rcu_node structure and that were blocking the current
262 * RCU grace period. The caller must hold the specified rnp->lock with
263 * irqs disabled, and this lock is released upon return, but irqs remain disabled.
266 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
267 __releases(rnp->lock)
270 struct rcu_node *rnp_p;
272 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
273 raw_spin_unlock_irqrestore(&rnp->lock, flags);
274 return; /* Still need more quiescent states! */
280 * Either there is only one rcu_node in the tree,
281 * or tasks were kicked up to root rcu_node due to
282 * CPUs going offline.
284 rcu_report_qs_rsp(&rcu_preempt_state, flags);
288 /* Report up the rest of the hierarchy. */
290 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
291 raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
292 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
296 * Advance a ->blkd_tasks-list pointer to the next entry, returning
297 * NULL instead if at the end of the list.
299 static struct list_head *rcu_next_node_entry(struct task_struct *t,
300 struct rcu_node *rnp)
302 struct list_head *np;
304 np = t->rcu_node_entry.next;
305 if (np == &rnp->blkd_tasks)
311 * Handle special cases during rcu_read_unlock(), such as needing to
312 * notify RCU core processing or task having blocked during the RCU
313 * read-side critical section.
315 static noinline void rcu_read_unlock_special(struct task_struct *t)
321 struct list_head *np;
322 #ifdef CONFIG_RCU_BOOST
323 struct rt_mutex *rbmp = NULL;
324 #endif /* #ifdef CONFIG_RCU_BOOST */
325 struct rcu_node *rnp;
328 /* NMI handlers cannot block and cannot safely manipulate state. */
332 local_irq_save(flags);
335 * If RCU core is waiting for this CPU to exit critical section,
336 * let it know that we have done so.
338 special = t->rcu_read_unlock_special;
339 if (special & RCU_READ_UNLOCK_NEED_QS) {
340 rcu_preempt_qs(smp_processor_id());
343 /* Hardware IRQ handlers cannot block. */
344 if (in_irq() || in_serving_softirq()) {
345 local_irq_restore(flags);
349 /* Clean up if blocked during RCU read-side critical section. */
350 if (special & RCU_READ_UNLOCK_BLOCKED) {
351 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
354 * Remove this task from the list it blocked on. The
355 * task can migrate while we acquire the lock, but at
356 * most one time. So at most two passes through loop.
359 rnp = t->rcu_blocked_node;
360 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
361 if (rnp == t->rcu_blocked_node)
363 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
365 empty = !rcu_preempt_blocked_readers_cgp(rnp);
366 empty_exp = !rcu_preempted_readers_exp(rnp);
367 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
368 np = rcu_next_node_entry(t, rnp);
369 list_del_init(&t->rcu_node_entry);
370 t->rcu_blocked_node = NULL;
371 trace_rcu_unlock_preempted_task("rcu_preempt",
373 if (&t->rcu_node_entry == rnp->gp_tasks)
375 if (&t->rcu_node_entry == rnp->exp_tasks)
377 #ifdef CONFIG_RCU_BOOST
378 if (&t->rcu_node_entry == rnp->boost_tasks)
379 rnp->boost_tasks = np;
380 /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
381 if (t->rcu_boost_mutex) {
382 rbmp = t->rcu_boost_mutex;
383 t->rcu_boost_mutex = NULL;
385 #endif /* #ifdef CONFIG_RCU_BOOST */
388 * If this was the last task on the current list, and if
389 * we aren't waiting on any CPUs, report the quiescent state.
390 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
391 * so we must take a snapshot of the expedited state.
393 empty_exp_now = !rcu_preempted_readers_exp(rnp);
394 if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
395 trace_rcu_quiescent_state_report("preempt_rcu",
402 rcu_report_unblock_qs_rnp(rnp, flags);
404 raw_spin_unlock_irqrestore(&rnp->lock, flags);
406 #ifdef CONFIG_RCU_BOOST
407 /* Unboost if we were boosted. */
409 rt_mutex_unlock(rbmp);
410 #endif /* #ifdef CONFIG_RCU_BOOST */
413 * If this was the last task on the expedited lists,
414 * then we need to report up the rcu_node hierarchy.
416 if (!empty_exp && empty_exp_now)
417 rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
419 local_irq_restore(flags);
424 * Tree-preemptible RCU implementation for rcu_read_unlock().
425 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
426 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
427 * invoke rcu_read_unlock_special() to clean up after a context switch
428 * in an RCU read-side critical section and other special cases.
430 void __rcu_read_unlock(void)
432 struct task_struct *t = current;
434 if (t->rcu_read_lock_nesting != 1)
435 --t->rcu_read_lock_nesting;
437 barrier(); /* critical section before exit code. */
438 t->rcu_read_lock_nesting = INT_MIN;
439 barrier(); /* assign before ->rcu_read_unlock_special load */
440 if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
441 rcu_read_unlock_special(t);
442 barrier(); /* ->rcu_read_unlock_special load before assign */
443 t->rcu_read_lock_nesting = 0;
445 #ifdef CONFIG_PROVE_LOCKING
447 int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
449 WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
451 #endif /* #ifdef CONFIG_PROVE_LOCKING */
453 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
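/*
 * Worked example of the INT_MIN trick above, assuming one level of nesting:
 * on the outermost rcu_read_unlock(), ->rcu_read_lock_nesting goes from 1
 * to INT_MIN before ->rcu_read_unlock_special is examined, so any
 * rcu_read_lock()/rcu_read_unlock() pair in an interrupt handler arriving
 * in that window sees a nesting count other than 1 and takes only the
 * simple decrement path; the count is finally set to 0 once any
 * rcu_read_unlock_special() processing has completed.
 */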
455 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
458 * Dump detailed information for all tasks blocking the current RCU
459 * grace period on the specified rcu_node structure.
461 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
464 struct task_struct *t;
466 if (!rcu_preempt_blocked_readers_cgp(rnp))
468 raw_spin_lock_irqsave(&rnp->lock, flags);
469 t = list_entry(rnp->gp_tasks,
470 struct task_struct, rcu_node_entry);
471 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
473 raw_spin_unlock_irqrestore(&rnp->lock, flags);
477 * Dump detailed information for all tasks blocking the current RCU grace period.
480 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
482 struct rcu_node *rnp = rcu_get_root(rsp);
484 rcu_print_detail_task_stall_rnp(rnp);
485 rcu_for_each_leaf_node(rsp, rnp)
486 rcu_print_detail_task_stall_rnp(rnp);
489 #else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
491 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
495 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
497 #ifdef CONFIG_RCU_CPU_STALL_INFO
499 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
501 printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
502 rnp->level, rnp->grplo, rnp->grphi);
505 static void rcu_print_task_stall_end(void)
507 printk(KERN_CONT "\n");
510 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
512 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
516 static void rcu_print_task_stall_end(void)
520 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
523 * Scan the current list of tasks blocked within RCU read-side critical
524 * sections, printing out the tid of each.
526 static int rcu_print_task_stall(struct rcu_node *rnp)
528 struct task_struct *t;
531 if (!rcu_preempt_blocked_readers_cgp(rnp))
533 rcu_print_task_stall_begin(rnp);
534 t = list_entry(rnp->gp_tasks,
535 struct task_struct, rcu_node_entry);
536 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
537 printk(KERN_CONT " P%d", t->pid);
540 rcu_print_task_stall_end();
545 * Suppress preemptible RCU's CPU stall warnings by pushing the
546 * time of the next stall-warning message comfortably far into the future.
549 static void rcu_preempt_stall_reset(void)
551 rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
555 * Check that the list of blocked tasks for the newly completed grace
556 * period is in fact empty. It is a serious bug to complete a grace
557 * period that still has RCU readers blocked! This function must be
558 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
559 * must be held by the caller.
561 * Also, if there are blocked tasks on the list, they automatically
562 * block the newly created grace period, so set up ->gp_tasks accordingly.
564 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
566 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
567 if (!list_empty(&rnp->blkd_tasks))
568 rnp->gp_tasks = rnp->blkd_tasks.next;
569 WARN_ON_ONCE(rnp->qsmask);
572 #ifdef CONFIG_HOTPLUG_CPU
575 * Handle tasklist migration for case in which all CPUs covered by the
576 * specified rcu_node have gone offline. Move them up to the root
577 * rcu_node. The reason for not just moving them to the immediate
578 * parent is to remove the need for rcu_read_unlock_special() to
579 * make more than two attempts to acquire the target rcu_node's lock.
580 * Returns true if there were tasks blocking the current RCU grace period.
583 * Returns 1 if there was previously a task blocking the current grace
584 * period on the specified rcu_node structure.
586 * The caller must hold rnp->lock with irqs disabled.
588 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
589 struct rcu_node *rnp,
590 struct rcu_data *rdp)
592 struct list_head *lp;
593 struct list_head *lp_root;
595 struct rcu_node *rnp_root = rcu_get_root(rsp);
596 struct task_struct *t;
598 if (rnp == rnp_root) {
599 WARN_ONCE(1, "Last CPU thought to be offlined?");
600 return 0; /* Shouldn't happen: at least one CPU online. */
603 /* If we are on an internal node, complain bitterly. */
604 WARN_ON_ONCE(rnp != rdp->mynode);
607 * Move tasks up to root rcu_node. Don't try to get fancy for
608 * this corner-case operation -- just put this node's tasks
609 * at the head of the root node's list, and update the root node's
610 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
611 * if non-NULL. This might result in waiting for more tasks than
612 * absolutely necessary, but this is a good performance/complexity tradeoff.
615 if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
616 retval |= RCU_OFL_TASKS_NORM_GP;
617 if (rcu_preempted_readers_exp(rnp))
618 retval |= RCU_OFL_TASKS_EXP_GP;
619 lp = &rnp->blkd_tasks;
620 lp_root = &rnp_root->blkd_tasks;
621 while (!list_empty(lp)) {
622 t = list_entry(lp->next, typeof(*t), rcu_node_entry);
623 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
624 list_del(&t->rcu_node_entry);
625 t->rcu_blocked_node = rnp_root;
626 list_add(&t->rcu_node_entry, lp_root);
627 if (&t->rcu_node_entry == rnp->gp_tasks)
628 rnp_root->gp_tasks = rnp->gp_tasks;
629 if (&t->rcu_node_entry == rnp->exp_tasks)
630 rnp_root->exp_tasks = rnp->exp_tasks;
631 #ifdef CONFIG_RCU_BOOST
632 if (&t->rcu_node_entry == rnp->boost_tasks)
633 rnp_root->boost_tasks = rnp->boost_tasks;
634 #endif /* #ifdef CONFIG_RCU_BOOST */
635 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
638 #ifdef CONFIG_RCU_BOOST
639 /* In case root is being boosted and leaf is not. */
640 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
641 if (rnp_root->boost_tasks != NULL &&
642 rnp_root->boost_tasks != rnp_root->gp_tasks)
643 rnp_root->boost_tasks = rnp_root->gp_tasks;
644 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
645 #endif /* #ifdef CONFIG_RCU_BOOST */
647 rnp->gp_tasks = NULL;
648 rnp->exp_tasks = NULL;
652 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
655 * Do CPU-offline processing for preemptible RCU.
657 static void rcu_preempt_cleanup_dead_cpu(int cpu)
659 rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
663 * Check for a quiescent state from the current CPU. When a task blocks,
664 * the task is recorded in the corresponding CPU's rcu_node structure,
665 * which is checked elsewhere.
667 * Caller must disable hard irqs.
669 static void rcu_preempt_check_callbacks(int cpu)
671 struct task_struct *t = current;
673 if (t->rcu_read_lock_nesting == 0) {
677 if (t->rcu_read_lock_nesting > 0 &&
678 per_cpu(rcu_preempt_data, cpu).qs_pending)
679 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
683 * Process callbacks for preemptible RCU.
685 static void rcu_preempt_process_callbacks(void)
687 __rcu_process_callbacks(&rcu_preempt_state,
688 &__get_cpu_var(rcu_preempt_data));
691 #ifdef CONFIG_RCU_BOOST
693 static void rcu_preempt_do_callbacks(void)
695 rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
698 #endif /* #ifdef CONFIG_RCU_BOOST */
701 * Queue a preemptible-RCU callback for invocation after a grace period.
703 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
705 __call_rcu(head, func, &rcu_preempt_state, 0);
707 EXPORT_SYMBOL_GPL(call_rcu);
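/*
 * Illustrative call_rcu() usage sketch (a hypothetical "struct foo" with an
 * embedded rcu_head named "rcu"; not part of this file):
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		struct foo *fp = container_of(rcu, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	// updater: unlink fp from the data structure, then:
 *	call_rcu(&fp->rcu, foo_reclaim);
 *
 * foo_reclaim() then runs only after all pre-existing readers have left
 * their RCU read-side critical sections.
 */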
710 * Queue an RCU callback for lazy invocation after a grace period.
711 * This will likely be later named something like "call_rcu_lazy()",
712 * but this change will require some way of tagging the lazy RCU
713 * callbacks in the list of pending callbacks. Until then, this
714 * function may only be called from __kfree_rcu().
716 void kfree_call_rcu(struct rcu_head *head,
717 void (*func)(struct rcu_head *rcu))
719 __call_rcu(head, func, &rcu_preempt_state, 1);
721 EXPORT_SYMBOL_GPL(kfree_call_rcu);
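/*
 * Callers normally reach kfree_call_rcu() through the kfree_rcu() wrapper,
 * for example (hypothetical "struct foo" with an embedded rcu_head "rcu"):
 *
 *	kfree_rcu(fp, rcu);
 *
 * which queues a lazy callback that simply kfree()s the enclosing object.
 */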
724 * synchronize_rcu - wait until a grace period has elapsed.
726 * Control will return to the caller some time after a full grace
727 * period has elapsed, in other words after all currently executing RCU
728 * read-side critical sections have completed. Note, however, that
729 * upon return from synchronize_rcu(), the caller might well be executing
730 * concurrently with new RCU read-side critical sections that began while
731 * synchronize_rcu() was waiting. RCU read-side critical sections are
732 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
734 void synchronize_rcu(void)
736 rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
737 !lock_is_held(&rcu_lock_map) &&
738 !lock_is_held(&rcu_sched_lock_map),
739 "Illegal synchronize_rcu() in RCU read-side critical section");
740 if (!rcu_scheduler_active)
742 wait_rcu_gp(call_rcu);
744 EXPORT_SYMBOL_GPL(synchronize_rcu);
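/*
 * Illustrative updater-side sketch using synchronize_rcu() (hypothetical
 * names, not part of this file):
 *
 *	spin_lock(&gbl_foo_lock);
 *	old = rcu_dereference_protected(gbl_foo,
 *					lockdep_is_held(&gbl_foo_lock));
 *	rcu_assign_pointer(gbl_foo, new);
 *	spin_unlock(&gbl_foo_lock);
 *	synchronize_rcu();	// wait for pre-existing readers to finish
 *	kfree(old);
 */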
746 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
747 static long sync_rcu_preempt_exp_count;
748 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
751 * Return non-zero if there are any tasks in RCU read-side critical
752 * sections blocking the current preemptible-RCU expedited grace period.
753 * If there is no preemptible-RCU expedited grace period currently in
754 * progress, returns zero unconditionally.
756 static int rcu_preempted_readers_exp(struct rcu_node *rnp)
758 return rnp->exp_tasks != NULL;
762 * Return non-zero if there is no RCU expedited grace period in progress
763 * for the specified rcu_node structure, in other words, if all CPUs and
764 * tasks covered by the specified rcu_node structure have done their bit
765 * for the current expedited grace period. Works only for preemptible
766 * RCU -- other RCU implementations use other means.
768 * Caller must hold sync_rcu_preempt_exp_mutex.
770 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
772 return !rcu_preempted_readers_exp(rnp) &&
773 ACCESS_ONCE(rnp->expmask) == 0;
777 * Report the exit from RCU read-side critical section for the last task
778 * that queued itself during or before the current expedited preemptible-RCU
779 * grace period. This event is reported either to the rcu_node structure on
780 * which the task was queued or to one of that rcu_node structure's ancestors,
781 * recursively up the tree. (Calm down, calm down, we do the recursion iteratively!)
784 * Most callers will set the "wake" flag, but the task initiating the
785 * expedited grace period need not wake itself.
787 * Caller must hold sync_rcu_preempt_exp_mutex.
789 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
795 raw_spin_lock_irqsave(&rnp->lock, flags);
797 if (!sync_rcu_preempt_exp_done(rnp)) {
798 raw_spin_unlock_irqrestore(&rnp->lock, flags);
801 if (rnp->parent == NULL) {
802 raw_spin_unlock_irqrestore(&rnp->lock, flags);
804 wake_up(&sync_rcu_preempt_exp_wq);
808 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
810 raw_spin_lock(&rnp->lock); /* irqs already disabled */
811 rnp->expmask &= ~mask;
816 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
817 * grace period for the specified rcu_node structure. If there are no such
818 * tasks, report it up the rcu_node hierarchy.
820 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
823 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
828 raw_spin_lock_irqsave(&rnp->lock, flags);
829 if (list_empty(&rnp->blkd_tasks))
830 raw_spin_unlock_irqrestore(&rnp->lock, flags);
832 rnp->exp_tasks = rnp->blkd_tasks.next;
833 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
837 rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
841 * synchronize_rcu_expedited - Brute-force RCU grace period
843 * Wait for an RCU-preempt grace period, but expedite it. The basic
844 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
845 * the ->blkd_tasks lists and wait for this list to drain. This consumes
846 * significant time on all CPUs and is unfriendly to real-time workloads,
847 * so is thus not recommended for any sort of common-case code.
848 * In fact, if you are using synchronize_rcu_expedited() in a loop,
849 * please restructure your code to batch your updates, and then use a
850 * single synchronize_rcu() instead.
852 * Note that it is illegal to call this function while holding any lock
853 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
854 * to call this function from a CPU-hotplug notifier. Failing to observe
855 * these restrictions will result in deadlock.
857 void synchronize_rcu_expedited(void)
860 struct rcu_node *rnp;
861 struct rcu_state *rsp = &rcu_preempt_state;
865 smp_mb(); /* Caller's modifications seen first by other CPUs. */
866 snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
867 smp_mb(); /* Above access cannot bleed into critical section. */
870 * Acquire lock, falling back to synchronize_rcu() if too many
871 * lock-acquisition failures. Of course, if someone does the
872 * expedited grace period for us, just leave.
874 while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
876 udelay(trycount * num_online_cpus());
881 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
882 goto mb_ret; /* Others did our work for us. */
884 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
885 goto unlock_mb_ret; /* Others did our work for us. */
887 /* force all RCU readers onto ->blkd_tasks lists. */
888 synchronize_sched_expedited();
890 raw_spin_lock_irqsave(&rsp->onofflock, flags);
892 /* Initialize ->expmask for all non-leaf rcu_node structures. */
893 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
894 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
895 rnp->expmask = rnp->qsmaskinit;
896 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
899 /* Snapshot current state of ->blkd_tasks lists. */
900 rcu_for_each_leaf_node(rsp, rnp)
901 sync_rcu_preempt_exp_init(rsp, rnp);
902 if (NUM_RCU_NODES > 1)
903 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
905 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
907 /* Wait for snapshotted ->blkd_tasks lists to drain. */
908 rnp = rcu_get_root(rsp);
909 wait_event(sync_rcu_preempt_exp_wq,
910 sync_rcu_preempt_exp_done(rnp));
912 /* Clean up and exit. */
913 smp_mb(); /* ensure expedited GP seen before counter increment. */
914 ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
916 mutex_unlock(&sync_rcu_preempt_exp_mutex);
918 smp_mb(); /* ensure subsequent action seen after grace period. */
920 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
923 * Check to see if there is any immediate preemptible-RCU-related work to be done.
926 static int rcu_preempt_pending(int cpu)
928 return __rcu_pending(&rcu_preempt_state,
929 &per_cpu(rcu_preempt_data, cpu));
933 * Does preemptible RCU have callbacks on this CPU?
935 static int rcu_preempt_cpu_has_callbacks(int cpu)
937 return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
941 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
943 void rcu_barrier(void)
945 _rcu_barrier(&rcu_preempt_state, call_rcu);
947 EXPORT_SYMBOL_GPL(rcu_barrier);
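/*
 * Typical use (illustrative, not specific to this file): a module that
 * posts callbacks with call_rcu() invokes rcu_barrier() from its exit
 * handler so that all outstanding callbacks have been invoked before the
 * module text containing those callback functions is unloaded.
 */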
950 * Initialize preemptible RCU's per-CPU data.
952 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
954 rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
958 * Move preemptible RCU's callbacks from dying CPU to other online CPU
959 * and record a quiescent state.
961 static void rcu_preempt_cleanup_dying_cpu(void)
963 rcu_cleanup_dying_cpu(&rcu_preempt_state);
967 * Initialize preemptible RCU's state structures.
969 static void __init __rcu_init_preempt(void)
971 rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
974 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
976 static struct rcu_state *rcu_state = &rcu_sched_state;
979 * Tell them what RCU they are running.
981 static void __init rcu_bootup_announce(void)
983 printk(KERN_INFO "Hierarchical RCU implementation.\n");
984 rcu_bootup_announce_oddness();
988 * Return the number of RCU batches processed thus far for debug & stats.
990 long rcu_batches_completed(void)
992 return rcu_batches_completed_sched();
994 EXPORT_SYMBOL_GPL(rcu_batches_completed);
997 * Force a quiescent state for RCU, which, because there is no preemptible
998 * RCU, becomes the same as rcu-sched.
1000 void rcu_force_quiescent_state(void)
1002 rcu_sched_force_quiescent_state();
1004 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
1007 * Because preemptible RCU does not exist, we never have to check for
1008 * CPUs being in quiescent states.
1010 static void rcu_preempt_note_context_switch(int cpu)
1015 * Because preemptible RCU does not exist, there are never any preempted RCU readers.
1018 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
1023 #ifdef CONFIG_HOTPLUG_CPU
1025 /* Because preemptible RCU does not exist, no quieting of tasks. */
1026 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
1028 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1031 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1034 * Because preemptible RCU does not exist, we never have to check for
1035 * tasks blocked within RCU read-side critical sections.
1037 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
1042 * Because preemptible RCU does not exist, we never have to check for
1043 * tasks blocked within RCU read-side critical sections.
1045 static int rcu_print_task_stall(struct rcu_node *rnp)
1051 * Because preemptible RCU does not exist, there is no need to suppress
1052 * its CPU stall warnings.
1054 static void rcu_preempt_stall_reset(void)
1059 * Because there is no preemptible RCU, there can be no readers blocked,
1060 * so there is no need to check for blocked tasks. So check only for
1061 * bogus qsmask values.
1063 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
1065 WARN_ON_ONCE(rnp->qsmask);
1068 #ifdef CONFIG_HOTPLUG_CPU
1071 * Because preemptible RCU does not exist, it never needs to migrate
1072 * tasks that were blocked within RCU read-side critical sections, and
1073 * such non-existent tasks cannot possibly have been blocking the current grace period.
1076 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1077 struct rcu_node *rnp,
1078 struct rcu_data *rdp)
1083 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1086 * Because preemptible RCU does not exist, it never needs CPU-offline processing.
1089 static void rcu_preempt_cleanup_dead_cpu(int cpu)
1094 * Because preemptible RCU does not exist, it never has any callbacks to check.
1097 static void rcu_preempt_check_callbacks(int cpu)
1102 * Because preemptible RCU does not exist, it never has any callbacks to process.
1105 static void rcu_preempt_process_callbacks(void)
1110 * Queue an RCU callback for lazy invocation after a grace period.
1111 * This will likely be later named something like "call_rcu_lazy()",
1112 * but this change will require some way of tagging the lazy RCU
1113 * callbacks in the list of pending callbacks. Until then, this
1114 * function may only be called from __kfree_rcu().
1116 * Because there is no preemptible RCU, we use RCU-sched instead.
1118 void kfree_call_rcu(struct rcu_head *head,
1119 void (*func)(struct rcu_head *rcu))
1121 __call_rcu(head, func, &rcu_sched_state, 1);
1123 EXPORT_SYMBOL_GPL(kfree_call_rcu);
1126 * Wait for an rcu-preempt grace period, but make it happen quickly.
1127 * But because preemptible RCU does not exist, map to rcu-sched.
1129 void synchronize_rcu_expedited(void)
1131 synchronize_sched_expedited();
1133 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1135 #ifdef CONFIG_HOTPLUG_CPU
1138 * Because preemptible RCU does not exist, there is never any need to
1139 * report on tasks preempted in RCU read-side critical sections during
1140 * expedited RCU grace periods.
1142 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
1147 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1150 * Because preemptible RCU does not exist, it never has any work to do.
1152 static int rcu_preempt_pending(int cpu)
1158 * Because preemptible RCU does not exist, it never has callbacks to check.
1160 static int rcu_preempt_cpu_has_callbacks(int cpu)
1166 * Because preemptible RCU does not exist, rcu_barrier() is just
1167 * another name for rcu_barrier_sched().
1169 void rcu_barrier(void)
1171 rcu_barrier_sched();
1173 EXPORT_SYMBOL_GPL(rcu_barrier);
1176 * Because preemptible RCU does not exist, there is no per-CPU
1177 * data to initialize.
1179 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
1184 * Because there is no preemptible RCU, there is no cleanup to do.
1186 static void rcu_preempt_cleanup_dying_cpu(void)
1191 * Because preemptible RCU does not exist, it need not be initialized.
1193 static void __init __rcu_init_preempt(void)
1197 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1199 #ifdef CONFIG_RCU_BOOST
1201 #include "rtmutex_common.h"
1203 #ifdef CONFIG_RCU_TRACE
1205 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1207 if (list_empty(&rnp->blkd_tasks))
1208 rnp->n_balk_blkd_tasks++;
1209 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1210 rnp->n_balk_exp_gp_tasks++;
1211 else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
1212 rnp->n_balk_boost_tasks++;
1213 else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
1214 rnp->n_balk_notblocked++;
1215 else if (rnp->gp_tasks != NULL &&
1216 ULONG_CMP_LT(jiffies, rnp->boost_time))
1217 rnp->n_balk_notyet++;
1222 #else /* #ifdef CONFIG_RCU_TRACE */
1224 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1228 #endif /* #else #ifdef CONFIG_RCU_TRACE */
1231 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1232 * or ->boost_tasks, advancing the pointer to the next task in the
1233 * ->blkd_tasks list.
1235 * Note that irqs must be enabled: boosting the task can block.
1236 * Returns 1 if there are more tasks needing to be boosted.
1238 static int rcu_boost(struct rcu_node *rnp)
1240 unsigned long flags;
1241 struct rt_mutex mtx;
1242 struct task_struct *t;
1243 struct list_head *tb;
1245 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1246 return 0; /* Nothing left to boost. */
1248 raw_spin_lock_irqsave(&rnp->lock, flags);
1251 * Recheck under the lock: all tasks in need of boosting
1252 * might exit their RCU read-side critical sections on their own.
1254 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1255 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1260 * Preferentially boost tasks blocking expedited grace periods.
1261 * This cannot starve the normal grace periods because a second
1262 * expedited grace period must boost all blocked tasks, including
1263 * those blocking the pre-existing normal grace period.
1265 if (rnp->exp_tasks != NULL) {
1266 tb = rnp->exp_tasks;
1267 rnp->n_exp_boosts++;
1269 tb = rnp->boost_tasks;
1270 rnp->n_normal_boosts++;
1272 rnp->n_tasks_boosted++;
1275 * We boost task t by manufacturing an rt_mutex that appears to
1276 * be held by task t. We leave a pointer to that rt_mutex where
1277 * task t can find it, and task t will release the mutex when it
1278 * exits its outermost RCU read-side critical section. Then
1279 * simply acquiring this artificial rt_mutex will boost task
1280 * t's priority. (Thanks to tglx for suggesting this approach!)
1282 * Note that task t must acquire rnp->lock to remove itself from
1283 * the ->blkd_tasks list, which it will do from exit() if from
1284 * nowhere else. We therefore are guaranteed that task t will
1285 * stay around at least until we drop rnp->lock. Note that
1286 * rnp->lock also resolves races between our priority boosting
1287 * and task t's exiting its outermost RCU read-side critical
1290 t = container_of(tb, struct task_struct, rcu_node_entry);
1291 rt_mutex_init_proxy_locked(&mtx, t);
1292 t->rcu_boost_mutex = &mtx;
1293 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1294 rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
1295 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
1297 return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1298 ACCESS_ONCE(rnp->boost_tasks) != NULL;
1302 * Timer handler to initiate waking up of boost kthreads that
1303 * have yielded the CPU due to excessive numbers of tasks to
1304 * boost. We wake up the per-rcu_node kthread, which in turn
1305 * will wake up the booster kthread.
1307 static void rcu_boost_kthread_timer(unsigned long arg)
1309 invoke_rcu_node_kthread((struct rcu_node *)arg);
1313 * Priority-boosting kthread. One per leaf rcu_node and one for the root rcu_node.
1316 static int rcu_boost_kthread(void *arg)
1318 struct rcu_node *rnp = (struct rcu_node *)arg;
1322 trace_rcu_utilization("Start boost kthread@init");
1324 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1325 trace_rcu_utilization("End boost kthread@rcu_wait");
1326 rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1327 trace_rcu_utilization("Start boost kthread@rcu_wait");
1328 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1329 more2boost = rcu_boost(rnp);
1335 trace_rcu_utilization("End boost kthread@rcu_yield");
1336 rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
1337 trace_rcu_utilization("Start boost kthread@rcu_yield");
1342 trace_rcu_utilization("End boost kthread@notreached");
1347 * Check to see if it is time to start boosting RCU readers that are
1348 * blocking the current grace period, and, if so, tell the per-rcu_node
1349 * kthread to start boosting them. If there is an expedited grace
1350 * period in progress, it is always time to boost.
1352 * The caller must hold rnp->lock, which this function releases,
1353 * but irqs remain disabled. The ->boost_kthread_task is immortal,
1354 * so we don't need to worry about it going away.
1356 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1358 struct task_struct *t;
1360 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1361 rnp->n_balk_exp_gp_tasks++;
1362 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1365 if (rnp->exp_tasks != NULL ||
1366 (rnp->gp_tasks != NULL &&
1367 rnp->boost_tasks == NULL &&
1369 ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1370 if (rnp->exp_tasks == NULL)
1371 rnp->boost_tasks = rnp->gp_tasks;
1372 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1373 t = rnp->boost_kthread_task;
1377 rcu_initiate_boost_trace(rnp);
1378 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1383 * Wake up the per-CPU kthread to invoke RCU callbacks.
1385 static void invoke_rcu_callbacks_kthread(void)
1387 unsigned long flags;
1389 local_irq_save(flags);
1390 __this_cpu_write(rcu_cpu_has_work, 1);
1391 if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1392 current != __this_cpu_read(rcu_cpu_kthread_task))
1393 wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1394 local_irq_restore(flags);
1398 * Is the current CPU running the RCU-callbacks kthread?
1399 * Caller must have preemption disabled.
1401 static bool rcu_is_callbacks_kthread(void)
1403 return __get_cpu_var(rcu_cpu_kthread_task) == current;
1407 * Set the affinity of the boost kthread. The CPU-hotplug locks are
1408 * held, so no one should be messing with the existence of the boost kthread.
1411 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
1414 struct task_struct *t;
1416 t = rnp->boost_kthread_task;
1418 set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
1421 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
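/*
 * Worked example with assumed values: CONFIG_RCU_BOOST_DELAY=500 and
 * HZ=1000 give RCU_BOOST_DELAY_JIFFIES = DIV_ROUND_UP(500 * 1000, 1000)
 * = 500 jiffies, so boosting is considered only once the current grace
 * period is roughly half a second old.
 */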
1424 * Do priority-boost accounting for the start of a new grace period.
1426 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1428 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1432 * Create an RCU-boost kthread for the specified node if one does not
1433 * already exist. We only create this kthread for preemptible RCU.
1434 * Returns zero if all is well, a negated errno otherwise.
1436 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1437 struct rcu_node *rnp,
1440 unsigned long flags;
1441 struct sched_param sp;
1442 struct task_struct *t;
1444 if (&rcu_preempt_state != rsp)
1447 if (rnp->boost_kthread_task != NULL)
1449 t = kthread_create(rcu_boost_kthread, (void *)rnp,
1450 "rcub/%d", rnp_index);
1453 raw_spin_lock_irqsave(&rnp->lock, flags);
1454 rnp->boost_kthread_task = t;
1455 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1456 sp.sched_priority = RCU_BOOST_PRIO;
1457 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1458 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1462 #ifdef CONFIG_HOTPLUG_CPU
1465 * Stop RCU's per-CPU kthread when its CPU goes offline.
1467 static void rcu_stop_cpu_kthread(int cpu)
1469 struct task_struct *t;
1471 /* Stop the CPU's kthread. */
1472 t = per_cpu(rcu_cpu_kthread_task, cpu);
1474 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1479 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1481 static void rcu_kthread_do_work(void)
1483 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1484 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1485 rcu_preempt_do_callbacks();
1489 * Wake up the specified per-rcu_node-structure kthread.
1490 * Because the per-rcu_node kthreads are immortal, we don't need
1491 * to do anything to keep them alive.
1493 static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1495 struct task_struct *t;
1497 t = rnp->node_kthread_task;
1503 * Set the specified CPU's kthread to run RT or not, as specified by
1504 * the to_rt argument. The CPU-hotplug locks are held, so the task
1505 * is not going away.
1507 static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1510 struct sched_param sp;
1511 struct task_struct *t;
1513 t = per_cpu(rcu_cpu_kthread_task, cpu);
1517 policy = SCHED_FIFO;
1518 sp.sched_priority = RCU_KTHREAD_PRIO;
1520 policy = SCHED_NORMAL;
1521 sp.sched_priority = 0;
1523 sched_setscheduler_nocheck(t, policy, &sp);
1527 * Timer handler to initiate the waking up of per-CPU kthreads that
1528 * have yielded the CPU due to excess numbers of RCU callbacks.
1529 * We wake up the per-rcu_node kthread, which in turn will wake up
1530 * the booster kthread.
1532 static void rcu_cpu_kthread_timer(unsigned long arg)
1534 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1535 struct rcu_node *rnp = rdp->mynode;
1537 atomic_or(rdp->grpmask, &rnp->wakemask);
1538 invoke_rcu_node_kthread(rnp);
1542 * Drop to non-real-time priority and yield, but only after posting a
1543 * timer that will cause us to regain our real-time priority if we
1544 * remain preempted. Either way, we restore our real-time priority before returning.
1547 static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1549 struct sched_param sp;
1550 struct timer_list yield_timer;
1551 int prio = current->rt_priority;
1553 setup_timer_on_stack(&yield_timer, f, arg);
1554 mod_timer(&yield_timer, jiffies + 2);
1555 sp.sched_priority = 0;
1556 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1557 set_user_nice(current, 19);
1559 set_user_nice(current, 0);
1560 sp.sched_priority = prio;
1561 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1562 del_timer(&yield_timer);
1566 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1567 * This can happen while the corresponding CPU is either coming online
1568 * or going offline. We cannot wait until the CPU is fully online
1569 * before starting the kthread, because the various notifier functions
1570 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1571 * the corresponding CPU is online.
1573 * Return 1 if the kthread needs to stop, 0 otherwise.
1575 * Caller must disable bh. This function can momentarily enable it.
1577 static int rcu_cpu_kthread_should_stop(int cpu)
1579 while (cpu_is_offline(cpu) ||
1580 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1581 smp_processor_id() != cpu) {
1582 if (kthread_should_stop())
1584 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1585 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1587 schedule_timeout_uninterruptible(1);
1588 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1589 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1592 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1597 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1598 * RCU softirq used in flavors and configurations of RCU that do not
1599 * support RCU priority boosting.
1601 static int rcu_cpu_kthread(void *arg)
1603 int cpu = (int)(long)arg;
1604 unsigned long flags;
1606 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1608 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1610 trace_rcu_utilization("Start CPU kthread@init");
1612 *statusp = RCU_KTHREAD_WAITING;
1613 trace_rcu_utilization("End CPU kthread@rcu_wait");
1614 rcu_wait(*workp != 0 || kthread_should_stop());
1615 trace_rcu_utilization("Start CPU kthread@rcu_wait");
1617 if (rcu_cpu_kthread_should_stop(cpu)) {
1621 *statusp = RCU_KTHREAD_RUNNING;
1622 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1623 local_irq_save(flags);
1626 local_irq_restore(flags);
1628 rcu_kthread_do_work();
1635 *statusp = RCU_KTHREAD_YIELDING;
1636 trace_rcu_utilization("End CPU kthread@rcu_yield");
1637 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1638 trace_rcu_utilization("Start CPU kthread@rcu_yield");
1642 *statusp = RCU_KTHREAD_STOPPED;
1643 trace_rcu_utilization("End CPU kthread@term");
1648 * Spawn a per-CPU kthread, setting up affinity and priority.
1649 * Because the CPU hotplug lock is held, no other CPU will be attempting
1650 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1651 * attempting to access it during boot, but the locking in kthread_bind()
1652 * will enforce sufficient ordering.
1654 * Please note that we cannot simply refuse to wake up the per-CPU
1655 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
1656 * which can result in softlockup complaints if the task ends up being
1657 * idle for more than a couple of minutes.
1659 * However, please note also that we cannot bind the per-CPU kthread to its
1660 * CPU until that CPU is fully online. We also cannot wait until the
1661 * CPU is fully online before we create its per-CPU kthread, as this would
1662 * deadlock the system when CPU notifiers tried waiting for grace
1663 * periods. So we bind the per-CPU kthread to its CPU only if the CPU
1664 * is online. If its CPU is not yet fully online, then the code in
1665 * rcu_cpu_kthread() will wait until it is fully online, and then do the binding.
1668 static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1670 struct sched_param sp;
1671 struct task_struct *t;
1673 if (!rcu_scheduler_fully_active ||
1674 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1676 t = kthread_create_on_node(rcu_cpu_kthread,
1682 if (cpu_online(cpu))
1683 kthread_bind(t, cpu);
1684 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1685 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1686 sp.sched_priority = RCU_KTHREAD_PRIO;
1687 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1688 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1689 wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
1694 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1695 * kthreads when needed. We ignore requests to wake up kthreads
1696 * for offline CPUs, which is OK because force_quiescent_state()
1697 * takes care of this case.
1699 static int rcu_node_kthread(void *arg)
1702 unsigned long flags;
1704 struct rcu_node *rnp = (struct rcu_node *)arg;
1705 struct sched_param sp;
1706 struct task_struct *t;
1709 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1710 rcu_wait(atomic_read(&rnp->wakemask) != 0);
1711 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1712 raw_spin_lock_irqsave(&rnp->lock, flags);
1713 mask = atomic_xchg(&rnp->wakemask, 0);
1714 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1715 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1716 if ((mask & 0x1) == 0)
1719 t = per_cpu(rcu_cpu_kthread_task, cpu);
1720 if (!cpu_online(cpu) || t == NULL) {
1724 per_cpu(rcu_cpu_has_work, cpu) = 1;
1725 sp.sched_priority = RCU_KTHREAD_PRIO;
1726 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1731 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1736 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1737 * served by the rcu_node in question. The CPU hotplug lock is still
1738 * held, so the value of rnp->qsmaskinit will be stable.
1740 * We don't include outgoingcpu in the affinity set; use -1 if there is
1741 * no outgoing CPU. If there are no CPUs left in the affinity set,
1742 * this function allows the kthread to execute on any CPU.
1744 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1748 unsigned long mask = rnp->qsmaskinit;
1750 if (rnp->node_kthread_task == NULL)
1752 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1755 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1756 if ((mask & 0x1) && cpu != outgoingcpu)
1757 cpumask_set_cpu(cpu, cm);
1758 if (cpumask_weight(cm) == 0) {
1760 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1761 cpumask_clear_cpu(cpu, cm);
1762 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1764 set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
1765 rcu_boost_kthread_setaffinity(rnp, cm);
1766 free_cpumask_var(cm);
1770 * Spawn a per-rcu_node kthread, setting priority and affinity.
1771 * Called during boot before online/offline can happen, or, if
1772 * during runtime, with the main CPU-hotplug locks held. So only
1773 * one of these can be executing at a time.
1775 static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1776 struct rcu_node *rnp)
1778 unsigned long flags;
1779 int rnp_index = rnp - &rsp->node[0];
1780 struct sched_param sp;
1781 struct task_struct *t;
1783 if (!rcu_scheduler_fully_active ||
1784 rnp->qsmaskinit == 0)
1786 if (rnp->node_kthread_task == NULL) {
1787 t = kthread_create(rcu_node_kthread, (void *)rnp,
1788 "rcun/%d", rnp_index);
1791 raw_spin_lock_irqsave(&rnp->lock, flags);
1792 rnp->node_kthread_task = t;
1793 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1794 sp.sched_priority = 99;
1795 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1796 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1798 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1802 * Spawn all kthreads -- called as soon as the scheduler is running.
1804 static int __init rcu_spawn_kthreads(void)
1807 struct rcu_node *rnp;
1809 rcu_scheduler_fully_active = 1;
1810 for_each_possible_cpu(cpu) {
1811 per_cpu(rcu_cpu_has_work, cpu) = 0;
1812 if (cpu_online(cpu))
1813 (void)rcu_spawn_one_cpu_kthread(cpu);
1815 rnp = rcu_get_root(rcu_state);
1816 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1817 if (NUM_RCU_NODES > 1) {
1818 rcu_for_each_leaf_node(rcu_state, rnp)
1819 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1823 early_initcall(rcu_spawn_kthreads);
1825 static void __cpuinit rcu_prepare_kthreads(int cpu)
1827 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1828 struct rcu_node *rnp = rdp->mynode;
1830 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1831 if (rcu_scheduler_fully_active) {
1832 (void)rcu_spawn_one_cpu_kthread(cpu);
1833 if (rnp->node_kthread_task == NULL)
1834 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1838 #else /* #ifdef CONFIG_RCU_BOOST */
1840 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1842 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1845 static void invoke_rcu_callbacks_kthread(void)
1850 static bool rcu_is_callbacks_kthread(void)
1855 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1859 #ifdef CONFIG_HOTPLUG_CPU
1861 static void rcu_stop_cpu_kthread(int cpu)
1865 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1867 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1871 static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1875 static int __init rcu_scheduler_really_started(void)
1877 rcu_scheduler_fully_active = 1;
1880 early_initcall(rcu_scheduler_really_started);
1882 static void __cpuinit rcu_prepare_kthreads(int cpu)
1886 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1888 #if !defined(CONFIG_RCU_FAST_NO_HZ)
1891 * Check to see if any future RCU-related work will need to be done
1892 * by the current CPU, even if none need be done immediately, returning
1893 * 1 if so. This function is part of the RCU implementation; it is -not-
1894 * an exported member of the RCU API.
1896 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1897 * any flavor of RCU.
1899 int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
1901 *delta_jiffies = ULONG_MAX;
1902 return rcu_cpu_has_callbacks(cpu);
1906 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
1908 static void rcu_prepare_for_idle_init(int cpu)
1913 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1916 static void rcu_cleanup_after_idle(int cpu)
1921 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n, is nothing.
1924 static void rcu_prepare_for_idle(int cpu)
1929 * Don't bother keeping a running count of the number of RCU callbacks
1930 * posted because CONFIG_RCU_FAST_NO_HZ=n.
1932 static void rcu_idle_count_callbacks_posted(void)
1936 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1939 * This code is invoked when a CPU goes idle, at which point we want
1940 * to have the CPU do everything required for RCU so that it can enter
1941 * the energy-efficient dyntick-idle mode. This is handled by a
1942 * state machine implemented by rcu_prepare_for_idle() below.
1944 * The following three preprocessor symbols control this state machine:
1946 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
1947 * to satisfy RCU. Beyond this point, it is better to incur a periodic
1948 * scheduling-clock interrupt than to loop through the state machine at full power.
1950 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
1951 * optional if RCU does not need anything immediately from this
1952 * CPU, even if this CPU still has RCU callbacks queued. The first
1953 * times through the state machine are mandatory: we need to give
1954 * the state machine a chance to communicate a quiescent state to the RCU core.
1956 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1957 * to sleep in dyntick-idle mode with RCU callbacks pending. This
1958 * is sized to be roughly one RCU grace period. Those energy-efficiency
1959 * benchmarkers who might otherwise be tempted to set this to a large
1960 * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1961 * system. And if you are -that- concerned about energy efficiency,
1962 * just power the system down and be done with it!
1963 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1964 * permitted to sleep in dyntick-idle mode with only lazy RCU
1965 * callbacks pending. Setting this too high can OOM your system.
1967 * The values below work well in practice. If future workloads require
1968 * adjustment, they can be converted into kernel config parameters, though
1969 * making the state machine smarter might be a better option.
1971 #define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */
1972 #define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */
1973 #define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */
1974 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
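/*
 * Worked example assuming HZ=1000: RCU_IDLE_GP_DELAY = 6 jiffies (about
 * 6 ms, roughly one grace period), while RCU_IDLE_LAZY_GP_DELAY = 6 * HZ
 * = 6000 jiffies (about six seconds) for CPUs with only lazy callbacks.
 */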
1977 * Does the specified flavor of RCU have non-lazy callbacks pending on
1978 * the specified CPU? Both RCU flavor and CPU are specified by the
1979 * rcu_data structure.
1981 static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
1983 return rdp->qlen != rdp->qlen_lazy;
1986 #ifdef CONFIG_TREE_PREEMPT_RCU
1989 * Are there non-lazy RCU-preempt callbacks? (There cannot be if there
1990 * is no RCU-preempt in the kernel.)
1992 static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
1994 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
1996 return __rcu_cpu_has_nonlazy_callbacks(rdp);
1999 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
2001 static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
2006 #endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */
2009 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
2011 static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
2013 return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
2014 __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
2015 rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
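/*
 * Illustrative note (not part of this file): a "lazy" callback is one that
 * does nothing but free memory, for example one queued via kfree_rcu(), and
 * such callbacks are what ->qlen_lazy counts.  A callback queued via
 * call_rcu() with an arbitrary function is non-lazy and is what the checks
 * above look for.  Hypothetical example:
 */
struct example_obj {
	struct rcu_head rh;
	int payload;
};

static void example_cb(struct rcu_head *rhp)
{
	/* A real callback would typically free or update the object here. */
	pr_info("non-lazy RCU callback ran\n");
}

static void example_post_callbacks(struct example_obj *a, struct example_obj *b)
{
	kfree_rcu(a, rh);		/* Lazy: only frees memory. */
	call_rcu(&b->rh, example_cb);	/* Non-lazy: runs example_cb(). */
}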
2019 * Allow the CPU to enter dyntick-idle mode if any of the following holds: (1) There are no
2020 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
2021 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
2022 * enter dyntick-idle mode. Otherwise, if we have recently tried and failed
2023 * to enter dyntick-idle mode, we refuse to try to enter it. After all,
2024 * it is better to incur scheduling-clock interrupts than to spin
2025 * continuously for the same time duration!
2027 * The delta_jiffies argument is used to store the time when RCU is
2028 * going to need the CPU again if it still has callbacks. The reason
2029 * for this is that rcu_prepare_for_idle() might need to post a timer,
2030 * but if so, it will do so after tick_nohz_stop_sched_tick() has set
2031 * the wakeup time for this CPU. This means that RCU's timer can be
2032 * delayed until the wakeup time, which defeats the purpose of posting a timer.
2035 int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
2037 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2039 /* Flag a new idle sojourn to the idle-entry state machine. */
2040 rdtp->idle_first_pass = 1;
2041 /* If no callbacks, RCU doesn't need the CPU. */
2042 if (!rcu_cpu_has_callbacks(cpu)) {
2043 *delta_jiffies = ULONG_MAX;
2046 if (rdtp->dyntick_holdoff == jiffies) {
2047 /* RCU recently tried and failed, so don't try again. */
2051 /* Set up for the possibility that RCU will post a timer. */
2052 if (rcu_cpu_has_nonlazy_callbacks(cpu))
2053 *delta_jiffies = RCU_IDLE_GP_DELAY;
2055 *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
2060 * Handler for smp_call_function_single(). The only point of this
2061 * handler is to wake the CPU up, so the handler does only tracing.
2063 void rcu_idle_demigrate(void *unused)
2065 trace_rcu_prep_idle("Demigrate");
2069 * Timer handler used to force the CPU to start pushing its remaining RCU
2070 * callbacks in the case where it entered dyntick-idle mode with callbacks
2071 * pending. The handler doesn't really need to do anything because the
2072 * real work is done upon re-entry to idle, or by the next scheduling-clock
2073 * interrupt should idle not be re-entered.
2075 * One special case: the timer gets migrated without awakening the CPU
2076 * on which the timer was scheduled. In this case, we must wake up
2077 * that CPU. We do so with smp_call_function_single().
2079 static void rcu_idle_gp_timer_func(unsigned long cpu_in)
2081 int cpu = (int)cpu_in;
2083 trace_rcu_prep_idle("Timer");
2084 if (cpu != smp_processor_id())
2085 smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
2087 WARN_ON_ONCE(1); /* Getting here can hang the system... */
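/*
 * Illustrative note (not part of this file): the zero "wait" argument to
 * smp_call_function_single() above means the caller does not wait for the
 * target CPU to finish running rcu_idle_demigrate().  The IPI itself is
 * what matters, because it kicks that CPU out of dyntick-idle mode.
 */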
2091 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
2093 static void rcu_prepare_for_idle_init(int cpu)
2095 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2097 rdtp->dyntick_holdoff = jiffies - 1;
2098 setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
2099 rdtp->idle_gp_timer_expires = jiffies - 1;
2100 rdtp->idle_first_pass = 1;
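/*
 * Illustrative note (not part of this file): initializing ->dyntick_holdoff
 * to jiffies - 1 guarantees that the "->dyntick_holdoff == jiffies" holdoff
 * checks in rcu_needs_cpu() and rcu_prepare_for_idle() fail on this CPU's
 * very first idle entry, so the state machine gets a chance to run; the
 * timer expiry is likewise initialized to a time already in the past.  A
 * hypothetical helper expressing the holdoff check:
 */
static bool __maybe_unused example_in_holdoff(struct rcu_dynticks *rdtp)
{
	return rdtp->dyntick_holdoff == jiffies;	/* Held off this jiffy? */
}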
2104 * Clean up for exit from idle. Because we are exiting from idle, there
2105 * is no longer any point to ->idle_gp_timer, so cancel it. This will
2106 * do nothing if this timer is not active, so just cancel it unconditionally.
2108 static void rcu_cleanup_after_idle(int cpu)
2110 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2112 del_timer(&rdtp->idle_gp_timer);
2113 trace_rcu_prep_idle("Cleanup after idle");
2117 * Check to see if any RCU-related work can be done by the current CPU,
2118 * and if so, schedule a softirq to get it done. This function is part
2119 * of the RCU implementation; it is -not- an exported member of the RCU API.
2121 * The idea is for the current CPU to clear out all work required by the
2122 * RCU core for the current grace period, so that this CPU can be permitted
2123 * to enter dyntick-idle mode. In some cases, it will need to be awakened
2124 * at the end of the grace period by whatever CPU ends the grace period.
2125 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
2126 * number of wakeups by a modest integer factor.
2128 * Because it is not legal to invoke rcu_process_callbacks() with irqs
2129 * disabled, we do one pass of force_quiescent_state(), then do an
2130 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
2131 * later. The ->dyntick_drain field controls the sequencing.
2133 * The caller must have disabled interrupts.
2135 static void rcu_prepare_for_idle(int cpu)
2137 struct timer_list *tp;
2138 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2141 * If this is an idle re-entry, for example, due to use of
2142 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
2143 * loop, then don't take any state-machine actions, unless the
2144 * momentary exit from idle queued additional non-lazy callbacks.
2145 * Instead, repost the ->idle_gp_timer if this CPU has callbacks pending.
2148 if (!rdtp->idle_first_pass &&
2149 (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
2150 if (rcu_cpu_has_callbacks(cpu)) {
2151 tp = &rdtp->idle_gp_timer;
2152 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
2156 rdtp->idle_first_pass = 0;
2157 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
2160 * If there are no callbacks on this CPU, enter dyntick-idle mode.
2161 * Also reset state to avoid prejudicing later attempts.
2163 if (!rcu_cpu_has_callbacks(cpu)) {
2164 rdtp->dyntick_holdoff = jiffies - 1;
2165 rdtp->dyntick_drain = 0;
2166 trace_rcu_prep_idle("No callbacks");
2171 * If in holdoff mode, just return. We will presumably have
2172 * refrained from disabling the scheduling-clock tick.
2174 if (rdtp->dyntick_holdoff == jiffies) {
2175 trace_rcu_prep_idle("In holdoff");
2179 /* Check and update the ->dyntick_drain sequencing. */
2180 if (rdtp->dyntick_drain <= 0) {
2181 /* First time through, initialize the counter. */
2182 rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
2183 } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
2184 !rcu_pending(cpu) &&
2185 !local_softirq_pending()) {
2186 /* Can we go dyntick-idle despite still having callbacks? */
2187 rdtp->dyntick_drain = 0;
2188 rdtp->dyntick_holdoff = jiffies;
2189 if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
2190 trace_rcu_prep_idle("Dyntick with callbacks");
2191 rdtp->idle_gp_timer_expires =
2192 jiffies + RCU_IDLE_GP_DELAY;
2194 rdtp->idle_gp_timer_expires =
2195 jiffies + RCU_IDLE_LAZY_GP_DELAY;
2196 trace_rcu_prep_idle("Dyntick with lazy callbacks");
2198 tp = &rdtp->idle_gp_timer;
2199 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
2200 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
2201 return; /* Nothing more to do immediately. */
2202 } else if (--(rdtp->dyntick_drain) <= 0) {
2203 /* We have hit the limit, so time to give up. */
2204 rdtp->dyntick_holdoff = jiffies;
2205 trace_rcu_prep_idle("Begin holdoff");
2206 invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
2211 * Do one step of pushing the remaining RCU callbacks through
2212 * the RCU core state machine.
2214 #ifdef CONFIG_TREE_PREEMPT_RCU
2215 if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
2216 rcu_preempt_qs(cpu);
2217 force_quiescent_state(&rcu_preempt_state, 0);
2219 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
2220 if (per_cpu(rcu_sched_data, cpu).nxtlist) {
2222 force_quiescent_state(&rcu_sched_state, 0);
2224 if (per_cpu(rcu_bh_data, cpu).nxtlist) {
2226 force_quiescent_state(&rcu_bh_state, 0);
2230 * If RCU callbacks are still pending, RCU still needs this CPU.
2231 * So try forcing the callbacks through the grace period.
2233 if (rcu_cpu_has_callbacks(cpu)) {
2234 trace_rcu_prep_idle("More callbacks");
2237 trace_rcu_prep_idle("Callbacks drained");
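/*
 * Illustrative model (not part of this file) of the ->dyntick_drain
 * sequencing above, with the RCU-specific details and the optional
 * "dyntick with callbacks" branch stripped away.  Each idle-entry pass
 * decrements the counter; once it reaches zero without the CPU having
 * entered dyntick-idle mode, the CPU holds off for the rest of the
 * current jiffy.  All names here are hypothetical.
 */
static bool example_idle_pass(int *drain, unsigned long *holdoff)
{
	if (*holdoff == jiffies)
		return false;			/* Recently failed; don't retry. */
	if (*drain <= 0) {
		*drain = RCU_IDLE_FLUSHES;	/* First pass: arm the counter. */
	} else if (--(*drain) <= 0) {
		*holdoff = jiffies;		/* Out of attempts: hold off. */
		return false;
	}
	return true;				/* Keep working toward dyntick-idle. */
}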
2241 * Keep a running count of the number of non-lazy callbacks posted
2242 * on this CPU. This running counter (which is never decremented) allows
2243 * rcu_prepare_for_idle() to detect when something out of the idle loop
2244 * posts a callback, even if an equal number of callbacks are invoked.
2245 * Of course, callbacks should only be posted from within a trace event
2246 * designed to be called from idle or from within RCU_NONIDLE().
2248 static void rcu_idle_count_callbacks_posted(void)
2250 __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
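/*
 * Illustrative sketch (not part of this file): rcu_prepare_for_idle()
 * compares ->nonlazy_posted against the ->nonlazy_posted_snap snapshot it
 * took on the previous pass.  If they differ, a non-lazy callback was
 * posted during a momentary exit from idle, so the state machine must be
 * run again rather than simply reposting ->idle_gp_timer.  A hypothetical
 * helper expressing that test:
 */
static bool __maybe_unused example_new_nonlazy_posted(struct rcu_dynticks *rdtp)
{
	return rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap;
}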
2253 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
2255 #ifdef CONFIG_RCU_CPU_STALL_INFO
2257 #ifdef CONFIG_RCU_FAST_NO_HZ
2259 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2261 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2262 struct timer_list *tltp = &rdtp->idle_gp_timer;
2264 sprintf(cp, "drain=%d %c timer=%lu",
2265 rdtp->dyntick_drain,
2266 rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
2267 timer_pending(tltp) ? tltp->expires - jiffies : -1);
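/*
 * For illustration (hypothetical values): the buffer filled in above might
 * read "drain=0 H timer=3", meaning the flush counter is exhausted, the CPU
 * is in holdoff for the current jiffy ('H' rather than '.'), and the idle
 * timer is due to fire in 3 jiffies.
 */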
2270 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
2272 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2276 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
2278 /* Initiate the stall-info list. */
2279 static void print_cpu_stall_info_begin(void)
2281 printk(KERN_CONT "\n");
2285 * Print out diagnostic information for the specified stalled CPU.
2287 * If the specified CPU is aware of the current RCU grace period
2288 * (flavor specified by rsp), then print the number of scheduling
2289 * clock interrupts the CPU has taken during the time that it has
2290 * been aware. Otherwise, print the number of RCU grace periods
2291 * that this CPU is ignorant of, for example, "1" if the CPU was
2292 * aware of the previous grace period.
2294 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
2296 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2298 char fast_no_hz[72];
2299 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2300 struct rcu_dynticks *rdtp = rdp->dynticks;
2302 unsigned long ticks_value;
2304 if (rsp->gpnum == rdp->gpnum) {
2305 ticks_title = "ticks this GP";
2306 ticks_value = rdp->ticks_this_gp;
2308 ticks_title = "GPs behind";
2309 ticks_value = rsp->gpnum - rdp->gpnum;
2311 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
2312 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
2313 cpu, ticks_value, ticks_title,
2314 atomic_read(&rdtp->dynticks) & 0xfff,
2315 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
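/*
 * For illustration (hypothetical values), one line emitted above might read:
 *
 *	3: (20 ticks this GP) idle=e05/1/0 drain=0 . timer=4
 *
 * i.e. CPU 3 has taken 20 scheduling-clock interrupts while aware of the
 * current grace period, the low 12 bits of its ->dynticks counter are 0xe05
 * (odd, so the CPU is not in dyntick-idle mode), the idle and NMI nesting
 * values follow, and the trailing fields come from
 * print_cpu_stall_fast_no_hz() when CONFIG_RCU_FAST_NO_HZ is enabled.
 */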
2319 /* Terminate the stall-info list. */
2320 static void print_cpu_stall_info_end(void)
2322 printk(KERN_ERR "\t");
2325 /* Zero ->ticks_this_gp for all flavors of RCU. */
2326 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2328 rdp->ticks_this_gp = 0;
2331 /* Increment ->ticks_this_gp for all flavors of RCU. */
2332 static void increment_cpu_stall_ticks(void)
2334 __get_cpu_var(rcu_sched_data).ticks_this_gp++;
2335 __get_cpu_var(rcu_bh_data).ticks_this_gp++;
2336 #ifdef CONFIG_TREE_PREEMPT_RCU
2337 __get_cpu_var(rcu_preempt_data).ticks_this_gp++;
2338 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
2341 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
2343 static void print_cpu_stall_info_begin(void)
2345 printk(KERN_CONT " {");
2348 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2350 printk(KERN_CONT " %d", cpu);
2353 static void print_cpu_stall_info_end(void)
2355 printk(KERN_CONT "} ");
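/*
 * For illustration: without CONFIG_RCU_CPU_STALL_INFO, the three functions
 * above fall back to the legacy format, simply appending a brace-delimited
 * list of the stalled CPUs (for example " { 3 7} ") to the stall message.
 */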
2358 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2362 static void increment_cpu_stall_ticks(void)
2366 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */