/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#ifndef CONFIG_RCU_CPU_STALL_DETECTOR
	printk(KERN_INFO
	       "\tRCU-based detection of stalled CPUs is disabled.\n");
#endif
#ifndef CONFIG_RCU_CPU_STALL_VERBOSE
	printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	rdp->passed_quiesc_completed = rdp->gpnum - 1;
	barrier();	/* Write ->passed_quiesc_completed before ->passed_quiesc. */
	rdp->passed_quiesc = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the appropriate entry
 * of the blocked_tasks[] array.  The task will dequeue itself when
 * it exits the outermost enclosing RCU read-side critical section.
 * Therefore, the current grace period cannot be permitted to complete
 * until the blocked_tasks[] entry indexed by the low-order bit of
 * rnp->gpnum empties.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	int phase;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 *
		 * But first, note that the current CPU must still be
		 * online!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	ACCESS_ONCE(current->rcu_read_lock_nesting)++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	int phase = rnp->gpnum & 0x1;

	return !list_empty(&rnp->blocked_tasks[phase]) ||
	       !list_empty(&rnp->blocked_tasks[phase + 2]);
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
		}
		empty = !rcu_preempted_readers(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
		 */
		if (empty)
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		else
			rcu_report_unblock_qs_rnp(rnp, flags);

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && !rcu_preempted_readers_exp(rnp))
			rcu_report_exp_rnp(&rcu_preempt_state, rnp);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
	if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
	WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
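
/*
 * Illustrative usage sketch (not part of this implementation): a typical
 * RCU reader pairs the primitives above with rcu_dereference().  The names
 * my_global_ptr, struct foo, and do_something_with() are hypothetical.
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(my_global_ptr);
 *	if (p != NULL)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 *
 * The read-side cost of this implementation is just the nesting-count
 * increment and decrement in __rcu_read_lock()/__rcu_read_unlock() above;
 * no locks or atomic operations are required on the reader fastpath.
 */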

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct list_head *lp;
	int phase;
	struct task_struct *t;

	if (rcu_preempted_readers(rnp)) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		phase = rnp->gpnum & 0x1;
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			sched_show_task(t);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
	struct list_head *lp;
	int phase;
	struct task_struct *t;

	if (rcu_preempted_readers(rnp)) {
		phase = rnp->gpnum & 0x1;
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			printk(" P%d", t->pid);
	}
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempted_readers(rnp));
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	int i;
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *tp;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}
	WARN_ON_ONCE(rnp != rdp->mynode &&
		     (!list_empty(&rnp->blocked_tasks[0]) ||
		      !list_empty(&rnp->blocked_tasks[1]) ||
		      !list_empty(&rnp->blocked_tasks[2]) ||
		      !list_empty(&rnp->blocked_tasks[3])));

	/*
	 * Move tasks up to root rcu_node.  Rely on the fact that the
	 * root rcu_node can be at most one ahead of the rest of the
	 * rcu_nodes in terms of ->gpnum value.  This fact allows us to
	 * move the blocked_tasks[] array directly, element by element.
	 */
	if (rcu_preempted_readers(rnp))
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	for (i = 0; i < 4; i++) {
		lp = &rnp->blocked_tasks[i];
		lp_root = &rnp_root->blocked_tasks[i];
		while (!list_empty(lp)) {
			tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
			raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
			list_del(&tp->rcu_node_entry);
			tp->rcu_blocked_node = rnp_root;
			list_add(&tp->rcu_node_entry, lp_root);
			raw_spin_unlock(&rnp_root->lock); /* irqs remain disabled */
		}
	}
	return retval;
}

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
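
/*
 * Illustrative usage sketch (not part of this implementation): a call_rcu()
 * user typically embeds an rcu_head in its own structure and frees that
 * structure from the callback once a grace period has elapsed.  The names
 * struct foo, foo_reclaim(), and the list usage below are hypothetical.
 *
 *	struct foo {
 *		struct list_head list;
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		struct foo *fp = container_of(rcu, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	list_del_rcu(&fp->list);
 *	call_rcu(&fp->rcu, foo_reclaim);
 *
 * Unlike synchronize_rcu(), call_rcu() never blocks, so it may be used from
 * contexts that cannot sleep; the callback is later invoked from softirq
 * context once all pre-existing readers have finished.
 */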

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	if (!rcu_scheduler_active)
		return;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
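
/*
 * Illustrative usage sketch (not part of this implementation): the classic
 * synchronize_rcu() update pattern replaces a reader-visible pointer and
 * then waits for a grace period before freeing the old version.  The names
 * my_global_ptr and struct foo are hypothetical, updaters are assumed to be
 * serialized elsewhere (for example by a mutex), and error handling is
 * omitted.
 *
 *	old_fp = my_global_ptr;
 *	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *	*new_fp = *old_fp;
 *	new_fp->a = 42;
 *	rcu_assign_pointer(my_global_ptr, new_fp);
 *	synchronize_rcu();
 *	kfree(old_fp);
 *
 * After synchronize_rcu() returns, no reader that could have obtained a
 * reference to old_fp can still be within its RCU read-side critical
 * section, so the free is safe.  This copy-then-update sequence is the
 * "read-copy update" that gives RCU its name.
 */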

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blocked_tasks[2]) ||
	       !list_empty(&rnp->blocked_tasks[3]);
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp))
			break;
		if (rnp->parent == NULL) {
			wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	int must_wait;

	raw_spin_lock(&rnp->lock); /* irqs already disabled */
	list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
	list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
	must_wait = rcu_preempted_readers_exp(rnp);
	raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blocked_tasks[] lists, move all entries from the first set of
 * ->blocked_tasks[] lists to the second set, and finally wait for this
 * second set to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_rcu();
			return;
		}
		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
			goto mb_ret; /* Others did our work for us. */
	}
	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
		goto unlock_mb_ret; /* Others did our work for us. */

	/* Force all RCU readers onto ->blocked_tasks[]. */
	synchronize_sched_expedited();

	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}

	/* Snapshot current state of ->blocked_tasks[] lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

	/* Wait for snapshotted ->blocked_tasks[] lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptable RCU's callbacks to ->orphan_cbs_list.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
	rcu_send_cbs_to_orphanage(&rcu_preempt_state);
}

/*
 * Initialize preemptable RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because there is no preemptable RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * In classic RCU, call_rcu() is just call_rcu_sched().
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	call_rcu_sched(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptable RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptable RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptable RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
}

/*
 * Because preemptable RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we have preemptible RCU, just check whether this CPU needs
 * any flavor of RCU.  Do not chew up lots of CPU cycles with preemption
 * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_needs_cpu_quick_check(cpu);
}

/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
 * entry is not configured, so we never do need to.
 */
static void rcu_needs_cpu_flush(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#define RCU_NEEDS_CPU_FLUSHES 5
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we are not supporting preemptible RCU, attempt to accelerate
 * any current grace periods so that RCU no longer needs this CPU, but
 * only if all other CPUs are already in dynticks-idle mode.  This will
 * allow the CPU cores to be powered down immediately, as opposed to after
 * waiting many milliseconds for grace periods to elapse.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do a
 * raise_softirq() to cause rcu_process_callbacks() to be invoked later.
 * The per-cpu rcu_dyntick_drain variable controls the sequencing.
 */
int rcu_needs_cpu(int cpu)
{
	int c = 0;
	int snap;
	int snap_nmi;
	int thatcpu;

	/* Check for being in the holdoff period. */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
		return rcu_needs_cpu_quick_check(cpu);

	/* Don't bother unless we are the last non-dyntick-idle CPU. */
	for_each_online_cpu(thatcpu) {
		if (thatcpu == cpu)
			continue;
		snap = per_cpu(rcu_dynticks, thatcpu).dynticks;
		snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi;
		smp_mb(); /* Order sampling of snap with end of grace period. */
		if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) {
			per_cpu(rcu_dyntick_drain, cpu) = 0;
			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
			return rcu_needs_cpu_quick_check(cpu);
		}
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		return rcu_needs_cpu_quick_check(cpu);
	}

	/* Do one step pushing remaining RCU callbacks through. */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
		c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
		c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
	}

	/* If RCU callbacks are still pending, RCU still needs this CPU. */
	if (c)
		raise_softirq(RCU_SOFTIRQ);
	return c;
}

/*
 * Check to see if we need to continue a callback-flush operation to
 * allow the last CPU to enter dyntick-idle mode.
 */
static void rcu_needs_cpu_flush(void)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
		return;
	local_irq_save(flags);
	(void)rcu_needs_cpu(cpu);
	local_irq_restore(flags);
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */