static struct rcu_state *rcu_state;
+/*
+ * The rcu_scheduler_active variable transitions from zero to one just
+ * before the first task is spawned. So when this variable is zero, RCU
+ * can assume that there is but one task, allowing RCU to (for example)
+ * optimize synchronize_sched() to a simple barrier(). When this variable
+ * is one, RCU must actually do all the hard work required to detect real
+ * grace periods. This variable is also used to suppress boot-time false
+ * positives from lockdep-RCU error checking.
+ */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
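As a minimal sketch of the fast path this enables (example_synchronize_sched() is a hypothetical name, not part of this patch): while rcu_scheduler_active is zero only one task exists, so no reader can be inside a read-side critical section and a compiler barrier is a valid grace period.

/* Hypothetical sketch, not from this patch: boot-time fast path. */
static void example_synchronize_sched(void)
{
        if (!rcu_scheduler_active) {
                barrier();      /* Sole task: no readers to wait for. */
                return;
        }
        /* From here on, a real grace period must be detected. */
}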
+/*
+ * The rcu_scheduler_fully_active variable transitions from zero to one
+ * during the early_initcall() processing, which is after the scheduler
+ * is capable of creating new tasks. So RCU processing (for example,
+ * creating tasks for RCU priority boosting) must be delayed until after
+ * rcu_scheduler_fully_active transitions from zero to one. We also
+ * currently delay invocation of any RCU callbacks until after this point.
+ *
+ * It might later prove better for people registering RCU callbacks during
+ * early boot to take responsibility for these callbacks, but one step at
+ * a time.
+ */
+static int rcu_scheduler_fully_active __read_mostly;
+
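To make the deferral concrete, here is a hedged sketch of an early-boot caller (example_head, example_cb, and example_early_boot_user are hypothetical; the call_rcu() signature is the standard one): the callback is queued immediately, but invoke_rcu_callbacks() below refuses to run it until rcu_scheduler_fully_active becomes one.

/* Hypothetical early-boot user of call_rcu(); not part of this patch. */
static struct rcu_head example_head;

static void example_cb(struct rcu_head *rhp)
{
        /* Runs only after rcu_scheduler_fully_active transitions to one. */
}

static void example_early_boot_user(void)
{
        call_rcu(&example_head, example_cb);    /* Queued now, invoked later. */
}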
#ifdef CONFIG_RCU_BOOST
/*
 * Control variables for per-CPU and per-rcu_node kthreads.  These
 * handle all flavors of RCU.
 */
DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);
-static char rcu_kthreads_spawnable;

#endif /* #ifdef CONFIG_RCU_BOOST */
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
+ if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+ return;
if (likely(!rsp->boost)) {
rcu_do_batch(rsp, rdp);
return;
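ACCESS_ONCE() forces a single volatile load of rcu_scheduler_fully_active, so the compiler can neither cache a stale value nor refetch it mid-check; kernels of this era define it essentially as follows (later kernels spell it READ_ONCE()):

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))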
struct sched_param sp;
struct task_struct *t;
- if (!rcu_kthreads_spawnable ||
+ if (!rcu_scheduler_fully_active ||
per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
return 0;
t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
struct sched_param sp;
struct task_struct *t;
- if (!rcu_kthreads_spawnable ||
+ if (!rcu_scheduler_fully_active ||
rnp->qsmaskinit == 0)
return 0;
if (rnp->node_kthread_task == NULL) {
int cpu;
struct rcu_node *rnp;
- rcu_kthreads_spawnable = 1;
+ rcu_scheduler_fully_active = 1;
for_each_possible_cpu(cpu) {
per_cpu(rcu_cpu_has_work, cpu) = 0;
if (cpu_online(cpu))
struct rcu_node *rnp = rdp->mynode;
/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
- if (rcu_kthreads_spawnable) {
+ if (rcu_scheduler_fully_active) {
(void)rcu_spawn_one_cpu_kthread(cpu);
if (rnp->node_kthread_task == NULL)
(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
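For CPUs brought online before early_initcall() processing, the check above makes rcu_prepare_kthreads() a no-op; rcu_spawn_kthreads() then picks those CPUs up via its for_each_possible_cpu()/cpu_online() loop. A comment-form sketch of the assumed call paths (the hotplug-notifier detail is not shown in this excerpt):

/*
 * Assumed ordering (not quoted from this patch):
 *   CPU_UP_PREPARE notifier -> rcu_prepare_kthreads(cpu)
 *     - no-op until rcu_scheduler_fully_active != 0
 *   early_initcall -> rcu_spawn_kthreads()
 *     - sets rcu_scheduler_fully_active, then spawns kthreads
 *       for all CPUs already online
 */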
static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}
+/*
+ * Called during early_initcall() processing in the !CONFIG_RCU_BOOST
+ * case; the CONFIG_RCU_BOOST case instead sets the flag in
+ * rcu_spawn_kthreads().
+ */
+static int __init rcu_scheduler_really_started(void)
+{
+ rcu_scheduler_fully_active = 1;
+ return 0;
+}
+early_initcall(rcu_scheduler_really_started);
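Both configurations now flip the flag at early_initcall() time: with CONFIG_RCU_BOOST, rcu_spawn_kthreads() sets it just before spawning the kthreads (see the hunk above); without boost, rcu_scheduler_really_started() does nothing else. A rough boot-order timeline, inferred from the comments above rather than quoted from the patch:

/*
 * Rough boot ordering (assumed):
 *   rest_init()                     rcu_scheduler_active = 1
 *       (just before the first task is spawned)
 *   early_initcall() processing     rcu_scheduler_fully_active = 1
 *       (kthreads spawned; deferred RCU callbacks become invokable)
 */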
+
static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}