rcu: Create rcuo kthreads only for onlined CPUs
author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
	Fri, 11 Jul 2014 18:30:24 +0000 (11:30 -0700)
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
	Tue, 16 Sep 2014 17:08:02 +0000 (10:08 -0700)
RCU currently uses for_each_possible_cpu() to spawn rcuo kthreads,
which can result in more rcuo kthreads than one would expect; for
example, derRichard reported 64 CPUs' worth of rcuo kthreads on an
8-CPU image.  This commit therefore creates rcuo kthreads only for
those CPUs that actually come online.

This was reported by derRichard on the OFTC IRC network.

Reported-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Tested-by: Paul Gortmaker <paul.gortmaker@windriver.com>
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h

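To make the scale of the problem concrete before the diff, here is a toy
userspace sketch (illustrative only: the CPU counts come from the report
above, and cpu_online() is a local stand-in rather than the kernel
primitive) contrasting spawning per possible CPU with spawning per
online CPU:

/* Toy model: 64 possible CPUs, only 8 online, as in the report above. */
#include <stdio.h>
#include <stdbool.h>

#define NR_POSSIBLE_CPUS 64
#define NR_ONLINE_CPUS    8

static bool cpu_online(int cpu)		/* local stand-in, not the kernel's */
{
	return cpu < NR_ONLINE_CPUS;
}

int main(void)
{
	int old_way = 0, new_way = 0;

	for (int cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++) {
		old_way++;		/* one kthread per possible CPU */
		if (cpu_online(cpu))
			new_way++;	/* one kthread per online CPU */
	}
	printf("per possible CPU: %d rcuo kthreads\n", old_way);
	printf("per online CPU:   %d rcuo kthreads\n", new_way);
	return 0;
}

It prints 64 versus 8, the over-spawn derRichard observed.
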
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9be47f43903b1fa4501768c9402742e60c652669..b49c8433f8344891ff48ec56e60585847ce8d746 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3442,6 +3442,7 @@ static int rcu_cpu_notify(struct notifier_block *self,
        case CPU_UP_PREPARE_FROZEN:
                rcu_prepare_cpu(cpu);
                rcu_prepare_kthreads(cpu);
+               rcu_spawn_all_nocb_kthreads(cpu);
                break;
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
@@ -3506,8 +3507,8 @@ static int __init rcu_spawn_gp_kthread(void)
                raw_spin_lock_irqsave(&rnp->lock, flags);
                rsp->gp_kthread = t;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               rcu_spawn_nocb_kthreads(rsp);
        }
+       rcu_spawn_nocb_kthreads();
        rcu_spawn_boost_kthreads();
        return 0;
 }
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index a966092fdfd7b3b7af575da66436baa2d3864067..a9a226d2e80aff68c8e0645fc58eb749b9d17cab 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -593,7 +593,11 @@ static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
 static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
-static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
+static void rcu_spawn_all_nocb_kthreads(int cpu);
+static void __init rcu_spawn_nocb_kthreads(void);
+#ifdef CONFIG_RCU_NOCB_CPU
+static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
 static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 410c74424d962a6cdedee4b4428079d7b73c8d8d..31c7afb611fdb8ca179b3cad165e864f75486653 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2479,6 +2479,7 @@ void __init rcu_init_nohz(void)
                                     rdp->nxttail[RCU_NEXT_TAIL] != NULL);
                        init_nocb_callback_list(rdp);
                }
+               rcu_organize_nocb_kthreads(rsp);
        }
 }
 
@@ -2490,15 +2491,85 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
        rdp->nocb_follower_tail = &rdp->nocb_follower_head;
 }
 
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
+ * brought online out of order, this can require re-organizing the
+ * leader-follower relationships.
+ */
+static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
+{
+       struct rcu_data *rdp;
+       struct rcu_data *rdp_last;
+       struct rcu_data *rdp_old_leader;
+       struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
+       struct task_struct *t;
+
+       /*
+        * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
+        * then nothing to do.
+        */
+       if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
+               return;
+
+       /* If we didn't spawn the leader first, reorganize! */
+       rdp_old_leader = rdp_spawn->nocb_leader;
+       if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
+               rdp_last = NULL;
+               rdp = rdp_old_leader;
+               do {
+                       rdp->nocb_leader = rdp_spawn;
+                       if (rdp_last && rdp != rdp_spawn)
+                               rdp_last->nocb_next_follower = rdp;
+                       rdp_last = rdp;
+                       rdp = rdp->nocb_next_follower;
+                       rdp_last->nocb_next_follower = NULL;
+               } while (rdp);
+               rdp_spawn->nocb_next_follower = rdp_old_leader;
+       }
+
+       /* Spawn the kthread for this CPU and RCU flavor. */
+       t = kthread_run(rcu_nocb_kthread, rdp_spawn,
+                       "rcuo%c/%d", rsp->abbr, cpu);
+       BUG_ON(IS_ERR(t));
+       ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
+}
+
+/*
+ * If the specified CPU is a no-CBs CPU that does not already have its
+ * rcuo kthreads, spawn them.
+ */
+static void rcu_spawn_all_nocb_kthreads(int cpu)
+{
+       struct rcu_state *rsp;
+
+       if (rcu_scheduler_fully_active)
+               for_each_rcu_flavor(rsp)
+                       rcu_spawn_one_nocb_kthread(rsp, cpu);
+}
+
+/*
+ * Once the scheduler is running, spawn rcuo kthreads for all online
+ * no-CBs CPUs.  This assumes that the early_initcall()s happen before
+ * non-boot CPUs come online -- if this changes, we will need to add
+ * some mutual exclusion.
+ */
+static void __init rcu_spawn_nocb_kthreads(void)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               rcu_spawn_all_nocb_kthreads(cpu);
+}
+
 /* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
 static int rcu_nocb_leader_stride = -1;
 module_param(rcu_nocb_leader_stride, int, 0444);
 
 /*
- * Create a kthread for each RCU flavor for each no-CBs CPU.
- * Also initialize leader-follower relationships.
+ * Initialize leader-follower relationships for all no-CBs CPUs.
  */
-static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
 {
        int cpu;
        int ls = rcu_nocb_leader_stride;
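
A note on the reorganization loop in rcu_spawn_one_nocb_kthread() above:
if a follower CPU comes online before its designated leader, the new
arrival takes over as leader, and the old leader's whole group
(including the not-yet-spawned old leader itself) is re-threaded behind
it.  A standalone userspace model of just that loop may help; this is a
sketch, not kernel code: locking and the rcu_is_nocb_cpu() check are
omitted, "spawning" merely sets a flag, and the struct and field names
only mirror the rcu_data fields used above.

#include <stdio.h>
#include <stdbool.h>

struct node {				/* mirrors the rcu_data fields above */
	int cpu;
	struct node *leader;		/* ~ rdp->nocb_leader */
	struct node *next_follower;	/* ~ rdp->nocb_next_follower */
	bool kthread;			/* ~ rdp->nocb_kthread */
};

static void spawn(struct node *spawn_n)
{
	struct node *old_leader = spawn_n->leader;
	struct node *last = NULL;
	struct node *n;

	if (spawn_n->kthread)
		return;
	/* Leader not spawned yet?  Take over and re-thread its group. */
	if (old_leader != spawn_n && !old_leader->kthread) {
		n = old_leader;
		do {
			n->leader = spawn_n;
			if (last && n != spawn_n)
				last->next_follower = n;
			last = n;
			n = n->next_follower;
			last->next_follower = NULL;
		} while (n);
		spawn_n->next_follower = old_leader;
	}
	spawn_n->kthread = true;	/* kernel: kthread_run(...) */
}

int main(void)
{
	struct node cpus[4];

	/* Stride of 2: CPU 0 leads CPU 1, CPU 2 leads CPU 3. */
	for (int i = 0; i < 4; i++)
		cpus[i] = (struct node){ .cpu = i, .leader = &cpus[i & ~1] };
	cpus[0].next_follower = &cpus[1];
	cpus[2].next_follower = &cpus[3];

	spawn(&cpus[3]);	/* follower onlines before its leader... */
	spawn(&cpus[2]);	/* ...so CPU 3 now leads CPU 2 */

	for (int i = 2; i < 4; i++)
		printf("cpu %d: leader %d, next follower %d\n", i,
		       cpus[i].leader->cpu,
		       cpus[i].next_follower ? cpus[i].next_follower->cpu : -1);
	return 0;
}

With a stride of two, spawning CPU 3 before CPU 2 prints cpu 3 as its
own leader with cpu 2 as its follower; the later spawn of CPU 2 then
finds its leader already running and leaves the lists alone.
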
@@ -2506,7 +2577,6 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
        struct rcu_data *rdp;
        struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
        struct rcu_data *rdp_prev = NULL;
-       struct task_struct *t;
 
        if (rcu_nocb_mask == NULL)
                return;
@@ -2532,12 +2602,6 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
                        rdp_prev->nocb_next_follower = rdp;
                }
                rdp_prev = rdp;
-
-               /* Spawn the kthread for this CPU. */
-               t = kthread_run(rcu_nocb_kthread, rdp,
-                               "rcuo%c/%d", rsp->abbr, cpu);
-               BUG_ON(IS_ERR(t));
-               ACCESS_ONCE(rdp->nocb_kthread) = t;
        }
 }
 
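The grouping arithmetic that rcu_organize_nocb_kthreads() keeps
(leaders every rcu_nocb_leader_stride CPUs, defaulting to
int_sqrt(nr_cpu_ids)) can be checked in isolation.  A small userspace
sketch follows; it is illustrative only: it assumes every CPU is in
rcu_nocb_mask, approximates int_sqrt() with an integer loop, and the
leader-selection test mirrors the function's surrounding code, which
these hunks do not show in full.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int nr_cpu_ids = 8;
	int ls;				/* leader stride */
	int nl = 0;			/* next leader's CPU number */

	for (ls = 1; (ls + 1) * (ls + 1) <= nr_cpu_ids; ls++)
		;			/* integer stand-in for int_sqrt() */

	for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
		if (cpu >= nl) {	/* time for a new leader */
			nl = DIV_ROUND_UP(cpu + 1, ls) * ls;
			printf("cpu %d: leader\n", cpu);
		} else {
			printf("cpu %d: follower of cpu %d\n",
			       cpu, cpu - cpu % ls);
		}
	}
	return 0;
}

For nr_cpu_ids of 8 the stride comes out to 2, so CPUs 0, 2, 4, and 6
each lead a two-CPU group, matching the sqrt(nr_cpu_ids) default
described in the comment above.
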
@@ -2591,7 +2655,11 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
 {
 }
 
-static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+static void rcu_spawn_all_nocb_kthreads(int cpu)
+{
+}
+
+static void __init rcu_spawn_nocb_kthreads(void)
 {
 }