rcu: Avoid IPIing idle CPUs from synchronize_sched_expedited()
author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Wed, 3 Sep 2014 19:25:56 +0000 (12:25 -0700)
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>
           Tue, 28 Oct 2014 20:49:30 +0000 (13:49 -0700)
Currently, synchronize_sched_expedited() sends IPIs to all online CPUs,
even those that are idle or executing in nohz_full= userspace.  Because
idle CPUs and nohz_full= userspace CPUs are in extended quiescent states,
there is no need to IPI them in the first place.  This commit therefore
avoids IPIing CPUs that are already in extended quiescent states.
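For reference, below is a minimal userspace sketch of the selection logic this
patch adds, not the kernel code itself: a per-CPU counter whose low bit is set
only while the CPU is outside an extended quiescent state, mirroring the
rcu_dynticks parity check in the diff.  NR_CPUS, the dynticks[] array, and
select_cpus_to_ipi() are illustrative stand-ins for the kernel's per-CPU state
and cpumask handling.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/*
 * Stand-in for per-CPU rcu_dynticks.dynticks: odd while the CPU is
 * active (not in an extended quiescent state), even while it is idle
 * or running in nohz_full= userspace.
 */
static atomic_int dynticks[NR_CPUS];

/*
 * Build the set of CPUs that still need an IPI: skip the current CPU
 * and any CPU whose dynticks counter is even (already quiescent).
 */
static void select_cpus_to_ipi(int this_cpu, bool to_ipi[NR_CPUS])
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (cpu == this_cpu) {
                        to_ipi[cpu] = false;
                        continue;
                }
                /*
                 * The patch uses atomic_add_return(0, ...) to get a
                 * fully ordered read; a seq_cst load is enough for
                 * this model.
                 */
                to_ipi[cpu] = atomic_load(&dynticks[cpu]) & 0x1;
        }
}

int main(void)
{
        bool to_ipi[NR_CPUS];

        /* CPUs 1 and 3 are active (odd counter); the rest are idle. */
        atomic_store(&dynticks[1], 1);
        atomic_store(&dynticks[3], 5);

        select_cpus_to_ipi(0, to_ipi);
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu %d: %s\n", cpu, to_ipi[cpu] ? "IPI" : "skip");
        return 0;
}

In the real patch the surviving CPUs are collected in a cpumask_var_t and
handed to try_stop_cpus(); if the mask ends up empty, the stop-CPUs pass is
skipped entirely via the all_cpus_idle label.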

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/tree.c

index 7f73c5edf8cfa1a22ac91308d6e2d045841d0795..9e3c20f117cd635ec41b4c0988082e3c7c44545e 100644
@@ -2950,6 +2950,9 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  */
 void synchronize_sched_expedited(void)
 {
+       cpumask_var_t cm;
+       bool cma = false;
+       int cpu;
        long firstsnap, s, snap;
        int trycount = 0;
        struct rcu_state *rsp = &rcu_sched_state;
@@ -2984,11 +2987,26 @@ void synchronize_sched_expedited(void)
        }
        WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
+       /* Offline CPUs, idle CPUs, and any CPU we run on are quiescent. */
+       cma = zalloc_cpumask_var(&cm, GFP_KERNEL);
+       if (cma) {
+               cpumask_copy(cm, cpu_online_mask);
+               cpumask_clear_cpu(raw_smp_processor_id(), cm);
+               for_each_cpu(cpu, cm) {
+                       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+                       if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+                               cpumask_clear_cpu(cpu, cm);
+               }
+               if (cpumask_weight(cm) == 0)
+                       goto all_cpus_idle;
+       }
+
        /*
         * Each pass through the following loop attempts to force a
         * context switch on each CPU.
         */
-       while (try_stop_cpus(cpu_online_mask,
+       while (try_stop_cpus(cma ? cm : cpu_online_mask,
                             synchronize_sched_expedited_cpu_stop,
                             NULL) == -EAGAIN) {
                put_online_cpus();
@@ -3000,6 +3018,7 @@ void synchronize_sched_expedited(void)
                        /* ensure test happens before caller kfree */
                        smp_mb__before_atomic(); /* ^^^ */
                        atomic_long_inc(&rsp->expedited_workdone1);
+                       free_cpumask_var(cm);
                        return;
                }
 
@@ -3009,6 +3028,7 @@ void synchronize_sched_expedited(void)
                } else {
                        wait_rcu_gp(call_rcu_sched);
                        atomic_long_inc(&rsp->expedited_normal);
+                       free_cpumask_var(cm);
                        return;
                }
 
@@ -3018,6 +3038,7 @@ void synchronize_sched_expedited(void)
                        /* ensure test happens before caller kfree */
                        smp_mb__before_atomic(); /* ^^^ */
                        atomic_long_inc(&rsp->expedited_workdone2);
+                       free_cpumask_var(cm);
                        return;
                }
 
@@ -3032,6 +3053,7 @@ void synchronize_sched_expedited(void)
                        /* CPU hotplug operation in flight, use normal GP. */
                        wait_rcu_gp(call_rcu_sched);
                        atomic_long_inc(&rsp->expedited_normal);
+                       free_cpumask_var(cm);
                        return;
                }
                snap = atomic_long_read(&rsp->expedited_start);
@@ -3039,6 +3061,9 @@ void synchronize_sched_expedited(void)
        }
        atomic_long_inc(&rsp->expedited_stoppedcpus);
 
+all_cpus_idle:
+       free_cpumask_var(cm);
+
        /*
         * Everyone up to our most recent fetch is covered by our grace
         * period.  Update the counter, but only if our work is still