rcu: Stop disabling CPU hotplug in synchronize_rcu_expedited()
author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
	Thu, 11 Jun 2015 21:50:22 +0000 (14:50 -0700)
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
	Fri, 17 Jul 2015 21:58:42 +0000 (14:58 -0700)
The fact that tasks could be migrated from leaf to root rcu_node
structures meant that synchronize_rcu_expedited() had to disable
CPU hotplug.  However, tasks now stay put, so this commit removes the
CPU-hotplug disabling from synchronize_rcu_expedited().
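
For orientation, here is the retry loop as it reads once the hunks below
are applied (reconstructed from the diff; code outside the hunks elided).
With no hotplug reference to drop, the two "others did our work" exits and
the slow-path fallback no longer need put_online_cpus(), and the branches
that became single statements lose their braces:

	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (ULONG_CMP_LT(snap,
		    READ_ONCE(sync_rcu_preempt_exp_count)))
			goto mb_ret; /* Others did our work for us. */
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			wait_rcu_gp(call_rcu);
			return;
		}
	}
	if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count)))
		goto unlock_mb_ret; /* Others did our work for us. */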

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/tree_plugin.h

index 5dac0a10a985fb6c0869d08ed9e26709b1a39771..7234f03e0aa26b274f6525238c3b098fdfe8bb5c 100644
@@ -727,20 +727,6 @@ void synchronize_rcu_expedited(void)
        snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1;
        smp_mb(); /* Above access cannot bleed into critical section. */
 
-       /*
-        * Block CPU-hotplug operations.  This means that any CPU-hotplug
-        * operation that finds an rcu_node structure with tasks in the
-        * process of being boosted will know that all tasks blocking
-        * this expedited grace period will already be in the process of
-        * being boosted.  This simplifies the process of moving tasks
-        * from leaf to root rcu_node structures.
-        */
-       if (!try_get_online_cpus()) {
-               /* CPU-hotplug operation in flight, fall back to normal GP. */
-               wait_rcu_gp(call_rcu);
-               return;
-       }
-
        /*
         * Acquire lock, falling back to synchronize_rcu() if too many
         * lock-acquisition failures.  Of course, if someone does the
@@ -748,22 +734,17 @@ void synchronize_rcu_expedited(void)
         */
        while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
                if (ULONG_CMP_LT(snap,
-                   READ_ONCE(sync_rcu_preempt_exp_count))) {
-                       put_online_cpus();
+                   READ_ONCE(sync_rcu_preempt_exp_count)))
                        goto mb_ret; /* Others did our work for us. */
-               }
                if (trycount++ < 10) {
                        udelay(trycount * num_online_cpus());
                } else {
-                       put_online_cpus();
                        wait_rcu_gp(call_rcu);
                        return;
                }
        }
-       if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count))) {
-               put_online_cpus();
+       if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count)))
                goto unlock_mb_ret; /* Others did our work for us. */
-       }
 
        /* force all RCU readers onto ->blkd_tasks lists. */
        synchronize_sched_expedited();
@@ -779,8 +760,6 @@ void synchronize_rcu_expedited(void)
        rcu_for_each_leaf_node(rsp, rnp)
                sync_rcu_preempt_exp_init2(rsp, rnp);
 
-       put_online_cpus();
-
        /* Wait for snapshotted ->blkd_tasks lists to drain. */
        rnp = rcu_get_root(rsp);
        wait_event(sync_rcu_preempt_exp_wq,
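
The trylock/backoff/fallback shape above generalizes beyond RCU: snapshot
a completion counter, try to become the thread that does the work, and if
the lock stays contended too long, fall back to the slow path.  As a rough
user-space analogue (plain pthreads; every name here is illustrative, not
from the kernel source), the control flow looks like this:

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t exp_mutex = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long exp_count;	/* bumped after each expedited pass */

	static void slow_path(void)
	{
		/* Stand-in for wait_rcu_gp(call_rcu): take the slow route. */
		printf("contended: falling back to the slow path\n");
	}

	static void expedited(void)
	{
		unsigned long snap = exp_count + 1;	/* pass that satisfies us */
		int trycount = 0;

		while (pthread_mutex_trylock(&exp_mutex) != 0) {
			/* Wraparound-safe "snap < exp_count", as ULONG_CMP_LT()
			 * does above; the kernel reads the counter with
			 * READ_ONCE(), and a real multithreaded program would
			 * need atomics here. */
			if ((long)(snap - exp_count) < 0)
				return;	/* a later pass already did our work */
			if (trycount++ < 10) {
				usleep(trycount * 10);	/* back off, then retry */
			} else {
				slow_path();	/* too contended: give up */
				return;
			}
		}
		if ((long)(snap - exp_count) < 0) {
			pthread_mutex_unlock(&exp_mutex);
			return;	/* done while we were acquiring the lock */
		}
		/* ... do the expedited work here ... */
		exp_count++;
		pthread_mutex_unlock(&exp_mutex);
	}

	int main(void)
	{
		expedited();
		return 0;
	}

The single-threaded main() only exercises the fast path; the point is the
control flow, which matches the post-patch loop now that no hotplug
reference has to be dropped on each early exit.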