rcu: Improve RCU-tasks energy efficiency
author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Mon, 28 Jul 2014 21:39:25 +0000 (14:39 -0700)
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Sun, 7 Sep 2014 23:27:27 +0000 (16:27 -0700)
The current RCU-tasks implementation uses strict polling to detect
callback arrivals.  This works quite well, but is not so good for
energy efficiency.  This commit therefore replaces the strict polling
with a wait queue.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/update.c

index bad7dbd4c2e38103f99df634f07d7ace07e0c159..444c8a30396393a68d66910dd5cd911971af476a 100644 (file)
@@ -365,6 +365,7 @@ early_initcall(check_cpu_stall_init);
 /* Global list of callbacks and associated lock. */
 static struct rcu_head *rcu_tasks_cbs_head;
 static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
+static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
 static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
 
 /* Track exiting tasks in order to allow them to be waited for. */
@@ -378,13 +379,17 @@ module_param(rcu_task_stall_timeout, int, 0644);
 void call_rcu_tasks(struct rcu_head *rhp, void (*func)(struct rcu_head *rhp))
 {
        unsigned long flags;
+       bool needwake;
 
        rhp->next = NULL;
        rhp->func = func;
        raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
+       needwake = !rcu_tasks_cbs_head;
        *rcu_tasks_cbs_tail = rhp;
        rcu_tasks_cbs_tail = &rhp->next;
        raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
+       if (needwake)
+               wake_up(&rcu_tasks_cbs_wq);
 }
 EXPORT_SYMBOL_GPL(call_rcu_tasks);
 
@@ -495,8 +500,12 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 
                /* If there were none, wait a bit and start over. */
                if (!list) {
-                       schedule_timeout_interruptible(HZ);
-                       WARN_ON(signal_pending(current));
+                       wait_event_interruptible(rcu_tasks_cbs_wq,
+                                                rcu_tasks_cbs_head);
+                       if (!rcu_tasks_cbs_head) {
+                               WARN_ON(signal_pending(current));
+                               schedule_timeout_interruptible(HZ/10);
+                       }
                        continue;
                }
 
@@ -602,6 +611,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
                        list = next;
                        cond_resched();
                }
+               schedule_timeout_uninterruptible(HZ/10);
        }
 }