sched: affine sync wakeups
author Ingo Molnar <mingo@elte.hu>
Mon, 15 Oct 2007 15:00:19 +0000 (17:00 +0200)
committer Ingo Molnar <mingo@elte.hu>
Mon, 15 Oct 2007 15:00:19 +0000 (17:00 +0200)
make sync wakeups affine for cache-cold tasks: if a cache-cold task
is woken up by a sync wakeup, use the opportunity to migrate it to the
waker's CPU straight away. (The two tasks are 'related' because they
communicate.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
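
For context, a minimal userspace sketch of the kind of workload this
targets (an illustration only, not part of the commit): two processes
ping-ponging one byte over a pair of pipes. The wakeups issued from
pipe_read()/pipe_write() are now sync wakeups, so the scheduler is told
the waker and the wakee are related and may pull a cache-cold partner
onto the waker's CPU instead of waking it remotely.

	/*
	 * Hypothetical pipe ping-pong benchmark; not part of this patch.
	 * Every read()/write() below blocks the partner process, and the
	 * subsequent wakeup goes through wake_up_interruptible_sync().
	 */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/wait.h>

	int main(void)
	{
		int ping[2], pong[2];
		char c = 'x';
		int i;

		if (pipe(ping) || pipe(pong)) {
			perror("pipe");
			return 1;
		}

		if (fork() == 0) {
			/* child: echo each byte back to the parent */
			for (i = 0; i < 100000; i++) {
				if (read(ping[0], &c, 1) != 1)
					break;
				if (write(pong[1], &c, 1) != 1)
					break;
			}
			_exit(0);
		}

		/* parent: send a byte, wait for the echo */
		for (i = 0; i < 100000; i++) {
			if (write(ping[1], &c, 1) != 1)
				break;
			if (read(pong[0], &c, 1) != 1)
				break;
		}
		wait(NULL);
		return 0;
	}

With the scheduler change below, the blocked side of each transfer is
attracted to the waker's CPU if it is still cache-cold, rather than
preempting or being woken on a remote CPU.
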
fs/pipe.c
kernel/sched.c
net/unix/af_unix.c

index f1fa2b412f0e71aea8c44ace43f780e4bdeed754..e66ec48e95d8f44223ba960e5fe82a9c7acd962d 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -382,7 +382,7 @@ redo:
 
        /* Signal writers asynchronously that there is more room. */
        if (do_wakeup) {
-               wake_up_interruptible(&pipe->wait);
+               wake_up_interruptible_sync(&pipe->wait);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
        if (ret > 0)
@@ -555,7 +555,7 @@ redo2:
 out:
        mutex_unlock(&inode->i_mutex);
        if (do_wakeup) {
-               wake_up_interruptible(&pipe->wait);
+               wake_up_interruptible_sync(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }
        if (ret > 0)
@@ -649,7 +649,7 @@ pipe_release(struct inode *inode, int decr, int decw)
        if (!pipe->readers && !pipe->writers) {
                free_pipe_info(inode);
        } else {
-               wake_up_interruptible(&pipe->wait);
+               wake_up_interruptible_sync(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }
index 5a91fe0b5de60bdff35e37fbe7ec570370488b75..7fd343462597089def2cbb251e4702bd3709b854 100644 (file)
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1521,6 +1521,12 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
                        unsigned long tl = this_load;
                        unsigned long tl_per_task;
 
+                       /*
+                        * Attract cache-cold tasks on sync wakeups:
+                        */
+                       if (sync && !task_hot(p, rq->clock, this_sd))
+                               goto out_set_cpu;
+
                        schedstat_inc(p, se.nr_wakeups_affine_attempts);
                        tl_per_task = cpu_avg_load_per_task(this_cpu);
 
@@ -1598,7 +1604,7 @@ out_activate:
         * the waker guarantees that the freshly woken up task is going
         * to be considered on this CPU.)
         */
-       if (!sync || cpu != this_cpu)
+       if (!sync || rq->curr == rq->idle)
                check_preempt_curr(rq, p);
        success = 1;
 
index 2b57eaf66abc4086f7387704c1993ee399ba65c3..6996cba5aa9664ac99f706b626137d98fb99d286 100644 (file)
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -334,7 +334,7 @@ static void unix_write_space(struct sock *sk)
        read_lock(&sk->sk_callback_lock);
        if (unix_writable(sk)) {
                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-                       wake_up_interruptible(sk->sk_sleep);
+                       wake_up_interruptible_sync(sk->sk_sleep);
                sk_wake_async(sk, 2, POLL_OUT);
        }
        read_unlock(&sk->sk_callback_lock);
@@ -1639,7 +1639,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (!skb)
                goto out_unlock;
 
-       wake_up_interruptible(&u->peer_wait);
+       wake_up_interruptible_sync(&u->peer_wait);
 
        if (msg->msg_name)
                unix_copy_addr(msg, skb->sk);