sched: Set skip_clock_update in yield_task_fair()
authorMike Galbraith <mgalbraith@suse.de>
Tue, 22 Nov 2011 14:21:26 +0000 (15:21 +0100)
committerIngo Molnar <mingo@elte.hu>
Tue, 6 Dec 2011 08:06:24 +0000 (09:06 +0100)
This is another case where we are on our way to schedule(),
so can save a useless clock update and resulting microscopic
vruntime update.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1321971686.6855.18.camel@marge.simson.net
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched/core.c
kernel/sched/fair.c

index ca8fd44145acdb17a8fcdd8334c0b8b9adc89120..db313c33af29cbd8b042a24f953091a923b985c8 100644 (file)
@@ -4547,6 +4547,13 @@ again:
                 */
                if (preempt && rq != p_rq)
                        resched_task(p_rq->curr);
+       } else {
+               /*
+                * We might have set it in yield_task_fair(), but are
+                * not going to schedule(), so don't want to skip
+                * the next update.
+                */
+               rq->skip_clock_update = 0;
        }
 
 out:
index 8e534a05e3edd4a7af6c763f609de1a5bbba91fb..81ccb811afb49e767b0b80e0669d430c182774c2 100644 (file)
@@ -3075,6 +3075,12 @@ static void yield_task_fair(struct rq *rq)
                 * Update run-time statistics of the 'current'.
                 */
                update_curr(cfs_rq);
+               /*
+                * Tell update_rq_clock() that we've just updated,
+                * so we don't do microscopic update in schedule()
+                * and double the fastpath cost.
+                */
+               rq->skip_clock_update = 1;
        }
 
        set_skip_buddy(se);