perf_counter: round-robin per-CPU counters too
authorIngo Molnar <mingo@elte.hu>
Mon, 4 May 2009 16:54:32 +0000 (18:54 +0200)
committerIngo Molnar <mingo@elte.hu>
Mon, 4 May 2009 17:29:57 +0000 (19:29 +0200)
This used to be unstable when we had the rq->lock dependencies,
but now that those are a thing of the past we can turn on per-CPU
counter RR too.

[ Impact: handle counter over-commit for per-CPU counters too ]

LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/perf_counter.c

index 8660ae5795300a110ab79ace2667a2867b39af41..b9679c36bcc281e38236e57e7661dc53efe4fe38 100644 (file)
@@ -1069,18 +1069,14 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
        struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
        struct perf_counter_context *ctx = &curr->perf_counter_ctx;
-       const int rotate_percpu = 0;
 
-       if (rotate_percpu)
-               perf_counter_cpu_sched_out(cpuctx);
+       perf_counter_cpu_sched_out(cpuctx);
        perf_counter_task_sched_out(curr, cpu);
 
-       if (rotate_percpu)
-               rotate_ctx(&cpuctx->ctx);
+       rotate_ctx(&cpuctx->ctx);
        rotate_ctx(ctx);
 
-       if (rotate_percpu)
-               perf_counter_cpu_sched_in(cpuctx, cpu);
+       perf_counter_cpu_sched_in(cpuctx, cpu);
        perf_counter_task_sched_in(curr, cpu);
 }