perf: Optimize ctx_sched_out()
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Sat, 9 Apr 2011 19:17:40 +0000 (21:17 +0200)
committer Ingo Molnar <mingo@elte.hu>
Sat, 28 May 2011 16:01:09 +0000 (18:01 +0200)
Oleg noted that ctx_sched_out() disables the PMU even when it might not
actually do anything. Avoid the needless PMU-disabling by only disabling
the PMU once we know there are active events to schedule out.

Reported-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.665385503@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/events/core.c

index d863b3c057bba8fe4add3e774a3bb86083c493a5..4d9a1f014286083a938d836dc30c74ac7baed622 100644
@@ -1760,7 +1760,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
        struct perf_event *event;
 
        raw_spin_lock(&ctx->lock);
-       perf_pmu_disable(ctx->pmu);
        ctx->is_active = 0;
        if (likely(!ctx->nr_events))
                goto out;
@@ -1770,6 +1769,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
        if (!ctx->nr_active)
                goto out;
 
+       perf_pmu_disable(ctx->pmu);
        if (event_type & EVENT_PINNED) {
                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
@@ -1779,8 +1779,8 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
        }
-out:
        perf_pmu_enable(ctx->pmu);
+out:
        raw_spin_unlock(&ctx->lock);
 }
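
For reference, a minimal sketch of how ctx_sched_out() reads with this change
applied, reconstructed only from the hunk context above; the lines elided
between the hunks are marked with a comment, and the helpers it calls
(group_sched_out(), perf_pmu_disable()/perf_pmu_enable()) are assumed from the
surrounding kernel code rather than shown here.

/*
 * Sketch of ctx_sched_out() after this patch, pieced together from the
 * diff context above; code between the hunks is elided.
 */
static void ctx_sched_out(struct perf_event_context *ctx,
                          struct perf_cpu_context *cpuctx,
                          enum event_type_t event_type)
{
        struct perf_event *event;

        raw_spin_lock(&ctx->lock);
        ctx->is_active = 0;
        if (likely(!ctx->nr_events))
                goto out;               /* empty context: PMU never touched */

        /* ... lines elided between the first two hunks ... */

        if (!ctx->nr_active)
                goto out;               /* nothing active: PMU never touched */

        perf_pmu_disable(ctx->pmu);     /* disable only when there is real work */
        if (event_type & EVENT_PINNED) {
                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
        }

        if (event_type & EVENT_FLEXIBLE) {
                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
        }
        perf_pmu_enable(ctx->pmu);      /* re-enable before the early-exit label */
out:
        raw_spin_unlock(&ctx->lock);
}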