perf, x86: Avoid double disable on throttle vs ioctl(PERF_IOC_DISABLE)
author	Peter Zijlstra <a.p.zijlstra@chello.nl>
Mon, 8 Mar 2010 16:51:33 +0000 (17:51 +0100)
committer	Ingo Molnar <mingo@elte.hu>
Wed, 10 Mar 2010 12:22:31 +0000 (13:22 +0100)
Calling ioctl(PERF_EVENT_IOC_DISABLE) on a throttled counter would result
in a double disable; cure this by using x86_pmu_{start,stop} for
throttle/unthrottle and teaching x86_pmu_stop() to check ->active_mask.
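
To illustrate the pattern (a minimal userspace sketch, not the kernel code
itself; demo_start/demo_stop, hw_enable/hw_disable and test_and_clear are
illustrative stand-ins for x86_pmu_start/x86_pmu_stop, x86_pmu.enable/.disable
and __test_and_clear_bit): stop() bails out unless it actually clears the
event's bit in active_mask, so a second stop -- e.g. the throttling path
followed by ioctl(PERF_EVENT_IOC_DISABLE) -- becomes a no-op instead of a
double disable.

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned long active_mask;	/* one bit per hardware counter */

	/* stand-ins for x86_pmu.enable()/x86_pmu.disable() */
	static void hw_enable(int idx)  { printf("enable  counter %d\n", idx); }
	static void hw_disable(int idx) { printf("disable counter %d\n", idx); }

	/* non-atomic stand-in for __test_and_clear_bit() */
	static bool test_and_clear(unsigned long *mask, int bit)
	{
		bool was_set = *mask & (1UL << bit);

		*mask &= ~(1UL << bit);
		return was_set;
	}

	static void demo_start(int idx)
	{
		active_mask |= 1UL << idx;
		hw_enable(idx);
	}

	static void demo_stop(int idx)
	{
		/* already stopped (e.g. by the throttle path)? do nothing */
		if (!test_and_clear(&active_mask, idx))
			return;

		hw_disable(idx);
	}

	int main(void)
	{
		demo_start(0);
		demo_stop(0);	/* throttle stops the counter ...               */
		demo_stop(0);	/* ... ioctl(PERF_EVENT_IOC_DISABLE) is a no-op */
		return 0;
	}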

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: paulus@samba.org
Cc: eranian@google.com
Cc: robert.richter@amd.com
Cc: fweisbec@gmail.com
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c

index 9757b96f15f5e1d8003e874e7881a3ec8ba8813a..b68c4fb7a944ef0073020aaa0d517216200839b5 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -983,14 +983,8 @@ static int x86_pmu_start(struct perf_event *event)
 
 static void x86_pmu_unthrottle(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       struct hw_perf_event *hwc = &event->hw;
-
-       if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
-                               cpuc->events[hwc->idx] != event))
-               return;
-
-       x86_pmu.enable(event);
+       int ret = x86_pmu_start(event);
+       WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
@@ -1050,11 +1044,9 @@ static void x86_pmu_stop(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
 
-       /*
-        * Must be done before we disable, otherwise the nmi handler
-        * could reenable again:
-        */
-       __clear_bit(idx, cpuc->active_mask);
+       if (!__test_and_clear_bit(idx, cpuc->active_mask))
+               return;
+
        x86_pmu.disable(event);
 
        /*
@@ -1123,7 +1115,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       x86_pmu.disable(event);
+                       x86_pmu_stop(event);
        }
 
        if (handled)
index d87421c3f55b09a877c7c63c1c4e3da7d0022ed0..84bfde64a337909cfbb5549202804b7fd3c11085 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -774,7 +774,7 @@ again:
                data.period = event->hw.last_period;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       intel_pmu_disable_event(event);
+                       x86_pmu_stop(event);
        }
 
        intel_pmu_ack_status(ack);