Revert "perf_counter, x86: speed up the scheduling fast-path"
author     Ingo Molnar <mingo@elte.hu>     Mon, 25 May 2009 19:41:28 +0000 (21:41 +0200)
committer  Ingo Molnar <mingo@elte.hu>     Mon, 25 May 2009 19:41:28 +0000 (21:41 +0200)
This reverts commit b68f1d2e7aa21029d73c7d453a8046e95d351740.

It is causing problems (stuck/stuttering profiling) when mixed
NMI and non-NMI counters are used.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090525153931.703093461@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
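
For context on why the call site matters: perf_counters_lapic_init() programs
the local APIC LVT entry used for PMU interrupts, and the delivery mode it
selects depends on whether the counter in question uses NMIs. A simplified
sketch of the idea follows (not the verbatim kernel code; the APIC symbols are
taken from the x86 code of that era and the exact body may differ):

	/*
	 * Simplified sketch, not the verbatim kernel function: the local
	 * APIC's performance-counter LVT entry (LVTPC) can deliver PMU
	 * interrupts either as an NMI or as a regular interrupt vector.
	 * Which delivery mode is correct depends on the counter being
	 * scheduled, so the LVT has to be reprogrammed per counter.
	 */
	static void lapic_init_sketch(int nmi)
	{
		if (nmi)
			apic_write(APIC_LVTPC, APIC_DM_NMI);       /* NMI delivery  */
		else
			apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR); /* normal vector */
	}

The reverted commit made this call once at counter-init time (and
unconditionally as NMI at boot), so when NMI and non-NMI counters ended up on
the same CPU the LVT could be left in the wrong delivery mode, which would
explain the stuck/stuttering profiling described above. The hunks below move
the call back into the path that schedules a counter onto a hardware slot (the
try_generic hunk), so the LVT is reprogrammed for whichever counter is being
enabled, and they restore the non-NMI setting at boot.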
arch/x86/kernel/cpu/perf_counter.c

diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index c4b543d1a86fe4dbd60cb1c040250ad0eec5e8bd..189bf9d7cdabd9f08569b5694f380985e1bb4bc7 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -293,7 +293,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
                        return -EACCES;
                hwc->nmi = 1;
        }
-       perf_counters_lapic_init(hwc->nmi);
 
        if (!hwc->irq_period)
                hwc->irq_period = x86_pmu.max_period;
@@ -612,6 +611,8 @@ try_generic:
                hwc->counter_base = x86_pmu.perfctr;
        }
 
+       perf_counters_lapic_init(hwc->nmi);
+
        x86_pmu.disable(hwc, idx);
 
        cpuc->counters[idx] = counter;
@@ -1037,7 +1038,7 @@ void __init init_hw_perf_counters(void)
 
        pr_info("... counter mask:    %016Lx\n", perf_counter_mask);
 
-       perf_counters_lapic_init(1);
+       perf_counters_lapic_init(0);
        register_die_notifier(&perf_counter_nmi_notifier);
 }