perf/x86/intel: Move NMI clearing to end of PMI handler
author Andi Kleen <ak@linux.intel.com>
Tue, 18 Jun 2013 00:36:50 +0000 (17:36 -0700)
committer Ingo Molnar <mingo@kernel.org>
Wed, 19 Jun 2013 12:43:34 +0000 (14:43 +0200)
This avoids some problems with spurious PMIs on Haswell.
Haswell seems to behave more like P4 in this regard. Do
the same thing as the P4 perf handler and unmask the NMI
only at the end of the handler. This shouldn't make any
difference for earlier family 6 cores.

(Tested on Haswell, IvyBridge, Westmere, Saltwell (Atom).)

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Cc: Andi Kleen <ak@linux.jf.intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Link: http://lkml.kernel.org/r/1371515812-9646-5-git-send-email-andi@firstfloor.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c

index fb7fe44e6b9605ec36e03756fdb9115fe5d29963..f43473c50f52e441fbbfd059343efa9ba735bffa 100644
@@ -378,6 +378,7 @@ struct x86_pmu {
        struct event_constraint *event_constraints;
        struct x86_pmu_quirk *quirks;
        int             perfctr_second_write;
+       bool            late_ack;
 
        /*
         * sysfs attrs
index 4a4c4ba0c1d7fa2cc157a7137f7e1c418e4ff6c8..877672c433477953f7838a263968d6a2cd4bd7fa 100644
@@ -1185,15 +1185,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        cpuc = &__get_cpu_var(cpu_hw_events);
 
        /*
-        * Some chipsets need to unmask the LVTPC in a particular spot
-        * inside the nmi handler.  As a result, the unmasking was pushed
-        * into all the nmi handlers.
-        *
-        * This handler doesn't seem to have any issues with the unmasking
-        * so it was left at the top.
+        * No known reason not to always do the late ACK,
+        * but make it opt-in just in case.
         */
-       apic_write(APIC_LVTPC, APIC_DM_NMI);
-
+       if (!x86_pmu.late_ack)
+               apic_write(APIC_LVTPC, APIC_DM_NMI);
        intel_pmu_disable_all();
        handled = intel_pmu_drain_bts_buffer();
        status = intel_pmu_get_status();
@@ -1257,6 +1253,13 @@ again:
 
 done:
        intel_pmu_enable_all(0);
+       /*
+        * Only unmask the NMI after the overflow counters
+        * have been reset. This avoids spurious NMIs on
+        * Haswell CPUs.
+        */
+       if (x86_pmu.late_ack)
+               apic_write(APIC_LVTPC, APIC_DM_NMI);
        return handled;
 }
 
@@ -2260,6 +2263,7 @@ __init int intel_pmu_init(void)
        case 70:
        case 71:
        case 63:
+               x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
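
For illustration only, the ordering change in the patch boils down to the
sketch below: a small, compilable user-space C program that mimics the
early-ack vs. late-ack control flow. All names in it (pmu_late_ack,
unmask_lvtpc, drain_and_reset_counters, pmi_handler) are made up for the
example and are not the kernel's actual symbols.

    /*
     * Standalone sketch of the early- vs. late-ack ordering.
     * Illustrative names only; not the kernel's real identifiers.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool pmu_late_ack;           /* set on Haswell-like CPUs */

    /* stands in for apic_write(APIC_LVTPC, APIC_DM_NMI) */
    static void unmask_lvtpc(void)
    {
            printf("LVTPC unmasked\n");
    }

    /* stands in for draining/resetting the overflowed counters */
    static int drain_and_reset_counters(void)
    {
            printf("overflowed counters drained and reset\n");
            return 1;                   /* events handled */
    }

    static int pmi_handler(void)
    {
            int handled;

            if (!pmu_late_ack)          /* legacy behaviour: ack at the top */
                    unmask_lvtpc();

            handled = drain_and_reset_counters();

            if (pmu_late_ack)           /* Haswell: ack only after the reset */
                    unmask_lvtpc();

            return handled;
    }

    int main(void)
    {
            pmu_late_ack = true;        /* pretend we are on a Haswell-class CPU */
            return pmi_handler() ? 0 : 1;
    }

With late ack enabled, the LVTPC stays masked until the overflowed counters
have been reset, which is what avoids the spurious PMIs described in the
commit message; without it, the unmask happens first, as before.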