Impact: make perfcounter NMI and IRQ sequence more robust
Make __smp_perf_counter_interrupt() a bit more conservative: first disable
all counters, then read out the status. Most invocations occur because there
are real events, so there is no performance impact.
Code flow gets a bit simpler as well this way.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
struct cpu_hw_counters *cpuc;
u64 ack, status;
- rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
- if (!status) {
- ack_APIC_irq();
- return;
- }
-
/* Disable counters globally */
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
ack_APIC_irq();
cpuc = &per_cpu(cpu_hw_counters, cpu);
+ rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+ if (!status)
+ goto out;
+
again:
ack = status;
for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
if (status)
goto again;
-
+out:
/*
* Do not reenable when global enable is off:
*/