perf_counter: x86: Protect against infinite loops in intel_pmu_handle_irq()
author Ingo Molnar <mingo@elte.hu>
Fri, 15 May 2009 06:26:20 +0000 (08:26 +0200)
committer Ingo Molnar <mingo@elte.hu>
Fri, 15 May 2009 07:47:06 +0000 (09:47 +0200)
intel_pmu_handle_irq() can lock up in an infinite loop if the hardware
does not allow the acking of irqs. Alas, this happened in testing, so
make this robust and emit a warning if it happens in the future.

Also, clean up the IRQ handlers a bit.

[ Impact: improve perfcounter irq/nmi handling robustness ]

Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
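
For illustration, a minimal standalone C sketch of the guard being added:
a bounded retry loop that warns and bails out instead of spinning forever
when the (simulated) hardware never clears the pending status. The names
here (get_status, ack_status, MAX_LOOPS, handle_irq) and the userspace
fprintf() are stand-ins for the kernel's PMU status/ack helpers and
WARN_ONCE(); they are illustrative only, not the actual perf_counter code:

#include <stdio.h>
#include <stdint.h>

#define MAX_LOOPS 100

/* Simulated broken hardware: acking has no effect, the status stays set. */
static uint64_t stuck_status = 0x1;

static uint64_t get_status(void)
{
        return stuck_status;
}

static void ack_status(uint64_t ack)
{
        (void)ack;              /* a working PMU would clear these bits */
}

static int handle_irq(void)
{
        uint64_t status = get_status();
        int loops = 0;

        if (!status)
                return 0;
again:
        /* Bail out with a warning instead of looping forever. */
        if (++loops > MAX_LOOPS) {
                fprintf(stderr, "irq loop stuck after %d tries!\n", MAX_LOOPS);
                return 1;
        }

        ack_status(status);
        status = get_status();  /* stuck hardware: still pending */
        if (status)
                goto again;

        return 1;
}

int main(void)
{
        handle_irq();           /* warns and returns instead of hanging */
        return 0;
}

The patch bounds the loop at 100 iterations, far more than a working PMU
should need within a single interrupt, so hitting the limit points at
stuck hardware rather than real pending work.
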
arch/x86/kernel/cpu/perf_counter.c

index 46a82d1e4cbec1c827d772d7376926e1ec02ade2..5a7f718eb1e15c90a6ac0e7b5462cbe120980556 100644
@@ -722,9 +722,12 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter)
  */
 static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 {
-       int bit, cpu = smp_processor_id();
+       struct cpu_hw_counters *cpuc;
+       int bit, cpu, loops;
        u64 ack, status;
-       struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
+
+       cpu = smp_processor_id();
+       cpuc = &per_cpu(cpu_hw_counters, cpu);
 
        perf_disable();
        status = intel_pmu_get_status();
@@ -733,7 +736,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
                return 0;
        }
 
+       loops = 0;
 again:
+       if (++loops > 100) {
+               WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
+               return 1;
+       }
+
        inc_irq_stat(apic_perf_irqs);
        ack = status;
        for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
@@ -765,13 +774,14 @@ again:
 
 static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 {
-       int cpu = smp_processor_id();
-       struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
-       u64 val;
-       int handled = 0;
+       int cpu, idx, throttle = 0, handled = 0;
+       struct cpu_hw_counters *cpuc;
        struct perf_counter *counter;
        struct hw_perf_counter *hwc;
-       int idx, throttle = 0;
+       u64 val;
+
+       cpu = smp_processor_id();
+       cpuc = &per_cpu(cpu_hw_counters, cpu);
 
        if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) {
                throttle = 1;