x86: Add per-cpu stat counter for APIC ICR read tries
authorFernando Luis Vázquez Cao <fernando@oss.ntt.co.jp>
Tue, 13 Dec 2011 02:51:53 +0000 (11:51 +0900)
committerIngo Molnar <mingo@elte.hu>
Wed, 14 Dec 2011 08:32:05 +0000 (09:32 +0100)
In the IPI delivery slow path (NMI delivery) we retry the ICR
read to check for delivery completion a limited number of times.

[ The reason for the limited retries is that at some of the
  places where it is used (cpu boot, kdump, etc.) IPI delivery
  might not succeed (due to a firmware bug or system crash, for
  example) and in such a case it is better to give up and
  resume execution of other code. ]

This patch adds a new entry to /proc/interrupts, RTR, which
tells user space the number of times we retried the ICR read in
the IPI delivery slow path.

This should give some insight into how well the APIC
message delivery hardware is working - if the counts are way
too large then we are hitting a (very-) slow path way too
often.

Signed-off-by: Fernando Luis Vazquez Cao <fernando@oss.ntt.co.jp>
Cc: Jörn Engel <joern@logfs.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Link: http://lkml.kernel.org/n/tip-vzsp20lo2xdzh5f70g0eis2s@git.kernel.org
[ extended the changelog ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/include/asm/apic.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/irq.c

index 1a6c09af048fbd587613c750502677d7cbb1c052..5fe0bd5747565ffa541ebf3acdb7f82c8bcbe652 100644 (file)
@@ -410,6 +410,9 @@ extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
+
+DECLARE_PER_CPU(unsigned, icr_read_retry_count);
+
 static inline u32 apic_read(u32 reg)
 {
        return apic->read(reg);
index f98d84caf94cfdc43cedda4cb411ebea5213e4b7..2942794a9a52bbca37f2ddba5ba4a252fe23f4d1 100644 (file)
@@ -79,6 +79,11 @@ DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
+/*
+ * ICR read retry counter
+ */
+DEFINE_PER_CPU(unsigned, icr_read_retry_count);
+
 #ifdef CONFIG_X86_32
 
 /*
@@ -250,6 +255,7 @@ u32 native_safe_apic_wait_icr_idle(void)
                send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
                if (!send_status)
                        break;
+               percpu_inc(icr_read_retry_count);
                udelay(100);
        } while (timeout++ < 1000);
 
index 429e0c92924eede7d925c92836ac25505de86007..4bbf1627905b7d27d4840e3547c49f6e0fd87889 100644 (file)
@@ -74,6 +74,10 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
        seq_printf(p, "  IRQ work interrupts\n");
+       seq_printf(p, "%*s: ", prec, "RTR");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", per_cpu(icr_read_retry_count, j));
+       seq_printf(p, "  APIC ICR read retries\n");
 #endif
        if (x86_platform_ipi_callback) {
                seq_printf(p, "%*s: ", prec, "PLT");
@@ -136,6 +140,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
        sum += irq_stats(cpu)->irq_spurious_count;
        sum += irq_stats(cpu)->apic_perf_irqs;
        sum += irq_stats(cpu)->apic_irq_work_irqs;
+       sum += per_cpu(icr_read_retry_count, cpu);
 #endif
        if (x86_platform_ipi_callback)
                sum += irq_stats(cpu)->x86_platform_ipis;