perf counters: protect them against CSTATE transitions
author     Thomas Gleixner <tglx@linutronix.de>
Tue, 9 Dec 2008 20:43:39 +0000 (21:43 +0100)
committer  Ingo Molnar <mingo@elte.hu>
Thu, 11 Dec 2008 14:45:45 +0000 (15:45 +0100)
Impact: fix rare lost events problem

There are CPUs whose performance counters misbehave on CSTATE transitions,
so provide a way to just disable/enable them around deep idle methods.

(hw_perf_enable_all() is cheap on x86.)

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
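
The intended usage pattern, as a minimal sketch (enter_deep_cstate() is a
hypothetical stand-in for the FFH/inb()-based entry methods patched below):

	u64 pctrl;

	pctrl = hw_perf_disable_all();	/* save GLOBAL_CTRL, then zero it */
	enter_deep_cstate();		/* deep idle; counters quiescent */
	hw_perf_restore_ctrl(pctrl);	/* write the saved enable bits back */

Note that the restore path writes back the saved mask rather than calling
hw_perf_enable_all(), so counters that were deliberately off before idle
stay off afterwards.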
arch/x86/kernel/cpu/perf_counter.c
drivers/acpi/processor_idle.c
include/linux/perf_counter.h

diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 6a93d1f04d97e15319694e526ebc0d9b1b2c854f..0a7f3bea2dc6e7156ec223699e162802d2ee9c8a 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -12,6 +12,7 @@
 #include <linux/notifier.h>
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
+#include <linux/module.h>
 #include <linux/kdebug.h>
 #include <linux/sched.h>
 
@@ -119,10 +120,21 @@ void hw_perf_enable_all(void)
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
 }
 
-void hw_perf_disable_all(void)
+void hw_perf_restore_ctrl(u64 ctrl)
 {
+       wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
+}
+EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl);
+
+u64 hw_perf_disable_all(void)
+{
+       u64 ctrl;
+
+       rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
+       return ctrl;
 }
+EXPORT_SYMBOL_GPL(hw_perf_disable_all);
 
 static inline void
 __hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
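
Both helpers are a single MSR access: MSR_CORE_PERF_GLOBAL_CTRL carries one
enable bit per counter, so zeroing it stops all counters at once and writing
the snapshot back re-arms exactly the previous set. The runtime sequence of
the pair, as a sketch:

	u64 ctrl;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);	/* snapshot enable bits */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);		/* all counters off */
	/* ... deep C-state entry and wakeup ... */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);	/* saved bits back */

(wrmsr() takes 32-bit low/high halves; the low word suffices here since the
general-purpose enable bits sit in bits 0..n-1.)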
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5f8d746a9b8198e516321ccdaaa8c5a1486b39b7..cca804e6f1dd451bacea8437baff4ee3283bea5a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -270,8 +270,11 @@ static atomic_t c3_cpu_count;
 /* Common C-state entry for C2, C3, .. */
 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 {
+       u64 pctrl;
+
        /* Don't trace irqs off for idle */
        stop_critical_timings();
+       pctrl = hw_perf_disable_all();
        if (cstate->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cstate);
@@ -284,6 +287,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
                   gets asserted in time to freeze execution properly. */
                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
+       hw_perf_restore_ctrl(pctrl);
        start_critical_timings();
 }
 #endif /* !CONFIG_CPU_IDLE */
@@ -1425,8 +1429,11 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
  */
 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
+       u64 pctrl;
+
        /* Don't trace irqs off for idle */
        stop_critical_timings();
+       pctrl = hw_perf_disable_all();
        if (cx->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cx);
@@ -1441,6 +1448,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
                   gets asserted in time to freeze execution properly. */
                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
+       hw_perf_restore_ctrl(pctrl);
        start_critical_timings();
 }
 
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 22c4469abf44c6980673ecd0bead3ae8e2cd2e2d..5031b5614f25ea8a94c047615ac1abb3913783f3 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -156,6 +156,8 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu);
 extern void perf_counter_init_task(struct task_struct *task);
 extern void perf_counter_notify(struct pt_regs *regs);
 extern void perf_counter_print_debug(void);
+extern void hw_perf_restore_ctrl(u64 ctrl);
+extern u64 hw_perf_disable_all(void);
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu)          { }
@@ -166,6 +168,8 @@ perf_counter_task_tick(struct task_struct *task, int cpu)           { }
 static inline void perf_counter_init_task(struct task_struct *task)    { }
 static inline void perf_counter_notify(struct pt_regs *regs)           { }
 static inline void perf_counter_print_debug(void)                      { }
+static inline void hw_perf_restore_ctrl(u64 ctrl)                      { }
+static inline u64 hw_perf_disable_all(void)            { return 0; }
 #endif
 
 #endif /* _LINUX_PERF_COUNTER_H */
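
With perf counters configured out, the new calls resolve to the inline stubs
above and the ACPI idle code needs no #ifdefs; a caller simply reduces to:

	u64 pctrl = hw_perf_disable_all();	/* stub: returns 0 */
	/* ... idle entry ... */
	hw_perf_restore_ctrl(pctrl);		/* stub: no-op */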