wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
}
-void hw_perf_restore_ctrl(u64 ctrl)
+void hw_perf_restore(u64 ctrl)
{
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, ctrl, 0);
}
-EXPORT_SYMBOL_GPL(hw_perf_restore_ctrl);
+EXPORT_SYMBOL_GPL(hw_perf_restore);
-u64 hw_perf_disable_all(void)
+u64 hw_perf_save_disable(void)
{
u64 ctrl;
/* save the current global enable mask, then disable all counters */
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
return ctrl;
}
-EXPORT_SYMBOL_GPL(hw_perf_disable_all);
+EXPORT_SYMBOL_GPL(hw_perf_save_disable);
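Callers are expected to bracket any section that must not race with counter NMIs (or that should not be perturbed by counting, such as idle entry) with this pair. A minimal usage sketch, mirroring the call sites updated further below:

	u64 perf_flags;

	perf_flags = hw_perf_save_disable();	/* save GLOBAL_CTRL and stop all counters */
	/* ... idle entry or counter-list update ... */
	hw_perf_restore(perf_flags);		/* re-enable exactly the previously active counters */
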
static inline void
__x86_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
- u64 pctrl;
+ u64 perf_flags;
/* Don't trace irqs off for idle */
stop_critical_timings();
- pctrl = hw_perf_disable_all();
+ perf_flags = hw_perf_save_disable();
if (cstate->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cstate);
gets asserted in time to freeze execution properly. */
unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
- hw_perf_restore_ctrl(pctrl);
+ hw_perf_restore(perf_flags);
start_critical_timings();
}
#endif /* !CONFIG_CPU_IDLE */
/* Don't trace irqs off for idle */
stop_critical_timings();
- pctrl = hw_perf_disable_all();
+ pctrl = hw_perf_save_disable();
if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cx);
gets asserted in time to freeze execution properly. */
unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
}
- hw_perf_restore_ctrl(pctrl);
+ hw_perf_restore(pctrl);
start_critical_timings();
}
* Hardware event to monitor via a performance monitoring counter:
*/
struct perf_counter_hw_event {
- u64 type;
+ s64 type;
u64 irq_period;
u32 record_type;
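The switch of ->type from u64 to s64 presumably makes sign checks well defined: in this generation of the API, negative type values select the kernel's software counters (e.g. PERF_COUNT_CPU_CLOCK = -1), while non-negative values index hardware events. A sketch of the kind of test this enables (is_software_event() is a hypothetical helper, not part of the patch):

	/* hypothetical helper: negative types would denote kernel software counters */
	static inline int is_software_event(const struct perf_counter_hw_event *hw_event)
	{
		return hw_event->type < 0;	/* assumes e.g. PERF_COUNT_CPU_CLOCK == -1 */
	}
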
extern void perf_counter_init_task(struct task_struct *task);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
-extern void hw_perf_restore_ctrl(u64 ctrl);
-extern u64 hw_perf_disable_all(void);
+extern u64 hw_perf_save_disable(void);
+extern void hw_perf_restore(u64 ctrl);
extern void atomic64_counter_set(struct perf_counter *counter, u64 val64);
extern u64 atomic64_counter_read(struct perf_counter *counter);
static inline void perf_counter_init_task(struct task_struct *task) { }
static inline void perf_counter_notify(struct pt_regs *regs) { }
static inline void perf_counter_print_debug(void) { }
-static inline void hw_perf_restore_ctrl(u64 ctrl) { }
-static inline u64 hw_perf_disable_all(void) { return 0; }
+static inline void hw_perf_restore(u64 ctrl) { }
+static inline u64 hw_perf_save_disable(void) { return 0; }
#endif
#endif /* _LINUX_PERF_COUNTER_H */
return ERR_PTR(-EINVAL);
}
-u64 __weak hw_perf_disable_all(void) { return 0; }
-void __weak hw_perf_restore_ctrl(u64 ctrl) { }
+u64 __weak hw_perf_save_disable(void) { return 0; }
+void __weak hw_perf_restore(u64 ctrl) { }
void __weak hw_perf_counter_setup(void) { }
#if BITS_PER_LONG == 64
* Protect the list operation against NMI by disabling the
* counters on a global level. NOP for non NMI based counters.
*/
- perf_flags = hw_perf_disable_all();
+ perf_flags = hw_perf_save_disable();
list_del_counter(counter, ctx);
- hw_perf_restore_ctrl(perf_flags);
+ hw_perf_restore(perf_flags);
if (!ctx->task) {
/*
* Protect the list operation against NMI by disabling the
* counters on a global level. NOP for non NMI based counters.
*/
- perf_flags = hw_perf_disable_all();
+ perf_flags = hw_perf_save_disable();
list_add_counter(counter, ctx);
- hw_perf_restore_ctrl(perf_flags);
+ hw_perf_restore(perf_flags);
ctx->nr_counters++;
/*
* Rotate the first entry last (works just fine for group counters too):
*/
- perf_flags = hw_perf_disable_all();
+ perf_flags = hw_perf_save_disable();
list_for_each_entry(counter, &ctx->counter_list, list_entry) {
list_del(&counter->list_entry);
list_add_tail(&counter->list_entry, &ctx->counter_list);
break;
}
- hw_perf_restore_ctrl(perf_flags);
+ hw_perf_restore(perf_flags);
spin_unlock(&ctx->lock);