perf, intel: Try alternative OFFCORE encodings
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Mon, 23 May 2011 09:08:15 +0000 (11:08 +0200)
committer Ingo Molnar <mingo@elte.hu>
Fri, 1 Jul 2011 09:06:37 +0000 (11:06 +0200)
Since the OFFCORE registers are fully symmetric, try the other one
when the specified one is already in use.
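
The symmetry in question: OFFCORE_RESPONSE_0 (event 0xB7, umask 0x01)
is programmed through MSR_OFFCORE_RSP_0, and OFFCORE_RESPONSE_1 (event
0xBB, umask 0x01) through MSR_OFFCORE_RSP_1; either pairing measures
the same thing. A minimal standalone sketch of that mapping (the MSR
addresses are assumptions from the SDM, not taken from this patch):

    /* The two interchangeable offcore response slots. */
    struct offcore_slot {
            unsigned int evtsel;    /* event select + unit mask */
            unsigned int msr;       /* matching request-mask MSR */
    };

    static const struct offcore_slot offcore_slots[2] = {
            { 0x01b7, 0x1a6 },      /* OFFCORE_RESPONSE_0, MSR_OFFCORE_RSP_0 */
            { 0x01bb, 0x1a7 },      /* OFFCORE_RESPONSE_1, MSR_OFFCORE_RSP_1 */
    };

    /* The alternative for slot idx is simply the other entry, 1 - idx. */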

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1306141897.18455.8.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 583f3113436d22835f31497a6dbc77131925a237..c53d433c3dde2d7f72f1c34cd2eba07a65460689 100644
@@ -327,9 +327,12 @@ struct x86_pmu {
         * Extra registers for events
         */
        struct extra_reg *extra_regs;
-       bool regs_no_ht_sharing;
+       unsigned int er_flags;
 };
 
+#define ERF_NO_HT_SHARING      1
+#define ERF_HAS_RSP_1          2
+
 static struct x86_pmu x86_pmu __read_mostly;
 
 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
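
Replacing the bool with a flag word is the enabling step here: one
field can now advertise any number of extra-register capabilities,
each tested with a single-bit mask. A sketch of the intended usage,
mirroring the hunks below:

    /* Capability test stays a cheap single-bit check: */
    if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
            return false;   /* no second OFFCORE_RSP register */

    /* Setup ORs in whichever capabilities the CPU model has: */
    x86_pmu.er_flags |= ERF_HAS_RSP_1;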
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index a674ae45a4720408dc9ecb9c0f966a202dd83bdc..5c448622468c6220c34853a74b3c05a44eb0c422 100644
@@ -1018,6 +1018,29 @@ intel_bts_constraints(struct perf_event *event)
        return NULL;
 }
 
+static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
+{
+       if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
+               return false;
+
+       if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
+               event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
+               event->hw.config |= 0x01bb;
+               event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
+               event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
+       } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
+               event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
+               event->hw.config |= 0x01b7;
+               event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
+               event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
+       }
+
+       if (event->hw.extra_reg.idx == orig_idx)
+               return false;
+
+       return true;
+}
+
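
The bare literals in the swap above are the two offcore event
encodings: 0x01b7 is event 0xB7, umask 0x01 (OFFCORE_RESPONSE_0,
filtered through MSR_OFFCORE_RSP_0), and 0x01bb is event 0xBB, umask
0x01 (OFFCORE_RESPONSE_1, through MSR_OFFCORE_RSP_1). A sketch
decomposing the encoding per the architectural PERFEVTSEL layout
(macro names are hypothetical):

    /* Low byte: event select; next byte: unit mask. */
    #define EVTSEL_EVENT(cfg)       ((cfg) & 0xff)
    #define EVTSEL_UMASK(cfg)       (((cfg) >> 8) & 0xff)

    /* EVTSEL_EVENT(0x01bb) == 0xbb, EVTSEL_UMASK(0x01bb) == 0x01 */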
 /*
  * manage allocation of shared extra msr for certain events
  *
@@ -1027,16 +1050,19 @@ intel_bts_constraints(struct perf_event *event)
  */
 static struct event_constraint *
 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
-                                  struct hw_perf_event_extra *reg)
+                                  struct perf_event *event)
 {
        struct event_constraint *c = &emptyconstraint;
+       struct hw_perf_event_extra *reg = &event->hw.extra_reg;
        struct er_account *era;
        unsigned long flags;
+       int orig_idx = reg->idx;
 
        /* already allocated shared msr */
        if (reg->alloc)
                return &unconstrained;
 
+again:
        era = &cpuc->shared_regs->regs[reg->idx];
        /*
         * we use spin_lock_irqsave() to avoid lockdep issues when
@@ -1065,6 +1091,9 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
                 * the regular event constraint table.
                 */
                c = &unconstrained;
+       } else if (intel_try_alt_er(event, orig_idx)) {
+               raw_spin_unlock(&era->lock);
+               goto again;
        }
        raw_spin_unlock_irqrestore(&era->lock, flags);
 
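Note that the retry branch drops the lock with plain raw_spin_unlock()
and loops back to the irqsave acquisition, so interrupts stay disabled
across the goto. A standalone toy of the same try-then-flip pattern
(hypothetical types, no locking; not the kernel code):

    #include <stdbool.h>

    struct slot { bool busy; unsigned long config; };

    /*
     * Try slot idx; if it is busy with a different config, flip to
     * the symmetric slot and retry; fail once back where we started.
     */
    static int try_get_slot(struct slot *s, int idx, unsigned long cfg)
    {
            int orig = idx;

            do {
                    if (!s[idx].busy || s[idx].config == cfg) {
                            s[idx].busy = true;
                            s[idx].config = cfg;
                            return idx;
                    }
                    idx = 1 - idx;  /* the symmetric alternative */
            } while (idx != orig);

            return -1;              /* both slots taken, hard conflict */
    }
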
@@ -1099,11 +1128,10 @@ intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
                              struct perf_event *event)
 {
        struct event_constraint *c = NULL;
-       struct hw_perf_event_extra *xreg;
 
-       xreg = &event->hw.extra_reg;
-       if (xreg->idx != EXTRA_REG_NONE)
-               c = __intel_shared_reg_get_constraints(cpuc, xreg);
+       if (event->hw.extra_reg.idx != EXTRA_REG_NONE)
+               c = __intel_shared_reg_get_constraints(cpuc, event);
+
        return c;
 }
 
@@ -1264,7 +1292,7 @@ static void intel_pmu_cpu_starting(int cpu)
         */
        intel_pmu_lbr_reset();
 
-       if (!cpuc->shared_regs || x86_pmu.regs_no_ht_sharing)
+       if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING))
                return;
 
        for_each_cpu(i, topology_thread_cpumask(cpu)) {
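
When ERF_NO_HT_SHARING is clear, this startup path walks the thread
siblings and adopts a sibling's already-allocated intel_shared_regs,
so OFFCORE_RSP accounting spans the physical core; Sandy Bridge sets
the flag and keeps the registers per-thread instead. The loop body is
cut off by the hunk; conceptually it does something like this (field
names are assumptions, not copied from this patch):

    struct intel_shared_regs *pc;

    pc = per_cpu(cpu_hw_events, i).shared_regs;
    if (pc && pc->core_id == core_id) {
            kfree(cpuc->shared_regs);       /* drop the private copy */
            cpuc->shared_regs = pc;         /* share the sibling's */
            break;
    }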
@@ -1489,6 +1517,7 @@ static __init int intel_pmu_init(void)
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
                x86_pmu.extra_regs = intel_westmere_extra_regs;
+               x86_pmu.er_flags |= ERF_HAS_RSP_1;
 
                /* UOPS_ISSUED.STALLED_CYCLES */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
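
The 0x180010e literal is a full PERFEVTSEL encoding of the
stall-cycles event: event 0x0e, umask 0x01, cmask 1 (bits 24-31) and
the invert bit (bit 23) set, i.e. cycles in which fewer than one uop
issued, matching the "c=1,i=1" note in the Sandy Bridge hunk below.
Extending the hypothetical macros from above:

    #define EVTSEL_INV(cfg)         (((cfg) >> 23) & 0x1)
    #define EVTSEL_CMASK(cfg)       (((cfg) >> 24) & 0xff)

    /* For 0x180010e: EVTSEL_EVENT == 0x0e, EVTSEL_UMASK == 0x01,
     * EVTSEL_INV == 1, EVTSEL_CMASK == 1 */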
@@ -1508,7 +1537,8 @@ static __init int intel_pmu_init(void)
                x86_pmu.pebs_constraints = intel_snb_pebs_events;
                x86_pmu.extra_regs = intel_snb_extra_regs;
                /* all extra regs are per-cpu when HT is on */
-               x86_pmu.regs_no_ht_sharing = true;
+               x86_pmu.er_flags |= ERF_HAS_RSP_1;
+               x86_pmu.er_flags |= ERF_NO_HT_SHARING;
 
                /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
                intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
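
With both flags set, Sandy Bridge gains the alternative RSP_1 encoding
while opting out of cross-thread sharing. The two assignments could
equally be a single OR; purely a style choice:

    x86_pmu.er_flags |= ERF_HAS_RSP_1 | ERF_NO_HT_SHARING;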