static void
intel_start_scheduling(struct cpu_hw_events *cpuc)
{
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
- struct intel_excl_states *xl, *xlo;
+ struct intel_excl_states *xl;
int tid = cpuc->excl_thread_id;
- int o_tid = 1 - tid; /* sibling thread */
/*
* nothing needed if in group validation mode
*/
if (!excl_cntrs)
return;
- xlo = &excl_cntrs->states[o_tid];
xl = &excl_cntrs->states[tid];
xl->sched_started = true;
raw_spin_lock(&excl_cntrs->lock);
/*
- * save initial state of sibling thread
+ * Save a copy of our state to work on.
*/
- memcpy(xlo->init_state, xlo->state, sizeof(xlo->init_state));
+ memcpy(xl->init_state, xl->state, sizeof(xl->init_state));
}
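For readers without the surrounding file, the shared per-core state these hunks manipulate can be modelled roughly as below. This is a simplified sketch, not the kernel's exact definitions: the field names mirror identifiers visible in the diff (states[], init_state, state, sched_started, has_exclusive), while the array bound, the numeric enum values and the omitted spinlock/refcount fields are assumptions.

#include <stdbool.h>

#define X86_PMC_IDX_MAX 64	/* assumed bound on hardware counter indices */

enum intel_excl_state_type {
	INTEL_EXCL_UNUSED = 0,	/* counter is unused */
	INTEL_EXCL_SHARED,	/* counter usable by both HT threads */
	INTEL_EXCL_EXCLUSIVE,	/* counter usable by one HT thread only */
};

/* Per-HT-thread view of the counters. */
struct intel_excl_states {
	enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];	/* working copy during a scheduling pass */
	enum intel_excl_state_type state[X86_PMC_IDX_MAX];		/* state published for the sibling to read */
	bool sched_started;	/* true between start_scheduling() and stop_scheduling() */
};

/* Per physical core, shared by the two HT siblings. */
struct intel_excl_cntrs {
	struct intel_excl_states states[2];	/* indexed by cpuc->excl_thread_id */
	int has_exclusive[2];			/* set while a thread owns exclusive events */
	/* the real structure also carries the raw spinlock taken above and a refcount */
};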
static void
intel_stop_scheduling(struct cpu_hw_events *cpuc)
{
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
- struct intel_excl_states *xl, *xlo;
+ struct intel_excl_states *xl;
int tid = cpuc->excl_thread_id;
- int o_tid = 1 - tid; /* sibling thread */
/*
* nothing needed if in group validation mode
*/
if (!excl_cntrs)
return;
- xlo = &excl_cntrs->states[o_tid];
xl = &excl_cntrs->states[tid];
/*
- * make new sibling thread state visible
+ * Commit the working state.
*/
- memcpy(xlo->state, xlo->init_state, sizeof(xlo->state));
+ memcpy(xl->state, xl->init_state, sizeof(xl->state));
xl->sched_started = false;
/*
* release shared state lock (acquired in intel_start_scheduling())
*/
raw_spin_unlock(&excl_cntrs->lock);
}
static struct event_constraint *
intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
			   int idx, struct event_constraint *c)
{
struct event_constraint *cx;
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
- struct intel_excl_states *xl, *xlo;
- int is_excl, i;
+ struct intel_excl_states *xlo;
int tid = cpuc->excl_thread_id;
- int o_tid = 1 - tid; /* alternate */
+ int is_excl, i;
/*
* validating a group does not require
* enforcing cross-thread exclusion
*/
if (!excl_cntrs)
return c;
- /*
- * event requires exclusive counter access
- * across HT threads
- */
- is_excl = c->flags & PERF_X86_EVENT_EXCL;
- if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
- event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
- if (!cpuc->n_excl++)
- WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
- }
-
- /*
- * xl = state of current HT
- * xlo = state of sibling HT
- */
- xl = &excl_cntrs->states[tid];
- xlo = &excl_cntrs->states[o_tid];
cx = c;
/*
* From here on, the constraint is dynamic.
* Either it was just allocated above, or it
* was allocated during an earlier invocation
* of this function
*/
+ /*
+ * state of sibling HT
+ */
+ xlo = &excl_cntrs->states[tid ^ 1];
+
+ /*
+ * event requires exclusive counter access
+ * across HT threads
+ */
+ is_excl = c->flags & PERF_X86_EVENT_EXCL;
+ if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
+ event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
+ if (!cpuc->n_excl++)
+ WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
+ }
+
/*
* Modify static constraint with current dynamic
* state of thread
*/
for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
/*
* if exclusive event is measured by the sibling,
* our corresponding counter cannot be used
* regardless of our event
*/
- if (xl->state[i] == INTEL_EXCL_EXCLUSIVE)
+ if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
__clear_bit(i, cx->idxmsk);
/*
* if measuring an exclusive event, sibling
* measuring non-exclusive, then counter cannot
* be used
*/
- if (is_excl && xl->state[i] == INTEL_EXCL_SHARED)
+ if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
__clear_bit(i, cx->idxmsk);
}
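The replacement of o_tid = 1 - tid with tid ^ 1 is purely cosmetic: for a two-thread SMT pair both expressions map 0 to 1 and 1 to 0. What changes in substance is that the loop above now reads the sibling's published state (xlo) rather than the local copy, and strips any counter the sibling makes unusable. The userspace sketch below models that filtering with made-up names and values; it is an illustration of the rule, not kernel code.

#include <stdio.h>

enum excl_state { UNUSED, SHARED, EXCLUSIVE };

/*
 * Clear every counter bit the sibling thread makes unusable:
 * either the sibling holds the counter exclusively, or we want
 * exclusive access while the sibling uses it in shared mode.
 */
static unsigned long filter_idxmsk(unsigned long idxmsk,
				   const enum excl_state *sibling,
				   int nr_counters, int is_excl)
{
	for (int i = 0; i < nr_counters; i++) {
		if (!(idxmsk & (1UL << i)))
			continue;
		if (sibling[i] == EXCLUSIVE ||
		    (is_excl && sibling[i] == SHARED))
			idxmsk &= ~(1UL << i);
	}
	return idxmsk;
}

int main(void)
{
	enum excl_state sibling[4] = { UNUSED, EXCLUSIVE, SHARED, UNUSED };
	int tid = 0;

	printf("sibling slot: %d\n", tid ^ 1);		/* prints 1 */
	printf("usable mask: %#lx\n",
	       filter_idxmsk(0xf, sibling, 4, 1));	/* prints 0x9 */
	return 0;
}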
static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
				       struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
- struct intel_excl_states *xlo, *xl;
- unsigned long flags = 0; /* keep compiler happy */
int tid = cpuc->excl_thread_id;
- int o_tid = 1 - tid;
+ struct intel_excl_states *xl;
+ unsigned long flags = 0; /* keep compiler happy */
/*
* nothing needed if in group validation mode
*/
if (cpuc->is_fake)
return;
xl = &excl_cntrs->states[tid];
- xlo = &excl_cntrs->states[o_tid];
if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
if (!--cpuc->n_excl)
WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
}
/*
* if event was actually assigned, then mark the
* counter state as unused now
*/
if (hwc->idx >= 0)
- xlo->state[hwc->idx] = INTEL_EXCL_UNUSED;
+ xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
if (!xl->sched_started)
raw_spin_unlock_irqrestore(&excl_cntrs->lock, flags);
}
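The accounting being unwound here pairs with the accounting added to intel_get_excl_constraints() above: the first exclusive event a thread schedules raises has_exclusive[tid], and releasing the last one clears it again. A toy model of that reference-count-plus-flag pattern, with illustrative names only:

#include <stdio.h>

struct toy_pmu {
	int n_excl;		/* number of live exclusive events on this thread */
	int has_exclusive[2];	/* per-thread flag the sibling can test cheaply */
};

static void account_excl(struct toy_pmu *p, int tid)
{
	if (!p->n_excl++)	/* first exclusive event: raise the flag */
		p->has_exclusive[tid] = 1;
}

static void unaccount_excl(struct toy_pmu *p, int tid)
{
	if (!--p->n_excl)	/* last exclusive event gone: clear it */
		p->has_exclusive[tid] = 0;
}

int main(void)
{
	struct toy_pmu p = { 0 };

	account_excl(&p, 0);
	account_excl(&p, 0);
	unaccount_excl(&p, 0);
	printf("flag after one release: %d\n", p.has_exclusive[0]);	/* 1 */
	unaccount_excl(&p, 0);
	printf("flag after both released: %d\n", p.has_exclusive[0]);	/* 0 */
	return 0;
}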
static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
struct event_constraint *c = cpuc->event_constraint[idx];
- struct intel_excl_states *xlo, *xl;
+ struct intel_excl_states *xl;
int tid = cpuc->excl_thread_id;
- int o_tid = 1 - tid;
- int is_excl;
if (cpuc->is_fake || !c)
return;
- is_excl = c->flags & PERF_X86_EVENT_EXCL;
-
if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
return;
if (!excl_cntrs)
return;
xl = &excl_cntrs->states[tid];
- xlo = &excl_cntrs->states[o_tid];
WARN_ON_ONCE(!raw_spin_is_locked(&excl_cntrs->lock));
if (cntr >= 0) {
- if (is_excl)
- xlo->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
+ if (c->flags & PERF_X86_EVENT_EXCL)
+ xl->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
else
- xlo->init_state[cntr] = INTEL_EXCL_SHARED;
+ xl->init_state[cntr] = INTEL_EXCL_SHARED;
}
}
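Taken together, a scheduling pass now only ever writes the current thread's own slot: start_scheduling() snapshots state into init_state, commit_scheduling() records counter assignments in init_state, and stop_scheduling() publishes init_state back to state for the sibling to read. The standalone sketch below models that flow with simplified types and without the excl_cntrs->lock that the real code holds across the whole pass; it is a model of the pattern, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_COUNTERS 4

enum excl_state { UNUSED, SHARED, EXCLUSIVE };

struct thread_state {
	enum excl_state init_state[NR_COUNTERS];	/* working copy during scheduling */
	enum excl_state state[NR_COUNTERS];		/* copy visible to the sibling */
	bool sched_started;
};

/* Mirrors intel_start_scheduling(): snapshot our published state. */
static void start_scheduling(struct thread_state *xl)
{
	xl->sched_started = true;
	memcpy(xl->init_state, xl->state, sizeof(xl->init_state));
}

/* Mirrors intel_commit_scheduling(): record one counter assignment. */
static void commit_scheduling(struct thread_state *xl, int cntr, bool is_excl)
{
	xl->init_state[cntr] = is_excl ? EXCLUSIVE : SHARED;
}

/* Mirrors intel_stop_scheduling(): publish the working copy. */
static void stop_scheduling(struct thread_state *xl)
{
	memcpy(xl->state, xl->init_state, sizeof(xl->state));
	xl->sched_started = false;
}

int main(void)
{
	struct thread_state xl = { 0 };

	start_scheduling(&xl);
	commit_scheduling(&xl, 2, true);
	stop_scheduling(&xl);
	printf("counter 2 state: %d\n", xl.state[2]);	/* prints 2 (EXCLUSIVE) */
	return 0;
}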