/*
 * ARMv6 Performance counter handling code.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *	- change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *	  effectively stops the counter from counting.
 *	- disable the counter's interrupt generation (each counter has its
 *	  own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *	- enable the counter's interrupt generation.
 *	- set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS	= 0x0,
	ARMV6_PERFCTR_IBUF_STALL	= 0x1,
	ARMV6_PERFCTR_DDEP_STALL	= 0x2,
	ARMV6_PERFCTR_ITLB_MISS		= 0x3,
	ARMV6_PERFCTR_DTLB_MISS		= 0x4,
	ARMV6_PERFCTR_BR_EXEC		= 0x5,
	ARMV6_PERFCTR_BR_MISPREDICT	= 0x6,
	ARMV6_PERFCTR_INSTR_EXEC	= 0x7,
	ARMV6_PERFCTR_DCACHE_HIT	= 0x9,
	ARMV6_PERFCTR_DCACHE_ACCESS	= 0xA,
	ARMV6_PERFCTR_DCACHE_MISS	= 0xB,
	ARMV6_PERFCTR_DCACHE_WBACK	= 0xC,
	ARMV6_PERFCTR_SW_PC_CHANGE	= 0xD,
	ARMV6_PERFCTR_MAIN_TLB_MISS	= 0xF,
	ARMV6_PERFCTR_EXPL_D_ACCESS	= 0x10,
	ARMV6_PERFCTR_LSU_FULL_STALL	= 0x11,
	ARMV6_PERFCTR_WBUF_DRAINED	= 0x12,
	ARMV6_PERFCTR_CPU_CYCLES	= 0xFF,
	ARMV6_PERFCTR_NOP		= 0x20,
};

enum armv6_counters {
	ARMV6_CYCLE_COUNTER = 0,
	ARMV6_COUNTER0,
	ARMV6_COUNTER1,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6_PERFCTR_IBUF_STALL,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * The ARM performance counters can count micro DTLB misses,
		 * micro ITLB misses and main TLB misses. There isn't an event
		 * for TLB misses, so use the micro misses here and if users
		 * want the main TLB misses they can use a raw counter (see
		 * the example after this map).
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
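
/*
 * Worked example (illustrative only, not used by the driver): the main TLB
 * miss event has no generic mapping above, but userspace can still request
 * it as a raw event, e.g. with the perf tool's raw event syntax:
 *
 *	perf stat -e r0f -- <workload>
 *
 * where 0x0F is ARMV6_PERFCTR_MAIN_TLB_MISS from the enum above.
 */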

enum armv6mpcore_perf_types {
	ARMV6MPCORE_PERFCTR_ICACHE_MISS		= 0x0,
	ARMV6MPCORE_PERFCTR_IBUF_STALL		= 0x1,
	ARMV6MPCORE_PERFCTR_DDEP_STALL		= 0x2,
	ARMV6MPCORE_PERFCTR_ITLB_MISS		= 0x3,
	ARMV6MPCORE_PERFCTR_DTLB_MISS		= 0x4,
	ARMV6MPCORE_PERFCTR_BR_EXEC		= 0x5,
	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT	= 0x6,
	ARMV6MPCORE_PERFCTR_BR_MISPREDICT	= 0x7,
	ARMV6MPCORE_PERFCTR_INSTR_EXEC		= 0x8,
	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS	= 0xA,
	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS	= 0xB,
	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS	= 0xC,
	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS	= 0xD,
	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION	= 0xE,
	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE	= 0xF,
	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS	= 0x10,
	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS	= 0x11,
	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL	= 0x12,
	ARMV6MPCORE_PERFCTR_WBUF_DRAINED	= 0x13,
	ARMV6MPCORE_PERFCTR_CPU_CYCLES		= 0xFF,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6MPCORE_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6MPCORE_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6MPCORE_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6MPCORE_PERFCTR_IBUF_STALL,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  =
				ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
			[C(RESULT_MISS)]    =
				ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  =
				ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
			[C(RESULT_MISS)]    =
				ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * The ARM performance counters can count micro DTLB misses,
		 * micro ITLB misses and main TLB misses. There isn't an event
		 * for TLB misses, so use the micro misses here and if users
		 * want the main TLB misses they can use a raw counter (see
		 * the example after this map).
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
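
/*
 * As with the UP map above, the MPCore main TLB miss event can be requested
 * as a raw event from userspace (illustrative only):
 *
 *	perf stat -e r10 -- <workload>
 *
 * where 0x10 is ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS.
 */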

static inline unsigned long
armv6_pmcr_read(void)
{
	u32 val;

	asm volatile("mrc   p15, 0, %0, c15, c12, 0" : "=r"(val));
	return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
	asm volatile("mcr   p15, 0, %0, c15, c12, 0" : : "r"(val));
}

#define ARMV6_PMCR_ENABLE		(1 << 0)
#define ARMV6_PMCR_CTR01_RESET		(1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET		(1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV		(1 << 3)
#define ARMV6_PMCR_COUNT0_IEN		(1 << 4)
#define ARMV6_PMCR_COUNT1_IEN		(1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN		(1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW	(1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW	(1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW	(1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT	20
#define ARMV6_PMCR_EVT_COUNT0_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT	12
#define ARMV6_PMCR_EVT_COUNT1_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
	(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
	 ARMV6_PMCR_CCOUNT_OVERFLOW)
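
/*
 * A minimal sketch (hypothetical helper, not part of this driver; the caller
 * is assumed to hold the PMU lock) of the enable procedure described at the
 * top of this file: program counter 0 with event 'evt' and enable its
 * interrupt. armv6pmu_enable_event() below is the real implementation.
 */
static inline void __maybe_unused
armv6_pmcr_program_counter0(unsigned long evt)
{
	unsigned long val = armv6_pmcr_read();

	/* Mask out the old event, then set the new event and IRQ enable. */
	val &= ~ARMV6_PMCR_EVT_COUNT0_MASK;
	val |= (evt << ARMV6_PMCR_EVT_COUNT0_SHIFT) | ARMV6_PMCR_COUNT0_IEN;
	armv6_pmcr_write(val);
}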

static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
	return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
				  enum armv6_counters counter)
{
	int ret = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
	else if (ARMV6_COUNTER0 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
	else if (ARMV6_COUNTER1 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return ret;
}

static inline u32
armv6pmu_read_counter(int counter)
{
	unsigned long value = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 2" : "=r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 3" : "=r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return value;
}

static inline void
armv6pmu_write_counter(int counter,
		       u32 value)
{
	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 2" : : "r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 3" : : "r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}

static void
armv6pmu_enable_event(struct hw_perf_event *hwc,
		      int idx)
{
	unsigned long val, mask, evt, flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= 0;
		evt	= ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_EVT_COUNT0_MASK;
		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
			  ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_EVT_COUNT1_MASK;
		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
			  ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int counter_is_active(unsigned long pmcr, int idx)
{
	unsigned long mask = 0;

	if (idx == ARMV6_CYCLE_COUNTER)
		mask = ARMV6_PMCR_CCOUNT_IEN;
	else if (idx == ARMV6_COUNTER0)
		mask = ARMV6_PMCR_COUNT0_IEN;
	else if (idx == ARMV6_COUNTER1)
		mask = ARMV6_PMCR_COUNT1_IEN;

	if (mask)
		return pmcr & mask;

	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
	return 0;
}

static irqreturn_t
armv6pmu_handle_irq(int irq_num,
		    void *dev)
{
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!armv6_pmcr_has_overflowed(pmcr))
		return IRQ_NONE;

	regs = get_irq_regs();

	/*
	 * The interrupts are cleared by writing the overflow flags back to
	 * the control register. All of the other bits don't have any effect
	 * if they are rewritten, so write the whole value back.
	 */
	armv6_pmcr_write(pmcr);

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!counter_is_active(pmcr, idx))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void
armv6pmu_start(void)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void
armv6pmu_stop(void)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
		       struct hw_perf_event *event)
{
	/* Always place a cycle counter into the cycle counter. */
	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV6_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * counter0 and counter1.
		 */
		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
			return ARMV6_COUNTER1;

		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
			return ARMV6_COUNTER0;

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
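
/*
 * Illustrative sketch (hypothetical helper, not part of this driver): the
 * inverse of the allocation above. The core ARM perf code clears used_mask
 * itself when an event is removed, so nothing in this file calls this.
 */
static inline void __maybe_unused
armv6pmu_put_event_idx(struct pmu_hw_events *cpuc, int idx)
{
	clear_bit(idx, cpuc->used_mask);
}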

static void
armv6pmu_disable_event(struct hw_perf_event *hwc,
		       int idx)
{
	unsigned long val, mask, evt, flags;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
		evt	= 0;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the number
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
			      int idx)
{
	unsigned long val, mask, flags, evt = 0;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv6_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &armv6_perf_map,
				&armv6_perf_cache_map, 0xFF);
}
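
/*
 * Worked example (assuming the generic map_cpu_event() helper): a request
 * for 'cycles' arrives as PERF_COUNT_HW_CPU_CYCLES, armv6_perf_map[] turns
 * it into ARMV6_PERFCTR_CPU_CYCLES (0xFF), and armv6pmu_get_event_idx()
 * above then pins it to the dedicated cycle counter.
 */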

static struct arm_pmu armv6pmu = {
	.id			= ARM_PERF_PMU_ID_V6,
	.name			= "v6",
	.handle_irq		= armv6pmu_handle_irq,
	.enable			= armv6pmu_enable_event,
	.disable		= armv6pmu_disable_event,
	.read_counter		= armv6pmu_read_counter,
	.write_counter		= armv6pmu_write_counter,
	.get_event_idx		= armv6pmu_get_event_idx,
	.start			= armv6pmu_start,
	.stop			= armv6pmu_stop,
	.map_event		= armv6_map_event,
	.num_events		= 3,
	.max_period		= (1LLU << 32) - 1,
};

static struct arm_pmu *__init armv6pmu_init(void)
{
	return &armv6pmu;
}

/*
 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */

static int armv6mpcore_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &armv6mpcore_perf_map,
				&armv6mpcore_perf_cache_map, 0xFF);
}

static struct arm_pmu armv6mpcore_pmu = {
	.id			= ARM_PERF_PMU_ID_V6MP,
	.name			= "v6mpcore",
	.handle_irq		= armv6pmu_handle_irq,
	.enable			= armv6pmu_enable_event,
	.disable		= armv6mpcore_pmu_disable_event,
	.read_counter		= armv6pmu_read_counter,
	.write_counter		= armv6pmu_write_counter,
	.get_event_idx		= armv6pmu_get_event_idx,
	.start			= armv6pmu_start,
	.stop			= armv6pmu_stop,
	.map_event		= armv6mpcore_map_event,
	.num_events		= 3,
	.max_period		= (1LLU << 32) - 1,
};

static struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
	return &armv6mpcore_pmu;
}
#else
static struct arm_pmu *__init armv6pmu_init(void)
{
	return NULL;
}

static struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
	return NULL;
}
#endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */