/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance
	 * counter. MIPS CPUs vary in their performance counters, so the
	 * back-ends use this copy differently and some may not use it at
	 * all.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers; CNTR_ODD, that they are odd numbers.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
	#define CNTR_ALL	0xffffffff
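	/*
	 * Bit i of cntr_mask set means hardware counter i can count this
	 * event: CNTR_EVEN selects counters 0, 2, 4, ..., CNTR_ODD selects
	 * counters 1, 3, 5, ..., and CNTR_ALL selects any counter.
	 * mipsxx_pmu_alloc_counter() below walks this mask with test_bit().
	 */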
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static struct mips_pmu mipspmu;

#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1      <<  0)
#define M_PERFCTL_KERNEL		(1      <<  1)
#define M_PERFCTL_SUPERVISOR		(1      <<  2)
#define M_PERFCTL_USER			(1      <<  3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1      <<  4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff)  << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe)    << 16)
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#define    M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define    M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define    M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid)   << 22)
#define M_PERFCTL_WIDE			(1      << 30)
#define M_PERFCTL_MORE			(1      << 31)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
					M_PERFCTL_KERNEL |		\
					M_PERFCTL_USER |		\
					M_PERFCTL_SUPERVISOR |		\
					M_PERFCTL_INTERRUPT_ENABLE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0

#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;

static DEFINE_RWLOCK(pmuint_rwlock);

/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#if defined(CONFIG_HW_PERF_EVENTS)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : smp_processor_id())
#else
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_data[smp_processor_id()].vpe_id)
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}
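
/*
 * For example, on a dual-VPE 34K with 4 counters in total and no
 * per-TC counters, counters_total_to_per_cpu(4) == 2: each VPE
 * (logical CPU) gets two of the four counters, selected via vpe_id()
 * in mipsxx_pmu_swizzle_perf_idx() below.
 */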

#else /* !CONFIG_MIPS_MT_SMP */
#define vpe_id()	0

#endif /* CONFIG_MIPS_MT_SMP */

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);

static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned, we must cast to truncate
		 * the value to 32 bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask; the counter range
	 * has already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can be counted
		 * only by even _or_ odd counters. This introduces an issue:
		 * if an event of the former kind occupies the counter that
		 * an event of the latter kind needs, the "counter
		 * allocation" for the latter event fails. Dynamically
		 * swapping the two would satisfy both, but we leave this
		 * issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure interrupt enabled. */
		M_PERFCTL_INTERRUPT_ENABLE;
	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}

static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}
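
/*
 * For example, with 32-bit counters (mipspmu.overflow == 1ULL << 31)
 * and left == 1000, the counter is preloaded with 2^31 - 1000, so bit
 * 31 becomes set after exactly 1000 events; the interrupt handler
 * tests that bit (counter & mipspmu.overflow) to spot the expired
 * period.
 */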

static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}
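
/*
 * The cmpxchg loop guards against a racing update of the same event
 * (for instance the overflow interrupt running while a read is in
 * progress): prev_count only advances to new_raw_count if nobody
 * raced us, so the unsigned subtraction credits each counted tick
 * exactly once.
 */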

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* To look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_MT_SMP
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers cannot
 * be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross-CPU calls. on_each_cpu() can help us, but we
 * cannot make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If any counter interrupt is raised while
 * we own the write lock, that CPU simply pauses its local counters and
 * spins in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
	write_lock(&pmuint_rwlock);
#endif
}
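
/*
 * The resulting protocol, in short:
 *
 *   mipspmu_disable():              pause local counters, write_lock()
 *   mipsxx_pmu_handle_shared_irq(): pause local counters, read_lock()
 *   mipspmu_enable():               write_unlock(), resume local counters
 *
 * so a counter interrupt arriving on another CPU during a global
 * disable spins on the read lock with its counters already paused.
 */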

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
				  IRQF_PERCPU | IRQF_NOBALANCING,
				  "mips_perf_pmu", NULL);
		if (err)
			pr_warning("Unable to request IRQ%d for MIPS performance counters!\n",
				   mipspmu.irq);
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warning("The platform hasn't properly defined its interrupt controller.\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, NULL);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters, so they
 * have specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= nr_cpumask_bits ||
	    (event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}
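
/*
 * For example, a pev with cntr_mask == CNTR_ODD and event_id == 0x02
 * encodes (range byte aside) to 0xaaaa02: bits 8-23 carry the counter
 * mask that mipsxx_pmu_alloc_counter() recovers with
 * (hwc->event_base >> 8) & 0xffff, and bits 0-7 carry the event number
 * later programmed via M_PERFCTL_EVENT().
 */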

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);
	return &(*mipspmu.general_event_map)[idx];
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -EINVAL;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -EINVAL;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}

static int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}
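
/*
 * __n_counters() follows the hardware's own discovery chain: Config1.PC
 * says whether any performance counters are implemented at all, and the
 * MORE bit in each control register says whether another counter/control
 * pair follows. A core with two counters, for example, has MORE set in
 * perfctrl0 but clear in perfctrl1.
 */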

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
		/* Deliberate fall-through: clear all lower counters too. */
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
	}
}

/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 74K core has different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

/* 74K core has completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; the same event is used
	 * for read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
	},
},
};

#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
}
#endif

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow max flexibility on how each individual counter shared
	 * by the single CPU operates (the mode exclusion and the range).
	 */
	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= M_PERFCTL_USER;
	if (!attr->exclude_kernel) {
		hwc->config_base |= M_PERFCTL_KERNEL;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= M_PERFCTL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= M_PERFCTL_SUPERVISOR;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period	= mipspmu.max_period;
		hwc->last_period	= hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}
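
/*
 * The exclude bits map straight onto the control register: for
 * example, a user-only event (perf's "cycles:u", i.e. exclude_kernel
 * and exclude_hv set) ends up with only M_PERFCTL_USER in config_base,
 * while the default of counting everywhere also sets M_PERFCTL_KERNEL,
 * M_PERFCTL_EXL and M_PERFCTL_SUPERVISOR.
 */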

static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipsxx_pmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

#ifdef CONFIG_MIPS_MT_SMP
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
	return handled;
}
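
/*
 * HANDLE_COUNTER(n) expands to "case n + 1:" with deliberate
 * fall-through, so e.g. counters == 2 checks counters 1 and 0. The
 * overflow test works because mipspmu_event_set_period() preloaded
 * overflow - left into the counter: once the period expires, the
 * overflow bit (bit 31 or 63) is set in the raw count.
 */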

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/*
 * Users can use raw events 0-255, where 0-127 map to the events of the
 * even counters and 128-255 to the odd counters; bit 7 indicates the
 * parity. So, for example, to count Event Num 15 (per the user manual)
 * on an odd counter, add 128 to 15 and use 143 (0x8f) as the event
 * config.
 */
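/*
 * Decoded by the function below: config 0x8f gives raw_id == 0x8f and
 * base_id == 0x0f; since raw_id > 127, cntr_mask becomes CNTR_ODD
 * (unless the event is one that both counter parities can count).
 */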
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.event_id = base_id;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	}

	return &raw_event;
}

static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		if (base_id > 0x42)
			return ERR_PTR(-EOPNOTSUPP);
	} else {
		if (base_id > 0x3a)
			return ERR_PTR(-EOPNOTSUPP);
	}

	switch (base_id) {
	case 0x00:
	case 0x15:
	case 0x24:
	case 0x2e:
	case 0x34:
	case 0x3b:
	case 0x3c:
	case 0x3d:
		/* Reserved event numbers. */
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}

static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_MT_SMP
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		/*
		 * Using platform specific interrupt controller defines.
		 */
		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
	} else {
#endif
		if ((cp0_perfcount_irq >= 0) &&
				(cp0_compare_irq != cp0_perfcount_irq))
			irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
		else
			irq = -1;
#ifdef MSC01E_INT_BASE
	}
#endif

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxx74Kcore_event_map;
		mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON1:
		mipspmu.name = "mips/loongson1";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	default:
		pr_cont("Either hardware does not support performance counters, or not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU, irq %d%s\n",
		mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);