/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

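/*
 * Map a PERF_TYPE_HW_CACHE config value onto a PMU-specific event number.
 * The generic encoding packs the cache type into bits 0-7, the operation
 * into bits 8-15 and the result (access/miss) into bits 16-23; entries the
 * per-PMU table marks as CACHE_OP_UNSUPPORTED are rejected with -ENOENT.
 */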
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

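/*
 * Dispatch on the perf event type to find the PMU-specific event number.
 * Types at or above PERF_TYPE_MAX (typically dynamically allocated PMU
 * types) are treated like raw events and simply masked.
 */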
int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	if (event->attr.type >= PERF_TYPE_MAX)
		return armpmu_map_raw_event(raw_event_mask, config);

	return -ENOENT;
}

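/*
 * Program the counter so that it overflows after 'left' more events by
 * writing the two's complement of the remaining period; the overflow
 * interrupt then drives sampling. Returns non-zero if the previous period
 * had already elapsed and a new one was started.
 */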
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	/* The period may have been changed by PERF_EVENT_IOC_PERIOD */
	if (unlikely(period != hwc->last_period))
		left = period - (hwc->last_period - left);

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

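/*
 * Fold the delta since the last read into the event count. The cmpxchg
 * retry loop keeps the update consistent against a concurrent update of
 * prev_count (e.g. from the overflow interrupt) without taking a lock.
 */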
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
		return;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
		return;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
		return;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}

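/*
 * Claim a hardware counter for the event, install it in the per-CPU event
 * array and, if requested, start it counting. The PMU is disabled around
 * the allocation so the hardware state stays consistent.
 */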
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
		return 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return armpmu->get_event_idx(hw_events, event) >= 0;
}

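/*
 * Check that every event in the group can be scheduled onto the PMU at the
 * same time by replaying counter allocation against a fake, empty PMU, so
 * that groups which can never be co-scheduled are rejected at init time.
 */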
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}

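/*
 * PMU interrupt entry point. Platform data may provide a wrapper handler
 * (for example where the interrupt needs board-specific handling before
 * the PMU driver's handler runs); otherwise call the driver's handler
 * directly.
 */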
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
	struct platform_device *plat_device = armpmu->plat_device;
	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);

	if (plat && plat->handle_irq)
		return plat->handle_irq(irq, dev, armpmu->handle_irq);
	else
		return armpmu->handle_irq(irq, dev);
}

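/*
 * Reserving the hardware takes a runtime PM reference on the PMU device
 * and requests its interrupt(s); releasing it undoes both. The reservation
 * is refcounted through active_events, so it only happens when the first
 * event is created and is dropped when the last one is destroyed.
 */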
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!hwc->sample_period) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

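/*
 * pmu::event_init callback: reject events this PMU cannot service, reserve
 * the hardware for the first active event and perform the PMU-agnostic
 * part of the event setup.
 */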
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	if (event->cpu != -1 &&
		!cpumask_test_cpu(event->cpu, &armpmu->valid_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

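/* Only touch the hardware when at least one counter is actually in use. */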
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop(armpmu);
}

#ifdef CONFIG_PM_RUNTIME
static int armpmu_runtime_resume(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_resume)
		return plat->runtime_resume(dev);

	return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_suspend)
		return plat->runtime_suspend(dev);

	return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}

int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}

/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}

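/*
 * Walk the user-space call chain by following the saved frame records,
 * stopping at the maximum sample depth or at a NULL or misaligned frame
 * pointer.
 */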
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}

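/*
 * When a guest is being profiled, report the guest's instruction pointer
 * and privilege level via the registered guest callbacks rather than the
 * host values taken from the exception registers.
 */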
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else if (user_mode(regs))
		misc |= PERF_RECORD_MISC_USER;
	else
		misc |= PERF_RECORD_MISC_KERNEL;

	return misc;
}