/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>
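
/*
 * Note on the generic cache event encoding decoded below: the perf ABI
 * (include/uapi/linux/perf_event.h) packs a PERF_TYPE_HW_CACHE event into
 * attr->config as
 *
 *	config = (cache id) | (op id << 8) | (result id << 16)
 *
 * so, for example, an L1 data-cache read miss is
 * PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16), which is looked up as
 * cache_map[L1D][OP_READ][RESULT_MISS].
 */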
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	if (event->attr.type >= PERF_TYPE_MAX)
		return armpmu_map_raw_event(raw_event_mask, config);

	return -ENOENT;
}
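
/*
 * Counter programming sketch (assuming a 32-bit counter, i.e.
 * max_period == 0xffffffff): to get an interrupt after 'left' events the
 * counter is primed with the two's complement -left, so it wraps (and the
 * overflow IRQ fires) after exactly 'left' increments. For instance, with
 * left == 1000 the hardware counter starts at 0xfffffc18 (2^32 - 1000).
 */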
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	/* The period may have been changed by PERF_EVENT_IOC_PERIOD */
	if (unlikely(period != hwc->last_period))
		left = period - (hwc->last_period - left);

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
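
/*
 * Illustration of the delta calculation below: reads are lockless, so the
 * prev_count update uses a cmpxchg retry loop, and the subtraction is
 * masked with max_period to cope with counter wrap-around. E.g. with a
 * 32-bit counter, prev == 0xfffffff0 and new == 0x00000010 gives
 * (new - prev) & 0xffffffff == 0x20, i.e. 32 events.
 */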
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
		return;

	/*
	 * The ARM PMU always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
		return;

	/*
	 * The ARM PMU always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
		return;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* An event following a process won't be stopped earlier */
	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
		return 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
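
/*
 * Group validation below works by simulating counter allocation: a fake
 * pmu_hw_events with an empty used_mask is passed to get_event_idx() for
 * the group leader, each hardware sibling and finally the new event, so a
 * group that could never be scheduled onto this PMU's counters at the same
 * time is rejected up front at event_init time.
 */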
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (is_software_event(event))
		return 1;

	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}
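
/*
 * Note: platform data may wrap the PMU interrupt. When
 * arm_pmu_platdata->handle_irq is provided it is called first and is passed
 * the core armpmu->handle_irq routine as a callback, which lets a platform
 * run board-specific code around the PMU IRQ before chaining into the
 * generic handler.
 */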
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
	struct platform_device *plat_device = armpmu->plat_device;
	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);

	if (plat && plat->handle_irq)
		return plat->handle_irq(irq, dev, armpmu->handle_irq);
	else
		return armpmu->handle_irq(irq, dev);
}
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}
static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}
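
/*
 * Mode exclusion note: a request such as "perf stat -e cycles:u" sets
 * attr->exclude_kernel (and exclude_hv), so on a PMU without a
 * set_event_filter callback __hw_perf_event_init() below rejects the event
 * rather than silently counting in all modes.
 */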
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);
	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!hwc->sample_period) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}
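
/*
 * With a 32-bit counter (max_period == 0xffffffff) the default
 * sample_period chosen above is therefore 0x7fffffff, i.e. for non-sampling
 * (counting) use the counter is re-primed roughly every 2^31 events.
 */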
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	if (event->cpu != -1 &&
	    !cpumask_test_cpu(event->cpu, &armpmu->valid_cpus))
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);
		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop(armpmu);
}
#ifdef CONFIG_PM_RUNTIME
static int armpmu_runtime_resume(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_resume)
		return plat->runtime_resume(dev);

	return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_suspend)
		return plat->runtime_suspend(dev);

	return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};
static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}
int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
		armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}
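
/*
 * Callers typically register the CPU PMU with type PERF_TYPE_RAW; passing
 * -1 instead asks perf_pmu_register() to allocate a dynamic type (which is
 * numbered from PERF_TYPE_MAX upwards), and armpmu_map_event() above then
 * treats such events as raw events.
 */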
/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));
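
/*
 * Frame walk illustration (APCS frame layout assumed, as described above):
 * for a user frame pointer fp, the saved {fp, sp, lr} triple sits just
 * below it, i.e. at (struct frame_tail __user *)fp - 1. Reading that tail
 * yields the caller's return address in ->lr and the previous frame's tail
 * at ->fp - 1, which is how user_backtrace() below iterates towards higher
 * addresses up the stack.
 */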
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

	perf_callchain_store(entry, regs->ARM_pc);
	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else if (user_mode(regs))
		misc |= PERF_RECORD_MISC_USER;
	else
		misc |= PERF_RECORD_MISC_KERNEL;

	return misc;
}