#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>

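/*
 * Map a PERF_TYPE_HW_CACHE config onto a PMU-specific event number.
 * The generic perf ABI packs the cache event into the low three bytes
 * of attr.config:
 *
 *	config = (cache_id) | (op_id << 8) | (result_id << 16)
 *
 * so, for example, L1D read misses are encoded as
 * PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).
 */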
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
        unsigned int cache_type, cache_op, cache_result;
        int ret;

        cache_type = (config >>  0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >>  8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ret = (*cache_map)[cache_type][cache_op][cache_result];

        if (ret == CACHE_OP_UNSUPPORTED)
                return -ENOENT;

        return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
        int mapping;

        if (config >= PERF_COUNT_HW_MAX)
                return -EINVAL;

        mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
        return (int)(config & raw_event_mask);
}

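/*
 * Map the generic perf event type/config pair onto a hardware event
 * number using the per-PMU tables. Events whose type matches the
 * dynamically allocated pmu->type are treated like raw events.
 */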
int
armpmu_map_event(struct perf_event *event,
                 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
                 const unsigned (*cache_map)
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX],
                 u32 raw_event_mask)
{
        u64 config = event->attr.config;
        int type = event->attr.type;

        if (type == event->pmu->type)
                return armpmu_map_raw_event(raw_event_mask, config);

        switch (type) {
        case PERF_TYPE_HARDWARE:
                return armpmu_map_hw_event(event_map, config);
        case PERF_TYPE_HW_CACHE:
                return armpmu_map_cache_event(cache_map, config);
        case PERF_TYPE_RAW:
                return armpmu_map_raw_event(raw_event_mask, config);
        }

        return -ENOENT;
}

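/*
 * (Re)program the counter for the next sample period. The counter is
 * written with the negated period (truncated to the counter width) so
 * that it counts up towards zero and raises the overflow interrupt
 * after "period" events.
 */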
int armpmu_event_set_period(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        /*
         * Limit the maximum period to prevent the counter value
         * from overtaking the one we are about to program. In
         * effect we are reducing max_period to account for
         * interrupt latency (and we are being very conservative).
         */
        if (left > (armpmu->max_period >> 1))
                left = armpmu->max_period >> 1;

        local64_set(&hwc->prev_count, (u64)-left);

        armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

        perf_event_update_userpage(event);

        return ret;
}

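/*
 * Fold the delta since the last read into the event count. The
 * cmpxchg loop copes with the overflow interrupt racing with us and
 * updating prev_count first; masking the delta with max_period
 * handles the counter wrapping.
 */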
u64 armpmu_event_update(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(event);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
        armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * The ARM PMU always has to update the counter, so ignore
         * PERF_EF_UPDATE, see comments in armpmu_start().
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(event);
                armpmu_event_update(event);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}

static void armpmu_start(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * The ARM PMU always has to reprogram the period, so ignore
         * PERF_EF_RELOAD, see the comment below.
         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
         * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
         */
        armpmu_event_set_period(event);
        armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        armpmu_stop(event, PERF_EF_UPDATE);
        hw_events->events[idx] = NULL;
        clear_bit(idx, hw_events->used_mask);
        if (armpmu->clear_event_idx)
                armpmu->clear_event_idx(hw_events, event);

        perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;

        /*
         * A task-following event may have migrated here from a CPU that
         * this PMU doesn't cover, so it couldn't have been stopped any
         * earlier; reject it now.
         */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return -ENOENT;

        perf_pmu_disable(event->pmu);

        /* If we don't have a free counter then finish early. */
        idx = armpmu->get_event_idx(hw_events, event);
        if (idx < 0) {
                err = idx;
                goto out;
        }

        /*
         * If there is an event in the counter we are going to use then make
         * sure it is disabled.
         */
        event->hw.idx = idx;
        armpmu->disable(event);
        hw_events->events[idx] = event;

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                armpmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

out:
        perf_pmu_enable(event->pmu);
        return err;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
               struct perf_event *event)
{
        struct arm_pmu *armpmu;

        if (is_software_event(event))
                return 1;

        /*
         * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
         * core perf code won't check that the pmu->ctx == leader->ctx
         * until after pmu->event_init(event).
         */
        if (event->pmu != pmu)
                return 0;

        if (event->state < PERF_EVENT_STATE_OFF)
                return 1;

        if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
                return 1;

        armpmu = to_arm_pmu(event->pmu);
        return armpmu->get_event_idx(hw_events, event) >= 0;
}

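/*
 * Check that this event and all of its group siblings could be
 * scheduled onto the PMU at the same time, by dry-running the counter
 * allocator against a fake PMU with an empty used_mask.
 */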
static int
validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct pmu_hw_events fake_pmu;

        /*
         * Initialise the fake PMU. We only need to populate the
         * used_mask for the purposes of validation.
         */
        memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(event->pmu, &fake_pmu, sibling))
                        return -EINVAL;
        }

        if (!validate_event(event->pmu, &fake_pmu, event))
                return -EINVAL;

        return 0;
}

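/*
 * Common interrupt dispatcher: forwards to the platform handler if one
 * was supplied, and reports how long the handler ran so that the perf
 * core can throttle the sample rate if interrupts take too long.
 */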
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
        struct arm_pmu *armpmu;
        struct platform_device *plat_device;
        struct arm_pmu_platdata *plat;
        int ret;
        u64 start_clock, finish_clock;

        /*
         * We request the IRQ with a (possibly percpu) struct arm_pmu**, but
         * the handlers expect a struct arm_pmu*. The percpu_irq framework will
         * do any necessary shifting, we just need to perform the first
         * dereference.
         */
        armpmu = *(void **)dev;
        plat_device = armpmu->plat_device;
        plat = dev_get_platdata(&plat_device->dev);

        start_clock = sched_clock();
        if (plat && plat->handle_irq)
                ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
        else
                ret = armpmu->handle_irq(irq, armpmu);
        finish_clock = sched_clock();

        perf_sample_event_took(finish_clock - start_clock);
        return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
        armpmu->free_irq(armpmu);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
        int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);

        if (err) {
                armpmu_release_hardware(armpmu);
                return err;
        }

        return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        atomic_t *active_events = &armpmu->active_events;
        struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

        if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
                armpmu_release_hardware(armpmu);
                mutex_unlock(pmu_reserve_mutex);
        }
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
        return attr->exclude_idle || attr->exclude_user ||
               attr->exclude_kernel || attr->exclude_hv;
}

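/*
 * Translate the generic event attributes into hardware terms: map the
 * event code, apply any mode-exclusion filter, pick a default sample
 * period for counting (non-sampling) events and validate the group.
 */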
static int
__hw_perf_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int mapping;

        mapping = armpmu->map_event(event);

        if (mapping < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapping;
        }

        /*
         * We don't assign an index until we actually place the event onto
         * hardware. Use -1 to signify that we haven't decided where to put it
         * yet. For SMP systems, each core has its own PMU so we can't do any
         * clever allocation or constraints checking at this point.
         */
        hwc->idx                = -1;
        hwc->config_base        = 0;
        hwc->config             = 0;
        hwc->event_base         = 0;

        /*
         * Check whether we need to exclude the counter from certain modes.
         */
        if ((!armpmu->set_event_filter ||
             armpmu->set_event_filter(hwc, &event->attr)) &&
             event_requires_mode_exclusion(&event->attr)) {
                pr_debug("ARM performance counters do not support mode exclusion\n");
                return -EOPNOTSUPP;
        }

        /*
         * Store the event encoding into the config_base field.
         */
        hwc->config_base            |= (unsigned long)mapping;

        if (!is_sampling_event(event)) {
                /*
                 * For non-sampling runs, limit the sample_period to half
                 * of the counter width. That way, the new counter value
                 * is far less likely to overtake the previous one unless
                 * you have some serious IRQ latency issues.
                 */
                hwc->sample_period  = armpmu->max_period >> 1;
                hwc->last_period    = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        if (event->group_leader != event) {
                if (validate_group(event) != 0)
                        return -EINVAL;
        }

        return 0;
}

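/*
 * pmu::event_init callback. The PMU interrupt is requested lazily when
 * the first event is created and released again (via
 * hw_perf_event_destroy) when the last event goes away;
 * active_events/reserve_mutex implement that refcounting.
 */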
static int armpmu_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        int err = 0;
        atomic_t *active_events = &armpmu->active_events;

        /*
         * Reject CPU-affine events for CPUs that are of a different class to
         * that which this PMU handles. Process-following events (where
         * event->cpu == -1) can be migrated between CPUs, and thus we have to
         * reject them later (in armpmu_add) if they're scheduled on a
         * different class of CPU.
         */
        if (event->cpu != -1 &&
                !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
                return -ENOENT;

        /* does not support taken branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (armpmu->map_event(event) == -ENOENT)
                return -ENOENT;

        event->destroy = hw_perf_event_destroy;

        if (!atomic_inc_not_zero(active_events)) {
                mutex_lock(&armpmu->reserve_mutex);
                if (atomic_read(active_events) == 0)
                        err = armpmu_reserve_hardware(armpmu);

                if (!err)
                        atomic_inc(active_events);
                mutex_unlock(&armpmu->reserve_mutex);
        }

        if (err)
                return err;

        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);

        return err;
}

static void armpmu_enable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        /* For task-bound events we may be called on other CPUs */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return;

        if (enabled)
                armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);

        /* For task-bound events we may be called on other CPUs */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return;

        armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        unsigned int cpu = smp_processor_id();

        return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static void armpmu_init(struct arm_pmu *armpmu)
{
        atomic_set(&armpmu->active_events, 0);
        mutex_init(&armpmu->reserve_mutex);

        armpmu->pmu = (struct pmu) {
                .pmu_enable     = armpmu_enable,
                .pmu_disable    = armpmu_disable,
                .event_init     = armpmu_event_init,
                .add            = armpmu_add,
                .del            = armpmu_del,
                .start          = armpmu_start,
                .stop           = armpmu_stop,
                .read           = armpmu_read,
                .filter_match   = armpmu_filter_match,
        };
}

int armpmu_register(struct arm_pmu *armpmu, int type)
{
        armpmu_init(armpmu);
        pr_info("enabled with %s PMU driver, %d counters available\n",
                armpmu->name, armpmu->num_events);
        return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
        if (!__oprofile_cpu_pmu)
                return NULL;

        return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
        int max_events = 0;

        if (__oprofile_cpu_pmu != NULL)
                max_events = __oprofile_cpu_pmu->num_events;

        return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static void cpu_pmu_enable_percpu_irq(void *data)
{
        int irq = *(int *)data;

        enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
        int irq = *(int *)data;

        disable_percpu_irq(irq);
}

static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
        int i, irq, irqs;
        struct platform_device *pmu_device = cpu_pmu->plat_device;
        struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

        irqs = min(pmu_device->num_resources, num_possible_cpus());

        irq = platform_get_irq(pmu_device, 0);
        if (irq >= 0 && irq_is_percpu(irq)) {
                on_each_cpu_mask(&cpu_pmu->supported_cpus,
                                 cpu_pmu_disable_percpu_irq, &irq, 1);
                free_percpu_irq(irq, &hw_events->percpu_pmu);
        } else {
                for (i = 0; i < irqs; ++i) {
                        int cpu = i;

                        if (cpu_pmu->irq_affinity)
                                cpu = cpu_pmu->irq_affinity[i];

                        if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
                                continue;
                        irq = platform_get_irq(pmu_device, i);
                        if (irq >= 0)
                                free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
                }
        }
}

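/*
 * Request the PMU interrupt(s): either a single per-cpu IRQ (PPI)
 * shared by every CPU this PMU supports, or one SPI per CPU with each
 * line's affinity steered to its CPU.
 */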
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
        int i, err, irq, irqs;
        struct platform_device *pmu_device = cpu_pmu->plat_device;
        struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

        if (!pmu_device)
                return -ENODEV;

        irqs = min(pmu_device->num_resources, num_possible_cpus());
        if (irqs < 1) {
                pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
                return 0;
        }

        irq = platform_get_irq(pmu_device, 0);
        if (irq >= 0 && irq_is_percpu(irq)) {
                err = request_percpu_irq(irq, handler, "arm-pmu",
                                         &hw_events->percpu_pmu);
                if (err) {
                        pr_err("unable to request IRQ%d for ARM PMU counters\n",
                                irq);
                        return err;
                }

                on_each_cpu_mask(&cpu_pmu->supported_cpus,
                                 cpu_pmu_enable_percpu_irq, &irq, 1);
        } else {
                for (i = 0; i < irqs; ++i) {
                        int cpu = i;

                        err = 0;
                        irq = platform_get_irq(pmu_device, i);
                        if (irq < 0)
                                continue;

                        if (cpu_pmu->irq_affinity)
                                cpu = cpu_pmu->irq_affinity[i];

                        /*
                         * If we have a single PMU interrupt that we can't shift,
                         * assume that we're running on a uniprocessor machine and
                         * continue. Otherwise, continue without this interrupt.
                         */
                        if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
                                pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
                                        irq, cpu);
                                continue;
                        }

                        err = request_irq(irq, handler,
                                          IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
                                          per_cpu_ptr(&hw_events->percpu_pmu, cpu));
                        if (err) {
                                pr_err("unable to request IRQ%d for ARM PMU counters\n",
                                        irq);
                                return err;
                        }

                        cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
                }
        }

        return 0;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
                          void *hcpu)
{
        int cpu = (unsigned long)hcpu;
        struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);

        if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
                return NOTIFY_DONE;

        if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
                return NOTIFY_DONE;

        if (pmu->reset)
                pmu->reset(pmu);
        else
                return NOTIFY_DONE;

        return NOTIFY_OK;
}

#ifdef CONFIG_CPU_PM
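/*
 * CPU PM support: as with hotplug, the PMU loses its state across a
 * low-power transition, so stop and save all active events on
 * CPU_PM_ENTER and reprogram them on CPU_PM_EXIT/CPU_PM_ENTER_FAILED.
 */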
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct perf_event *event;
        int idx;

        for (idx = 0; idx < armpmu->num_events; idx++) {
                /*
                 * If the counter is not used skip it, there is no
                 * need of stopping/restarting it.
                 */
                if (!test_bit(idx, hw_events->used_mask))
                        continue;

                event = hw_events->events[idx];

                switch (cmd) {
                case CPU_PM_ENTER:
                        /*
                         * Stop and update the counter
                         */
                        armpmu_stop(event, PERF_EF_UPDATE);
                        break;
                case CPU_PM_EXIT:
                case CPU_PM_ENTER_FAILED:
                        /*
                         * Restore and enable the counter.
                         * armpmu_start() indirectly calls
                         *
                         * perf_event_update_userpage()
                         *
                         * that requires RCU read locking to be functional,
                         * wrap the call within RCU_NONIDLE to make the
                         * RCU subsystem aware this cpu is not idle from
                         * an RCU perspective for the armpmu_start() call
                         * duration.
                         */
                        RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
                        break;
                default:
                        break;
                }
        }
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
                             void *v)
{
        struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return NOTIFY_DONE;

        /*
         * Always reset the PMU registers on power-up even if
         * there are no events running.
         */
        if (cmd == CPU_PM_EXIT && armpmu->reset)
                armpmu->reset(armpmu);

        if (!enabled)
                return NOTIFY_OK;

        switch (cmd) {
        case CPU_PM_ENTER:
                armpmu->stop(armpmu);
                cpu_pm_pmu_setup(armpmu, cmd);
                break;
        case CPU_PM_EXIT:
                cpu_pm_pmu_setup(armpmu, cmd);
                /* fall through: the PMU itself still needs starting */
        case CPU_PM_ENTER_FAILED:
                armpmu->start(armpmu);
                break;
        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
        return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
        cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

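/*
 * Allocate the per-cpu event bookkeeping, hook up the hotplug and CPU
 * PM notifiers, and reset the PMU hardware on every supported CPU so
 * that it starts from a sane state.
 */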
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
        int err;
        int cpu;
        struct pmu_hw_events __percpu *cpu_hw_events;

        cpu_hw_events = alloc_percpu(struct pmu_hw_events);
        if (!cpu_hw_events)
                return -ENOMEM;

        cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
        err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
        if (err)
                goto out_hw_events;

        err = cpu_pm_pmu_register(cpu_pmu);
        if (err)
                goto out_unregister;

        for_each_possible_cpu(cpu) {
                struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);

                raw_spin_lock_init(&events->pmu_lock);
                events->percpu_pmu = cpu_pmu;
        }

        cpu_pmu->hw_events      = cpu_hw_events;
        cpu_pmu->request_irq    = cpu_pmu_request_irq;
        cpu_pmu->free_irq       = cpu_pmu_free_irq;

        /* Ensure the PMU has sane values out of reset. */
        if (cpu_pmu->reset)
                on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
                                 cpu_pmu, 1);

        /* If no interrupts available, set the corresponding capability flag */
        if (!platform_get_irq(cpu_pmu->plat_device, 0))
                cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

        return 0;

out_unregister:
        unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
out_hw_events:
        free_percpu(cpu_hw_events);
        return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
        cpu_pm_pmu_unregister(cpu_pmu);
        unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
        free_percpu(cpu_pmu->hw_events);
}

/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu,
                             const struct pmu_probe_info *info)
{
        int cpu = get_cpu();
        unsigned int cpuid = read_cpuid_id();
        int ret = -ENODEV;

        pr_info("probing PMU on CPU %d\n", cpu);

        for (; info->init != NULL; info++) {
                if ((cpuid & info->mask) != info->cpuid)
                        continue;
                ret = info->init(pmu);
                break;
        }

        put_cpu();
        return ret;
}

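/*
 * Parse the PMU's interrupts and the optional "interrupt-affinity"
 * property from the device tree. Each phandle in "interrupt-affinity"
 * names the CPU node that the correspondingly numbered SPI is wired
 * to; per-cpu IRQs (PPIs) need no such routing.
 */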
static int of_pmu_irq_cfg(struct arm_pmu *pmu)
{
        int *irqs, i = 0;
        bool using_spi = false;
        struct platform_device *pdev = pmu->plat_device;

        irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
        if (!irqs)
                return -ENOMEM;

        do {
                struct device_node *dn;
                int cpu, irq;

                /* See if we have an affinity entry */
                dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
                if (!dn)
                        break;

                /* Check the IRQ type and prohibit a mix of PPIs and SPIs */
                irq = platform_get_irq(pdev, i);
                if (irq >= 0) {
                        bool spi = !irq_is_percpu(irq);

                        if (i > 0 && spi != using_spi) {
                                pr_err("PPI/SPI IRQ type mismatch for %s!\n",
                                        dn->name);
                                of_node_put(dn);
                                kfree(irqs);
                                return -EINVAL;
                        }

                        using_spi = spi;
                }

                /* Now look up the logical CPU number */
                for_each_possible_cpu(cpu) {
                        struct device_node *cpu_dn;

                        cpu_dn = of_cpu_device_node_get(cpu);
                        of_node_put(cpu_dn);

                        if (dn == cpu_dn)
                                break;
                }

                if (cpu >= nr_cpu_ids) {
                        pr_warn("Failed to find logical CPU for %s\n",
                                dn->name);
                        of_node_put(dn);
                        cpumask_setall(&pmu->supported_cpus);
                        break;
                }
                of_node_put(dn);

                /* For SPIs, we need to track the affinity per IRQ */
                if (using_spi) {
                        if (i >= pdev->num_resources)
                                break;

                        irqs[i] = cpu;
                }

                /* Keep track of the CPUs containing this PMU type */
                cpumask_set_cpu(cpu, &pmu->supported_cpus);
                i++;
        } while (1);

        /* If we didn't manage to parse anything, try the interrupt affinity */
        if (cpumask_weight(&pmu->supported_cpus) == 0) {
                if (!using_spi) {
                        /* If using PPIs, check the affinity of the partition */
                        int ret, irq;

                        irq = platform_get_irq(pdev, 0);
                        ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
                        if (ret) {
                                kfree(irqs);
                                return ret;
                        }
                } else {
                        /* Otherwise default to all CPUs */
                        cpumask_setall(&pmu->supported_cpus);
                }
        }

        /* If we matched up the IRQ affinities, use them to route the SPIs */
        if (using_spi && i == pdev->num_resources)
                pmu->irq_affinity = irqs;
        else
                kfree(irqs);

        return 0;
}

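/*
 * Probe helper shared by the CPU PMU drivers. A caller typically looks
 * something like the sketch below (the "foo" names are illustrative,
 * not a real driver):
 *
 *	static const struct of_device_id foo_pmu_of_ids[] = {
 *		{ .compatible = "foo,foo-pmu", .data = foo_pmu_init },
 *		{ },
 *	};
 *
 *	static int foo_pmu_probe(struct platform_device *pdev)
 *	{
 *		return arm_pmu_device_probe(pdev, foo_pmu_of_ids,
 *					    foo_pmu_probe_table);
 *	}
 *
 * where foo_pmu_init() fills in the arm_pmu callbacks and
 * foo_pmu_probe_table matches CPUID values for non-DT probing.
 */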
int arm_pmu_device_probe(struct platform_device *pdev,
                         const struct of_device_id *of_table,
                         const struct pmu_probe_info *probe_table)
{
        const struct of_device_id *of_id;
        int (*init_fn)(struct arm_pmu *);
        struct device_node *node = pdev->dev.of_node;
        struct arm_pmu *pmu;
        int ret = -ENODEV;

        pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
        if (!pmu) {
                pr_info("failed to allocate PMU device!\n");
                return -ENOMEM;
        }

        if (!__oprofile_cpu_pmu)
                __oprofile_cpu_pmu = pmu;

        pmu->plat_device = pdev;

        if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
                init_fn = of_id->data;

                ret = of_pmu_irq_cfg(pmu);
                if (!ret)
                        ret = init_fn(pmu);
        } else {
                ret = probe_current_pmu(pmu, probe_table);
                cpumask_setall(&pmu->supported_cpus);
        }

        if (ret) {
                pr_info("failed to probe PMU!\n");
                goto out_free;
        }

        ret = cpu_pmu_init(pmu);
        if (ret)
                goto out_free;

        ret = armpmu_register(pmu, -1);
        if (ret)
                goto out_destroy;

        return 0;

out_destroy:
        cpu_pmu_destroy(pmu);
out_free:
        pr_info("failed to register PMU devices!\n");
        kfree(pmu);
        return ret;
}