arch/arm/kernel/perf_event_cpu.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt) "CPU PMU: " fmt

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;

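/*
 * Static per-CPU storage backing each CPU's pmu_hw_events; wired up
 * in cpu_pmu_init() below.
 */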
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
        if (!cpu_pmu)
                return NULL;

        return cpu_pmu->pmu.name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
        int max_events = 0;

        if (cpu_pmu != NULL)
                max_events = cpu_pmu->num_events;

        return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

/*
 * Include the PMU-specific implementations directly: their init
 * functions are static, so building them into this translation unit
 * lets the probing code below call them.
 */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"

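/* Return the hw_events bookkeeping for the current CPU. */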
static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
{
        return &__get_cpu_var(cpu_hw_events);
}

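/* Release every per-CPU IRQ previously claimed in cpu_pmu_request_irq(). */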
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
        int i, irq, irqs;
        struct platform_device *pmu_device = cpu_pmu->plat_device;

        irqs = min(pmu_device->num_resources, num_possible_cpus());

        for (i = 0; i < irqs; ++i) {
                if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
                        continue;
                irq = platform_get_irq(pmu_device, i);
                if (irq >= 0)
                        free_irq(irq, cpu_pmu);
        }
}

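/*
 * Claim one IRQ per possible CPU (capped by the platform resources),
 * pin each interrupt to its CPU, and record it in active_irqs so that
 * cpu_pmu_free_irq() can unwind.
 */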
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
        int i, err, irq, irqs;
        struct platform_device *pmu_device = cpu_pmu->plat_device;

        if (!pmu_device)
                return -ENODEV;

        irqs = min(pmu_device->num_resources, num_possible_cpus());
        if (irqs < 1) {
                pr_err("no irqs for PMUs defined\n");
                return -ENODEV;
        }

        for (i = 0; i < irqs; ++i) {
                err = 0;
                irq = platform_get_irq(pmu_device, i);
                if (irq < 0)
                        continue;

                /*
                 * If we have a single PMU interrupt that we can't move,
                 * assume that we're running on a uniprocessor machine and
                 * request it anyway. Otherwise, skip this interrupt and
                 * carry on without it.
                 */
                if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
                        pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
                                    irq, i);
                        continue;
                }

                err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
                                  cpu_pmu);
                if (err) {
                        pr_err("unable to request IRQ%d for ARM PMU counters\n",
                                irq);
                        return err;
                }

                cpumask_set_cpu(i, &cpu_pmu->active_irqs);
        }

        return 0;
}

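/*
 * Point each CPU's pmu_hw_events at the static per-CPU storage above,
 * install the IRQ helpers, and reset every PMU to a sane initial state.
 */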
static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
        int cpu;
        for_each_possible_cpu(cpu) {
                struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
                events->events = per_cpu(hw_events, cpu);
                events->used_mask = per_cpu(used_mask, cpu);
                raw_spin_lock_init(&events->pmu_lock);
        }

        cpu_pmu->get_hw_events  = cpu_pmu_get_cpu_events;
        cpu_pmu->request_irq    = cpu_pmu_request_irq;
        cpu_pmu->free_irq       = cpu_pmu_free_irq;

        /* Ensure the PMU has sane values out of reset. */
        if (cpu_pmu && cpu_pmu->reset)
                on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
                                    unsigned long action, void *hcpu)
{
        if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
                return NOTIFY_DONE;

        if (cpu_pmu && cpu_pmu->reset)
                cpu_pmu->reset(cpu_pmu);

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
        .notifier_call = cpu_pmu_notify,
};

/*
 * PMU platform driver and devicetree bindings.
 */
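/*
 * An illustrative (not board-specific) node matched by the table below;
 * the interrupt specifier values are placeholders:
 *
 *      pmu {
 *              compatible = "arm,cortex-a9-pmu";
 *              interrupts = <0 100 4>, <0 101 4>;
 *      };
 */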
static struct of_device_id __devinitdata cpu_pmu_of_device_ids[] = {
        {.compatible = "arm,cortex-a15-pmu",    .data = armv7_a15_pmu_init},
        {.compatible = "arm,cortex-a9-pmu",     .data = armv7_a9_pmu_init},
        {.compatible = "arm,cortex-a8-pmu",     .data = armv7_a8_pmu_init},
        {.compatible = "arm,cortex-a7-pmu",     .data = armv7_a7_pmu_init},
        {.compatible = "arm,cortex-a5-pmu",     .data = armv7_a5_pmu_init},
        {.compatible = "arm,arm11mpcore-pmu",   .data = armv6mpcore_pmu_init},
        {.compatible = "arm,arm1176-pmu",       .data = armv6pmu_init},
        {.compatible = "arm,arm1136-pmu",       .data = armv6pmu_init},
        {},
};

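/* Fallback match for non-DT platforms that register an "arm-pmu" device. */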
static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = {
        {.name = "arm-pmu"},
        {},
};

/*
 * CPU PMU identification and probing.
 */
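/*
 * Decode the MIDR: the implementer code lives in bits [31:24]
 * (0x41 == 'A' for ARM Ltd, 0x69 == 'i' for Intel/XScale) and the
 * primary part number in bits [15:4], which is why the ARM case
 * values below are left unshifted (e.g. 0xC090 for Cortex-A9).
 * XScale parts instead encode a core generation in bits [15:13].
 */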
static int __devinit probe_current_pmu(struct arm_pmu *pmu)
{
        int cpu = get_cpu();
        unsigned long cpuid = read_cpuid_id();
        unsigned long implementor = (cpuid & 0xFF000000) >> 24;
        unsigned long part_number = (cpuid & 0xFFF0);
        int ret = -ENODEV;

        pr_info("probing PMU on CPU %d\n", cpu);

        /* ARM Ltd CPUs. */
        if (0x41 == implementor) {
                switch (part_number) {
                case 0xB360:    /* ARM1136 */
                case 0xB560:    /* ARM1156 */
                case 0xB760:    /* ARM1176 */
                        ret = armv6pmu_init(pmu);
                        break;
                case 0xB020:    /* ARM11mpcore */
                        ret = armv6mpcore_pmu_init(pmu);
                        break;
                case 0xC080:    /* Cortex-A8 */
                        ret = armv7_a8_pmu_init(pmu);
                        break;
                case 0xC090:    /* Cortex-A9 */
                        ret = armv7_a9_pmu_init(pmu);
                        break;
                case 0xC050:    /* Cortex-A5 */
                        ret = armv7_a5_pmu_init(pmu);
                        break;
                case 0xC0F0:    /* Cortex-A15 */
                        ret = armv7_a15_pmu_init(pmu);
                        break;
                case 0xC070:    /* Cortex-A7 */
                        ret = armv7_a7_pmu_init(pmu);
                        break;
                }
        /* Intel CPUs [xscale]. */
        } else if (0x69 == implementor) {
                part_number = (cpuid >> 13) & 0x7;
                switch (part_number) {
                case 1:
                        ret = xscale1pmu_init(pmu);
                        break;
                case 2:
                        ret = xscale2pmu_init(pmu);
                        break;
                }
        }

        put_cpu();
        return ret;
}

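/*
 * Probe: prefer a devicetree match for the init function, otherwise
 * fall back to identifying the current CPU from its MIDR.
 */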
static int __devinit cpu_pmu_device_probe(struct platform_device *pdev)
{
        const struct of_device_id *of_id;
        int (*init_fn)(struct arm_pmu *);
        struct device_node *node = pdev->dev.of_node;
        struct arm_pmu *pmu;
        int ret = -ENODEV;

        if (cpu_pmu) {
                pr_info("attempt to register multiple PMU devices!\n");
                return -ENOSPC;
        }

        pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
        if (!pmu) {
                pr_info("failed to allocate PMU device!\n");
                return -ENOMEM;
        }

        if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, node))) {
                init_fn = of_id->data;
                ret = init_fn(pmu);
        } else {
                ret = probe_current_pmu(pmu);
        }

        if (ret) {
                pr_info("failed to probe PMU!\n");
                kfree(pmu);
                return ret;
        }

        cpu_pmu = pmu;
        cpu_pmu->plat_device = pdev;
        cpu_pmu_init(cpu_pmu);
        armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);

        return 0;
}

static struct platform_driver cpu_pmu_driver = {
        .driver         = {
                .name   = "arm-pmu",
                .pm     = &armpmu_dev_pm_ops,
                .of_match_table = cpu_pmu_of_device_ids,
        },
        .probe          = cpu_pmu_device_probe,
        .id_table       = cpu_pmu_plat_device_ids,
};

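/*
 * Register the hotplug notifier before the driver so the PMU can be
 * reset as CPUs come online; unwind the notifier if driver
 * registration fails.
 */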
static int __init register_pmu_driver(void)
{
        int err;

        err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
        if (err)
                return err;

        err = platform_driver_register(&cpu_pmu_driver);
        if (err)
                unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);

        return err;
}
device_initcall(register_pmu_driver);