perf x86: revert 20b279 - require exclude_guest to use PEBS - kernel side
[firefly-linux-kernel-4.4.55.git] arch/x86/kernel/cpu/perf_event.c
/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>

#include "perf_event.h"

struct x86_pmu x86_pmu __read_mostly;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .enabled = 1,
};

u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta of events processed.
 */
u64 x86_perf_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - x86_pmu.cntval_bits;
        u64 prev_raw_count, new_raw_count;
        int idx = hwc->idx;
        s64 delta;

        if (idx == INTEL_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
         */
again:
        prev_raw_count = local64_read(&hwc->prev_count);
        rdpmcl(hwc->event_base_rdpmc, new_raw_count);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}
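
/*
 * Example (illustrative, not part of the original file): the shift
 * trick above sign-extends a partial-width counter so deltas survive
 * wrap-around. A minimal user-space sketch, assuming 48-bit counters:
 *
 *      #include <stdio.h>
 *      #include <stdint.h>
 *
 *      int main(void)
 *      {
 *              int shift = 64 - 48;                    // cntval_bits == 48
 *              uint64_t prev = 0xffffffffffffULL;      // about to wrap
 *              uint64_t cur  = 0x000000000005ULL;      // after wrapping
 *              int64_t delta = ((int64_t)(cur << shift) -
 *                               (int64_t)(prev << shift)) >> shift;
 *              printf("delta = %lld\n", (long long)delta);     // prints 6
 *              return 0;
 *      }
 */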

/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
        struct hw_perf_event_extra *reg;
        struct extra_reg *er;

        reg = &event->hw.extra_reg;

        if (!x86_pmu.extra_regs)
                return 0;

        for (er = x86_pmu.extra_regs; er->msr; er++) {
                if (er->event != (config & er->config_mask))
                        continue;
                if (event->attr.config1 & ~er->valid_mask)
                        return -EINVAL;

                reg->idx = er->idx;
                reg->config = event->attr.config1;
                reg->reg = er->msr;
                break;
        }
        return 0;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
                        goto perfctr_fail;
        }

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
                        goto eventsel_fail;
        }

        return true;

eventsel_fail:
        for (i--; i >= 0; i--)
                release_evntsel_nmi(x86_pmu_config_addr(i));

        i = x86_pmu.num_counters;

perfctr_fail:
        for (i--; i >= 0; i--)
                release_perfctr_nmi(x86_pmu_event_addr(i));

        return false;
}

static void release_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                release_perfctr_nmi(x86_pmu_event_addr(i));
                release_evntsel_nmi(x86_pmu_config_addr(i));
        }
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static bool check_hw_exists(void)
{
        u64 val, val_new = ~0;
        int i, reg, ret = 0;

        /*
         * Check to see if the BIOS enabled any of the counters, if so
         * complain and bail.
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                reg = x86_pmu_config_addr(i);
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
                if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
                        goto bios_fail;
        }

        if (x86_pmu.num_counters_fixed) {
                reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
                for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
                        if (val & (0x03 << i*4))
                                goto bios_fail;
                }
        }

        /*
         * Read the current value, change it and read it back to see if it
         * matches, this is needed to detect certain hardware emulators
         * (qemu/kvm) that don't trap on the MSR access and always return 0s.
         */
        reg = x86_pmu_event_addr(0);
        if (rdmsrl_safe(reg, &val))
                goto msr_fail;
        val ^= 0xffffUL;
        ret = wrmsrl_safe(reg, val);
        ret |= rdmsrl_safe(reg, &val_new);
        if (ret || val != val_new)
                goto msr_fail;

        return true;

bios_fail:
        /*
         * We still allow the PMU driver to operate:
         */
        printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
        printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);

        return true;

msr_fail:
        printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
        printk(KERN_ERR "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);

        return false;
}
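
/*
 * Example (illustrative, not part of the original file): a similar
 * write/read-back probe can be done from user space via the msr driver
 * (needs root and the msr module; reading a live counter may drift).
 * A sketch, assuming MSR 0xc1 (IA32_PMC0):
 *
 *      #include <stdio.h>
 *      #include <stdint.h>
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              uint64_t val, val_new;
 *              int fd = open("/dev/cpu/0/msr", O_RDWR);
 *
 *              if (fd < 0)
 *                      return 1;
 *              pread(fd, &val, 8, 0xc1);
 *              val ^= 0xffffUL;
 *              pwrite(fd, &val, 8, 0xc1);
 *              pread(fd, &val_new, 8, 0xc1);
 *              printf("%s\n", val == val_new ? "PMU responds" : "emulated");
 *              close(fd);
 *              return 0;
 *      }
 */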

static void hw_perf_event_destroy(struct perf_event *event)
{
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                release_ds_buffers();
                mutex_unlock(&pmc_reserve_mutex);
        }
}

static inline int x86_pmu_initialized(void)
{
        return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        unsigned int cache_type, cache_op, cache_result;
        u64 config, val;

        config = attr->config;

        cache_type = (config >>  0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >>  8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        val = hw_cache_event_ids[cache_type][cache_op][cache_result];

        if (val == 0)
                return -ENOENT;

        if (val == -1)
                return -EINVAL;

        hwc->config |= val;
        attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
        return x86_pmu_extra_regs(val, event);
}
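
/*
 * Example (illustrative, not part of the original file): user space
 * packs attr.config exactly the way set_ext_hw_attr() unpacks it.
 * E.g. L1D read misses for a PERF_TYPE_HW_CACHE event:
 *
 *      #include <linux/perf_event.h>
 *
 *      static __u64 l1d_read_miss_config(void)
 *      {
 *              return PERF_COUNT_HW_CACHE_L1D |
 *                     (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                     (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *      }
 */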
289
290 int x86_setup_perfctr(struct perf_event *event)
291 {
292         struct perf_event_attr *attr = &event->attr;
293         struct hw_perf_event *hwc = &event->hw;
294         u64 config;
295
296         if (!is_sampling_event(event)) {
297                 hwc->sample_period = x86_pmu.max_period;
298                 hwc->last_period = hwc->sample_period;
299                 local64_set(&hwc->period_left, hwc->sample_period);
300         } else {
301                 /*
302                  * If we have a PMU initialized but no APIC
303                  * interrupts, we cannot sample hardware
304                  * events (user-space has to fall back and
305                  * sample via a hrtimer based software event):
306                  */
307                 if (!x86_pmu.apic)
308                         return -EOPNOTSUPP;
309         }
310
311         if (attr->type == PERF_TYPE_RAW)
312                 return x86_pmu_extra_regs(event->attr.config, event);
313
314         if (attr->type == PERF_TYPE_HW_CACHE)
315                 return set_ext_hw_attr(hwc, event);
316
317         if (attr->config >= x86_pmu.max_events)
318                 return -EINVAL;
319
320         /*
321          * The generic map:
322          */
323         config = x86_pmu.event_map(attr->config);
324
325         if (config == 0)
326                 return -ENOENT;
327
328         if (config == -1LL)
329                 return -EINVAL;
330
331         /*
332          * Branch tracing:
333          */
334         if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
335             !attr->freq && hwc->sample_period == 1) {
336                 /* BTS is not supported by this architecture. */
337                 if (!x86_pmu.bts_active)
338                         return -EOPNOTSUPP;
339
340                 /* BTS is currently only allowed for user-mode. */
341                 if (!attr->exclude_kernel)
342                         return -EOPNOTSUPP;
343         }
344
345         hwc->config |= config;
346
347         return 0;
348 }
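
/*
 * Example (illustrative, not part of the original file): the branch
 * tracing special case above is reached by a user-space request like
 * this sketch (period 1, !freq, user mode only, as required for BTS):
 *
 *      #include <string.h>
 *      #include <linux/perf_event.h>
 *
 *      static void bts_attr(struct perf_event_attr *attr)
 *      {
 *              memset(attr, 0, sizeof(*attr));
 *              attr->size           = sizeof(*attr);
 *              attr->type           = PERF_TYPE_HARDWARE;
 *              attr->config         = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
 *              attr->sample_period  = 1;
 *              attr->exclude_kernel = 1;
 *      }
 */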
349
350 /*
351  * check that branch_sample_type is compatible with
352  * settings needed for precise_ip > 1 which implies
353  * using the LBR to capture ALL taken branches at the
354  * priv levels of the measurement
355  */
356 static inline int precise_br_compat(struct perf_event *event)
357 {
358         u64 m = event->attr.branch_sample_type;
359         u64 b = 0;
360
361         /* must capture all branches */
362         if (!(m & PERF_SAMPLE_BRANCH_ANY))
363                 return 0;
364
365         m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;
366
367         if (!event->attr.exclude_user)
368                 b |= PERF_SAMPLE_BRANCH_USER;
369
370         if (!event->attr.exclude_kernel)
371                 b |= PERF_SAMPLE_BRANCH_KERNEL;
372
373         /*
374          * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
375          */
376
377         return m == b;
378 }
379
380 int x86_pmu_hw_config(struct perf_event *event)
381 {
382         if (event->attr.precise_ip) {
383                 int precise = 0;
384
385                 /* Support for constant skid */
386                 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
387                         precise++;
388
389                         /* Support for IP fixup */
390                         if (x86_pmu.lbr_nr)
391                                 precise++;
392                 }
393
394                 if (event->attr.precise_ip > precise)
395                         return -EOPNOTSUPP;
396                 /*
397                  * check that PEBS LBR correction does not conflict with
398                  * whatever the user is asking with attr->branch_sample_type
399                  */
400                 if (event->attr.precise_ip > 1) {
401                         u64 *br_type = &event->attr.branch_sample_type;
402
403                         if (has_branch_stack(event)) {
404                                 if (!precise_br_compat(event))
405                                         return -EOPNOTSUPP;
406
407                                 /* branch_sample_type is compatible */
408
409                         } else {
410                                 /*
411                                  * user did not specify  branch_sample_type
412                                  *
413                                  * For PEBS fixups, we capture all
414                                  * the branches at the priv level of the
415                                  * event.
416                                  */
417                                 *br_type = PERF_SAMPLE_BRANCH_ANY;
418
419                                 if (!event->attr.exclude_user)
420                                         *br_type |= PERF_SAMPLE_BRANCH_USER;
421
422                                 if (!event->attr.exclude_kernel)
423                                         *br_type |= PERF_SAMPLE_BRANCH_KERNEL;
424                         }
425                 }
426         }
427
428         /*
429          * Generate PMC IRQs:
430          * (keep 'enabled' bit clear for now)
431          */
432         event->hw.config = ARCH_PERFMON_EVENTSEL_INT;
433
434         /*
435          * Count user and OS events unless requested not to
436          */
437         if (!event->attr.exclude_user)
438                 event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
439         if (!event->attr.exclude_kernel)
440                 event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
441
442         if (event->attr.type == PERF_TYPE_RAW)
443                 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
444
445         return x86_setup_perfctr(event);
446 }
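
/*
 * Example (illustrative, not part of the original file): attr.precise_ip
 * asks for the skid levels counted above. A common user-space pattern is
 * to retry with decreasing precision until perf_event_open() accepts:
 *
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <linux/perf_event.h>
 *
 *      static int open_cycles_precise(void)
 *      {
 *              struct perf_event_attr attr;
 *              int prec, fd = -1;
 *
 *              memset(&attr, 0, sizeof(attr));
 *              attr.size          = sizeof(attr);
 *              attr.type          = PERF_TYPE_HARDWARE;
 *              attr.config        = PERF_COUNT_HW_CPU_CYCLES;
 *              attr.sample_period = 100000;
 *
 *              for (prec = 2; prec >= 0 && fd < 0; prec--) {
 *                      attr.precise_ip = prec;
 *                      fd = syscall(__NR_perf_event_open, &attr,
 *                                   0, -1, -1, 0);
 *              }
 *              return fd;
 *      }
 */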

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
        int err;

        if (!x86_pmu_initialized())
                return -ENODEV;

        err = 0;
        if (!atomic_inc_not_zero(&active_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&active_events) == 0) {
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
                        else
                                reserve_ds_buffers();
                }
                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmc_reserve_mutex);
        }
        if (err)
                return err;

        event->destroy = hw_perf_event_destroy;

        event->hw.idx = -1;
        event->hw.last_cpu = -1;
        event->hw.last_tag = ~0ULL;

        /* mark unused */
        event->hw.extra_reg.idx = EXTRA_REG_NONE;
        event->hw.branch_reg.idx = EXTRA_REG_NONE;

        return x86_pmu.hw_config(event);
}

void x86_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                rdmsrl(x86_pmu_config_addr(idx), val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(x86_pmu_config_addr(idx), val);
        }
}

static void x86_pmu_disable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu_initialized())
                return;

        if (!cpuc->enabled)
                return;

        cpuc->n_added = 0;
        cpuc->enabled = 0;
        barrier();

        x86_pmu.disable_all();
}

void x86_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
        }
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
        return event->pmu == &pmu;
}

/*
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with events with least weights first. Keep the current iterator
 * state in struct sched_state.
 */
struct sched_state {
        int     weight;
        int     event;          /* event index */
        int     counter;        /* counter index */
        int     unassigned;     /* number of events to be assigned left */
        unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};

/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
#define SCHED_STATES_MAX        2

struct perf_sched {
        int                     max_weight;
        int                     max_events;
        struct event_constraint **constraints;
        struct sched_state      state;
        int                     saved_states;
        struct sched_state      saved[SCHED_STATES_MAX];
};

/*
 * Initialize the iterator that runs through all events and counters.
 */
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **c,
                            int num, int wmin, int wmax)
{
        int idx;

        memset(sched, 0, sizeof(*sched));
        sched->max_events       = num;
        sched->max_weight       = wmax;
        sched->constraints      = c;

        for (idx = 0; idx < num; idx++) {
                if (c[idx]->weight == wmin)
                        break;
        }

        sched->state.event      = idx;          /* start with min weight */
        sched->state.weight     = wmin;
        sched->state.unassigned = num;
}

static void perf_sched_save_state(struct perf_sched *sched)
{
        if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
                return;

        sched->saved[sched->saved_states] = sched->state;
        sched->saved_states++;
}

static bool perf_sched_restore_state(struct perf_sched *sched)
{
        if (!sched->saved_states)
                return false;

        sched->saved_states--;
        sched->state = sched->saved[sched->saved_states];

        /* continue with next counter: */
        clear_bit(sched->state.counter++, sched->state.used);

        return true;
}

/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
static bool __perf_sched_find_counter(struct perf_sched *sched)
{
        struct event_constraint *c;
        int idx;

        if (!sched->state.unassigned)
                return false;

        if (sched->state.event >= sched->max_events)
                return false;

        c = sched->constraints[sched->state.event];

        /* Prefer fixed purpose counters */
        if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
                idx = INTEL_PMC_IDX_FIXED;
                for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
                        if (!__test_and_set_bit(idx, sched->state.used))
                                goto done;
                }
        }
        /* Grab the first unused counter starting with idx */
        idx = sched->state.counter;
        for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
                if (!__test_and_set_bit(idx, sched->state.used))
                        goto done;
        }

        return false;

done:
        sched->state.counter = idx;

        if (c->overlap)
                perf_sched_save_state(sched);

        return true;
}

static bool perf_sched_find_counter(struct perf_sched *sched)
{
        while (!__perf_sched_find_counter(sched)) {
                if (!perf_sched_restore_state(sched))
                        return false;
        }

        return true;
}

/*
 * Go through all unassigned events and find the next one to schedule.
 * Take events with the least weight first. Return true on success.
 */
static bool perf_sched_next_event(struct perf_sched *sched)
{
        struct event_constraint *c;

        if (!sched->state.unassigned || !--sched->state.unassigned)
                return false;

        do {
                /* next event */
                sched->state.event++;
                if (sched->state.event >= sched->max_events) {
                        /* next weight */
                        sched->state.event = 0;
                        sched->state.weight++;
                        if (sched->state.weight > sched->max_weight)
                                return false;
                }
                c = sched->constraints[sched->state.event];
        } while (c->weight != sched->state.weight);

        sched->state.counter = 0;       /* start with first counter */

        return true;
}

/*
 * Assign a counter for each event.
 */
int perf_assign_events(struct event_constraint **constraints, int n,
                        int wmin, int wmax, int *assign)
{
        struct perf_sched sched;

        perf_sched_init(&sched, constraints, n, wmin, wmax);

        do {
                if (!perf_sched_find_counter(&sched))
                        break;  /* failed */
                if (assign)
                        assign[sched.state.event] = sched.state.counter;
        } while (perf_sched_next_event(&sched));

        return sched.state.unassigned;
}
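
/*
 * Example (illustrative, not part of the original file): a stripped-down
 * model of the scheduler above -- events pre-sorted by constraint weight,
 * each greedily mapped to the first free allowed counter. The real code
 * additionally prefers fixed counters and backtracks via the save/restore
 * states for overlapping constraints:
 *
 *      #include <stdio.h>
 *      #include <stdint.h>
 *
 *      int main(void)
 *      {
 *              uint64_t idxmsk[] = { 0x1, 0x3, 0xf };  // weights 1, 2, 4
 *              int used = 0, assign[3], i, idx;
 *
 *              for (i = 0; i < 3; i++) {
 *                      assign[i] = -1;
 *                      for (idx = 0; idx < 4; idx++) {
 *                              if ((idxmsk[i] >> idx & 1) &&
 *                                  !(used >> idx & 1)) {
 *                                      used |= 1 << idx;
 *                                      assign[i] = idx;
 *                                      break;
 *                              }
 *                      }
 *                      printf("event %d -> counter %d\n", i, assign[i]);
 *              }
 *              return 0;
 *      }
 */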

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
        struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int i, wmin, wmax, num = 0;
        struct hw_perf_event *hwc;

        bitmap_zero(used_mask, X86_PMC_IDX_MAX);

        for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
                c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
                constraints[i] = c;
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
        }

        /*
         * fastpath, try to reuse previous register
         */
        for (i = 0; i < n; i++) {
                hwc = &cpuc->event_list[i]->hw;
                c = constraints[i];

                /* never assigned */
                if (hwc->idx == -1)
                        break;

                /* constraint still honored */
                if (!test_bit(hwc->idx, c->idxmsk))
                        break;

                /* not already used */
                if (test_bit(hwc->idx, used_mask))
                        break;

                __set_bit(hwc->idx, used_mask);
                if (assign)
                        assign[i] = hwc->idx;
        }

        /* slow path */
        if (i != n)
                num = perf_assign_events(constraints, n, wmin, wmax, assign);

        /*
         * scheduling failed or is just a simulation,
         * free resources if necessary
         */
        if (!assign || num) {
                for (i = 0; i < n; i++) {
                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
                }
        }
        return num ? -EINVAL : 0;
}

/*
 * dogrp: true if we must collect sibling events (group)
 * returns the total number of events, or an error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
        struct perf_event *event;
        int n, max_count;

        max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

        /* current number of events already accepted */
        n = cpuc->n_events;

        if (is_x86_event(leader)) {
                if (n >= max_count)
                        return -EINVAL;
                cpuc->event_list[n] = leader;
                n++;
        }
        if (!dogrp)
                return n;

        list_for_each_entry(event, &leader->sibling_list, group_entry) {
                if (!is_x86_event(event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;

                if (n >= max_count)
                        return -EINVAL;

                cpuc->event_list[n] = event;
                n++;
        }
        return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
                                struct cpu_hw_events *cpuc, int i)
{
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = cpuc->assign[i];
        hwc->last_cpu = smp_processor_id();
        hwc->last_tag = ++cpuc->tags[i];

        if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
                hwc->config_base = 0;
                hwc->event_base = 0;
        } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
                hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
        } else {
                hwc->config_base = x86_pmu_config_addr(hwc->idx);
                hwc->event_base  = x86_pmu_event_addr(hwc->idx);
                hwc->event_base_rdpmc = hwc->idx;
        }
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
                                        struct cpu_hw_events *cpuc,
                                        int i)
{
        return hwc->idx == cpuc->assign[i] &&
                hwc->last_cpu == smp_processor_id() &&
                hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int i, added = cpuc->n_added;

        if (!x86_pmu_initialized())
                return;

        if (cpuc->enabled)
                return;

        if (cpuc->n_added) {
                int n_running = cpuc->n_events - cpuc->n_added;
                /*
                 * apply assignment obtained either from
                 * hw_perf_group_sched_in() or x86_pmu_enable()
                 *
                 * step1: save events moving to new counters
                 * step2: reprogram moved events into new counters
                 */
                for (i = 0; i < n_running; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        /*
                         * we can avoid reprogramming counter if:
                         * - assigned same counter as last time
                         * - running on same CPU as last time
                         * - no other event has used the counter since
                         */
                        if (hwc->idx == -1 ||
                            match_prev_assignment(hwc, cpuc, i))
                                continue;

                        /*
                         * Ensure we don't accidentally enable a stopped
                         * counter simply because we rescheduled.
                         */
                        if (hwc->state & PERF_HES_STOPPED)
                                hwc->state |= PERF_HES_ARCH;

                        x86_pmu_stop(event, PERF_EF_UPDATE);
                }

                for (i = 0; i < cpuc->n_events; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
                        else if (i < n_running)
                                continue;

                        if (hwc->state & PERF_HES_ARCH)
                                continue;

                        x86_pmu_start(event, PERF_EF_RELOAD);
                }
                cpuc->n_added = 0;
                perf_events_lapic_init();
        }

        cpuc->enabled = 1;
        barrier();

        x86_pmu.enable_all(added);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
int x86_perf_event_set_period(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0, idx = hwc->idx;

        if (idx == INTEL_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        /*
         * Quirk: certain CPUs don't like it if just 1 hw_event is left:
         */
        if (unlikely(left < 2))
                left = 2;

        if (left > x86_pmu.max_period)
                left = x86_pmu.max_period;

        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw event starts counting from this event offset,
         * mark it to be able to extract future deltas:
         */
        local64_set(&hwc->prev_count, (u64)-left);

        wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

        /*
         * Due to an erratum on certain CPUs we need
         * a second write to be sure the register
         * is updated properly.
         */
        if (x86_pmu.perfctr_second_write) {
                wrmsrl(hwc->event_base,
                        (u64)(-left) & x86_pmu.cntval_mask);
        }

        perf_event_update_userpage(event);

        return ret;
}
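
/*
 * Example (illustrative, not part of the original file): the counter is
 * programmed to -left so it overflows after exactly 'left' increments.
 * A quick check of the arithmetic, assuming a 48-bit wide counter:
 *
 *      #include <stdio.h>
 *      #include <stdint.h>
 *
 *      int main(void)
 *      {
 *              uint64_t cntval_mask = (1ULL << 48) - 1;
 *              int64_t left = 100000;
 *              uint64_t programmed = (uint64_t)(-left) & cntval_mask;
 *
 *              printf("programmed = %#llx\n",
 *                     (unsigned long long)programmed);
 *              printf("increments to overflow: %llu\n",
 *                     (unsigned long long)(cntval_mask + 1 - programmed));
 *              return 0;
 *      }
 */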

void x86_pmu_enable_event(struct perf_event *event)
{
        if (__this_cpu_read(cpu_hw_events.enabled))
                __x86_pmu_enable_event(&event->hw,
                                       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc;
        int assign[X86_PMC_IDX_MAX];
        int n, n0, ret;

        hwc = &event->hw;

        perf_pmu_disable(event->pmu);
        n0 = cpuc->n_events;
        ret = n = collect_events(cpuc, event, false);
        if (ret < 0)
                goto out;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        /*
         * If a group events scheduling transaction was started,
         * skip the schedulability test here, it will be performed
         * at commit time (->commit_txn) as a whole.
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
                goto done_collect;

        ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
                goto out;
        /*
         * copy the new assignment, now that we know it is possible;
         * it will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
        cpuc->n_events = n;
        cpuc->n_added += n - n0;
        cpuc->n_txn += n - n0;

        ret = 0;
out:
        perf_pmu_enable(event->pmu);
        return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = event->hw.idx;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        if (WARN_ON_ONCE(idx == -1))
                return;

        if (flags & PERF_EF_RELOAD) {
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
                x86_perf_event_set_period(event);
        }

        event->hw.state = 0;

        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
        __set_bit(idx, cpuc->running);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);
}

void perf_event_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        u64 pebs;
        struct cpu_hw_events *cpuc;
        unsigned long flags;
        int cpu, idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_events, cpu);

        if (x86_pmu.version >= 2) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
                rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

                pr_info("\n");
                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
                pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
        }
        pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
                rdmsrl(x86_pmu_event_addr(idx), pmc_count);

                prev_left = per_cpu(pmc_prev_left[idx], cpu);

                pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_restore(flags);
}

void x86_pmu_stop(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
                x86_pmu.disable(event);
                cpuc->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                x86_perf_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;

        /*
         * If we're called during a txn, we don't need to do anything.
         * The events never got scheduled and ->cancel_txn will truncate
         * the event_list.
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
                return;

        x86_pmu_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event_list[i]) {

                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, event);

                        while (++i < cpuc->n_events)
                                cpuc->event_list[i-1] = cpuc->event_list[i];

                        --cpuc->n_events;
                        break;
                }
        }
        perf_event_update_userpage(event);
}

int x86_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct perf_event *event;
        int idx, handled = 0;
        u64 val;

        cpuc = &__get_cpu_var(cpu_hw_events);

        /*
         * Some chipsets need to unmask the LVTPC in a particular spot
         * inside the nmi handler.  As a result, the unmasking was pushed
         * into all the nmi handlers.
         *
         * This generic handler doesn't seem to have any issues where the
         * unmasking occurs so it was left at the top.
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask)) {
                        /*
                         * Though we deactivated the counter, some CPUs
                         * might still deliver spurious interrupts that
                         * are in flight. Catch them:
                         */
                        if (__test_and_clear_bit(idx, cpuc->running))
                                handled++;
                        continue;
                }

                event = cpuc->events[idx];

                val = x86_perf_event_update(event);
                if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
                        continue;

                /*
                 * event overflow
                 */
                handled++;
                perf_sample_data_init(&data, 0, event->hw.last_period);

                if (!x86_perf_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
        }

        if (handled)
                inc_irq_stat(apic_perf_irqs);

        return handled;
}
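
/*
 * Example (illustrative, not part of the original file): the sign-bit
 * test above separates "still counting up from -left" from "wrapped past
 * zero, i.e. overflowed". With 48-bit counters:
 *
 *      #include <stdio.h>
 *      #include <stdint.h>
 *
 *      int main(void)
 *      {
 *              int bits = 48;
 *              uint64_t counting = 0xfffffffe7960ULL; // top bit set
 *              uint64_t wrapped  = 0x42;              // top bit clear
 *
 *              printf("%d %d\n",
 *                     !!(counting & 1ULL << (bits - 1)),  // 1: skip
 *                     !!(wrapped  & 1ULL << (bits - 1))); // 0: overflow
 *              return 0;
 *      }
 */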

void perf_events_lapic_init(void)
{
        if (!x86_pmu.apic || !x86_pmu_initialized())
                return;

        /*
         * Always use NMI for PMU
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int __kprobes
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        if (!atomic_read(&active_events))
                return NMI_DONE;

        return x86_pmu.handle_irq(regs);
}

struct event_constraint emptyconstraint;
struct event_constraint unconstrained;

static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        int ret = NOTIFY_OK;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                cpuc->kfree_on_online = NULL;
                if (x86_pmu.cpu_prepare)
                        ret = x86_pmu.cpu_prepare(cpu);
                break;

        case CPU_STARTING:
                if (x86_pmu.attr_rdpmc)
                        set_in_cr4(X86_CR4_PCE);
                if (x86_pmu.cpu_starting)
                        x86_pmu.cpu_starting(cpu);
                break;

        case CPU_ONLINE:
                kfree(cpuc->kfree_on_online);
                break;

        case CPU_DYING:
                if (x86_pmu.cpu_dying)
                        x86_pmu.cpu_dying(cpu);
                break;

        case CPU_UP_CANCELED:
        case CPU_DEAD:
                if (x86_pmu.cpu_dead)
                        x86_pmu.cpu_dead(cpu);
                break;

        default:
                break;
        }

        return ret;
}

static void __init pmu_check_apic(void)
{
        if (cpu_has_apic)
                return;

        x86_pmu.apic = 0;
        pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
        pr_info("no hardware sampling interrupt available.\n");
}

static struct attribute_group x86_pmu_format_group = {
        .name = "format",
        .attrs = NULL,
};

struct perf_pmu_events_attr {
        struct device_attribute attr;
        u64 id;
};

/*
 * Remove all undefined events (x86_pmu.event_map(id) == 0)
 * from the events_attr attributes.
 */
static void __init filter_events(struct attribute **attrs)
{
        int i, j;

        for (i = 0; attrs[i]; i++) {
                if (x86_pmu.event_map(i))
                        continue;

                for (j = i; attrs[j]; j++)
                        attrs[j] = attrs[j + 1];

                /* Check the shifted attr. */
                i--;
        }
}

static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page)
{
        struct perf_pmu_events_attr *pmu_attr = \
                container_of(attr, struct perf_pmu_events_attr, attr);

        u64 config = x86_pmu.event_map(pmu_attr->id);
        return x86_pmu.events_sysfs_show(page, config);
}

#define EVENT_VAR(_id)  event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr

#define EVENT_ATTR(_name, _id)                                  \
static struct perf_pmu_events_attr EVENT_VAR(_id) = {           \
        .attr = __ATTR(_name, 0444, events_sysfs_show, NULL),   \
        .id   =  PERF_COUNT_HW_##_id,                           \
};

EVENT_ATTR(cpu-cycles,                  CPU_CYCLES              );
EVENT_ATTR(instructions,                INSTRUCTIONS            );
EVENT_ATTR(cache-references,            CACHE_REFERENCES        );
EVENT_ATTR(cache-misses,                CACHE_MISSES            );
EVENT_ATTR(branch-instructions,         BRANCH_INSTRUCTIONS     );
EVENT_ATTR(branch-misses,               BRANCH_MISSES           );
EVENT_ATTR(bus-cycles,                  BUS_CYCLES              );
EVENT_ATTR(stalled-cycles-frontend,     STALLED_CYCLES_FRONTEND );
EVENT_ATTR(stalled-cycles-backend,      STALLED_CYCLES_BACKEND  );
EVENT_ATTR(ref-cycles,                  REF_CPU_CYCLES          );

static struct attribute *empty_attrs;

static struct attribute *events_attr[] = {
        EVENT_PTR(CPU_CYCLES),
        EVENT_PTR(INSTRUCTIONS),
        EVENT_PTR(CACHE_REFERENCES),
        EVENT_PTR(CACHE_MISSES),
        EVENT_PTR(BRANCH_INSTRUCTIONS),
        EVENT_PTR(BRANCH_MISSES),
        EVENT_PTR(BUS_CYCLES),
        EVENT_PTR(STALLED_CYCLES_FRONTEND),
        EVENT_PTR(STALLED_CYCLES_BACKEND),
        EVENT_PTR(REF_CPU_CYCLES),
        NULL,
};

static struct attribute_group x86_pmu_events_group = {
        .name = "events",
        .attrs = events_attr,
};

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
{
        u64 umask  = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
        u64 cmask  = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
        bool edge  = (config & ARCH_PERFMON_EVENTSEL_EDGE);
        bool pc    = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
        bool any   = (config & ARCH_PERFMON_EVENTSEL_ANY);
        bool inv   = (config & ARCH_PERFMON_EVENTSEL_INV);
        ssize_t ret;

        /*
         * We have a whole page to spend and just a little data
         * to write, so we can safely use sprintf.
         */
        ret = sprintf(page, "event=0x%02llx", event);

        if (umask)
                ret += sprintf(page + ret, ",umask=0x%02llx", umask);

        if (edge)
                ret += sprintf(page + ret, ",edge");

        if (pc)
                ret += sprintf(page + ret, ",pc");

        if (any)
                ret += sprintf(page + ret, ",any");

        if (inv)
                ret += sprintf(page + ret, ",inv");

        if (cmask)
                ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);

        ret += sprintf(page + ret, "\n");

        return ret;
}
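
/*
 * Example (illustrative, not part of the original file): the strings
 * built above surface as sysfs event aliases; a sketch that prints one
 * (the path may vary between kernel versions):
 *
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              char buf[128];
 *              FILE *f = fopen("/sys/bus/event_source/devices/cpu"
 *                              "/events/branch-misses", "r");
 *
 *              if (f && fgets(buf, sizeof(buf), f))
 *                      printf("%s", buf);      // e.g. "event=0xc5"
 *              if (f)
 *                      fclose(f);
 *              return 0;
 *      }
 */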

static int __init init_hw_perf_events(void)
{
        struct x86_pmu_quirk *quirk;
        int err;

        pr_info("Performance Events: ");

        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
                err = intel_pmu_init();
                break;
        case X86_VENDOR_AMD:
                err = amd_pmu_init();
                break;
        default:
                return 0;
        }
        if (err != 0) {
                pr_cont("no PMU driver, software events only.\n");
                return 0;
        }

        pmu_check_apic();

        /* sanity check that the hardware exists or is emulated */
        if (!check_hw_exists())
                return 0;

        pr_cont("%s PMU driver.\n", x86_pmu.name);

        for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
                quirk->func();

        if (!x86_pmu.intel_ctrl)
                x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

        perf_events_lapic_init();
        register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");

        unconstrained = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
                                   0, x86_pmu.num_counters, 0);

        x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
        x86_pmu_format_group.attrs = x86_pmu.format_attrs;

        if (!x86_pmu.events_sysfs_show)
                x86_pmu_events_group.attrs = &empty_attrs;
        else
                filter_events(x86_pmu_events_group.attrs);

        pr_info("... version:                %d\n",     x86_pmu.version);
        pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
        pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
        pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
        pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
        pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
        pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);

        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
        perf_cpu_notifier(x86_pmu_notifier);

        return 0;
}
early_initcall(init_hw_perf_events);

static inline void x86_pmu_read(struct perf_event *event)
{
        x86_perf_event_update(event);
}

/*
 * Start a group events scheduling transaction.
 * Set the flag to make pmu::enable() not perform the
 * schedulability test; it will be performed at commit time.
 */
static void x86_pmu_start_txn(struct pmu *pmu)
{
        perf_pmu_disable(pmu);
        __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
        __this_cpu_write(cpu_hw_events.n_txn, 0);
}

/*
 * Stop a group events scheduling transaction.
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void x86_pmu_cancel_txn(struct pmu *pmu)
{
        __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
        /*
         * Truncate the collected events.
         */
        __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
        __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
        perf_pmu_enable(pmu);
}

/*
 * Commit a group events scheduling transaction.
 * Perform the group schedulability test as a whole.
 * Return 0 on success.
 */
static int x86_pmu_commit_txn(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int assign[X86_PMC_IDX_MAX];
        int n, ret;

        n = cpuc->n_events;

        if (!x86_pmu_initialized())
                return -EAGAIN;

        ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
                return ret;

        /*
         * copy the new assignment, now that we know it is possible;
         * it will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));

        cpuc->group_flag &= ~PERF_EVENT_TXN;
        perf_pmu_enable(pmu);
        return 0;
}
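
/*
 * Example (illustrative, not part of the original file): the transaction
 * above backs user-space event groups; a sketch of a two-event group
 * that ->commit_txn() accepts or rejects as a whole:
 *
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <linux/perf_event.h>
 *
 *      static int open_one(__u64 config, int group_fd)
 *      {
 *              struct perf_event_attr attr;
 *
 *              memset(&attr, 0, sizeof(attr));
 *              attr.size   = sizeof(attr);
 *              attr.type   = PERF_TYPE_HARDWARE;
 *              attr.config = config;
 *              return syscall(__NR_perf_event_open, &attr, 0, -1,
 *                             group_fd, 0);
 *      }
 *
 *      int main(void)
 *      {
 *              int leader = open_one(PERF_COUNT_HW_CPU_CYCLES, -1);
 *              int member = open_one(PERF_COUNT_HW_INSTRUCTIONS, leader);
 *
 *              return (leader < 0 || member < 0) ? 1 : 0;
 *      }
 */
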
1557 /*
1558  * a fake_cpuc is used to validate event groups. Due to
1559  * the extra reg logic, we need to also allocate a fake
1560  * per_core and per_cpu structure. Otherwise, group events
1561  * using extra reg may conflict without the kernel being
1562  * able to catch this when the last event gets added to
1563  * the group.
1564  */
1565 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1566 {
1567         kfree(cpuc->shared_regs);
1568         kfree(cpuc);
1569 }
1570
1571 static struct cpu_hw_events *allocate_fake_cpuc(void)
1572 {
1573         struct cpu_hw_events *cpuc;
1574         int cpu = raw_smp_processor_id();
1575
1576         cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1577         if (!cpuc)
1578                 return ERR_PTR(-ENOMEM);
1579
1580         /* only needed, if we have extra_regs */
1581         if (x86_pmu.extra_regs) {
1582                 cpuc->shared_regs = allocate_shared_regs(cpu);
1583                 if (!cpuc->shared_regs)
1584                         goto error;
1585         }
1586         cpuc->is_fake = 1;
1587         return cpuc;
1588 error:
1589         free_fake_cpuc(cpuc);
1590         return ERR_PTR(-ENOMEM);
1591 }
1592
1593 /*
1594  * validate that we can schedule this event
1595  */
1596 static int validate_event(struct perf_event *event)
1597 {
1598         struct cpu_hw_events *fake_cpuc;
1599         struct event_constraint *c;
1600         int ret = 0;
1601
1602         fake_cpuc = allocate_fake_cpuc();
1603         if (IS_ERR(fake_cpuc))
1604                 return PTR_ERR(fake_cpuc);
1605
1606         c = x86_pmu.get_event_constraints(fake_cpuc, event);
1607
1608         if (!c || !c->weight)
1609                 ret = -EINVAL;
1610
1611         if (x86_pmu.put_event_constraints)
1612                 x86_pmu.put_event_constraints(fake_cpuc, event);
1613
1614         free_fake_cpuc(fake_cpuc);
1615
1616         return ret;
1617 }
1618
1619 /*
1620  * validate a single event group
1621  *
1622  * validation include:
1623  *      - check events are compatible which each other
1624  *      - events do not compete for the same counter
1625  *      - number of events <= number of counters
1626  *
1627  * validation ensures the group can be loaded onto the
1628  * PMU if it was the only group available.
1629  */
1630 static int validate_group(struct perf_event *event)
1631 {
1632         struct perf_event *leader = event->group_leader;
1633         struct cpu_hw_events *fake_cpuc;
1634         int ret = -EINVAL, n;
1635
1636         fake_cpuc = allocate_fake_cpuc();
1637         if (IS_ERR(fake_cpuc))
1638                 return PTR_ERR(fake_cpuc);
1639         /*
1640          * The event is not yet connected with its
1641          * siblings, therefore we must first collect
1642          * existing siblings, then add the new event
1643          * before we can simulate the scheduling.
1644          */
1645         n = collect_events(fake_cpuc, leader, true);
1646         if (n < 0)
1647                 goto out;
1648
1649         fake_cpuc->n_events = n;
1650         n = collect_events(fake_cpuc, event, false);
1651         if (n < 0)
1652                 goto out;
1653
1654         fake_cpuc->n_events = n;
1655
1656         ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
1657
1658 out:
1659         free_fake_cpuc(fake_cpuc);
1660         return ret;
1661 }
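
/*
 * Hypothetical userspace sketch (not part of this file): the checks above
 * are what make the second perf_event_open() below fail when the two
 * events can never be co-scheduled on this PMU:
 *
 *	int leader  = syscall(__NR_perf_event_open, &attr1, 0, -1, -1, 0);
 *	int sibling = syscall(__NR_perf_event_open, &attr2, 0, -1, leader, 0);
 *	(sibling < 0 with errno == EINVAL if the group cannot be scheduled)
 */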
1662
1663 static int x86_pmu_event_init(struct perf_event *event)
1664 {
1665         struct pmu *tmp;
1666         int err;
1667
1668         switch (event->attr.type) {
1669         case PERF_TYPE_RAW:
1670         case PERF_TYPE_HARDWARE:
1671         case PERF_TYPE_HW_CACHE:
1672                 break;
1673
1674         default:
1675                 return -ENOENT;
1676         }
1677
1678         err = __x86_pmu_event_init(event);
1679         if (!err) {
1680                 /*
1681                  * We temporarily connect the event to its pmu
1682                  * such that validate_group() can classify
1683                  * it as an x86 event using is_x86_event().
1684                  */
1685                 tmp = event->pmu;
1686                 event->pmu = &pmu;
1687
1688                 if (event->group_leader != event)
1689                         err = validate_group(event);
1690                 else
1691                         err = validate_event(event);
1692
1693                 event->pmu = tmp;
1694         }
1695         if (err) {
1696                 if (event->destroy)
1697                         event->destroy(event);
1698         }
1699
1700         return err;
1701 }
1702
1703 static int x86_pmu_event_idx(struct perf_event *event)
1704 {
1705         int idx = event->hw.idx;
1706
1707         if (!x86_pmu.attr_rdpmc)
1708                 return 0;
1709
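        /*
         * Fixed-function counters are selected by setting bit 30 of the
         * rdpmc ECX operand; mirror that encoding in the reported index.
         * The +1 below lets 0 mean "rdpmc not permitted" to userspace.
         */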
1710         if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
1711                 idx -= INTEL_PMC_IDX_FIXED;
1712                 idx |= 1 << 30;
1713         }
1714
1715         return idx + 1;
1716 }
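
/*
 * Hypothetical userspace sketch (not part of this file): the index
 * published via userpg->index is biased by one so that zero means
 * "no rdpmc"; userspace undoes the bias before issuing rdpmc:
 *
 *	u32 lo, hi;
 *	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (userpg->index - 1));
 *	u64 count = (u64)lo | ((u64)hi << 32);
 */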
1717
1718 static ssize_t get_attr_rdpmc(struct device *cdev,
1719                               struct device_attribute *attr,
1720                               char *buf)
1721 {
1722         return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
1723 }
1724
1725 static void change_rdpmc(void *info)
1726 {
1727         bool enable = !!(unsigned long)info;
1728
1729         if (enable)
1730                 set_in_cr4(X86_CR4_PCE);
1731         else
1732                 clear_in_cr4(X86_CR4_PCE);
1733 }
1734
1735 static ssize_t set_attr_rdpmc(struct device *cdev,
1736                               struct device_attribute *attr,
1737                               const char *buf, size_t count)
1738 {
1739         unsigned long val;
1740         ssize_t ret;
1741
1742         ret = kstrtoul(buf, 0, &val);
1743         if (ret)
1744                 return ret;
1745
1746         if (!!val != !!x86_pmu.attr_rdpmc) {
1747                 x86_pmu.attr_rdpmc = !!val;
1748                 on_each_cpu(change_rdpmc, (void *)val, 1);      /* must include the local CPU too */
1749         }
1750
1751         return count;
1752 }
1753
1754 static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
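
/*
 * Hedged usage example (sysfs path as wired up via x86_pmu_attr_groups):
 * the attribute above appears as /sys/bus/event_source/devices/cpu/rdpmc.
 * Writing 0 clears CR4.PCE on every CPU, after which a userspace rdpmc
 * instruction raises #GP:
 *
 *	# echo 0 > /sys/bus/event_source/devices/cpu/rdpmc
 */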
1755
1756 static struct attribute *x86_pmu_attrs[] = {
1757         &dev_attr_rdpmc.attr,
1758         NULL,
1759 };
1760
1761 static struct attribute_group x86_pmu_attr_group = {
1762         .attrs = x86_pmu_attrs,
1763 };
1764
1765 static const struct attribute_group *x86_pmu_attr_groups[] = {
1766         &x86_pmu_attr_group,
1767         &x86_pmu_format_group,
1768         &x86_pmu_events_group,
1769         NULL,
1770 };
1771
1772 static void x86_pmu_flush_branch_stack(void)
1773 {
1774         if (x86_pmu.flush_branch_stack)
1775                 x86_pmu.flush_branch_stack();
1776 }
1777
1778 void perf_check_microcode(void)
1779 {
1780         if (x86_pmu.check_microcode)
1781                 x86_pmu.check_microcode();
1782 }
1783 EXPORT_SYMBOL_GPL(perf_check_microcode);
1784
1785 static struct pmu pmu = {
1786         .pmu_enable             = x86_pmu_enable,
1787         .pmu_disable            = x86_pmu_disable,
1788
1789         .attr_groups            = x86_pmu_attr_groups,
1790
1791         .event_init             = x86_pmu_event_init,
1792
1793         .add                    = x86_pmu_add,
1794         .del                    = x86_pmu_del,
1795         .start                  = x86_pmu_start,
1796         .stop                   = x86_pmu_stop,
1797         .read                   = x86_pmu_read,
1798
1799         .start_txn              = x86_pmu_start_txn,
1800         .cancel_txn             = x86_pmu_cancel_txn,
1801         .commit_txn             = x86_pmu_commit_txn,
1802
1803         .event_idx              = x86_pmu_event_idx,
1804         .flush_branch_stack     = x86_pmu_flush_branch_stack,
1805 };
1806
1807 void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1808 {
1809         userpg->cap_usr_time = 0;
1810         userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
1811         userpg->pmc_width = x86_pmu.cntval_bits;
1812
1813         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
1814                 return;
1815
1816         if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
1817                 return;
1818
1819         userpg->cap_usr_time = 1;
1820         userpg->time_mult = this_cpu_read(cyc2ns);
1821         userpg->time_shift = CYC2NS_SCALE_FACTOR;
1822         userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
1823 }
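
/*
 * Hypothetical userspace sketch (per the perf_event_mmap_page
 * documentation): with cap_usr_time set, a raw TSC value "cyc" scales to
 * nanoseconds; the multiply is split so the low part cannot overflow:
 *
 *	u64 quot = cyc >> userpg->time_shift;
 *	u64 rem  = cyc & ((1ULL << userpg->time_shift) - 1);
 *	u64 time = userpg->time_offset + quot * userpg->time_mult +
 *		   ((rem * userpg->time_mult) >> userpg->time_shift);
 */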
1824
1825 /*
1826  * callchain support
1827  */
1828
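/*
 * dump_trace() callbacks: ->stack() is invoked at stack boundaries and a
 * negative return would abort the walk, so returning 0 walks everything;
 * ->address() records one entry per frame.
 */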
1829 static int backtrace_stack(void *data, char *name)
1830 {
1831         return 0;
1832 }
1833
1834 static void backtrace_address(void *data, unsigned long addr, int reliable)
1835 {
1836         struct perf_callchain_entry *entry = data;
1837
1838         perf_callchain_store(entry, addr);
1839 }
1840
1841 static const struct stacktrace_ops backtrace_ops = {
1842         .stack                  = backtrace_stack,
1843         .address                = backtrace_address,
1844         .walk_stack             = print_context_stack_bp,
1845 };
1846
1847 void
1848 perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
1849 {
1850         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1851                 /* TODO: We don't support guest OS callchains yet */
1852                 return;
1853         }
1854
1855         perf_callchain_store(entry, regs->ip);
1856
1857         dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
1858 }
1859
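/*
 * A frame must lie entirely below TASK_SIZE to be worth dereferencing;
 * __range_not_ok() returns 0 when [fp, fp+size) is a valid user range.
 */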
1860 static inline int
1861 valid_user_frame(const void __user *fp, unsigned long size)
1862 {
1863         return (__range_not_ok(fp, size, TASK_SIZE) == 0);
1864 }
1865
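/*
 * Resolve a selector to the base address of its segment. The selector's
 * TI bit picks LDT vs. GDT; get_desc_base() then reassembles the 32-bit
 * base from the descriptor's split base fields.
 */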
1866 static unsigned long get_segment_base(unsigned int segment)
1867 {
1868         struct desc_struct *desc;
1869         int idx = segment >> 3;
1870
1871         if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
1872                 if (idx >= LDT_ENTRIES)        /* valid indices: 0 .. LDT_ENTRIES-1 */
1873                         return 0;
1874
1875                 if (idx >= current->active_mm->context.size)
1876                         return 0;
1877
1878                 desc = current->active_mm->context.ldt;
1879         } else {
1880                 if (idx >= GDT_ENTRIES)
1881                         return 0;
1882
1883                 desc = __this_cpu_ptr(&gdt_page.gdt[0]);
1884         }
1885
1886         return get_desc_base(desc + idx);
1887 }
1888
1889 #ifdef CONFIG_COMPAT
1890
1891 #include <asm/compat.h>
1892
1893 static inline int
1894 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1895 {
1896         /* 32-bit process in 64-bit kernel. */
1897         unsigned long ss_base, cs_base;
1898         struct stack_frame_ia32 frame;
1899         const void __user *fp;
1900
1901         if (!test_thread_flag(TIF_IA32))
1902                 return 0;
1903
1904         cs_base = get_segment_base(regs->cs);
1905         ss_base = get_segment_base(regs->ss);
1906
1907         fp = compat_ptr(ss_base + regs->bp);
1908         while (entry->nr < PERF_MAX_STACK_DEPTH) {
1909                 unsigned long bytes;
1910                 frame.next_frame     = 0;
1911                 frame.return_address = 0;
1912
1913                 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1914                 if (bytes != sizeof(frame))
1915                         break;
1916
1917                 if (!valid_user_frame(fp, sizeof(frame)))
1918                         break;
1919
1920                 perf_callchain_store(entry, cs_base + frame.return_address);
1921                 fp = compat_ptr(ss_base + frame.next_frame);
1922         }
1923         return 1;
1924 }
1925 #else
1926 static inline int
1927 perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1928 {
1929         return 0;
1930 }
1931 #endif
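
/*
 * The 32-bit walk above follows the classic frame-pointer chain; each
 * i386 frame begins with the saved frame pointer and the return address
 * (struct stack_frame_ia32 from <asm/stacktrace.h>):
 *
 *	struct stack_frame_ia32 {
 *		u32 next_frame;		(saved caller %ebp)
 *		u32 return_address;	(saved %eip)
 *	};
 */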
1932
1933 void
1934 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1935 {
1936         struct stack_frame frame;
1937         const void __user *fp;
1938
1939         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1940                 /* TODO: We don't support guest OS callchains yet */
1941                 return;
1942         }
1943
1944         /*
1945          * We don't know what to do with VM86 stacks... ignore them for now.
1946          */
1947         if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
1948                 return;
1949
1950         fp = (void __user *)regs->bp;
1951
1952         perf_callchain_store(entry, regs->ip);
1953
1954         if (!current->mm)
1955                 return;
1956
1957         if (perf_callchain_user32(regs, entry))
1958                 return;
1959
1960         while (entry->nr < PERF_MAX_STACK_DEPTH) {
1961                 unsigned long bytes;
1962                 frame.next_frame     = NULL;
1963                 frame.return_address = 0;
1964
1965                 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1966                 if (bytes != sizeof(frame))
1967                         break;
1968
1969                 if (!valid_user_frame(fp, sizeof(frame)))
1970                         break;
1971
1972                 perf_callchain_store(entry, frame.return_address);
1973                 fp = frame.next_frame;
1974         }
1975 }
1976
1977 /*
1978  * Deal with code segment offsets for the various execution modes:
1979  *
1980  *   VM86 - the good olde 16-bit days, where the linear address is
1981  *          20 bits and we use regs->ip + 0x10 * regs->cs.
1982  *
1983  *   IA32 - where we need to look at the GDT/LDT segment descriptor tables
1984  *          to figure out what the 32-bit base address is.
1985  *
1986  *    X32 - has TIF_X32 set, but is running in x86_64 mode
1987  *
1988  * X86_64 - CS, DS, SS and ES are all zero-based.
1989  */
1990 static unsigned long code_segment_base(struct pt_regs *regs)
1991 {
1992         /*
1993          * If we are in VM86 mode, add the segment offset to convert to a
1994          * linear address.
1995          */
1996         if (regs->flags & X86_VM_MASK)
1997                 return 0x10 * regs->cs;
1998
1999         /*
2000          * For IA32 we look at the GDT/LDT segment base to convert the
2001          * effective IP to a linear address.
2002          */
2003 #ifdef CONFIG_X86_32
2004         if (user_mode(regs) && regs->cs != __USER_CS)
2005                 return get_segment_base(regs->cs);
2006 #else
2007         if (test_thread_flag(TIF_IA32)) {
2008                 if (user_mode(regs) && regs->cs != __USER32_CS)
2009                         return get_segment_base(regs->cs);
2010         }
2011 #endif
2012         return 0;
2013 }
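
/*
 * Worked example for the VM86 branch: with regs->cs == 0x2000 and
 * regs->ip == 0x0100, the linear address is
 * 0x10 * 0x2000 + 0x0100 = 0x20100.
 */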
2014
2015 unsigned long perf_instruction_pointer(struct pt_regs *regs)
2016 {
2017         if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
2018                 return perf_guest_cbs->get_guest_ip();
2019
2020         return regs->ip + code_segment_base(regs);
2021 }
2022
2023 unsigned long perf_misc_flags(struct pt_regs *regs)
2024 {
2025         int misc = 0;
2026
2027         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2028                 if (perf_guest_cbs->is_user_mode())
2029                         misc |= PERF_RECORD_MISC_GUEST_USER;
2030                 else
2031                         misc |= PERF_RECORD_MISC_GUEST_KERNEL;
2032         } else {
2033                 if (user_mode(regs))
2034                         misc |= PERF_RECORD_MISC_USER;
2035                 else
2036                         misc |= PERF_RECORD_MISC_KERNEL;
2037         }
2038
2039         if (regs->flags & PERF_EFLAGS_EXACT)
2040                 misc |= PERF_RECORD_MISC_EXACT_IP;
2041
2042         return misc;
2043 }
2044
2045 void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
2046 {
2047         cap->version            = x86_pmu.version;
2048         cap->num_counters_gp    = x86_pmu.num_counters;
2049         cap->num_counters_fixed = x86_pmu.num_counters_fixed;
2050         cap->bit_width_gp       = x86_pmu.cntval_bits;
2051         cap->bit_width_fixed    = x86_pmu.cntval_bits;
2052         cap->events_mask        = (unsigned int)x86_pmu.events_maskl;
2053         cap->events_mask_len    = x86_pmu.events_mask_len;
2054 }
2055 EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
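
/*
 * KVM is the main consumer of this export; it sizes the virtual PMU it
 * advertises to guests (CPUID leaf 0xA) from these capabilities.
 */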