perf/x86: Add Intel Nehalem and Sandy Bridge uncore PMU support
[firefly-linux-kernel-4.4.55.git] arch/x86/kernel/cpu/perf_event_intel_uncore.h
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include "perf_event.h"

#define UNCORE_PMU_NAME_LEN             32
#define UNCORE_BOX_HASH_SIZE            8

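/*
 * Uncore counters are not sampled from an overflow interrupt here; each
 * active box is instead polled from an hrtimer (see struct intel_uncore_box
 * below) at this interval so counter wraps are not missed.
 */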
#define UNCORE_PMU_HRTIMER_INTERVAL     (60 * NSEC_PER_SEC)

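/*
 * Generic counters take indices 0..UNCORE_PMC_IDX_MAX_GENERIC-1; the fixed
 * counter, requested with the pseudo event config 0xffff, takes the next slot.
 */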
#define UNCORE_FIXED_EVENT              0xffff
#define UNCORE_PMC_IDX_MAX_GENERIC      8
#define UNCORE_PMC_IDX_FIXED            UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX              (UNCORE_PMC_IDX_FIXED + 1)

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK                 0x000000ff
#define SNB_UNC_CTL_UMASK_MASK                  0x0000ff00
#define SNB_UNC_CTL_EDGE_DET                    (1 << 18)
#define SNB_UNC_CTL_EN                          (1 << 22)
#define SNB_UNC_CTL_INVERT                      (1 << 23)
#define SNB_UNC_CTL_CMASK_MASK                  0x1f000000
#define NHM_UNC_CTL_CMASK_MASK                  0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN                (1 << 0)

#define SNB_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK                  (SNB_UNC_CTL_EV_SEL_MASK | \
                                                 SNB_UNC_CTL_UMASK_MASK | \
                                                 SNB_UNC_CTL_EDGE_DET | \
                                                 SNB_UNC_CTL_INVERT | \
                                                 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
#define SNB_UNC_FIXED_CTR_CTRL                  0x394
#define SNB_UNC_FIXED_CTR                       0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
#define SNB_UNC_CBO_0_PER_CTR0                  0x706
#define SNB_UNC_CBO_MSR_OFFSET                  0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
#define NHM_UNC_FIXED_CTR                       0x394
#define NHM_UNC_FIXED_CTR_CTRL                  0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0                     0x3c0
#define NHM_UNC_UNCORE_PMC0                     0x3b0

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;

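/*
 * Describes one type of uncore unit: its counter geometry, the MSR layout
 * shared by every box of this type, and the per-type constraint, ops and
 * sysfs attribute tables.
 */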
struct intel_uncore_type {
        const char *name;
        int num_counters;
        int num_boxes;
        int perf_ctr_bits;
        int fixed_ctr_bits;
        int single_fixed;
        unsigned perf_ctr;
        unsigned event_ctl;
        unsigned event_mask;
        unsigned fixed_ctr;
        unsigned fixed_ctl;
        unsigned box_ctl;
        unsigned msr_offset;
        struct event_constraint unconstrainted;
        struct event_constraint *constraints;
        struct intel_uncore_pmu *pmus;
        struct intel_uncore_ops *ops;
        struct uncore_event_desc *event_descs;
        const struct attribute_group *attr_groups[3];
};

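/* slot 0 of attr_groups is reserved for the "format" sysfs group */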
#define format_group attr_groups[0]

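/* per-type callbacks that touch the actual control and counter registers */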
struct intel_uncore_ops {
        void (*init_box)(struct intel_uncore_box *);
        void (*disable_box)(struct intel_uncore_box *);
        void (*enable_box)(struct intel_uncore_box *);
        void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
        void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
        u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
};

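/*
 * One perf PMU is registered per box index of a type; the percpu 'box'
 * pointer maps each CPU to the box instance of its own package.
 */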
struct intel_uncore_pmu {
        struct pmu pmu;
        char name[UNCORE_PMU_NAME_LEN];
        int pmu_idx;
        int func_id;
        struct intel_uncore_type *type;
        struct intel_uncore_box ** __percpu box;
};

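/*
 * A box is one physical instance of an uncore unit, shared by all CPUs of
 * the package identified by phys_id; 'cpu' is the one currently designated
 * to collect its events.
 */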
struct intel_uncore_box {
        int phys_id;
        int n_active;   /* number of active events */
        int n_events;
        int cpu;        /* cpu to collect events */
        unsigned long flags;
        atomic_t refcnt;
        struct perf_event *events[UNCORE_PMC_IDX_MAX];
        struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
        unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
        u64 tags[UNCORE_PMC_IDX_MAX];
        struct intel_uncore_pmu *pmu;
        struct hrtimer hrtimer;
        struct list_head list;
};

#define UNCORE_BOX_FLAG_INITIATED       0

struct uncore_event_desc {
        struct kobj_attribute attr;
        const char *config;
};

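/* defines a named event whose config string is exposed through sysfs */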
#define INTEL_UNCORE_EVENT_DESC(_name, _config)                 \
{                                                               \
        .attr   = __ATTR(_name, 0444, uncore_event_show, NULL), \
        .config = _config,                                      \
}

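/*
 * Emits a show() routine plus a kobj_attribute for one "format" sysfs file,
 * e.g. DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7").
 */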
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)                 \
static ssize_t __uncore_##_var##_show(struct kobject *kobj,             \
                                struct kobj_attribute *attr,            \
                                char *page)                             \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
static struct kobj_attribute format_attr_##_var =                       \
        __ATTR(_name, 0444, __uncore_##_var##_show, NULL)

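/* sysfs show() shared by all entries built with INTEL_UNCORE_EVENT_DESC */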
static ssize_t uncore_event_show(struct kobject *kobj,
                                struct kobj_attribute *attr, char *buf)
{
        struct uncore_event_desc *event =
                container_of(attr, struct uncore_event_desc, attr);
        return sprintf(buf, "%s", event->config);
}

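/*
 * MSR address helpers: each register sits at a per-type base address plus
 * msr_offset * pmu_idx, i.e. boxes of the same type are laid out at a
 * fixed stride in MSR space.
 */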
static inline
unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
        if (!box->pmu->type->box_ctl)
                return 0;
        return box->pmu->type->box_ctl +
                box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
        if (!box->pmu->type->fixed_ctl)
                return 0;
        return box->pmu->type->fixed_ctl +
                box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
        return box->pmu->type->fixed_ctr +
                box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
        return idx + box->pmu->type->event_ctl +
                box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
        return idx + box->pmu->type->perf_ctr +
                box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
        return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
        return box->pmu->type->fixed_ctr_bits;
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
        return box->pmu->type->num_counters;
}

static inline void uncore_disable_box(struct intel_uncore_box *box)
{
        if (box->pmu->type->ops->disable_box)
                box->pmu->type->ops->disable_box(box);
}

static inline void uncore_enable_box(struct intel_uncore_box *box)
{
        if (box->pmu->type->ops->enable_box)
                box->pmu->type->ops->enable_box(box);
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
                                struct perf_event *event)
{
        box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
                                struct perf_event *event)
{
        box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
                                struct perf_event *event)
{
        return box->pmu->type->ops->read_counter(box, event);
}

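/* run the type's init_box callback exactly once per box */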
static inline void uncore_box_init(struct intel_uncore_box *box)
{
        if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
                if (box->pmu->type->ops->init_box)
                        box->pmu->type->ops->init_box(box);
        }
}