/* arch/x86/kernel/cpu/perf_event_intel_uncore.h */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
#include "perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_BOX_HASH_SIZE		8

#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)	/* LL avoids 32-bit overflow */

#define UNCORE_FIXED_EVENT		0xff	/* event select field is only 8 bits wide */
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FIXED + 1)

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
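
/*
 * Illustrative sketch (hypothetical table, not part of this file): a
 * type-specific constraint list built with the helper above could pin
 * event 0x80 to the first two generic counters:
 *
 *	static struct event_constraint hypothetical_constraints[] = {
 *		UNCORE_EVENT_CONSTRAINT(0x80, 0x3),
 *		EVENT_CONSTRAINT_END
 *	};
 */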

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)
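
/*
 * Worked example (illustrative value): under SNB_UNC_RAW_EVENT_MASK, a
 * raw config of 0x01000135 decodes as event=0x35, umask=0x01, edge=0,
 * inv=0, cmask=1 (cmask lives in bits 24-28).
 */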

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int single_fixed;	/* only the first box has a fixed counter */
	unsigned perf_ctr;	/* address of the first counter register */
	unsigned event_ctl;	/* address of the first control register */
	unsigned event_mask;	/* valid bits in an event config */
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	unsigned msr_offset;	/* MSR stride between boxes of this type */
	struct event_constraint unconstrainted;	/* catch-all constraint */
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	const struct attribute_group *attr_groups[3];
};

#define format_group attr_groups[0]

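/*
 * Illustrative sketch (hypothetical type, assumed values): an MSR-based
 * uncore type with four 48-bit generic counters per box might be
 * described roughly as:
 *
 *	static struct intel_uncore_type hypothetical_msr_uncore = {
 *		.name		= "hypothetical",
 *		.num_counters	= 4,
 *		.num_boxes	= 1,
 *		.perf_ctr_bits	= 48,
 *		.fixed_ctr_bits	= 48,
 *		.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
 *		.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
 *		.fixed_ctr	= SNB_UNC_FIXED_CTR,
 *		.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
 *		.event_mask	= SNB_UNC_RAW_EVENT_MASK,
 *		.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
 *		.single_fixed	= 1,
 *	};
 */
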
struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
};

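/*
 * Illustrative sketch (hypothetical names): a type wires these callbacks
 * to its register-banging routines, e.g.:
 *
 *	static struct intel_uncore_ops hypothetical_msr_uncore_ops = {
 *		.init_box	= hypothetical_init_box,
 *		.disable_box	= hypothetical_disable_box,
 *		.enable_box	= hypothetical_enable_box,
 *		.disable_event	= hypothetical_disable_event,
 *		.enable_event	= hypothetical_enable_event,
 *		.read_counter	= hypothetical_msr_read_counter,
 *	};
 */
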
struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;		/* index of this PMU within its type */
	int func_id;		/* PCI function id (PCI-based uncore only) */
	struct intel_uncore_type *type;
	struct intel_uncore_box ** __percpu box;
	struct list_head box_list;
};

struct intel_uncore_box {
	int phys_id;
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];	   /* indexed by counter */
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; /* assignment order */
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;	/* NULL for MSR-based boxes */
	struct intel_uncore_pmu *pmu;
	struct hrtimer hrtimer;	/* periodic reads avoid unnoticed wraparound */
	struct list_head list;
};

#define UNCORE_BOX_FLAG_INITIATED	0

struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{									\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),		\
	.config	= _config,						\
}
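
/*
 * Example (illustrative entry): an event_descs[] table exposing a
 * clock-ticks event through sysfs could contain:
 *
 *	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
 */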

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				struct kobj_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
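
/*
 * Example (illustrative, assumed to mirror the .c side): the "event"
 * format field, covering config bits 0-7, would be defined as:
 *
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 */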

static ssize_t uncore_event_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct uncore_event_desc *event =
		container_of(attr, struct uncore_event_desc, attr);
	return sprintf(buf, "%s", event->config);
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}
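
/*
 * Worked example (illustrative): in PCI config space the 32-bit control
 * registers sit 4 bytes apart and the 64-bit counters 8 bytes apart, so
 * for idx = 2 the helpers above yield event_ctl + 8 and perf_ctr + 16.
 */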

static inline
unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl +
		box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl +
		box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr +
		box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	return idx + box->pmu->type->event_ctl +
		box->pmu->type->msr_offset * box->pmu->pmu_idx;
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx + box->pmu->type->perf_ctr +
		box->pmu->type->msr_offset * box->pmu->pmu_idx;
}
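
/*
 * Worked example (assuming the SNB values above): for Cbo 1
 * (pmu_idx = 1) and counter idx = 0, uncore_msr_perf_ctr() returns
 * 0x706 + 0x10 * 1 = 0x716. MSR-based counters are consecutive
 * registers, so idx is added without scaling.
 */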

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline void uncore_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->disable_box)
		box->pmu->type->ops->disable_box(box);
}

static inline void uncore_enable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->enable_box)
		box->pmu->type->ops->enable_box(box);
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}
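
/*
 * Illustrative sketch (hypothetical name): an MSR-based read_counter
 * callback is typically a single rdmsrl() of the counter address the
 * setup code cached in event->hw.event_base:
 *
 *	static u64 hypothetical_msr_read_counter(struct intel_uncore_box *box,
 *						 struct perf_event *event)
 *	{
 *		u64 count;
 *
 *		rdmsrl(event->hw.event_base, count);
 *		return count;
 *	}
 */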

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}