1 #include <linux/module.h>
2 #include <linux/slab.h>
4 #include <linux/perf_event.h>
5 #include "perf_event.h"
/* Size of intel_uncore_pmu::name, including the terminating NUL. */
#define UNCORE_PMU_NAME_LEN 32
#define UNCORE_BOX_HASH_SIZE 8
/* hrtimer period (60 s) — presumably to sample counters before they wrap; confirm */
#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC)
/* NOTE(review): looks like a pseudo event code selecting the fixed counter — confirm */
#define UNCORE_FIXED_EVENT 0xffff
/* Up to 8 generic counters per box; the fixed counter takes the next index. */
#define UNCORE_PMC_IDX_MAX_GENERIC 8
#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1)
/* Event constraint with an all-generic-counters (0xff) counter mask. */
#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff	/* event select: bits 7:0 */
#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00	/* unit mask: bits 15:8 */
#define SNB_UNC_CTL_EDGE_DET (1 << 18)	/* edge detect */
#define SNB_UNC_CTL_EN (1 << 22)	/* counter enable */
#define SNB_UNC_CTL_INVERT (1 << 23)	/* invert flag */
#define SNB_UNC_CTL_CMASK_MASK 0x1f000000	/* counter mask: bits 28:24 */
#define NHM_UNC_CTL_CMASK_MASK 0xff000000	/* NHM counter mask is wider: bits 31:24 */
#define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0)	/* fixed counter enable */

/* All config bits a raw SNB uncore event may set (EN is managed by the driver). */
#define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
				SNB_UNC_CTL_UMASK_MASK | \
				SNB_UNC_CTL_EDGE_DET | \
				SNB_UNC_CTL_INVERT | \
				SNB_UNC_CTL_CMASK_MASK)

/* Same as above for NHM; only the cmask field differs. */
#define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
				SNB_UNC_CTL_UMASK_MASK | \
				SNB_UNC_CTL_EDGE_DET | \
				SNB_UNC_CTL_INVERT | \
				NHM_UNC_CTL_CMASK_MASK)
/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL 0x391
#define SNB_UNC_FIXED_CTR_CTRL 0x394
#define SNB_UNC_FIXED_CTR 0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1)	/* enable bits for 4 cores */
#define SNB_UNC_GLOBAL_CTL_EN (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0 0x700
#define SNB_UNC_CBO_0_PER_CTR0 0x706
#define SNB_UNC_CBO_MSR_OFFSET 0x10	/* MSR stride between successive Cboxes */

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL 0x391
/* Note: fixed counter and its control MSR numbers are swapped relative to SNB. */
#define NHM_UNC_FIXED_CTR 0x394
#define NHM_UNC_FIXED_CTR_CTRL 0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1)	/* enable 8 per-core counters */
#define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32)	/* enable the fixed counter */

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0 0x3c0
#define NHM_UNC_UNCORE_PMC0 0x3b0
/*
 * Forward declarations: these types reference each other, so the tags are
 * declared before the full definitions below.
 */
struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
74 struct intel_uncore_type {
88 struct event_constraint unconstrainted;
89 struct event_constraint *constraints;
90 struct intel_uncore_pmu *pmus;
91 struct intel_uncore_ops *ops;
92 struct uncore_event_desc *event_descs;
93 const struct attribute_group *attr_groups[3];
96 #define format_group attr_groups[0]
98 struct intel_uncore_ops {
99 void (*init_box)(struct intel_uncore_box *);
100 void (*disable_box)(struct intel_uncore_box *);
101 void (*enable_box)(struct intel_uncore_box *);
102 void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
103 void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
104 u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
107 struct intel_uncore_pmu {
109 char name[UNCORE_PMU_NAME_LEN];
112 struct intel_uncore_type *type;
113 struct intel_uncore_box ** __percpu box;
114 struct list_head box_list;
117 struct intel_uncore_box {
119 int n_active; /* number of active events */
121 int cpu; /* cpu to collect events */
124 struct perf_event *events[UNCORE_PMC_IDX_MAX];
125 struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
126 unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
127 u64 tags[UNCORE_PMC_IDX_MAX];
128 struct pci_dev *pci_dev;
129 struct intel_uncore_pmu *pmu;
130 struct hrtimer hrtimer;
131 struct list_head list;
134 #define UNCORE_BOX_FLAG_INITIATED 0
136 struct uncore_event_desc {
137 struct kobj_attribute attr;
141 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
143 .attr = __ATTR(_name, 0444, uncore_event_show, NULL), \
/*
 * Define a sysfs "format" attribute that prints a fixed format string.
 * This copy of the file lost the `char *page` parameter line and the brace
 * lines of the generated show function; both are restored (`page` is
 * grounded by its use in the sprintf call).
 */
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				struct kobj_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
159 static ssize_t uncore_event_show(struct kobject *kobj,
160 struct kobj_attribute *attr, char *buf)
162 struct uncore_event_desc *event =
163 container_of(attr, struct uncore_event_desc, attr);
164 return sprintf(buf, "%s", event->config);
167 static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
169 return box->pmu->type->box_ctl;
172 static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
174 return box->pmu->type->fixed_ctl;
177 static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
179 return box->pmu->type->fixed_ctr;
183 unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
185 return idx * 4 + box->pmu->type->event_ctl;
189 unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
191 return idx * 8 + box->pmu->type->perf_ctr;
195 unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
197 if (!box->pmu->type->box_ctl)
199 return box->pmu->type->box_ctl +
200 box->pmu->type->msr_offset * box->pmu->pmu_idx;
204 unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
206 if (!box->pmu->type->fixed_ctl)
208 return box->pmu->type->fixed_ctl +
209 box->pmu->type->msr_offset * box->pmu->pmu_idx;
213 unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
215 return box->pmu->type->fixed_ctr +
216 box->pmu->type->msr_offset * box->pmu->pmu_idx;
220 unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
222 return idx + box->pmu->type->event_ctl +
223 box->pmu->type->msr_offset * box->pmu->pmu_idx;
227 unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
229 return idx + box->pmu->type->perf_ctr +
230 box->pmu->type->msr_offset * box->pmu->pmu_idx;
234 unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
237 return uncore_pci_fixed_ctl(box);
239 return uncore_msr_fixed_ctl(box);
243 unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
246 return uncore_pci_fixed_ctr(box);
248 return uncore_msr_fixed_ctr(box);
252 unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
255 return uncore_pci_event_ctl(box, idx);
257 return uncore_msr_event_ctl(box, idx);
261 unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
264 return uncore_pci_perf_ctr(box, idx);
266 return uncore_msr_perf_ctr(box, idx);
269 static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
271 return box->pmu->type->perf_ctr_bits;
274 static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
276 return box->pmu->type->fixed_ctr_bits;
279 static inline int uncore_num_counters(struct intel_uncore_box *box)
281 return box->pmu->type->num_counters;
284 static inline void uncore_disable_box(struct intel_uncore_box *box)
286 if (box->pmu->type->ops->disable_box)
287 box->pmu->type->ops->disable_box(box);
290 static inline void uncore_enable_box(struct intel_uncore_box *box)
292 if (box->pmu->type->ops->enable_box)
293 box->pmu->type->ops->enable_box(box);
296 static inline void uncore_disable_event(struct intel_uncore_box *box,
297 struct perf_event *event)
299 box->pmu->type->ops->disable_event(box, event);
302 static inline void uncore_enable_event(struct intel_uncore_box *box,
303 struct perf_event *event)
305 box->pmu->type->ops->enable_event(box, event);
308 static inline u64 uncore_read_counter(struct intel_uncore_box *box,
309 struct perf_event *event)
311 return box->pmu->type->ops->read_counter(box, event);
314 static inline void uncore_box_init(struct intel_uncore_box *box)
316 if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
317 if (box->pmu->type->ops->init_box)
318 box->pmu->type->ops->init_box(box);