#include "perf_event_intel_uncore.h"

static struct intel_uncore_type *empty_uncore[] = { NULL, };
static struct intel_uncore_type **msr_uncores = empty_uncore;
static struct intel_uncore_type **pci_uncores = empty_uncore;
/* pci bus to socket mapping */
static int pcibus_to_physid[256] = { [0 ... 255] = -1, };

static DEFINE_RAW_SPINLOCK(uncore_box_lock);

/* mask of cpus that collect uncore events */
static cpumask_t uncore_cpu_mask;

/* constraint for the fixed counter */
static struct event_constraint constraint_fixed =
	EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
/* Sandy Bridge-EP uncore support */
static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config;

	pci_read_config_dword(pdev, box_ctl, &config);
	config |= SNBEP_PMON_BOX_CTL_FRZ;
	pci_write_config_dword(pdev, box_ctl, config);
}

static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int box_ctl = uncore_pci_box_ctl(box);
	u32 config;

	pci_read_config_dword(pdev, box_ctl, &config);
	config &= ~SNBEP_PMON_BOX_CTL_FRZ;
	pci_write_config_dword(pdev, box_ctl, config);
}
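
/*
 * The box-level freeze bit (SNBEP_PMON_BOX_CTL_FRZ) stops every counter
 * in the box at once, so disable_box/enable_box bracket counter updates
 * with a consistent snapshot instead of toggling each counter
 * individually.
 */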
static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config |
				SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;

	pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}
static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct pci_dev *pdev = box->pci_dev;
	struct hw_perf_event *hwc = &event->hw;
	u64 count;

	pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
	pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);

	return count;
}
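
/*
 * PCI config space is limited to 32-bit accesses, so the wide counter
 * is assembled from two dword reads into the low and high halves of
 * 'count'.
 */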
static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;

	pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL,
				SNBEP_PMON_BOX_CTL_INT);
}

static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config |= SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	u64 config;
	unsigned msr;

	msr = uncore_msr_box_ctl(box);
	if (msr) {
		rdmsrl(msr, config);
		config &= ~SNBEP_PMON_BOX_CTL_FRZ;
		wrmsrl(msr, config);
	}
}

static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}

static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
}

static u64 snbep_uncore_msr_read_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 count;

	rdmsrl(hwc->event_base, count);
	return count;
}

static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);

	if (msr)
		wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
static struct attribute *snbep_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static struct attribute *snbep_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	NULL,
};

static struct attribute *snbep_uncore_pcu_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_occ_sel.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh5.attr,
	&format_attr_occ_invert.attr,
	&format_attr_occ_edge.attr,
	NULL,
};
static struct uncore_event_desc snbep_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(CLOCKTICKS, "config=0xffff"),
	/* read */
	INTEL_UNCORE_EVENT_DESC(CAS_COUNT_RD, "event=0x4,umask=0x3"),
	/* write */
	INTEL_UNCORE_EVENT_DESC(CAS_COUNT_WR, "event=0x4,umask=0xc"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc snbep_uncore_qpi_events[] = {
	INTEL_UNCORE_EVENT_DESC(CLOCKTICKS, "event=0x14"),
	/* outgoing data+nondata flits */
	INTEL_UNCORE_EVENT_DESC(TxL_FLITS_ACTIVE, "event=0x0,umask=0x6"),
	/* DRS data received */
	INTEL_UNCORE_EVENT_DESC(DRS_DATA, "event=0x2,umask=0x8"),
	/* NCB data received */
	INTEL_UNCORE_EVENT_DESC(NCB_DATA, "event=0x3,umask=0x4"),
	{ /* end: all zeroes */ },
};
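
/*
 * The event_descs above become sysfs event aliases, so (assuming the
 * standard perf tool and one of the PMU instance names registered
 * further below, e.g. "uncore_imc_0") memory-controller traffic can be
 * counted with something like:
 *
 *	perf stat -a -e uncore_imc_0/CAS_COUNT_RD/ -- sleep 1
 *
 * Each CAS_COUNT_RD/WR increment corresponds to one full 64-byte cache
 * line transferred to or from DRAM.
 */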
static struct attribute_group snbep_uncore_format_group = {
	.name = "format",
	.attrs = snbep_uncore_formats_attr,
};

static struct attribute_group snbep_uncore_ubox_format_group = {
	.name = "format",
	.attrs = snbep_uncore_ubox_formats_attr,
};

static struct attribute_group snbep_uncore_pcu_format_group = {
	.name = "format",
	.attrs = snbep_uncore_pcu_formats_attr,
};

static struct intel_uncore_ops snbep_uncore_msr_ops = {
	.init_box	= snbep_uncore_msr_init_box,
	.disable_box	= snbep_uncore_msr_disable_box,
	.enable_box	= snbep_uncore_msr_enable_box,
	.disable_event	= snbep_uncore_msr_disable_event,
	.enable_event	= snbep_uncore_msr_enable_event,
	.read_counter	= snbep_uncore_msr_read_counter,
};

static struct intel_uncore_ops snbep_uncore_pci_ops = {
	.init_box	= snbep_uncore_pci_init_box,
	.disable_box	= snbep_uncore_pci_disable_box,
	.enable_box	= snbep_uncore_pci_enable_box,
	.disable_event	= snbep_uncore_pci_disable_event,
	.enable_event	= snbep_uncore_pci_enable_event,
	.read_counter	= snbep_uncore_pci_read_counter,
};
static struct event_constraint snbep_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
	UNCORE_EVENT_CONSTRAINT(0x1f, 0xe),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	EVENT_CONSTRAINT_END
};

static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
	UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
	EVENT_CONSTRAINT_END
};
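
/*
 * UNCORE_EVENT_CONSTRAINT(event, mask) restricts an event code to the
 * counters named by the bitmask: 0x3 allows counters 0-1, 0xc allows
 * counters 2-3, and 0x1 pins the event to counter 0.  For example,
 * UNCORE_EVENT_CONSTRAINT(0x1f, 0xe) above schedules event 0x1f only
 * on counters 1-3.
 */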
static struct intel_uncore_type snbep_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNBEP_U_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_U_MSR_PMON_CTL0,
	.event_mask	= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
	.fixed_ctr	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
	.fixed_ctl	= SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_ubox_format_group,
};

static struct intel_uncore_type snbep_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 44,
	.event_ctl	= SNBEP_C0_MSR_PMON_CTL0,
	.perf_ctr	= SNBEP_C0_MSR_PMON_CTR0,
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_C0_MSR_PMON_BOX_CTL,
	.msr_offset	= SNBEP_CBO_MSR_OFFSET,
	.constraints	= snbep_uncore_cbox_constraints,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_format_group,
};

static struct intel_uncore_type snbep_uncore_pcu = {
	.name		= "pcu",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.perf_ctr	= SNBEP_PCU_MSR_PMON_CTR0,
	.event_ctl	= SNBEP_PCU_MSR_PMON_CTL0,
	.event_mask	= SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
	.box_ctl	= SNBEP_PCU_MSR_PMON_BOX_CTL,
	.ops		= &snbep_uncore_msr_ops,
	.format_group	= &snbep_uncore_pcu_format_group,
};

static struct intel_uncore_type *snbep_msr_uncores[] = {
	&snbep_uncore_ubox,
	&snbep_uncore_cbox,
	&snbep_uncore_pcu,
	NULL,
};
#define SNBEP_UNCORE_PCI_COMMON_INIT()				\
	.perf_ctr	= SNBEP_PCI_PMON_CTR0,			\
	.event_ctl	= SNBEP_PCI_PMON_CTL0,			\
	.event_mask	= SNBEP_PMON_RAW_EVENT_MASK,		\
	.box_ctl	= SNBEP_PCI_PMON_BOX_CTL,		\
	.ops		= &snbep_uncore_pci_ops,		\
	.format_group	= &snbep_uncore_format_group
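
/*
 * These fields are identical for every SNB-EP PCI-based box (HA, IMC,
 * QPI, R2PCIe, R3QPI), so they are factored into the macro above; each
 * box type below only spells out its name, counter geometry and any
 * extra fixed-counter, event-description or constraint fields.
 */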
static struct intel_uncore_type snbep_uncore_ha = {
	.name		= "ha",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_imc = {
	.name		= "imc",
	.num_counters	= 4,
	.num_boxes	= 4,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
	.event_descs	= snbep_uncore_imc_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_qpi = {
	.name		= "qpi",
	.num_counters	= 4,
	.num_boxes	= 2,
	.perf_ctr_bits	= 48,
	.event_descs	= snbep_uncore_qpi_events,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r2pcie = {
	.name		= "r2pcie",
	.num_counters	= 4,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r2pcie_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type snbep_uncore_r3qpi = {
	.name		= "r3qpi",
	.num_counters	= 3,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.constraints	= snbep_uncore_r3qpi_constraints,
	SNBEP_UNCORE_PCI_COMMON_INIT(),
};

static struct intel_uncore_type *snbep_pci_uncores[] = {
	&snbep_uncore_ha,
	&snbep_uncore_imc,
	&snbep_uncore_qpi,
	&snbep_uncore_r2pcie,
	&snbep_uncore_r3qpi,
	NULL,
};
static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
	{ /* Home Agent */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
		.driver_data = (unsigned long)&snbep_uncore_ha,
	},
	{ /* MC Channel 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
		.driver_data = (unsigned long)&snbep_uncore_imc,
	},
	{ /* MC Channel 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
		.driver_data = (unsigned long)&snbep_uncore_imc,
	},
	{ /* MC Channel 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
		.driver_data = (unsigned long)&snbep_uncore_imc,
	},
	{ /* MC Channel 3 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
		.driver_data = (unsigned long)&snbep_uncore_imc,
	},
	{ /* QPI Port 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
		.driver_data = (unsigned long)&snbep_uncore_qpi,
	},
	{ /* QPI Port 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
		.driver_data = (unsigned long)&snbep_uncore_qpi,
	},
	{ /* R2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
		.driver_data = (unsigned long)&snbep_uncore_r2pcie,
	},
	{ /* R3QPI Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
		.driver_data = (unsigned long)&snbep_uncore_r3qpi,
	},
	{ /* R3QPI Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
		.driver_data = (unsigned long)&snbep_uncore_r3qpi,
	},
	{ /* end: all zeroes */ }
};
static struct pci_driver snbep_uncore_pci_driver = {
	.name		= "snbep_uncore",
	.id_table	= snbep_uncore_pci_ids,
};
/*
 * build pci bus to socket mapping
 */
static void snbep_pci2phy_map_init(void)
{
	struct pci_dev *ubox_dev = NULL;
	int i, bus, nodeid;
	u32 config;

	while (1) {
		/* find the UBOX device */
		ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
					PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
					ubox_dev);
		if (!ubox_dev)
			break;
		bus = ubox_dev->bus->number;
		/* get the Node ID of the local register */
		pci_read_config_dword(ubox_dev, 0x40, &config);
		nodeid = config;
		/* get the Node ID mapping */
		pci_read_config_dword(ubox_dev, 0x54, &config);
		/*
		 * each 3-bit field in the Node ID mapping register maps
		 * to a particular node
		 */
		for (i = 0; i < 8; i++) {
			if (nodeid == ((config >> (3 * i)) & 0x7)) {
				pcibus_to_physid[bus] = i;
				break;
			}
		}
	}
}
/* end of Sandy Bridge-EP uncore support */
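
/*
 * Worked example of the mapping loop above: if the mapping register at
 * offset 0x54 holds octal 76543210, i.e. each 3-bit field i contains
 * the value i, then a local node id of 2 (read from offset 0x40)
 * matches field i=2, and every device behind this bus is attributed to
 * physical package 2.
 */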
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	u64 count;

	rdmsrl(event->hw.event_base, count);
	return count;
}
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}
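
/*
 * SNB_UNC_PERF_GLOBAL_CTL is a single per-package control shared by
 * all client uncore boxes, which is why only the first PMU instance
 * (pmu_idx 0) writes it: one write enables counting for every cbox.
 */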
static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static struct attribute_group snb_uncore_format_group = {
	.name = "format",
	.attrs = snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= snb_uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_cbox_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.constraints	= snb_uncore_cbox_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	NULL,
};
/* end of Sandy Bridge uncore support */
/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL,
		NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(CLOCKTICKS, "config=0xffff"),
	/* full cache line writes to DRAM */
	INTEL_UNCORE_EVENT_DESC(QMC_WRITES_FULL_ANY, "event=0x2f,umask=0xf"),
	/* Quickpath Memory Controller normal priority read requests */
	INTEL_UNCORE_EVENT_DESC(QMC_NORMAL_READS_ANY, "event=0x2c,umask=0xf"),
	/* Quickpath Home Logic read requests from the IOH */
	INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_IOH_READS,
				"event=0x20,umask=0x1"),
	/* Quickpath Home Logic write requests from the IOH */
	INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_IOH_WRITES,
				"event=0x20,umask=0x2"),
	/* Quickpath Home Logic read requests from a remote socket */
	INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_REMOTE_READS,
				"event=0x20,umask=0x4"),
	/* Quickpath Home Logic write requests from a remote socket */
	INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_REMOTE_WRITES,
				"event=0x20,umask=0x8"),
	/* Quickpath Home Logic read requests from the local socket */
	INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_LOCAL_READS,
				"event=0x20,umask=0x10"),
	/* Quickpath Home Logic write requests from the local socket */
	INTEL_UNCORE_EVENT_DESC(QHL_REQUEST_LOCAL_WRITES,
				"event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= snb_uncore_msr_read_counter,
};
static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters	= 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};
/* end of Nehalem uncore support */
static void uncore_assign_hw_event(struct intel_uncore_box *box,
				struct perf_event *event, int idx)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->last_tag = ++box->tags[idx];

	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
		hwc->event_base = uncore_fixed_ctr(box);
		hwc->config_base = uncore_fixed_ctl(box);
		return;
	}

	hwc->config_base = uncore_event_ctl(box, hwc->idx);
	hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}
static void uncore_perf_event_update(struct intel_uncore_box *box,
				struct perf_event *event)
{
	u64 prev_count, new_count, delta;
	int shift;

	if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
		shift = 64 - uncore_fixed_ctr_bits(box);
	else
		shift = 64 - uncore_perf_ctr_bits(box);

	/* the hrtimer might modify the previous event value */
again:
	prev_count = local64_read(&event->hw.prev_count);
	new_count = uncore_read_counter(box, event);
	if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
		goto again;

	delta = (new_count << shift) - (prev_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
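
/*
 * Shifting both raw values up by (64 - width) bits extends the
 * hardware counter to 64 bits, so counter wrap-around is handled by
 * plain modular arithmetic.  Example with 48-bit counters (shift = 16):
 * prev = 0xffffffffffff and new = 0x5 give
 *	delta = (0x5 << 16) - (0xffffffffffff << 16) = 0x60000 (mod 2^64)
 * and delta >> 16 = 6, the true increment across the wrap.
 */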
/*
 * The overflow interrupt is unavailable for SandyBridge-EP and broken
 * for SandyBridge, so we use a hrtimer to periodically poll the
 * counters and avoid overflow.
 */
static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
{
	struct intel_uncore_box *box;
	unsigned long flags;
	int bit;

	box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
	if (!box->n_active || box->cpu != smp_processor_id())
		return HRTIMER_NORESTART;
	/*
	 * disable local interrupts to prevent uncore_pmu_event_start/stop
	 * from interrupting the update process
	 */
	local_irq_save(flags);

	for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
		uncore_perf_event_update(box, box->events[bit]);

	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
	return HRTIMER_RESTART;
}
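
/*
 * UNCORE_PMU_HRTIMER_INTERVAL comes from the companion header; assuming
 * its usual value of about a minute, polling is comfortably frequent:
 * even a 44-bit counter incrementing at ~3 GHz takes 2^44 / 3e9, about
 * 1.6 hours, to wrap, so no single poll interval can span a full wrap.
 */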
static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
{
	__hrtimer_start_range_ns(&box->hrtimer,
			ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_cancel(&box->hrtimer);
}

static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
	hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	box->hrtimer.function = uncore_pmu_hrtimer;
}
struct intel_uncore_box *uncore_alloc_box(int cpu)
{
	struct intel_uncore_box *box;

	box = kmalloc_node(sizeof(*box), GFP_KERNEL | __GFP_ZERO,
			   cpu_to_node(cpu));
	if (!box)
		return NULL;

	uncore_pmu_init_hrtimer(box);
	atomic_set(&box->refcnt, 1);
	box->cpu = -1;
	box->phys_id = -1;

	return box;
}

static struct intel_uncore_box *
uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
	struct intel_uncore_box *box;

	box = *per_cpu_ptr(pmu->box, cpu);
	if (box)
		return box;

	raw_spin_lock(&uncore_box_lock);
	list_for_each_entry(box, &pmu->box_list, list) {
		if (box->phys_id == topology_physical_package_id(cpu)) {
			atomic_inc(&box->refcnt);
			*per_cpu_ptr(pmu->box, cpu) = box;
			break;
		}
	}
	raw_spin_unlock(&uncore_box_lock);

	return *per_cpu_ptr(pmu->box, cpu);
}
static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	/*
	 * perf core schedules events on the basis of cpu; uncore events
	 * are collected by one of the cpus inside a physical package.
	 */
	return uncore_pmu_to_box(uncore_event_to_pmu(event),
				 smp_processor_id());
}

static int uncore_collect_events(struct intel_uncore_box *box,
				struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = box->pmu->type->num_counters;
	if (box->pmu->type->fixed_ctl)
		max_count++;

	if (box->n_events >= max_count)
		return -EINVAL;

	n = box->n_events;
	box->event_list[n] = leader;
	n++;
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		box->event_list[n] = event;
		n++;
	}
	return n;
}
static struct event_constraint *
uncore_event_constraint(struct intel_uncore_type *type,
			struct perf_event *event)
{
	struct event_constraint *c;

	if (event->hw.config == ~0ULL)
		return &constraint_fixed;

	if (type->constraints) {
		for_each_event_constraint(c, type->constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &type->unconstrainted;
}
static int uncore_assign_events(struct intel_uncore_box *box,
				int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
	int i, ret, wmin, wmax;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_event_constraint(box->pmu->type,
				box->event_list[i]);
		constraints[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		assign[i] = hwc->idx;
	}
	if (i == n)
		return 0;

	ret = perf_assign_events(constraints, n, wmin, wmax, assign);
	return ret ? -EINVAL : 0;
}
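
/*
 * The fastpath above keeps events on the counters they already occupy
 * whenever the constraints still allow it, which avoids the cost of
 * reprogramming; only when some previous assignment cannot be kept does
 * the generic perf_assign_events() solver run.
 */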
static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	if (box->n_active == 1) {
		uncore_enable_box(box);
		uncore_pmu_start_hrtimer(box);
	}
}

static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		if (box->n_active == 0) {
			uncore_disable_box(box);
			uncore_pmu_cancel_hrtimer(box);
		}
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx == assign[i] &&
			hwc->last_tag == box->tags[assign[i]])
			continue;
		/*
		 * Ensure we don't accidentally enable a stopped
		 * counter simply because we rescheduled.
		 */
		if (hwc->state & PERF_HES_STOPPED)
			hwc->state |= PERF_HES_ARCH;

		uncore_pmu_event_stop(event, PERF_EF_UPDATE);
	}

	/* reprogram moved events into new counters */
	for (i = 0; i < n; i++) {
		event = box->event_list[i];
		hwc = &event->hw;

		if (hwc->idx != assign[i] ||
			hwc->last_tag != box->tags[assign[i]])
			uncore_assign_hw_event(box, event, assign[i]);
		else if (i < box->n_events)
			continue;

		if (hwc->state & PERF_HES_ARCH)
			continue;

		uncore_pmu_event_start(event, 0);
	}
	box->n_events = n;

	return 0;
}

static void uncore_pmu_event_del(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	int i;

	uncore_pmu_event_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < box->n_events; i++) {
		if (event == box->event_list[i]) {
			while (++i < box->n_events)
				box->event_list[i - 1] = box->event_list[i];

			--box->n_events;
			break;
		}
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
}

static void uncore_pmu_event_read(struct perf_event *event)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	uncore_perf_event_update(box, event);
}
/*
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int uncore_validate_group(struct intel_uncore_pmu *pmu,
				struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct intel_uncore_box *fake_box;
	int assign[UNCORE_PMC_IDX_MAX];
	int ret = -EINVAL, n;

	fake_box = uncore_alloc_box(smp_processor_id());
	if (!fake_box)
		return -ENOMEM;

	fake_box->pmu = pmu;
	/*
	 * the event is not yet connected with its
	 * siblings therefore we must first collect
	 * existing siblings, then add the new event
	 * before we can simulate the scheduling
	 */
	n = uncore_collect_events(fake_box, leader, true);
	if (n < 0)
		goto out;

	fake_box->n_events = n;
	n = uncore_collect_events(fake_box, event, false);
	if (n < 0)
		goto out;

	fake_box->n_events = n;

	ret = uncore_assign_events(fake_box, assign, n);
out:
	kfree(fake_box);
	return ret;
}
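
/*
 * The fake box never touches hardware: uncore_collect_events() and
 * uncore_assign_events() only consult the type's counter geometry and
 * constraints, so group schedulability can be simulated on a throwaway
 * allocation and the box freed immediately afterwards.
 */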
int uncore_pmu_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/*
	 * The uncore PMU measures at all privilege levels all the time,
	 * so it doesn't make sense to specify any exclude bits.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
			event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;
	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;
	event->cpu = box->cpu;

	if (event->attr.config == UNCORE_FIXED_EVENT) {
		/* no fixed counter */
		if (!pmu->type->fixed_ctl)
			return -EINVAL;
		/*
		 * if there is only one fixed counter, only the first pmu
		 * can access the fixed counter
		 */
		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
			return -EINVAL;
		hwc->config = ~0ULL;
	} else {
		hwc->config = event->attr.config & pmu->type->event_mask;
	}

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;

	if (event->group_leader != event)
		ret = uncore_validate_group(pmu, event);
	else
		ret = 0;

	return ret;
}
static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
	int ret;

	pmu->pmu = (struct pmu) {
		.attr_groups	= pmu->type->attr_groups,
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= uncore_pmu_event_init,
		.add		= uncore_pmu_event_add,
		.del		= uncore_pmu_event_del,
		.start		= uncore_pmu_event_start,
		.stop		= uncore_pmu_event_stop,
		.read		= uncore_pmu_event_read,
	};

	if (pmu->type->num_boxes == 1) {
		if (strlen(pmu->type->name) > 0)
			sprintf(pmu->name, "uncore_%s", pmu->type->name);
		else
			sprintf(pmu->name, "uncore");
	} else {
		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
			pmu->pmu_idx);
	}

	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
	return ret;
}
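
/*
 * The resulting PMU names follow directly from the type: a multi-box
 * type such as the SNB-EP cbox registers as uncore_cbox_0 through
 * uncore_cbox_7, a single-box type as e.g. uncore_pcu, and the Nehalem
 * type with its empty name string as plain "uncore".
 */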
static void __init uncore_type_exit(struct intel_uncore_type *type)
{
	int i;

	for (i = 0; i < type->num_boxes; i++)
		free_percpu(type->pmus[i].box);
	kfree(type->pmus);
	type->pmus = NULL;
	kfree(type->attr_groups[1]);
	type->attr_groups[1] = NULL;
}

static void uncore_types_exit(struct intel_uncore_type **types)
{
	int i;
	for (i = 0; types[i]; i++)
		uncore_type_exit(types[i]);
}

static int __init uncore_type_init(struct intel_uncore_type *type)
{
	struct intel_uncore_pmu *pmus;
	struct attribute_group *events_group;
	struct attribute **attrs;
	int i, j;

	pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
	if (!pmus)
		return -ENOMEM;
	/* publish before any failure path so uncore_type_exit can free it */
	type->pmus = pmus;

	type->unconstrainted = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
				0, type->num_counters, 0);

	for (i = 0; i < type->num_boxes; i++) {
		pmus[i].func_id = -1;
		pmus[i].pmu_idx = i;
		pmus[i].type = type;
		INIT_LIST_HEAD(&pmus[i].box_list);
		pmus[i].box = alloc_percpu(struct intel_uncore_box *);
		if (!pmus[i].box)
			goto fail;
	}

	if (type->event_descs) {
		i = 0;
		while (type->event_descs[i].attr.attr.name)
			i++;

		events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
					sizeof(*events_group), GFP_KERNEL);
		if (!events_group)
			goto fail;

		attrs = (struct attribute **)(events_group + 1);
		events_group->name = "events";
		events_group->attrs = attrs;

		for (j = 0; j < i; j++)
			attrs[j] = &type->event_descs[j].attr.attr;

		type->attr_groups[1] = events_group;
	}

	return 0;
fail:
	uncore_type_exit(type);
	return -ENOMEM;
}

static int __init uncore_types_init(struct intel_uncore_type **types)
{
	int i, ret;

	for (i = 0; types[i]; i++) {
		ret = uncore_type_init(types[i]);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	while (--i >= 0)
		uncore_type_exit(types[i]);
	return ret;
}
static struct pci_driver *uncore_pci_driver;
static bool pcidrv_registered;
/*
 * add a pci uncore device
 */
static int __devinit uncore_pci_add(struct intel_uncore_type *type,
				    struct pci_dev *pdev)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, phys_id;

	phys_id = pcibus_to_physid[pdev->bus->number];
	if (phys_id < 0)
		return -ENODEV;

	box = uncore_alloc_box(0);
	if (!box)
		return -ENOMEM;

	/*
	 * for performance monitoring units with multiple boxes,
	 * each box has a different function id.
	 */
	for (i = 0; i < type->num_boxes; i++) {
		pmu = &type->pmus[i];
		if (pmu->func_id == pdev->devfn)
			break;
		if (pmu->func_id < 0) {
			pmu->func_id = pdev->devfn;
			break;
		}
		pmu = NULL;
	}

	if (!pmu) {
		kfree(box);
		return -EINVAL;
	}

	box->phys_id = phys_id;
	box->pci_dev = pdev;
	box->pmu = pmu;
	uncore_box_init(box);
	pci_set_drvdata(pdev, box);

	raw_spin_lock(&uncore_box_lock);
	list_add_tail(&box->list, &pmu->box_list);
	raw_spin_unlock(&uncore_box_lock);

	return 0;
}

static void __devexit uncore_pci_remove(struct pci_dev *pdev)
{
	struct intel_uncore_box *box = pci_get_drvdata(pdev);
	struct intel_uncore_pmu *pmu = box->pmu;
	int cpu, phys_id = pcibus_to_physid[pdev->bus->number];

	if (WARN_ON_ONCE(phys_id != box->phys_id))
		return;

	raw_spin_lock(&uncore_box_lock);
	list_del(&box->list);
	raw_spin_unlock(&uncore_box_lock);

	for_each_possible_cpu(cpu) {
		if (*per_cpu_ptr(pmu->box, cpu) == box) {
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			atomic_dec(&box->refcnt);
		}
	}

	WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
	kfree(box);
}

static int __devinit uncore_pci_probe(struct pci_dev *pdev,
				      const struct pci_device_id *id)
{
	struct intel_uncore_type *type;

	type = (struct intel_uncore_type *)id->driver_data;
	return uncore_pci_add(type, pdev);
}
static int __init uncore_pci_init(void)
{
	int ret;

	switch (boot_cpu_data.x86_model) {
	case 45: /* Sandy Bridge-EP */
		pci_uncores = snbep_pci_uncores;
		uncore_pci_driver = &snbep_uncore_pci_driver;
		snbep_pci2phy_map_init();
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(pci_uncores);
	if (ret)
		return ret;

	uncore_pci_driver->probe = uncore_pci_probe;
	uncore_pci_driver->remove = uncore_pci_remove;

	ret = pci_register_driver(uncore_pci_driver);
	if (ret == 0)
		pcidrv_registered = true;
	else
		uncore_types_exit(pci_uncores);

	return ret;
}

static void __init uncore_pci_exit(void)
{
	if (pcidrv_registered) {
		pcidrv_registered = false;
		pci_unregister_driver(uncore_pci_driver);
		uncore_types_exit(pci_uncores);
	}
}
static void __cpuinit uncore_cpu_dying(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			*per_cpu_ptr(pmu->box, cpu) = NULL;
			if (box && atomic_dec_and_test(&box->refcnt))
				kfree(box);
		}
	}
}
static int __cpuinit uncore_cpu_starting(int cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box, *exist;
	int i, j, k, phys_id;

	phys_id = topology_physical_package_id(cpu);

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			box = *per_cpu_ptr(pmu->box, cpu);
			/* called by uncore_cpu_init? */
			if (box && box->phys_id >= 0) {
				uncore_box_init(box);
				continue;
			}

			for_each_online_cpu(k) {
				exist = *per_cpu_ptr(pmu->box, k);
				if (exist && exist->phys_id == phys_id) {
					atomic_inc(&exist->refcnt);
					*per_cpu_ptr(pmu->box, cpu) = exist;
					kfree(box);
					box = NULL;
					break;
				}
			}

			if (box) {
				box->phys_id = phys_id;
				uncore_box_init(box);
			}
		}
	}
	return 0;
}

static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (pmu->func_id < 0)
				pmu->func_id = j;

			box = uncore_alloc_box(cpu);
			if (!box)
				return -ENOMEM;

			box->pmu = pmu;
			box->phys_id = phys_id;
			*per_cpu_ptr(pmu->box, cpu) = box;
		}
	}
	return 0;
}
static void __cpuinit uncore_change_context(struct intel_uncore_type **uncores,
					    int old_cpu, int new_cpu)
{
	struct intel_uncore_type *type;
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	int i, j;

	for (i = 0; uncores[i]; i++) {
		type = uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			if (old_cpu < 0)
				box = uncore_pmu_to_box(pmu, new_cpu);
			else
				box = uncore_pmu_to_box(pmu, old_cpu);
			if (!box)
				continue;

			if (old_cpu < 0) {
				WARN_ON_ONCE(box->cpu != -1);
				box->cpu = new_cpu;
				continue;
			}

			WARN_ON_ONCE(box->cpu != old_cpu);
			if (new_cpu >= 0) {
				uncore_pmu_cancel_hrtimer(box);
				perf_pmu_migrate_context(&pmu->pmu,
						old_cpu, new_cpu);
				box->cpu = new_cpu;
			} else {
				box->cpu = -1;
			}
		}
	}
}
static void __cpuinit uncore_event_exit_cpu(int cpu)
{
	int i, phys_id, target;

	/* if exiting cpu is used for collecting uncore events */
	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
		return;

	/* find a new cpu to collect uncore events */
	phys_id = topology_physical_package_id(cpu);
	target = -1;
	for_each_online_cpu(i) {
		if (i == cpu)
			continue;
		if (phys_id == topology_physical_package_id(i)) {
			target = i;
			break;
		}
	}

	/* migrate uncore events to the new cpu */
	if (target >= 0)
		cpumask_set_cpu(target, &uncore_cpu_mask);

	uncore_change_context(msr_uncores, cpu, target);
	uncore_change_context(pci_uncores, cpu, target);
}

static void __cpuinit uncore_event_init_cpu(int cpu)
{
	int i, phys_id;

	phys_id = topology_physical_package_id(cpu);
	for_each_cpu(i, &uncore_cpu_mask) {
		if (phys_id == topology_physical_package_id(i))
			return;
	}

	cpumask_set_cpu(cpu, &uncore_cpu_mask);

	uncore_change_context(msr_uncores, -1, cpu);
	uncore_change_context(pci_uncores, -1, cpu);
}
static int __cpuinit uncore_cpu_notifier(struct notifier_block *self,
					 unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	/* allocate/free data structure for uncore box */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		uncore_cpu_prepare(cpu, -1);
		break;
	case CPU_STARTING:
		uncore_cpu_starting(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_DYING:
		uncore_cpu_dying(cpu);
		break;
	default:
		break;
	}

	/* select the cpu that collects uncore events */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_FAILED:
	case CPU_STARTING:
		uncore_event_init_cpu(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uncore_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block uncore_cpu_nb __cpuinitdata = {
	.notifier_call = uncore_cpu_notifier,
	/*
	 * to migrate uncore events, our notifier should be executed
	 * before perf core's notifier.
	 */
	.priority = CPU_PRI_PERF + 1,
};

static void __init uncore_cpu_setup(void *dummy)
{
	uncore_cpu_starting(smp_processor_id());
}
static int __init uncore_cpu_init(void)
{
	int ret, cpu;

	switch (boot_cpu_data.x86_model) {
	case 26: /* Nehalem */
	case 30:
	case 37: /* Westmere */
	case 44:
		msr_uncores = nhm_msr_uncores;
		break;
	case 42: /* Sandy Bridge */
		msr_uncores = snb_msr_uncores;
		break;
	case 45: /* Sandy Bridge-EP */
		msr_uncores = snbep_msr_uncores;
		break;
	default:
		return 0;
	}

	ret = uncore_types_init(msr_uncores);
	if (ret)
		return ret;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		int i, phys_id = topology_physical_package_id(cpu);

		for_each_cpu(i, &uncore_cpu_mask) {
			if (phys_id == topology_physical_package_id(i)) {
				phys_id = -1;
				break;
			}
		}
		if (phys_id < 0)
			continue;

		uncore_cpu_prepare(cpu, phys_id);
		uncore_event_init_cpu(cpu);
	}
	on_each_cpu(uncore_cpu_setup, NULL, 1);

	register_cpu_notifier(&uncore_cpu_nb);

	put_online_cpus();

	return 0;
}
static int __init uncore_pmus_register(void)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_type *type;
	int i, j;

	for (i = 0; msr_uncores[i]; i++) {
		type = msr_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	for (i = 0; pci_uncores[i]; i++) {
		type = pci_uncores[i];
		for (j = 0; j < type->num_boxes; j++) {
			pmu = &type->pmus[j];
			uncore_pmu_register(pmu);
		}
	}

	return 0;
}
static int __init intel_uncore_init(void)
{
	int ret;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return -ENODEV;

	ret = uncore_pci_init();
	if (ret)
		goto fail;
	ret = uncore_cpu_init();
	if (ret) {
		uncore_pci_exit();
		goto fail;
	}

	uncore_pmus_register();
	return 0;
fail:
	return ret;
}
device_initcall(intel_uncore_init);