/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
 *
 * This driver supports the memory controllers found on the Intel
 * Sandy Bridge processor family.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License version 2 only.
 *
 * Copyright (c) 2011 by:
 *	Mauro Carvalho Chehab
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <asm/processor.h>

#include "edac_core.h"

static LIST_HEAD(sbridge_edac_list);
static DEFINE_MUTEX(sbridge_edac_lock);
/*
 * Alter this version when the module is modified.
 */
#define SBRIDGE_REVISION	" Ver: 1.1.0 "
#define EDAC_MOD_STR		"sbridge_edac"
#define sbridge_printk(level, fmt, arg...)		\
	edac_printk(level, "sbridge", fmt, ##arg)

#define sbridge_mc_printk(mci, level, fmt, arg...)	\
	edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
/*
 * Extract the bit field of register value <v> spanning bits <lo> to <hi>.
 */
#define GET_BITFIELD(v, lo, hi)	\
	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
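/*
 * Editor's example (illustrative, not part of the original source):
 * GENMASK_ULL(hi, lo) builds a mask with bits lo..hi set, so for
 * v = 0x00c4, GET_BITFIELD(v, 2, 7) computes (0xc4 & 0xfc) >> 2 = 0x31,
 * i.e. the six-bit field starting at bit 2.
 */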
/*
 * sbridge Memory Controller Registers
 *
 * FIXME: For now, let's order by device function, as it makes the
 * driver development process easier. This table should be moved to
 * pci_ids.h when submitted upstream.
 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0	0x3cf4	/* 12.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1	0x3cf6	/* 12.7 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR		0x3cf5	/* 13.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0	0x3ca0	/* 14.0 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA	0x3ca8	/* 15.0 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS	0x3c71	/* 15.1 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0	0x3caa	/* 15.2 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1	0x3cab	/* 15.3 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2	0x3cac	/* 15.4 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3	0x3cad	/* 15.5 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO	0x3cb8	/* 17.0 */
/*
 * Currently unused, but will be needed by future implementations, as
 * these devices hold the error counters.
 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0	0x3c72	/* 16.2 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1	0x3c73	/* 16.3 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2	0x3c76	/* 16.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3	0x3c77	/* 16.7 */
/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
static const u32 sbridge_dram_rule[] = {
	0x80, 0x88, 0x90, 0x98, 0xa0,
	0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
};

static const u32 ibridge_dram_rule[] = {
	0x60, 0x68, 0x70, 0x78, 0x80,
	0x88, 0x90, 0x98, 0xa0, 0xa8,
	0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
	0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
};
#define SAD_LIMIT(reg)		((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff)
#define DRAM_ATTR(reg)		GET_BITFIELD(reg, 2, 3)
#define INTERLEAVE_MODE(reg)	GET_BITFIELD(reg, 1, 1)
#define DRAM_RULE_ENABLE(reg)	GET_BITFIELD(reg, 0, 0)
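/*
 * Editor's example (illustrative): a DRAM rule of reg = 0x101 decodes
 * as DRAM_RULE_ENABLE = 1 with a limit field (bits 25:6) of 0x4, so
 * SAD_LIMIT = (0x4 << 26) | 0x3ffffff = 0x13ffffff: the rule covers
 * addresses up to 320 MiB - 1.
 */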
static char *get_dram_attr(u32 reg)
{
	switch (DRAM_ATTR(reg)) {
	case 0:
		return "DRAM";
	case 1:
		return "MMCFG";
	case 2:
		return "NXM";
	default:
		return "unknown";
	}
}
static const u32 sbridge_interleave_list[] = {
	0x84, 0x8c, 0x94, 0x9c, 0xa4,
	0xac, 0xb4, 0xbc, 0xc4, 0xcc,
};

static const u32 ibridge_interleave_list[] = {
	0x64, 0x6c, 0x74, 0x7c, 0x84,
	0x8c, 0x94, 0x9c, 0xa4, 0xac,
	0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
	0xdc, 0xe4, 0xec, 0xf4, 0xfc,
};
struct interleave_pkg {
	char start;
	char end;
};

static const struct interleave_pkg sbridge_interleave_pkg[] = {

static const struct interleave_pkg ibridge_interleave_pkg[] = {

static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
			  int interleave)
{
	return GET_BITFIELD(reg, table[interleave].start,
			    table[interleave].end);
}
/* Devices 12 Function 7 */

#define TOLM		0x80
#define TOHM		0x84

#define GET_TOLM(reg)	((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff)
#define GET_TOHM(reg)	((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
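/*
 * Editor's example (illustrative): a TOLM field of 0x8 gives
 * GET_TOLM = (0x8 << 28) | 0x3ffffff = 0x83ffffff, i.e. the top of low
 * memory sits just below 2 GiB + 64 MiB; the low 26 bits are forced to
 * 1 so the value always points at the last byte of the range.
 */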
/* Device 13 Function 6 */

#define SAD_TARGET	0xf0
#define SOURCE_ID(reg)	GET_BITFIELD(reg, 9, 11)

#define SAD_CONTROL	0xf4
/* Device 14 function 0 */

static const u32 tad_dram_rule[] = {
	0x40, 0x44, 0x48, 0x4c,
	0x50, 0x54, 0x58, 0x5c,
	0x60, 0x64, 0x68, 0x6c,
};
#define MAX_TAD	ARRAY_SIZE(tad_dram_rule)

#define TAD_LIMIT(reg)	((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
#define TAD_SOCK(reg)	GET_BITFIELD(reg, 10, 11)
#define TAD_CH(reg)	GET_BITFIELD(reg, 8, 9)
#define TAD_TGT3(reg)	GET_BITFIELD(reg, 6, 7)
#define TAD_TGT2(reg)	GET_BITFIELD(reg, 4, 5)
#define TAD_TGT1(reg)	GET_BITFIELD(reg, 2, 3)
#define TAD_TGT0(reg)	GET_BITFIELD(reg, 0, 1)
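/*
 * Editor's example (illustrative): a TAD rule of reg = 0x1104 gives
 * TAD_LIMIT = (0x1 << 26) | 0x3ffffff = 0x7ffffff (the rule ends at
 * 128 MiB - 1), TAD_SOCK = 0 (1-way socket interleave), TAD_CH = 1
 * (2-way channel interleave), and TAD_TGT0/TAD_TGT1 = 0/1, so even
 * cache lines go to channel 0 and odd ones to channel 1.
 */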
/* Device 15, function 0 */

#define MCMTR			0x7c

#define IS_ECC_ENABLED(mcmtr)		GET_BITFIELD(mcmtr, 2, 2)
#define IS_LOCKSTEP_ENABLED(mcmtr)	GET_BITFIELD(mcmtr, 1, 1)
#define IS_CLOSE_PG(mcmtr)		GET_BITFIELD(mcmtr, 0, 0)

/* Device 15, function 1 */

#define RASENABLES		0xac
#define IS_MIRROR_ENABLED(reg)	GET_BITFIELD(reg, 0, 0)

/* Device 15, functions 2-5 */
static const int mtr_regs[] = {

#define RANK_DISABLE(mtr)	GET_BITFIELD(mtr, 16, 19)
#define IS_DIMM_PRESENT(mtr)	GET_BITFIELD(mtr, 14, 14)
#define RANK_CNT_BITS(mtr)	GET_BITFIELD(mtr, 12, 13)
#define RANK_WIDTH_BITS(mtr)	GET_BITFIELD(mtr, 2, 4)
#define COL_WIDTH_BITS(mtr)	GET_BITFIELD(mtr, 0, 1)
static const u32 tad_ch_nilv_offset[] = {
	0x90, 0x94, 0x98, 0x9c,
	0xa0, 0xa4, 0xa8, 0xac,
	0xb0, 0xb4, 0xb8, 0xbc,
};
#define CHN_IDX_OFFSET(reg)	GET_BITFIELD(reg, 28, 29)
#define TAD_OFFSET(reg)		(GET_BITFIELD(reg, 6, 25) << 26)

static const u32 rir_way_limit[] = {
	0x108, 0x10c, 0x110, 0x114, 0x118,
};
#define MAX_RIR_RANGES	ARRAY_SIZE(rir_way_limit)

#define IS_RIR_VALID(reg)	GET_BITFIELD(reg, 31, 31)
#define RIR_WAY(reg)		GET_BITFIELD(reg, 28, 29)
#define RIR_LIMIT(reg)		((GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff)

#define MAX_RIR_WAY	8
static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
	{ 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
	{ 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
	{ 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
	{ 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
};

#define RIR_RNK_TGT(reg)	GET_BITFIELD(reg, 16, 19)
#define RIR_OFFSET(reg)		GET_BITFIELD(reg, 2, 14)
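/*
 * Editor's example (illustrative): a RIR register of reg = 0x90000002
 * has IS_RIR_VALID = 1, RIR_WAY = 1 (i.e. a 1 << 1 = 2-way rank
 * interleave) and a limit field (bits 10:1) of 1, so
 * RIR_LIMIT = (1 << 29) | 0x1fffffff = 0x3fffffff: the range ends at
 * 1 GiB - 1 of channel address space.
 */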
/* Device 16, functions 2-7 */

/*
 * FIXME: Implement the error count reads directly
 */

static const u32 correrrcnt[] = {
	0x104, 0x108, 0x10c, 0x110,
};

#define RANK_ODD_OV(reg)		GET_BITFIELD(reg, 31, 31)
#define RANK_ODD_ERR_CNT(reg)		GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_OV(reg)		GET_BITFIELD(reg, 15, 15)
#define RANK_EVEN_ERR_CNT(reg)		GET_BITFIELD(reg, 0, 14)

static const u32 correrrthrsld[] = {
	0x11c, 0x120, 0x124, 0x128,
};

#define RANK_ODD_ERR_THRSLD(reg)	GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_ERR_THRSLD(reg)	GET_BITFIELD(reg, 0, 14)
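/*
 * Editor's example (illustrative): each correrrcnt register packs two
 * 15-bit counters plus per-rank overflow bits. For reg = 0x80010002,
 * RANK_ODD_OV = 1, RANK_ODD_ERR_CNT = 1 and RANK_EVEN_ERR_CNT = 2: the
 * odd rank's overflow flag is set, while the even rank has logged two
 * corrected errors.
 */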
/* Device 17, function 0 */

#define SB_RANK_CFG_A	0x0328

#define IB_RANK_CFG_A	0x0320

#define NUM_CHANNELS	4
#define MAX_DIMMS	3	/* Max DIMMS per channel */

enum type {
	SANDY_BRIDGE,
	IVY_BRIDGE,
};
struct sbridge_pvt;
struct sbridge_info {
	enum type	type;
	u32		mcmtr;
	u32		rankcfgr;
	u64		(*get_tolm)(struct sbridge_pvt *pvt);
	u64		(*get_tohm)(struct sbridge_pvt *pvt);
	const u32	*dram_rule;
	const u32	*interleave_list;
	const struct interleave_pkg *interleave_pkg;
	unsigned	max_sad;
	u8		max_interleave;
	u8		(*get_node_id)(struct sbridge_pvt *pvt);
	enum mem_type	(*get_memory_type)(struct sbridge_pvt *pvt);
};
struct sbridge_channel {
	u32		ranks;
	u32		dimms;
};

struct pci_id_descr {
	int		dev;
	int		func;
	int		dev_id;
	int		optional;
};

struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs;
};

struct sbridge_dev {
	struct list_head	list;
	u8			bus, mc;
	u8			node_id, source_id;
	struct pci_dev		**pdev;
	int			n_devs;
	struct mem_ctl_info	*mci;
};

struct sbridge_pvt {
	struct pci_dev		*pci_ta, *pci_ddrio, *pci_ras;
	struct pci_dev		*pci_sad0, *pci_sad1;
	struct pci_dev		*pci_ha0, *pci_ha1;
	struct pci_dev		*pci_br0, *pci_br1;
	struct pci_dev		*pci_tad[NUM_CHANNELS];

	struct sbridge_dev	*sbridge_dev;

	struct sbridge_info	info;
	struct sbridge_channel	channel[NUM_CHANNELS];

	/* Memory type detection */
	bool			is_mirrored, is_lockstep, is_close_pg;

	/* FIFO double buffers */
	struct mce		mce_entry[MCE_LOG_LEN];
	struct mce		mce_outentry[MCE_LOG_LEN];

	/* FIFO in/out counters */
	unsigned		mce_in, mce_out;

	/* Counter of errors that could not be fetched */
	unsigned		mce_overrun;

	/* Memory description */
	u64			tolm, tohm;
};
#define PCI_DESCR(device, function, device_id, opt)	\
	.dev = (device),				\
	.func = (function),				\
	.dev_id = (device_id),				\
	.optional = opt

static const struct pci_id_descr pci_dev_descr_sbridge[] = {
		/* Processor Home Agent */
	{ PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) },

		/* Memory controller */
	{ PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) },
	{ PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) },
	{ PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) },
	{ PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) },
	{ PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) },
	{ PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) },
	{ PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) },

		/* System Address Decoder */
	{ PCI_DESCR(12, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) },
	{ PCI_DESCR(12, 7, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) },

		/* Broadcast Registers */
	{ PCI_DESCR(13, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
};

#define PCI_ID_TABLE_ENTRY(A) { .descr = A, .n_devs = ARRAY_SIZE(A) }

static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
	{0,}			/* 0 terminated list. */
};
/* This changes depending on whether the part is 1HA or 2HA:
 * 1HA: 0x0eb8 (17.0) is DDRIO0
 * 2HA: 0x0ebc (17.4) is DDRIO0
 */
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0	0x0eb8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0	0x0ebc
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0		0x0ea0
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA		0x0ea8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS		0x0e71
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0	0x0eaa
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1	0x0eab
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2	0x0eac
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3	0x0ead
#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD			0x0ec8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0			0x0ec9
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1			0x0eca
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1		0x0e60
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA		0x0e68
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS		0x0e79
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0	0x0e6a
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1	0x0e6b
static const struct pci_id_descr pci_dev_descr_ibridge[] = {
		/* Processor Home Agent */
	{ PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) },

		/* Memory controller */
	{ PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) },
	{ PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) },
	{ PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) },
	{ PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) },
	{ PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) },
	{ PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) },

		/* System Address Decoder */
	{ PCI_DESCR(22, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) },

		/* Broadcast Registers */
	{ PCI_DESCR(22, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) },
	{ PCI_DESCR(22, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) },

		/* Optional, mode 2HA */
	{ PCI_DESCR(28, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) },
	{ PCI_DESCR(29, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) },
	{ PCI_DESCR(29, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) },
	{ PCI_DESCR(29, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) },
	{ PCI_DESCR(29, 3, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) },

	{ PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) },
	{ PCI_DESCR(17, 4, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) },
};

static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
	{0,}			/* 0 terminated list. */
};
/*
 * pci_device_id table of the devices we are looking for
 */
static const struct pci_device_id sbridge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
	{0,}			/* 0 terminated list. */
};
/****************************************************************************
			Ancillary status routines
 ****************************************************************************/
static inline int numrank(u32 mtr)
{
	int ranks = (1 << RANK_CNT_BITS(mtr));

	if (ranks > 4) {
		edac_dbg(0, "Invalid number of ranks: %d (max = 4) raw value = %x (%04x)\n",
			 ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr);
		return -EINVAL;
	}
	return ranks;
}

static inline int numrow(u32 mtr)
{
	int rows = (RANK_WIDTH_BITS(mtr) + 12);

	if (rows < 13 || rows > 18) {
		edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
			 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
		return -EINVAL;
	}
	return 1 << rows;
}

static inline int numcol(u32 mtr)
{
	int cols = (COL_WIDTH_BITS(mtr) + 10);

	if (cols > 12) {
		edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
			 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
		return -EINVAL;
	}
	return 1 << cols;
}
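/*
 * Editor's note (illustrative): get_dimm_config() below combines these
 * decoders as size = (rows * cols * banks * ranks) >> (20 - 3), i.e.
 * the cell count times 8 bytes, scaled to MiB. For example,
 * rows = 1 << 16, cols = 1 << 10, banks = 8, ranks = 2 gives
 * (2^16 * 2^10 * 8 * 2) >> 17 = 8192 MiB, an 8 GiB DIMM.
 */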
static struct sbridge_dev *get_sbridge_dev(u8 bus)
{
	struct sbridge_dev *sbridge_dev;

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
		if (sbridge_dev->bus == bus)
			return sbridge_dev;
	}
	return NULL;
}

static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
					     const struct pci_id_table *table)
{
	struct sbridge_dev *sbridge_dev;

	sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
	if (!sbridge_dev)
		return NULL;

	sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs,
				    GFP_KERNEL);
	if (!sbridge_dev->pdev) {
		kfree(sbridge_dev);
		return NULL;
	}

	sbridge_dev->bus = bus;
	sbridge_dev->n_devs = table->n_devs;
	list_add_tail(&sbridge_dev->list, &sbridge_edac_list);

	return sbridge_dev;
}

static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
{
	list_del(&sbridge_dev->list);
	kfree(sbridge_dev->pdev);
	kfree(sbridge_dev);
}
static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	/* Address range is 32:28 */
	pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
	return GET_TOLM(reg);
}

static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
	return GET_TOHM(reg);
}

static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br1, TOLM, &reg);
	return GET_TOLM(reg);
}

static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br1, TOHM, &reg);
	return GET_TOHM(reg);
}
static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
{
	u32 reg;
	enum mem_type mtype;

	if (pvt->pci_ddrio) {
		pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
				      &reg);
		if (GET_BITFIELD(reg, 11, 11))
			/* FIXME: Can also be LRDIMM */
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
	} else
		mtype = MEM_UNKNOWN;

	return mtype;
}

static u8 get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;
	pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 2);
}
static inline u8 sad_pkg_socket(u8 pkg)
{
	/* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
	return ((pkg >> 3) << 2) | (pkg & 0x3);
}

static inline u8 sad_pkg_ha(u8 pkg)
{
	return (pkg >> 2) & 0x1;
}
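/*
 * Editor's example (illustrative): for pkg = 0b1101 the SASS layout
 * gives S = 1, A = 1, SS = 0b01, so sad_pkg_socket() returns
 * (1 << 2) | 1 = 5 and sad_pkg_ha() returns 1: node id 5, served by
 * home agent 1.
 */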
/****************************************************************************
			Memory check routines
 ****************************************************************************/
static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
					  unsigned func)
{
	struct sbridge_dev *sbridge_dev = get_sbridge_dev(bus);
	int i;

	if (!sbridge_dev)
		return NULL;

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		if (!sbridge_dev->pdev[i])
			continue;

		if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot &&
		    PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) {
			edac_dbg(1, "Associated %02x.%02x.%d with %p\n",
				 bus, slot, func, sbridge_dev->pdev[i]);
			return sbridge_dev->pdev[i];
		}
	}

	return NULL;
}
/**
 * check_if_ecc_is_active() - Checks if ECC is active
 */
static int check_if_ecc_is_active(const u8 bus)
{
	struct pci_dev *pdev = NULL;
	u32 mcmtr;

	pdev = get_pdev_slot_func(bus, 15, 0);
	if (!pdev) {
		sbridge_printk(KERN_ERR, "Couldn't find PCI device "
					 "on bus %02d!\n", bus);
		return -ENODEV;
	}

	pci_read_config_dword(pdev, MCMTR, &mcmtr);
	if (!IS_ECC_ENABLED(mcmtr)) {
		sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
		return -ENODEV;
	}
	return 0;
}
static int get_dimm_config(struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	unsigned i, j, banks, ranks, rows, cols, npages;
	u64 size;
	u32 reg;
	enum edac_type mode;
	enum mem_type mtype;

	pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
	pvt->sbridge_dev->source_id = SOURCE_ID(reg);

	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
		 pvt->sbridge_dev->mc,
		 pvt->sbridge_dev->node_id,
		 pvt->sbridge_dev->source_id);

	pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
	if (IS_MIRROR_ENABLED(reg)) {
		edac_dbg(0, "Memory mirror is enabled\n");
		pvt->is_mirrored = true;
	} else {
		edac_dbg(0, "Memory mirror is disabled\n");
		pvt->is_mirrored = false;
	}

	pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
	if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
		edac_dbg(0, "Lockstep is enabled\n");
		mode = EDAC_S8ECD8ED;
		pvt->is_lockstep = true;
	} else {
		edac_dbg(0, "Lockstep is disabled\n");
		mode = EDAC_S4ECD4ED;
		pvt->is_lockstep = false;
	}
	if (IS_CLOSE_PG(pvt->info.mcmtr)) {
		edac_dbg(0, "address map is on closed page mode\n");
		pvt->is_close_pg = true;
	} else {
		edac_dbg(0, "address map is on open page mode\n");
		pvt->is_close_pg = false;
	}

	mtype = pvt->info.get_memory_type(pvt);
	if (mtype == MEM_RDDR3)
		edac_dbg(0, "Memory is registered\n");
	else if (mtype == MEM_UNKNOWN)
		edac_dbg(0, "Cannot determine memory type\n");
	else
		edac_dbg(0, "Memory is unregistered\n");

	/* On all supported DDR3 DIMM types, there are 8 banks available */
	banks = 8;

	for (i = 0; i < NUM_CHANNELS; i++) {
		u32 mtr;

		for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
				       i, j, 0);
			pci_read_config_dword(pvt->pci_tad[i],
					      mtr_regs[j], &mtr);
			edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
			if (IS_DIMM_PRESENT(mtr)) {
				pvt->channel[i].dimms++;

				ranks = numrank(mtr);
				rows = numrow(mtr);
				cols = numcol(mtr);

				/* DDR3 has 8 I/O banks */
				size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
				npages = MiB_TO_PAGES(size);

				edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
					 pvt->sbridge_dev->mc, i, j,
					 size, npages,
					 banks, ranks, rows, cols);

				dimm->nr_pages = npages;
				dimm->grain = 32;
				dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
				dimm->mtype = mtype;
				dimm->edac_mode = mode;
				snprintf(dimm->label, sizeof(dimm->label),
					 "CPU_SrcID#%u_Channel#%u_DIMM#%u",
					 pvt->sbridge_dev->source_id, i, j);
			}
		}
	}

	return 0;
}
static void get_memory_layout(const struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	int i, j, k, n_sads, n_tads, sad_interl;
	u32 reg;
	u64 limit, prv = 0;
	u64 tmp_mb;
	u32 mb, kb;
	u32 rir_way;

	/*
	 * Step 1) Get TOLM/TOHM ranges
	 */

	pvt->tolm = pvt->info.get_tolm(pvt);
	tmp_mb = (1 + pvt->tolm) >> 20;

	mb = div_u64_rem(tmp_mb, 1000, &kb);
	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);

	/* Address range is already 45:25 */
	pvt->tohm = pvt->info.get_tohm(pvt);
	tmp_mb = (1 + pvt->tohm) >> 20;

	mb = div_u64_rem(tmp_mb, 1000, &kb);
	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);

	/*
	 * Step 2) Get SAD range and SAD Interleave list
	 * TAD registers contain the interleave wayness. However, it
	 * seems simpler to just discover it indirectly, with the
	 * algorithm below.
	 */
	prv = 0;
	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
		/* SAD_LIMIT Address range is 45:26 */
		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
				      &reg);
		limit = SAD_LIMIT(reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		if (limit <= prv)
			break;

		tmp_mb = (limit + 1) >> 20;
		mb = div_u64_rem(tmp_mb, 1000, &kb);
		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
			 n_sads,
			 get_dram_attr(reg),
			 mb, kb,
			 ((u64)tmp_mb) << 20L,
			 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
			 reg);
		prv = limit;

		pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
				      &reg);
		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
		for (j = 0; j < 8; j++) {
			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
			if (j > 0 && sad_interl == pkg)
				break;

			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
				 n_sads, j, pkg);
		}
	}

	/*
	 * Step 3) Get TAD range
	 */
	prv = 0;
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
				      &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv)
			break;
		tmp_mb = (limit + 1) >> 20;

		mb = div_u64_rem(tmp_mb, 1000, &kb);
		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
			 n_tads, mb, kb,
			 ((u64)tmp_mb) << 20L,
			 (u32)TAD_SOCK(reg),
			 (u32)TAD_CH(reg),
			 (u32)TAD_TGT0(reg),
			 (u32)TAD_TGT1(reg),
			 (u32)TAD_TGT2(reg),
			 (u32)TAD_TGT3(reg),
			 reg);
		prv = limit;
	}

	/*
	 * Step 4) Get TAD offsets, for each channel
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < n_tads; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      tad_ch_nilv_offset[j],
					      &reg);
			tmp_mb = TAD_OFFSET(reg) >> 20;
			mb = div_u64_rem(tmp_mb, 1000, &kb);
			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
				 i, j, mb, kb,
				 ((u64)tmp_mb) << 20L,
				 reg);
		}
	}

	/*
	 * Step 5) Get RIR Wayness/Limit, for each channel
	 */
	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->channel[i].dimms)
			continue;
		for (j = 0; j < MAX_RIR_RANGES; j++) {
			pci_read_config_dword(pvt->pci_tad[i],
					      rir_way_limit[j],
					      &reg);

			if (!IS_RIR_VALID(reg))
				continue;

			tmp_mb = RIR_LIMIT(reg) >> 20;
			rir_way = 1 << RIR_WAY(reg);
			mb = div_u64_rem(tmp_mb, 1000, &kb);
			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
				 i, j, mb, kb,
				 ((u64)tmp_mb) << 20L,
				 rir_way,
				 reg);

			for (k = 0; k < rir_way; k++) {
				pci_read_config_dword(pvt->pci_tad[i],
						      rir_offset[j][k],
						      &reg);
				tmp_mb = RIR_OFFSET(reg) << 6;

				mb = div_u64_rem(tmp_mb, 1000, &kb);
				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
					 i, j, k, mb, kb,
					 ((u64)tmp_mb) << 20L,
					 (u32)RIR_RNK_TGT(reg),
					 reg);
			}
		}
	}
}
static struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
{
	struct sbridge_dev *sbridge_dev;

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
		if (sbridge_dev->node_id == node_id)
			return sbridge_dev->mci;
	}
	return NULL;
}
static int get_memory_error_data(struct mem_ctl_info *mci,
				 u64 addr,
				 u8 *socket,
				 long *channel_mask,
				 u8 *rank,
				 char **area_type, char *msg)
{
	struct mem_ctl_info *new_mci;
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pci_ha;
	int n_rir, n_sads, n_tads, sad_way, sck_xch;
	int sad_interl, idx, base_ch;
	int interleave_mode;
	unsigned sad_interleave[pvt->info.max_interleave];
	u32 reg;
	u8 ch_way, sck_way, pkg, sad_ha = 0;
	u32 tad_offset;
	u32 rir_way;
	u32 mb, kb;
	u64 ch_addr, offset, limit = 0, prv = 0;

	/*
	 * Step 0) Check if the address is at special memory ranges
	 * The check below is probably enough to fill all cases where
	 * the error is not inside memory, except for the legacy
	 * range (e.g. VGA addresses). It is unlikely, however, that the
	 * memory controller would generate an error on that range.
	 */
	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
		return -EINVAL;
	}
	if (addr >= (u64)pvt->tohm) {
		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
		return -EINVAL;
	}

	/*
	 * Step 1) Get socket
	 */
	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
				      &reg);

		if (!DRAM_RULE_ENABLE(reg))
			continue;

		limit = SAD_LIMIT(reg);
		if (limit <= prv) {
			sprintf(msg, "Can't discover the memory socket");
			return -EINVAL;
		}
		if (addr <= limit)
			break;
		prv = limit;
	}
	if (n_sads == pvt->info.max_sad) {
		sprintf(msg, "Can't discover the memory socket");
		return -EINVAL;
	}
	*area_type = get_dram_attr(reg);
	interleave_mode = INTERLEAVE_MODE(reg);

	pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
			      &reg);

	if (pvt->info.type == SANDY_BRIDGE) {
		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
		for (sad_way = 0; sad_way < 8; sad_way++) {
			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
			if (sad_way > 0 && sad_interl == pkg)
				break;
			sad_interleave[sad_way] = pkg;
			edac_dbg(0, "SAD interleave #%d: %d\n",
				 sad_way, sad_interleave[sad_way]);
		}
		edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
			 pvt->sbridge_dev->mc,
			 n_sads,
			 addr,
			 limit,
			 sad_way + 7,
			 !interleave_mode ? "" : "XOR[18:16]");
		if (interleave_mode)
			idx = ((addr >> 6) ^ (addr >> 16)) & 7;
		else
			idx = (addr >> 6) & 7;
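		/*
		 * Editor's example (illustrative): for addr = 0x12345678,
		 * (addr >> 6) & 7 = 1, while the XOR variant also folds in
		 * bits 18:16: ((addr >> 6) ^ (addr >> 16)) & 7 = 1 ^ 4 = 5.
		 */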
		switch (sad_way) {
		case 1:
			idx = 0;
			break;
		case 2:
			idx = idx & 1;
			break;
		case 4:
			idx = idx & 3;
			break;
		case 8:
			break;
		default:
			sprintf(msg, "Can't discover socket interleave");
			return -EINVAL;
		}
		*socket = sad_interleave[idx];
		edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
			 idx, sad_way, *socket);
	} else {
		/* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
		idx = (addr >> 6) & 7;
		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
		*socket = sad_pkg_socket(pkg);
		sad_ha = sad_pkg_ha(pkg);
		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
			 idx, *socket, sad_ha);
	}

	/*
	 * Move to the proper node structure, in order to access the
	 * right PCI registers
	 */
	new_mci = get_mci_for_node_id(*socket);
	if (!new_mci) {
		sprintf(msg, "Struct for socket #%u wasn't initialized",
			*socket);
		return -EINVAL;
	}
	mci = new_mci;
	pvt = mci->pvt_info;
	/*
	 * Step 2) Get memory channel
	 */
	prv = 0;
	if (pvt->info.type == SANDY_BRIDGE)
		pci_ha = pvt->pci_ha0;
	else {
		if (sad_ha)
			pci_ha = pvt->pci_ha1;
		else
			pci_ha = pvt->pci_ha0;
	}
	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
		limit = TAD_LIMIT(reg);
		if (limit <= prv) {
			sprintf(msg, "Can't discover the memory channel");
			return -EINVAL;
		}
		if (addr <= limit)
			break;
		prv = limit;
	}
	if (n_tads == MAX_TAD) {
		sprintf(msg, "Can't discover the memory channel");
		return -EINVAL;
	}

	ch_way = TAD_CH(reg) + 1;
	sck_way = TAD_SOCK(reg) + 1;

	idx = addr >> (6 + sck_way);
	idx = idx % ch_way;

	/*
	 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
	 */
	switch (idx) {
	case 0:
		base_ch = TAD_TGT0(reg);
		break;
	case 1:
		base_ch = TAD_TGT1(reg);
		break;
	case 2:
		base_ch = TAD_TGT2(reg);
		break;
	case 3:
		base_ch = TAD_TGT3(reg);
		break;
	default:
		sprintf(msg, "Can't discover the TAD target");
		return -EINVAL;
	}
	*channel_mask = 1 << base_ch;

	pci_read_config_dword(pvt->pci_tad[base_ch],
			      tad_ch_nilv_offset[n_tads],
			      &tad_offset);

	if (pvt->is_mirrored) {
		*channel_mask |= 1 << ((base_ch + 2) % 4);
		switch (ch_way) {
		case 2:
		case 4:
			sck_xch = 1 << sck_way * (ch_way >> 1);
			break;
		default:
			sprintf(msg, "Invalid mirror set. Can't decode addr");
			return -EINVAL;
		}
	} else
		sck_xch = (1 << sck_way) * ch_way;

	if (pvt->is_lockstep)
		*channel_mask |= 1 << ((base_ch + 1) % 4);

	offset = TAD_OFFSET(tad_offset);
	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
		 n_tads,
		 addr,
		 limit,
		 (u32)TAD_SOCK(reg),
		 ch_way,
		 offset,
		 idx,
		 base_ch,
		 *channel_mask);

	/* Calculate channel address */
	/* Remove the TAD offset */

	if (offset > addr) {
		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
			offset, addr);
		return -EINVAL;
	}
	addr -= offset;

	/* Store the low bits [0:6] of the addr */
	ch_addr = addr & 0x7f;
	/* Remove socket wayness and remove 6 bits */
	addr >>= 6;
	addr = div_u64(addr, sck_xch);

	/* Divide by channel way */
	addr = addr / ch_way;

	/* Recover the last 6 bits */
	ch_addr |= addr << 6;
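	/*
	 * Editor's example (illustrative): with sck_xch = 1 and ch_way = 2,
	 * an offset-adjusted addr of 0x1080 keeps its low 7 bits (0x00),
	 * and the remaining bits are divided by the total interleave:
	 * (0x1080 >> 6) / 2 = 0x21, so ch_addr = (0x21 << 6) | 0x00 = 0x840.
	 */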
	/*
	 * Step 3) Decode rank
	 */
	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
		pci_read_config_dword(pvt->pci_tad[base_ch],
				      rir_way_limit[n_rir],
				      &reg);

		if (!IS_RIR_VALID(reg))
			continue;

		limit = RIR_LIMIT(reg);
		mb = div_u64_rem(limit >> 20, 1000, &kb);
		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
			 n_rir, mb, kb,
			 limit,
			 1 << RIR_WAY(reg));
		if (ch_addr <= limit)
			break;
	}
	if (n_rir == MAX_RIR_RANGES) {
		sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
			ch_addr);
		return -EINVAL;
	}
	rir_way = RIR_WAY(reg);

	if (pvt->is_close_pg)
		idx = (ch_addr >> 6);
	else
		idx = (ch_addr >> 13);	/* FIXME: Datasheet says to shift by 15 */
	idx %= 1 << rir_way;

	pci_read_config_dword(pvt->pci_tad[base_ch],
			      rir_offset[n_rir][idx],
			      &reg);
	*rank = RIR_RNK_TGT(reg);

	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
		 n_rir,
		 ch_addr,
		 limit,
		 rir_way,
		 idx);

	return 0;
}
/****************************************************************************
	Device initialization routines: put/get, init/exit
 ****************************************************************************/

/*
 *	sbridge_put_all_devices	'put' all the devices that we have
 *				reserved via 'get'
 */
static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
{
	int i;

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		struct pci_dev *pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;
		edac_dbg(0, "Removing dev %02x:%02x.%d\n",
			 pdev->bus->number,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
		pci_dev_put(pdev);
	}
}

static void sbridge_put_all_devices(void)
{
	struct sbridge_dev *sbridge_dev, *tmp;

	list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
		sbridge_put_devices(sbridge_dev);
		free_sbridge_dev(sbridge_dev);
	}
}
static int sbridge_get_onedevice(struct pci_dev **prev,
				 u8 *num_mc,
				 const struct pci_id_table *table,
				 const unsigned devno)
{
	struct sbridge_dev *sbridge_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];
	struct pci_dev *pdev = NULL;
	u8 bus = 0;

	sbridge_printk(KERN_DEBUG,
		       "Seeking dev %02x.%d PCI ID %04x:%04x\n",
		       dev_descr->dev, dev_descr->func,
		       PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	if (!pdev) {
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		sbridge_printk(KERN_INFO,
			       "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			       dev_descr->dev, dev_descr->func,
			       PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

	sbridge_dev = get_sbridge_dev(bus);
	if (!sbridge_dev) {
		sbridge_dev = alloc_sbridge_dev(bus, table);
		if (!sbridge_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
		(*num_mc)++;
	}

	if (sbridge_dev->pdev[devno]) {
		sbridge_printk(KERN_ERR,
			       "Duplicated device for "
			       "dev %02x:%d.%d PCI ID %04x:%04x\n",
			       bus, dev_descr->dev, dev_descr->func,
			       PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	sbridge_dev->pdev[devno] = pdev;

	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
		     PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		sbridge_printk(KERN_ERR,
			       "Device PCI ID %04x:%04x "
			       "has dev %02x:%d.%d instead of dev %02x:%02x.%d\n",
			       PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			       bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			       bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		sbridge_printk(KERN_ERR,
			       "Couldn't enable "
			       "dev %02x:%d.%d PCI ID %04x:%04x\n",
			       bus, dev_descr->dev, dev_descr->func,
			       PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	edac_dbg(0, "Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
		 bus, dev_descr->dev, dev_descr->func,
		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * As stated in drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to get all devices up to null, we need to do a get for the device
	 * before returning it.
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}
/*
 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
 *			     device/functions we want to reference for this driver.
 *			     Need to 'get' device 16 func 1 and func 2.
 * @num_mc: pointer to the memory controllers count, to be incremented in case
 *	    of success.
 * @table: model specific table
 *
 * Returns 0 in case of success, or an error code otherwise.
 */
static int sbridge_get_all_devices(u8 *num_mc,
				   const struct pci_id_table *table)
{
	int i, rc;
	struct pci_dev *pdev = NULL;

	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
			do {
				rc = sbridge_get_onedevice(&pdev, num_mc,
							   table, i);
				if (rc < 0) {
					sbridge_put_all_devices();
					return -ENODEV;
				}
			} while (pdev);
		}
		table++;
	}

	return 0;
}
static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int i, slot, func;

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;
		slot = PCI_SLOT(pdev->devfn);
		func = PCI_FUNC(pdev->devfn);

		/* (editor's note: slot/func dispatch reconstructed from the
		 * pci_dev_descr_sbridge table above) */
		if (slot == 12 && func == 6)
			pvt->pci_sad0 = pdev;
		else if (slot == 12 && func == 7)
			pvt->pci_sad1 = pdev;
		else if (slot == 13 && func == 6)
			pvt->pci_br0 = pdev;
		else if (slot == 14 && func == 0)
			pvt->pci_ha0 = pdev;
		else if (slot == 15 && func == 0)
			pvt->pci_ta = pdev;
		else if (slot == 15 && func == 1)
			pvt->pci_ras = pdev;
		else if (slot == 15 && func >= 2 && func <= 5)
			pvt->pci_tad[func - 2] = pdev;
		else if (slot == 17 && func == 0)
			pvt->pci_ddrio = pdev;
		else
			goto error;

		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
			 sbridge_dev->bus,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev);
	}

	/* Check if everything was registered */
	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
	    !pvt->pci_tad || !pvt->pci_ras || !pvt->pci_ta)
		goto enodev;

	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->pci_tad[i])
			goto enodev;
	}
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;

error:
	sbridge_printk(KERN_ERR, "Device %d, function %d "
		       "is out of the expected range\n",
		       slot, func);
	return -EINVAL;
}
static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
				 struct sbridge_dev *sbridge_dev)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev, *tmp;
	int i, slot, func;
	bool mode_2ha = false;

	tmp = pci_get_device(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, NULL);
	if (tmp) {
		mode_2ha = true;
		pci_dev_put(tmp);
	}

	for (i = 0; i < sbridge_dev->n_devs; i++) {
		pdev = sbridge_dev->pdev[i];
		if (!pdev)
			continue;
		slot = PCI_SLOT(pdev->devfn);
		func = PCI_FUNC(pdev->devfn);

		/* (editor's note: slot/func dispatch reconstructed from the
		 * pci_dev_descr_ibridge table above) */
		if (slot == 14 && func == 0) {
			pvt->pci_ha0 = pdev;
		} else if (slot == 15 && func == 0) {
			pvt->pci_ta = pdev;
		} else if (slot == 15 && func == 1) {
			pvt->pci_ras = pdev;
		} else if (slot == 15 && func >= 2 && func <= 5) {
			/* if we have 2 HAs active, channels 2 and 3
			 * are on the other device */
			pvt->pci_tad[func - 2] = pdev;
		} else if (slot == 17 && !mode_2ha && func == 0) {
			pvt->pci_ddrio = pdev;
		} else if (slot == 17 && mode_2ha && func == 4) {
			pvt->pci_ddrio = pdev;
		} else if (slot == 22 && func == 0) {
			pvt->pci_sad0 = pdev;
		} else if (slot == 22 && func == 1) {
			pvt->pci_br0 = pdev;
		} else if (slot == 22 && func == 2) {
			pvt->pci_br1 = pdev;
		} else if (slot == 28 && func == 0) {
			pvt->pci_ha1 = pdev;
		} else if (slot == 29) {
			/* we shouldn't have this device if we have just one
			 * HA present */
			if (func == 2 || func == 3) {
				pvt->pci_tad[func] = pdev;
			} else
				goto error;
		} else
			goto error;

		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
			 sbridge_dev->bus,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			 pdev);
	}

	/* Check if everything was registered */
	if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 ||
	    !pvt->pci_br1 || !pvt->pci_tad || !pvt->pci_ras ||
	    !pvt->pci_ta)
		goto enodev;

	for (i = 0; i < NUM_CHANNELS; i++) {
		if (!pvt->pci_tad[i])
			goto enodev;
	}
	return 0;

enodev:
	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
	return -ENODEV;

error:
	sbridge_printk(KERN_ERR,
		       "Device %d, function %d is out of the expected range\n",
		       slot, func);
	return -EINVAL;
}
/****************************************************************************
			Error check routines
 ****************************************************************************/

/*
 * While Sandy Bridge has error count registers, the SMI BIOS reads values
 * from them and resets the counters, so they are not reliable for the OS
 * to read. We have no option but to trust whatever the MCE is telling us
 * about the errors.
 */
static void sbridge_mce_output_error(struct mem_ctl_info *mci,
				     const struct mce *m)
{
	struct mem_ctl_info *new_mci;
	struct sbridge_pvt *pvt = mci->pvt_info;
	enum hw_event_mc_err_type tp_event;
	char *type, *optype, msg[256];
	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
	bool overflow = GET_BITFIELD(m->status, 62, 62);
	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
	bool recoverable;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 channel = GET_BITFIELD(m->status, 0, 3);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	long channel_mask, first_channel;
	u8 rank, socket;
	int rc, dimm;
	char *area_type = NULL;

	if (pvt->info.type == IVY_BRIDGE)
		recoverable = true;
	else
		recoverable = GET_BITFIELD(m->status, 56, 56);

	if (uncorrected_error) {
		if (ripv) {
			type = "FATAL";
			tp_event = HW_EVENT_ERR_FATAL;
		} else {
			type = "NON_FATAL";
			tp_event = HW_EVENT_ERR_UNCORRECTED;
		}
	} else {
		type = "CORRECTED";
		tp_event = HW_EVENT_ERR_CORRECTED;
	}

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
	if ((errcode & 0xef80) != 0x80) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}
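	/*
	 * Editor's example (illustrative): errcode = 0x00a1 passes the mask
	 * test (0x00a1 & 0xef80 == 0x0080); its mmm field (optypenum,
	 * bits 6:4) is 2, a "memory write error", and its cccc field is 1,
	 * i.e. channel 1.
	 */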
	/* Only decode errors with a valid address (ADDRV) */
	if (!GET_BITFIELD(m->status, 58, 58))
		return;

	rc = get_memory_error_data(mci, m->addr, &socket,
				   &channel_mask, &rank, &area_type, msg);
	if (rc < 0)
		goto err_parsing;
	new_mci = get_mci_for_node_id(socket);
	if (!new_mci) {
		strcpy(msg, "Error: socket got corrupted!");
		goto err_parsing;
	}
	mci = new_mci;
	pvt = mci->pvt_info;

	first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);

	if (rank < 4)
		dimm = 0;
	else if (rank < 8)
		dimm = 1;
	else
		dimm = 2;

	/*
	 * FIXME: On some memory configurations (mirror, lockstep), the
	 * Memory Controller can't point the error to a single DIMM. The
	 * EDAC core should be handling the channel mask, in order to point
	 * to the group of DIMMs where the error may be happening.
	 */
	snprintf(msg, sizeof(msg),
		 "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
		 overflow ? " OVERFLOW" : "",
		 (uncorrected_error && recoverable) ? " recoverable" : "",
		 area_type,
		 mscod, errcode,
		 socket,
		 channel_mask,
		 rank);

	edac_dbg(0, "%s\n", msg);

	/* FIXME: need support for channel mask */

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt,
			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
			     channel, dimm, -1,
			     optype, msg);
	return;

err_parsing:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
			     -1, -1, -1,
			     msg, "");
}
/*
 *	sbridge_check_error	Retrieve and process errors reported by the
 *				hardware. Called by the Core module.
 */
static void sbridge_check_error(struct mem_ctl_info *mci)
{
	struct sbridge_pvt *pvt = mci->pvt_info;
	int i;
	unsigned count = 0;
	struct mce *m;

	/*
	 * MCE first step: Copy all mce errors into a temporary buffer
	 * We use a double buffering here, to reduce the risk of
	 * losing an error.
	 */
	smp_rmb();
	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
		% MCE_LOG_LEN;
	if (!count)
		return;

	m = pvt->mce_outentry;
	if (pvt->mce_in + count > MCE_LOG_LEN) {
		unsigned l = MCE_LOG_LEN - pvt->mce_in;

		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
		smp_wmb();
		pvt->mce_in = 0;
		count -= l;
		m += l;
	}
	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
	smp_wmb();
	pvt->mce_in += count;
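	/*
	 * Editor's example (illustrative): the FIFO indexes wrap modulo
	 * MCE_LOG_LEN. With MCE_LOG_LEN = 32, mce_in = 30 and mce_out = 2,
	 * count = (2 + 32 - 30) % 32 = 4, copied as two chunks: entries
	 * 30..31 first, then 0..1.
	 */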
	if (pvt->mce_overrun) {
		sbridge_printk(KERN_ERR, "Lost %d memory errors\n",
			       pvt->mce_overrun);
		smp_wmb();
		pvt->mce_overrun = 0;
	}

	/*
	 * MCE second step: parse errors and display
	 */
	for (i = 0; i < count; i++)
		sbridge_mce_output_error(mci, &pvt->mce_outentry[i]);
}
/*
 * sbridge_mce_check_error	Replicates mcelog routine to get errors
 *				This routine simply queues mcelog errors, and
 *				returns. The error itself should be handled
 *				later by sbridge_check_error.
 * WARNING: As this routine should be called at NMI time, extra care should
 * be taken to avoid deadlocks, and to be as fast as possible.
 */
static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
				   void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct sbridge_pvt *pvt;
	char *type;

	if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	mci = get_mci_for_node_id(mce->socketid);
	if (!mci)
		return NOTIFY_DONE;

	pvt = mci->pvt_info;
	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0;
	 * bit 12 has a special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");

	sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
			  "Bank %d: %016Lx\n", mce->extcpu, type,
			  mce->mcgstatus, mce->bank, mce->status);
	sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
	sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
	sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);

	sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
			  mce->time, mce->socketid, mce->apicid);
	/* Only handle if it is the right memory controller */
	if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
		return NOTIFY_DONE;

	smp_rmb();
	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
		smp_wmb();
		pvt->mce_overrun++;
		return NOTIFY_DONE;
	}

	/* Copy the memory error into the ring buffer */
	memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
	smp_wmb();
	pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;

	/* Handle fatal errors immediately */
	if (mce->mcgstatus & 1)
		sbridge_check_error(mci);

	/* Advise mcelog that the error was handled */
	return NOTIFY_STOP;
}

static struct notifier_block sbridge_mce_dec = {
	.notifier_call = sbridge_mce_check_error,
};
1912 EDAC register/unregister logic
1913 ****************************************************************************/
1915 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
1917 struct mem_ctl_info *mci = sbridge_dev->mci;
1918 struct sbridge_pvt *pvt;
1920 if (unlikely(!mci || !mci->pvt_info)) {
1921 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
1923 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
1927 pvt = mci->pvt_info;
1929 edac_dbg(0, "MC: mci = %p, dev = %p\n",
1930 mci, &sbridge_dev->pdev[0]->dev);
1932 /* Remove MC sysfs nodes */
1933 edac_mc_del_mc(mci->pdev);
1935 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1936 kfree(mci->ctl_name);
1938 sbridge_dev->mci = NULL;
static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct sbridge_pvt *pvt;
	struct pci_dev *pdev = sbridge_dev->pdev[0];
	int rc;

	/* Check the number of active and not disabled channels */
	rc = check_if_ecc_is_active(sbridge_dev->bus);
	if (unlikely(rc < 0))
		return rc;

	/* allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = NUM_CHANNELS;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = MAX_DIMMS;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
			    sizeof(*pvt));
	if (unlikely(!mci))
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p, dev = %p\n",
		 mci, &pdev->dev);

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	/* Associate sbridge_dev and mci for future usage */
	pvt->sbridge_dev = sbridge_dev;
	sbridge_dev->mci = mci;

	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "sbridge_edac.c";
	mci->mod_ver = SBRIDGE_REVISION;
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	/* Set the function pointer to an actual operation function */
	mci->edac_check = sbridge_check_error;

	pvt->info.type = type;
	if (type == IVY_BRIDGE) {
		pvt->info.rankcfgr = IB_RANK_CFG_A;
		pvt->info.get_tolm = ibridge_get_tolm;
		pvt->info.get_tohm = ibridge_get_tohm;
		pvt->info.dram_rule = ibridge_dram_rule;
		pvt->info.get_memory_type = get_memory_type;
		pvt->info.get_node_id = get_node_id;
		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
		pvt->info.interleave_list = ibridge_interleave_list;
		pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
		pvt->info.interleave_pkg = ibridge_interleave_pkg;
		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);

		/* Store pci devices at mci for faster access */
		rc = ibridge_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
	} else {
		pvt->info.rankcfgr = SB_RANK_CFG_A;
		pvt->info.get_tolm = sbridge_get_tolm;
		pvt->info.get_tohm = sbridge_get_tohm;
		pvt->info.dram_rule = sbridge_dram_rule;
		pvt->info.get_memory_type = get_memory_type;
		pvt->info.get_node_id = get_node_id;
		pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
		pvt->info.interleave_list = sbridge_interleave_list;
		pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
		pvt->info.interleave_pkg = sbridge_interleave_pkg;
		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);

		/* Store pci devices at mci for faster access */
		rc = sbridge_mci_bind_devs(mci, sbridge_dev);
		if (unlikely(rc < 0))
			goto fail0;
	}

	/* Get dimm basic config and the memory layout */
	get_dimm_config(mci);
	get_memory_layout(mci);

	/* record ptr to the generic device */
	mci->pdev = &pdev->dev;

	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc(mci))) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		rc = -EINVAL;
		goto fail0;
	}

	return 0;

fail0:
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	sbridge_dev->mci = NULL;
	return rc;
}
static int probed;

/*
 *	sbridge_probe	Probe for ONE instance of device to see if it is
 *			present.
 *	return:
 *		0 if a device was found
 *		< 0 for an error code
 */
static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;
	u8 mc, num_mc = 0;
	struct sbridge_dev *sbridge_dev;
	enum type type;

	/* get the pci devices we want to reserve for our use */
	mutex_lock(&sbridge_edac_lock);

	/*
	 * All memory controllers are allocated at the first pass.
	 */
	if (unlikely(probed >= 1)) {
		mutex_unlock(&sbridge_edac_lock);
		return -ENODEV;
	}
	probed++;

	if (pdev->device == PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA) {
		rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table);
		type = IVY_BRIDGE;
	} else {
		rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table);
		type = SANDY_BRIDGE;
	}
	if (unlikely(rc < 0))
		goto fail0;

	mc = 0;
	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
			 mc, mc + 1, num_mc);

		sbridge_dev->mc = mc++;
		rc = sbridge_register_mci(sbridge_dev, type);
		if (unlikely(rc < 0))
			goto fail1;
	}

	sbridge_printk(KERN_INFO, "Driver loaded.\n");

	mutex_unlock(&sbridge_edac_lock);
	return 0;

fail1:
	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
		sbridge_unregister_mci(sbridge_dev);

	sbridge_put_all_devices();
fail0:
	mutex_unlock(&sbridge_edac_lock);
	return rc;
}
/*
 *	sbridge_remove	destructor for one instance of device
 */
static void sbridge_remove(struct pci_dev *pdev)
{
	struct sbridge_dev *sbridge_dev;

	/*
	 * we have trouble here: the pdev value for removal will be wrong,
	 * since it will point to the X58 register used to detect that the
	 * machine is a Nehalem or newer design. However, due to the way
	 * several PCI devices are grouped together to provide MC
	 * functionality, we need a different method for releasing the
	 * devices.
	 */

	mutex_lock(&sbridge_edac_lock);

	if (unlikely(!probed)) {
		mutex_unlock(&sbridge_edac_lock);
		return;
	}

	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
		sbridge_unregister_mci(sbridge_dev);

	/* Release PCI resources */
	sbridge_put_all_devices();

	probed--;

	mutex_unlock(&sbridge_edac_lock);
}
MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);

/*
 *	sbridge_driver	pci_driver structure for this module
 */
static struct pci_driver sbridge_driver = {
	.name     = "sbridge_edac",
	.probe    = sbridge_probe,
	.remove   = sbridge_remove,
	.id_table = sbridge_pci_tbl,
};
/*
 *	sbridge_init	Module entry function
 *			Try to initialize this module for its devices
 */
static int __init sbridge_init(void)
{
	int pci_rc;

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&sbridge_driver);
	if (pci_rc >= 0) {
		mce_register_decode_chain(&sbridge_mce_dec);
		if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
			sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
		return 0;
	}

	sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
		       pci_rc);

	return pci_rc;
}
/*
 *	sbridge_exit()	Module exit function
 *			Unregister the driver
 */
static void __exit sbridge_exit(void)
{
	pci_unregister_driver(&sbridge_driver);
	mce_unregister_decode_chain(&sbridge_mce_dec);
}

module_init(sbridge_init);
module_exit(sbridge_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
		   SBRIDGE_REVISION);