1 /* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
3 * This driver supports the memory controllers found on the Intel
4 * processor family Sandy Bridge.
6 * This file may be distributed under the terms of the
7 * GNU General Public License version 2 only.
9 * Copyright (c) 2011 by:
10 * Mauro Carvalho Chehab
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/pci.h>
16 #include <linux/pci_ids.h>
17 #include <linux/slab.h>
18 #include <linux/delay.h>
19 #include <linux/edac.h>
20 #include <linux/mmzone.h>
21 #include <linux/smp.h>
22 #include <linux/bitmap.h>
23 #include <linux/math64.h>
24 #include <asm/processor.h>
27 #include "edac_core.h"
30 static LIST_HEAD(sbridge_edac_list);
31 static DEFINE_MUTEX(sbridge_edac_lock);
35 * Alter this version for the module when modifications are made
37 #define SBRIDGE_REVISION " Ver: 1.1.0 "
38 #define EDAC_MOD_STR "sbridge_edac"
43 #define sbridge_printk(level, fmt, arg...) \
44 edac_printk(level, "sbridge", fmt, ##arg)
46 #define sbridge_mc_printk(mci, level, fmt, arg...) \
47 edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
50 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
52 #define GET_BITFIELD(v, lo, hi) \
53 (((v) & GENMASK_ULL(hi, lo)) >> (lo))
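/*
 * Worked example (illustrative values only): GET_BITFIELD(0x12345678, 4, 7)
 * masks bits 7:4 (0xf0), giving 0x70, then shifts right by 4, yielding 0x7;
 * GET_BITFIELD(0x80000000, 31, 31) yields 1. All of the register decoding
 * macros below are built on this helper.
 */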
56 * sbridge Memory Controller Registers
60 * FIXME: For now, let's order by device function, as it makes
61 * the driver's development process easier. This table should be
62 * moved to pci_id.h when submitted upstream
64 #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
65 #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
66 #define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
67 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */
68 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */
69 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
70 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */
71 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
72 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
73 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
74 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
77 * Currently unused, but will be needed in future
78 * implementations, as they hold the error counters
80 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
81 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
82 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */
83 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
85 /* Devices 12 Function 6, Offsets 0x80 to 0xcc */
86 static const u32 sbridge_dram_rule[] = {
87 0x80, 0x88, 0x90, 0x98, 0xa0,
88 0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
91 static const u32 ibridge_dram_rule[] = {
92 0x60, 0x68, 0x70, 0x78, 0x80,
93 0x88, 0x90, 0x98, 0xa0, 0xa8,
94 0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
95 0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
98 #define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff)
99 #define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3)
100 #define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1)
101 #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
102 #define A7MODE(reg) GET_BITFIELD(reg, 26, 26)
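/*
 * Decoding sketch (hypothetical register value, for illustration only):
 * the SAD limit is kept in 64 MiB granularity, so a dram rule whose bits
 * 25:6 hold 0x3 decodes as SAD_LIMIT = (3 << 26) | 0x3ffffff = 0xfffffff,
 * i.e. the rule covers addresses up to 256 MiB - 1. Bit 0 enables the
 * rule, bit 1 selects the interleave mode and bits 3:2 carry the DRAM
 * attribute decoded by get_dram_attr() below.
 */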
104 static char *get_dram_attr(u32 reg)
106 switch (DRAM_ATTR(reg)) {
118 static const u32 sbridge_interleave_list[] = {
119 0x84, 0x8c, 0x94, 0x9c, 0xa4,
120 0xac, 0xb4, 0xbc, 0xc4, 0xcc,
123 static const u32 ibridge_interleave_list[] = {
124 0x64, 0x6c, 0x74, 0x7c, 0x84,
125 0x8c, 0x94, 0x9c, 0xa4, 0xac,
126 0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
127 0xdc, 0xe4, 0xec, 0xf4, 0xfc,
130 struct interleave_pkg {
135 static const struct interleave_pkg sbridge_interleave_pkg[] = {
146 static const struct interleave_pkg ibridge_interleave_pkg[] = {
157 static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
160 return GET_BITFIELD(reg, table[interleave].start,
161 table[interleave].end);
164 /* Devices 12 Function 7 */
168 #define HASWELL_TOHM_0 0xd4
169 #define HASWELL_TOHM_1 0xd8
171 #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff)
172 #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
174 /* Device 13 Function 6 */
176 #define SAD_TARGET 0xf0
178 #define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11)
180 #define SAD_CONTROL 0xf4
182 /* Device 14 function 0 */
184 static const u32 tad_dram_rule[] = {
185 0x40, 0x44, 0x48, 0x4c,
186 0x50, 0x54, 0x58, 0x5c,
187 0x60, 0x64, 0x68, 0x6c,
189 #define MAX_TAD ARRAY_SIZE(tad_dram_rule)
191 #define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
192 #define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11)
193 #define TAD_CH(reg) GET_BITFIELD(reg, 8, 9)
194 #define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7)
195 #define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5)
196 #define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3)
197 #define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1)
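/*
 * Decoding sketch (hypothetical values, for illustration only): like the
 * SAD rules, TAD_LIMIT() is in 64 MiB granularity, so a raw field of 0x3
 * means the rule ends at 256 MiB - 1. TAD_SOCK() and TAD_CH() store the
 * interleave wayness minus one (the decoder below uses TAD_SOCK(reg) + 1
 * and TAD_CH(reg) + 1), and TAD_TGT0()..TAD_TGT3() pick the channel that
 * each interleave index is routed to.
 */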
199 /* Device 15, function 0 */
203 #define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2)
204 #define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1)
205 #define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0)
207 /* Device 15, function 1 */
209 #define RASENABLES 0xac
210 #define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0)
212 /* Device 15, functions 2-5 */
214 static const int mtr_regs[] = {
218 #define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19)
219 #define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14)
220 #define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13)
221 #define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4)
222 #define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1)
224 static const u32 tad_ch_nilv_offset[] = {
225 0x90, 0x94, 0x98, 0x9c,
226 0xa0, 0xa4, 0xa8, 0xac,
227 0xb0, 0xb4, 0xb8, 0xbc,
229 #define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29)
230 #define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26)
232 static const u32 rir_way_limit[] = {
233 0x108, 0x10c, 0x110, 0x114, 0x118,
235 #define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
237 #define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31)
238 #define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29)
240 #define MAX_RIR_WAY 8
242 static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
243 { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
244 { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
245 { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
246 { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
247 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
250 #define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
251 #define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
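/*
 * Decoding sketch (hypothetical values, for illustration only): RIR_WAY()
 * holds log2 of the rank interleave wayness, so a raw value of 2 means
 * 1 << 2 = 4-way rank interleaving in the decoder below. RIR_OFFSET() is
 * kept in 64 MiB units; get_memory_layout() shifts it left by 6 to print
 * it in MiB.
 */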
253 /* Device 16, functions 2-7 */
256 * FIXME: Implement the error count reads directly
259 static const u32 correrrcnt[] = {
260 0x104, 0x108, 0x10c, 0x110,
263 #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
264 #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
265 #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
266 #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
268 static const u32 correrrthrsld[] = {
269 0x11c, 0x120, 0x124, 0x128,
272 #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
273 #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
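/*
 * Layout sketch (hypothetical register value, for illustration only): each
 * correrrcnt register packs two ranks, so a value of 0x80010003 would
 * decode as RANK_ODD_OV = 1, RANK_ODD_ERR_CNT = 1, RANK_EVEN_OV = 0 and
 * RANK_EVEN_ERR_CNT = 3. These registers are not read yet (see the FIXME
 * above); the driver currently relies on the decoded MCEs instead.
 */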
276 /* Device 17, function 0 */
278 #define SB_RANK_CFG_A 0x0328
280 #define IB_RANK_CFG_A 0x0320
286 #define NUM_CHANNELS 4
287 #define MAX_DIMMS 3 /* Max DIMMS per channel */
288 #define CHANNEL_UNSPECIFIED 0xf /* Intel IA32 SDM 15-14 */
297 struct sbridge_info {
301 u64 (*get_tolm)(struct sbridge_pvt *pvt);
302 u64 (*get_tohm)(struct sbridge_pvt *pvt);
303 u64 (*rir_limit)(u32 reg);
304 const u32 *dram_rule;
305 const u32 *interleave_list;
306 const struct interleave_pkg *interleave_pkg;
309 u8 (*get_node_id)(struct sbridge_pvt *pvt);
310 enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt);
311 struct pci_dev *pci_vtd;
314 struct sbridge_channel {
319 struct pci_id_descr {
324 struct pci_id_table {
325 const struct pci_id_descr *descr;
330 struct list_head list;
332 u8 node_id, source_id;
333 struct pci_dev **pdev;
335 struct mem_ctl_info *mci;
339 struct pci_dev *pci_ta, *pci_ddrio, *pci_ras;
340 struct pci_dev *pci_sad0, *pci_sad1;
341 struct pci_dev *pci_ha0, *pci_ha1;
342 struct pci_dev *pci_br0, *pci_br1;
343 struct pci_dev *pci_ha1_ta;
344 struct pci_dev *pci_tad[NUM_CHANNELS];
346 struct sbridge_dev *sbridge_dev;
348 struct sbridge_info info;
349 struct sbridge_channel channel[NUM_CHANNELS];
351 /* Memory type detection */
352 bool is_mirrored, is_lockstep, is_close_pg;
354 /* Fifo double buffers */
355 struct mce mce_entry[MCE_LOG_LEN];
356 struct mce mce_outentry[MCE_LOG_LEN];
358 /* Fifo in/out counters */
359 unsigned mce_in, mce_out;
361 /* Count indicator to show errors that were not retrieved */
362 unsigned mce_overrun;
364 /* Memory description */
368 #define PCI_DESCR(device_id, opt) \
369 .dev_id = (device_id), \
372 static const struct pci_id_descr pci_dev_descr_sbridge[] = {
373 /* Processor Home Agent */
374 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) },
376 /* Memory controller */
377 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) },
378 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) },
379 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) },
380 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) },
381 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) },
382 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) },
383 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) },
385 /* System Address Decoder */
386 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) },
387 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) },
389 /* Broadcast Registers */
390 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
393 #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
394 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
395 PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
396 {0,} /* 0 terminated list. */
399 /* This changes depending on whether there is 1HA or 2HA:
401 * 0x0eb8 (17.0) is DDRIO0
403 * 0x0ebc (17.4) is DDRIO0
405 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0 0x0eb8
406 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0 0x0ebc
409 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0 0x0ea0
410 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA 0x0ea8
411 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS 0x0e71
412 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0 0x0eaa
413 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1 0x0eab
414 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2 0x0eac
415 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3 0x0ead
416 #define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD 0x0ec8
417 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0 0x0ec9
418 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1 0x0eca
419 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1 0x0e60
420 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA 0x0e68
421 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS 0x0e79
422 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 0x0e6a
423 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1 0x0e6b
425 static const struct pci_id_descr pci_dev_descr_ibridge[] = {
426 /* Processor Home Agent */
427 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) },
429 /* Memory controller */
430 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) },
431 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) },
432 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) },
433 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) },
434 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) },
435 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) },
437 /* System Address Decoder */
438 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) },
440 /* Broadcast Registers */
441 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) },
442 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) },
444 /* Optional, mode 2HA */
445 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) },
447 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) },
448 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) },
450 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) },
451 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) },
453 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) },
454 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) },
457 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
458 PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
459 {0,} /* 0 terminated list. */
462 /* Haswell support */
465 * - 3 DDR3 channels, 2 DPC per channel
468 * - 4 DDR4 channels, 3 DPC per channel
471 * - 4 DDR4 channels, 3 DPC per channel
474 * - each IMC interfaces with an SMI 2 channel
475 * - each SMI channel interfaces with a scalable memory buffer
476 * - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
478 #define HASWELL_DDRCRCLKCONTROLS 0xa10
479 #define HASWELL_HASYSDEFEATURE2 0x84
480 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
481 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0
482 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60
483 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8
484 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71
485 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68
486 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79
487 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
488 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
489 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
490 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
491 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
492 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
493 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
494 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
495 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
496 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
497 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
498 static const struct pci_id_descr pci_dev_descr_haswell[] = {
499 /* first item must be the HA */
500 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) },
502 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0) },
503 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0) },
505 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1) },
507 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0) },
508 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0) },
509 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0) },
510 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0) },
511 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1) },
512 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) },
514 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) },
516 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) },
517 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) },
518 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1) },
519 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1) },
520 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1) },
521 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1) },
524 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
525 PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
526 {0,} /* 0 terminated list. */
530 * pci_device_id table of the devices we are looking for
532 static const struct pci_device_id sbridge_pci_tbl[] = {
533 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
534 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
535 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)},
536 {0,} /* 0 terminated list. */
540 /****************************************************************************
541 Ancillary status routines
542 ****************************************************************************/
544 static inline int numrank(enum type type, u32 mtr)
546 int ranks = (1 << RANK_CNT_BITS(mtr));
553 edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
554 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
561 static inline int numrow(u32 mtr)
563 int rows = (RANK_WIDTH_BITS(mtr) + 12);
565 if (rows < 13 || rows > 18) {
566 edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n",
567 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
574 static inline int numcol(u32 mtr)
576 int cols = (COL_WIDTH_BITS(mtr) + 10);
579 edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n",
580 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
587 static struct sbridge_dev *get_sbridge_dev(u8 bus)
589 struct sbridge_dev *sbridge_dev;
591 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
592 if (sbridge_dev->bus == bus)
599 static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
600 const struct pci_id_table *table)
602 struct sbridge_dev *sbridge_dev;
604 sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
608 sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs,
610 if (!sbridge_dev->pdev) {
615 sbridge_dev->bus = bus;
616 sbridge_dev->n_devs = table->n_devs;
617 list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
622 static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
624 list_del(&sbridge_dev->list);
625 kfree(sbridge_dev->pdev);
629 static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
633 /* Address range is 32:28 */
634 pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
635 return GET_TOLM(reg);
638 static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
642 pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
643 return GET_TOHM(reg);
646 static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
650 pci_read_config_dword(pvt->pci_br1, TOLM, &reg);
652 return GET_TOLM(reg);
655 static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
659 pci_read_config_dword(pvt->pci_br1, TOHM, &reg);
661 return GET_TOHM(reg);
664 static u64 rir_limit(u32 reg)
666 return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff;
669 static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
674 if (pvt->pci_ddrio) {
675 pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
677 if (GET_BITFIELD(reg, 11, 11))
678 /* FIXME: Can also be LRDIMM */
688 static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
691 bool registered = false;
692 enum mem_type mtype = MEM_UNKNOWN;
697 pci_read_config_dword(pvt->pci_ddrio,
698 HASWELL_DDRCRCLKCONTROLS, &reg);
700 if (GET_BITFIELD(reg, 16, 16))
703 pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
704 if (GET_BITFIELD(reg, 14, 14)) {
720 static u8 get_node_id(struct sbridge_pvt *pvt)
723 pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
724 return GET_BITFIELD(reg, 0, 2);
727 static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
731 pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
732 return GET_BITFIELD(reg, 0, 3);
735 static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
739 pci_read_config_dword(pvt->info.pci_vtd, TOLM, &reg);
740 return (GET_BITFIELD(reg, 26, 31) << 26) | 0x1ffffff;
743 static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
748 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
749 rc = GET_BITFIELD(reg, 26, 31);
750 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
751 rc = ((reg << 6) | rc) << 26;
753 return rc | 0x1ffffff;
756 static u64 haswell_rir_limit(u32 reg)
758 return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1;
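/*
 * Note (illustrative arithmetic only): both rir_limit() flavours return an
 * inclusive limit in 512 MiB granularity, e.g. a raw field of 1 yields
 * 0x3fffffff (1 GiB - 1) with either formula, since
 * (n << 29) | 0x1fffffff == ((n + 1) << 29) - 1. Haswell merely widens the
 * field from bits 10:1 to bits 11:1.
 */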
761 static inline u8 sad_pkg_socket(u8 pkg)
763 /* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
764 return ((pkg >> 3) << 2) | (pkg & 0x3);
767 static inline u8 sad_pkg_ha(u8 pkg)
769 return (pkg >> 2) & 0x1;
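/*
 * Worked example (hypothetical package value): with pkg = 5 (binary 0101,
 * i.e. SASS = 0 1 01), sad_pkg_ha() returns 1 and sad_pkg_socket()
 * returns (0 << 2) | 1 = 1, so the error would be routed to socket 1,
 * home agent 1.
 */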
772 /****************************************************************************
773 Memory check routines
774 ****************************************************************************/
775 static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id)
777 struct pci_dev *pdev = NULL;
780 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev);
781 if (pdev && pdev->bus->number == bus)
789 * check_if_ecc_is_active() - Checks if ECC is active
791 * @type: Memory controller type
792 * returns: 0 in case ECC is active, -ENODEV if it can't be determined or
795 static int check_if_ecc_is_active(const u8 bus, enum type type)
797 struct pci_dev *pdev = NULL;
800 if (type == IVY_BRIDGE)
801 id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA;
802 else if (type == HASWELL)
803 id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA;
805 id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA;
807 pdev = get_pdev_same_bus(bus, id);
809 sbridge_printk(KERN_ERR, "Couldn't find PCI device "
810 "%04x:%04x! on bus %02d\n",
811 PCI_VENDOR_ID_INTEL, id, bus);
815 pci_read_config_dword(pdev, MCMTR, &mcmtr);
816 if (!IS_ECC_ENABLED(mcmtr)) {
817 sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
823 static int get_dimm_config(struct mem_ctl_info *mci)
825 struct sbridge_pvt *pvt = mci->pvt_info;
826 struct dimm_info *dimm;
827 unsigned i, j, banks, ranks, rows, cols, npages;
833 if (pvt->info.type == HASWELL)
834 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
836 pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
838 pvt->sbridge_dev->source_id = SOURCE_ID(reg);
840 pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
841 edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
842 pvt->sbridge_dev->mc,
843 pvt->sbridge_dev->node_id,
844 pvt->sbridge_dev->source_id);
846 pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
847 if (IS_MIRROR_ENABLED(reg)) {
848 edac_dbg(0, "Memory mirror is enabled\n");
849 pvt->is_mirrored = true;
851 edac_dbg(0, "Memory mirror is disabled\n");
852 pvt->is_mirrored = false;
855 pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
856 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
857 edac_dbg(0, "Lockstep is enabled\n");
858 mode = EDAC_S8ECD8ED;
859 pvt->is_lockstep = true;
861 edac_dbg(0, "Lockstep is disabled\n");
862 mode = EDAC_S4ECD4ED;
863 pvt->is_lockstep = false;
865 if (IS_CLOSE_PG(pvt->info.mcmtr)) {
866 edac_dbg(0, "address map is on closed page mode\n");
867 pvt->is_close_pg = true;
869 edac_dbg(0, "address map is on open page mode\n");
870 pvt->is_close_pg = false;
873 mtype = pvt->info.get_memory_type(pvt);
874 if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
875 edac_dbg(0, "Memory is registered\n");
876 else if (mtype == MEM_UNKNOWN)
877 edac_dbg(0, "Cannot determine memory type\n");
879 edac_dbg(0, "Memory is unregistered\n");
881 if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
886 for (i = 0; i < NUM_CHANNELS; i++) {
889 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
890 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
892 pci_read_config_dword(pvt->pci_tad[i],
894 edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
895 if (IS_DIMM_PRESENT(mtr)) {
896 pvt->channel[i].dimms++;
898 ranks = numrank(pvt->info.type, mtr);
902 size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
903 npages = MiB_TO_PAGES(size);
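/*
 * Sizing sketch (hypothetical DIMM geometry): with 2 ranks, 8 banks,
 * 2^15 rows and 2^10 columns the product is 2^29 locations of 8 bytes
 * each, and the shift by (20 - 3) above converts that directly to
 * 2^12 = 4096 MiB, i.e. a 4 GiB DIMM.
 */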
905 edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
906 pvt->sbridge_dev->mc, i, j,
908 banks, ranks, rows, cols);
910 dimm->nr_pages = npages;
914 dimm->dtype = DEV_X16;
917 dimm->dtype = DEV_X8;
920 dimm->dtype = DEV_X4;
924 dimm->edac_mode = mode;
925 snprintf(dimm->label, sizeof(dimm->label),
926 "CPU_SrcID#%u_Channel#%u_DIMM#%u",
927 pvt->sbridge_dev->source_id, i, j);
935 static void get_memory_layout(const struct mem_ctl_info *mci)
937 struct sbridge_pvt *pvt = mci->pvt_info;
938 int i, j, k, n_sads, n_tads, sad_interl;
946 * Step 1) Get TOLM/TOHM ranges
949 pvt->tolm = pvt->info.get_tolm(pvt);
950 tmp_mb = (1 + pvt->tolm) >> 20;
952 mb = div_u64_rem(tmp_mb, 1000, &kb);
953 edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
955 /* Address range is already 45:25 */
956 pvt->tohm = pvt->info.get_tohm(pvt);
957 tmp_mb = (1 + pvt->tohm) >> 20;
959 mb = div_u64_rem(tmp_mb, 1000, &kb);
960 edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
963 * Step 2) Get SAD range and SAD Interleave list
964 * TAD registers contain the interleave wayness. However, it
965 * seems simpler to just discover it indirectly, with the
969 for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
970 /* SAD_LIMIT Address range is 45:26 */
971 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
973 limit = SAD_LIMIT(reg);
975 if (!DRAM_RULE_ENABLE(reg))
981 tmp_mb = (limit + 1) >> 20;
982 mb = div_u64_rem(tmp_mb, 1000, &kb);
983 edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
987 ((u64)tmp_mb) << 20L,
988 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
992 pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
994 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
995 for (j = 0; j < 8; j++) {
996 u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
997 if (j > 0 && sad_interl == pkg)
1000 edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
1006 * Step 3) Get TAD range
1009 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1010 pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
1012 limit = TAD_LIMIT(reg);
1015 tmp_mb = (limit + 1) >> 20;
1017 mb = div_u64_rem(tmp_mb, 1000, &kb);
1018 edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
1020 ((u64)tmp_mb) << 20L,
1032 * Step 4) Get TAD offsets, per each channel
1034 for (i = 0; i < NUM_CHANNELS; i++) {
1035 if (!pvt->channel[i].dimms)
1037 for (j = 0; j < n_tads; j++) {
1038 pci_read_config_dword(pvt->pci_tad[i],
1039 tad_ch_nilv_offset[j],
1041 tmp_mb = TAD_OFFSET(reg) >> 20;
1042 mb = div_u64_rem(tmp_mb, 1000, &kb);
1043 edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1046 ((u64)tmp_mb) << 20L,
1052 * Step 6) Get RIR Wayness/Limit, per each channel
1054 for (i = 0; i < NUM_CHANNELS; i++) {
1055 if (!pvt->channel[i].dimms)
1057 for (j = 0; j < MAX_RIR_RANGES; j++) {
1058 pci_read_config_dword(pvt->pci_tad[i],
1062 if (!IS_RIR_VALID(reg))
1065 tmp_mb = pvt->info.rir_limit(reg) >> 20;
1066 rir_way = 1 << RIR_WAY(reg);
1067 mb = div_u64_rem(tmp_mb, 1000, &kb);
1068 edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1071 ((u64)tmp_mb) << 20L,
1075 for (k = 0; k < rir_way; k++) {
1076 pci_read_config_dword(pvt->pci_tad[i],
1079 tmp_mb = RIR_OFFSET(reg) << 6;
1081 mb = div_u64_rem(tmp_mb, 1000, &kb);
1082 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1085 ((u64)tmp_mb) << 20L,
1086 (u32)RIR_RNK_TGT(reg),
1093 static struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
1095 struct sbridge_dev *sbridge_dev;
1097 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1098 if (sbridge_dev->node_id == node_id)
1099 return sbridge_dev->mci;
1104 static int get_memory_error_data(struct mem_ctl_info *mci,
1109 char **area_type, char *msg)
1111 struct mem_ctl_info *new_mci;
1112 struct sbridge_pvt *pvt = mci->pvt_info;
1113 struct pci_dev *pci_ha;
1114 int n_rir, n_sads, n_tads, sad_way, sck_xch;
1115 int sad_interl, idx, base_ch;
1116 int interleave_mode, shiftup = 0;
1117 unsigned sad_interleave[pvt->info.max_interleave];
1119 u8 ch_way, sck_way, pkg, sad_ha = 0;
1123 u64 ch_addr, offset, limit = 0, prv = 0;
1127 * Step 0) Check if the address is at special memory ranges
1128 * The check below probably covers all cases where
1129 * the error is not inside memory, except for the legacy
1130 * range (e.g. VGA addresses). It is unlikely, however, that the
1131 * memory controller would generate an error on that range.
1133 if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
1134 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
1137 if (addr >= (u64)pvt->tohm) {
1138 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
1143 * Step 1) Get socket
1145 for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1146 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1149 if (!DRAM_RULE_ENABLE(reg))
1152 limit = SAD_LIMIT(reg);
1154 sprintf(msg, "Can't discover the memory socket");
1161 if (n_sads == pvt->info.max_sad) {
1162 sprintf(msg, "Can't discover the memory socket");
1166 *area_type = get_dram_attr(dram_rule);
1167 interleave_mode = INTERLEAVE_MODE(dram_rule);
1169 pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1172 if (pvt->info.type == SANDY_BRIDGE) {
1173 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1174 for (sad_way = 0; sad_way < 8; sad_way++) {
1175 u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
1176 if (sad_way > 0 && sad_interl == pkg)
1178 sad_interleave[sad_way] = pkg;
1179 edac_dbg(0, "SAD interleave #%d: %d\n",
1180 sad_way, sad_interleave[sad_way]);
1182 edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
1183 pvt->sbridge_dev->mc,
1188 !interleave_mode ? "" : "XOR[18:16]");
1189 if (interleave_mode)
1190 idx = ((addr >> 6) ^ (addr >> 16)) & 7;
1192 idx = (addr >> 6) & 7;
1206 sprintf(msg, "Can't discover socket interleave");
1209 *socket = sad_interleave[idx];
1210 edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
1211 idx, sad_way, *socket);
1212 } else if (pvt->info.type == HASWELL) {
1213 int bits, a7mode = A7MODE(dram_rule);
1216 /* A7 mode swaps P9 with P6 */
1217 bits = GET_BITFIELD(addr, 7, 8) << 1;
1218 bits |= GET_BITFIELD(addr, 9, 9);
1220 bits = GET_BITFIELD(addr, 7, 9);
1222 if (interleave_mode) {
1223 /* interleave mode will XOR {8,7,6} with {18,17,16} */
1224 idx = GET_BITFIELD(addr, 16, 18);
1229 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
1230 *socket = sad_pkg_socket(pkg);
1231 sad_ha = sad_pkg_ha(pkg);
1234 /* MCChanShiftUpEnable */
1235 pci_read_config_dword(pvt->pci_ha0,
1236 HASWELL_HASYSDEFEATURE2, &reg);
1237 shiftup = GET_BITFIELD(reg, 22, 22);
1240 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
1241 idx, *socket, sad_ha, shiftup);
1243 /* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
1244 idx = (addr >> 6) & 7;
1245 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
1246 *socket = sad_pkg_socket(pkg);
1247 sad_ha = sad_pkg_ha(pkg);
1248 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
1249 idx, *socket, sad_ha);
1253 * Move to the proper node structure, in order to access the
1254 * right PCI registers
1256 new_mci = get_mci_for_node_id(*socket);
1258 sprintf(msg, "Struct for socket #%u wasn't initialized",
1263 pvt = mci->pvt_info;
1266 * Step 2) Get memory channel
1269 if (pvt->info.type == SANDY_BRIDGE)
1270 pci_ha = pvt->pci_ha0;
1273 pci_ha = pvt->pci_ha1;
1275 pci_ha = pvt->pci_ha0;
1277 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1278 pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
1279 limit = TAD_LIMIT(reg);
1281 sprintf(msg, "Can't discover the memory channel");
1288 if (n_tads == MAX_TAD) {
1289 sprintf(msg, "Can't discover the memory channel");
1293 ch_way = TAD_CH(reg) + 1;
1294 sck_way = TAD_SOCK(reg) + 1;
1299 idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
1303 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
1307 base_ch = TAD_TGT0(reg);
1310 base_ch = TAD_TGT1(reg);
1313 base_ch = TAD_TGT2(reg);
1316 base_ch = TAD_TGT3(reg);
1319 sprintf(msg, "Can't discover the TAD target");
1322 *channel_mask = 1 << base_ch;
1324 pci_read_config_dword(pvt->pci_tad[base_ch],
1325 tad_ch_nilv_offset[n_tads],
1328 if (pvt->is_mirrored) {
1329 *channel_mask |= 1 << ((base_ch + 2) % 4);
1333 sck_xch = 1 << sck_way * (ch_way >> 1);
1336 sprintf(msg, "Invalid mirror set. Can't decode addr");
1340 sck_xch = (1 << sck_way) * ch_way;
1342 if (pvt->is_lockstep)
1343 *channel_mask |= 1 << ((base_ch + 1) % 4);
1345 offset = TAD_OFFSET(tad_offset);
1347 edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
1358 /* Calculate channel address */
1359 /* Remove the TAD offset */
1361 if (offset > addr) {
1362 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
1367 /* Store the low bits [0:6] of the addr */
1368 ch_addr = addr & 0x7f;
1369 /* Remove socket wayness and remove 6 bits */
1371 addr = div_u64(addr, sck_xch);
1373 /* Divide by channel way */
1374 addr = addr / ch_way;
1376 /* Recover the last 6 bits */
1377 ch_addr |= addr << 6;
1380 * Step 3) Decode rank
1382 for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
1383 pci_read_config_dword(pvt->pci_tad[base_ch],
1384 rir_way_limit[n_rir],
1387 if (!IS_RIR_VALID(reg))
1390 limit = pvt->info.rir_limit(reg);
1391 mb = div_u64_rem(limit >> 20, 1000, &kb);
1392 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
1397 if (ch_addr <= limit)
1400 if (n_rir == MAX_RIR_RANGES) {
1401 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
1405 rir_way = RIR_WAY(reg);
1407 if (pvt->is_close_pg)
1408 idx = (ch_addr >> 6);
1410 idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */
1411 idx %= 1 << rir_way;
1413 pci_read_config_dword(pvt->pci_tad[base_ch],
1414 rir_offset[n_rir][idx],
1416 *rank = RIR_RNK_TGT(reg);
1418 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
1428 /****************************************************************************
1429 Device initialization routines: put/get, init/exit
1430 ****************************************************************************/
1433 * sbridge_put_all_devices 'put' all the devices that we have
1434 * reserved via 'get'
1436 static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
1441 for (i = 0; i < sbridge_dev->n_devs; i++) {
1442 struct pci_dev *pdev = sbridge_dev->pdev[i];
1445 edac_dbg(0, "Removing dev %02x:%02x.%d\n",
1447 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1452 static void sbridge_put_all_devices(void)
1454 struct sbridge_dev *sbridge_dev, *tmp;
1456 list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
1457 sbridge_put_devices(sbridge_dev);
1458 free_sbridge_dev(sbridge_dev);
1462 static int sbridge_get_onedevice(struct pci_dev **prev,
1464 const struct pci_id_table *table,
1465 const unsigned devno)
1467 struct sbridge_dev *sbridge_dev;
1468 const struct pci_id_descr *dev_descr = &table->descr[devno];
1469 struct pci_dev *pdev = NULL;
1472 sbridge_printk(KERN_DEBUG,
1473 "Seeking for: PCI ID %04x:%04x\n",
1474 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1476 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1477 dev_descr->dev_id, *prev);
1485 if (dev_descr->optional)
1488 /* if the HA wasn't found */
1492 sbridge_printk(KERN_INFO,
1493 "Device not found: %04x:%04x\n",
1494 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1496 /* End of list, leave */
1499 bus = pdev->bus->number;
1501 sbridge_dev = get_sbridge_dev(bus);
1503 sbridge_dev = alloc_sbridge_dev(bus, table);
1511 if (sbridge_dev->pdev[devno]) {
1512 sbridge_printk(KERN_ERR,
1513 "Duplicated device for %04x:%04x\n",
1514 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1519 sbridge_dev->pdev[devno] = pdev;
1521 /* Be sure that the device is enabled */
1522 if (unlikely(pci_enable_device(pdev) < 0)) {
1523 sbridge_printk(KERN_ERR,
1524 "Couldn't enable %04x:%04x\n",
1525 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1529 edac_dbg(0, "Detected %04x:%04x\n",
1530 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1533 * As stated in drivers/pci/search.c, the reference count for
1534 * @from is always decremented if it is not %NULL. So, as we need
1535 * to get all devices up to null, we need to do a get for the device
1545 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
1546 * devices we want to reference for this driver.
1547 * @num_mc: pointer to the memory controllers count, to be incremented in case
1549 * @table: model specific table
1551 * returns 0 in case of success or error code
1553 static int sbridge_get_all_devices(u8 *num_mc,
1554 const struct pci_id_table *table)
1557 struct pci_dev *pdev = NULL;
1559 while (table && table->descr) {
1560 for (i = 0; i < table->n_devs; i++) {
1563 rc = sbridge_get_onedevice(&pdev, num_mc,
1570 sbridge_put_all_devices();
1581 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
1582 struct sbridge_dev *sbridge_dev)
1584 struct sbridge_pvt *pvt = mci->pvt_info;
1585 struct pci_dev *pdev;
1588 for (i = 0; i < sbridge_dev->n_devs; i++) {
1589 pdev = sbridge_dev->pdev[i];
1593 switch (pdev->device) {
1594 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
1595 pvt->pci_sad0 = pdev;
1597 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
1598 pvt->pci_sad1 = pdev;
1600 case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
1601 pvt->pci_br0 = pdev;
1603 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
1604 pvt->pci_ha0 = pdev;
1606 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
1609 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
1610 pvt->pci_ras = pdev;
1612 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
1613 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
1614 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
1615 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
1617 int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0;
1618 pvt->pci_tad[id] = pdev;
1621 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
1622 pvt->pci_ddrio = pdev;
1628 edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
1629 pdev->vendor, pdev->device,
1634 /* Check if everything was registered */
1635 if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
1636 !pvt->pci_tad || !pvt->pci_ras || !pvt->pci_ta)
1639 for (i = 0; i < NUM_CHANNELS; i++) {
1640 if (!pvt->pci_tad[i])
1646 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
1650 sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
1651 PCI_VENDOR_ID_INTEL, pdev->device);
1655 static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
1656 struct sbridge_dev *sbridge_dev)
1658 struct sbridge_pvt *pvt = mci->pvt_info;
1659 struct pci_dev *pdev, *tmp;
1661 bool mode_2ha = false;
1663 tmp = pci_get_device(PCI_VENDOR_ID_INTEL,
1664 PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, NULL);
1670 for (i = 0; i < sbridge_dev->n_devs; i++) {
1671 pdev = sbridge_dev->pdev[i];
1675 switch (pdev->device) {
1676 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
1677 pvt->pci_ha0 = pdev;
1679 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
1681 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
1682 pvt->pci_ras = pdev;
1684 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
1685 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
1686 /* if we have 2 HAs active, channels 2 and 3
1687 * are in the other device */
1691 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
1692 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
1694 int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0;
1695 pvt->pci_tad[id] = pdev;
1698 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
1699 pvt->pci_ddrio = pdev;
1701 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
1703 pvt->pci_ddrio = pdev;
1705 case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
1706 pvt->pci_sad0 = pdev;
1708 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
1709 pvt->pci_br0 = pdev;
1711 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
1712 pvt->pci_br1 = pdev;
1714 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
1715 pvt->pci_ha1 = pdev;
1717 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
1718 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
1720 int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 2;
1722 /* we shouldn't have this device if we have just one
1725 pvt->pci_tad[id] = pdev;
1732 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
1734 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1738 /* Check if everything was registered */
1739 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 ||
1740 !pvt->pci_br1 || !pvt->pci_tad || !pvt->pci_ras ||
1744 for (i = 0; i < NUM_CHANNELS; i++) {
1745 if (!pvt->pci_tad[i])
1751 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
1755 sbridge_printk(KERN_ERR,
1756 "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
1761 static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
1762 struct sbridge_dev *sbridge_dev)
1764 struct sbridge_pvt *pvt = mci->pvt_info;
1765 struct pci_dev *pdev, *tmp;
1767 bool mode_2ha = false;
1769 tmp = pci_get_device(PCI_VENDOR_ID_INTEL,
1770 PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, NULL);
1776 /* there's only one device per system; not tied to any bus */
1777 if (pvt->info.pci_vtd == NULL)
1778 /* result will be checked later */
1779 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
1780 PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
1783 for (i = 0; i < sbridge_dev->n_devs; i++) {
1784 pdev = sbridge_dev->pdev[i];
1788 switch (pdev->device) {
1789 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
1790 pvt->pci_sad0 = pdev;
1792 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
1793 pvt->pci_sad1 = pdev;
1795 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
1796 pvt->pci_ha0 = pdev;
1798 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
1801 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL:
1802 pvt->pci_ras = pdev;
1804 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
1805 pvt->pci_tad[0] = pdev;
1807 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
1808 pvt->pci_tad[1] = pdev;
1810 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
1812 pvt->pci_tad[2] = pdev;
1814 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
1816 pvt->pci_tad[3] = pdev;
1818 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
1819 pvt->pci_ddrio = pdev;
1821 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
1822 pvt->pci_ha1 = pdev;
1824 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
1825 pvt->pci_ha1_ta = pdev;
1827 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
1829 pvt->pci_tad[2] = pdev;
1831 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
1833 pvt->pci_tad[3] = pdev;
1839 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
1841 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1845 /* Check if everything was registered */
1846 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
1847 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
1850 for (i = 0; i < NUM_CHANNELS; i++) {
1851 if (!pvt->pci_tad[i])
1857 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
1861 /****************************************************************************
1862 Error check routines
1863 ****************************************************************************/
1866 * While Sandy Bridge has error count registers, the SMI BIOS reads values
1867 * from them and resets the counters, so they are not reliable for the OS
1868 * to read. We have no option but to just trust whatever the MCE is
1869 * telling us about the errors.
1871 static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1872 const struct mce *m)
1874 struct mem_ctl_info *new_mci;
1875 struct sbridge_pvt *pvt = mci->pvt_info;
1876 enum hw_event_mc_err_type tp_event;
1877 char *type, *optype, msg[256];
1878 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
1879 bool overflow = GET_BITFIELD(m->status, 62, 62);
1880 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
1882 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1883 u32 mscod = GET_BITFIELD(m->status, 16, 31);
1884 u32 errcode = GET_BITFIELD(m->status, 0, 15);
1885 u32 channel = GET_BITFIELD(m->status, 0, 3);
1886 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1887 long channel_mask, first_channel;
1890 char *area_type = NULL;
1892 if (pvt->info.type == IVY_BRIDGE)
1895 recoverable = GET_BITFIELD(m->status, 56, 56);
1897 if (uncorrected_error) {
1900 tp_event = HW_EVENT_ERR_FATAL;
1903 tp_event = HW_EVENT_ERR_UNCORRECTED;
1907 tp_event = HW_EVENT_ERR_CORRECTED;
1911 * According to Table 15-9 of the Intel Architecture spec vol 3A,
1912 * memory errors should fit in this mask:
1913 * 000f 0000 1mmm cccc (binary)
1915 * f = Correction Report Filtering Bit. If 1, subsequent errors
1919 * If the mask doesn't match, report an error to the parsing logic
1921 if (!((errcode & 0xef80) == 0x80)) {
1922 optype = "Can't parse: it is not a mem";
1924 switch (optypenum) {
1926 optype = "generic undef request error";
1929 optype = "memory read error";
1932 optype = "memory write error";
1935 optype = "addr/cmd error";
1938 optype = "memory scrubbing error";
1941 optype = "reserved";
1946 /* Only decode errors with a valid address (ADDRV) */
1947 if (!GET_BITFIELD(m->status, 58, 58))
1950 rc = get_memory_error_data(mci, m->addr, &socket,
1951 &channel_mask, &rank, &area_type, msg);
1954 new_mci = get_mci_for_node_id(socket);
1956 strcpy(msg, "Error: socket got corrupted!");
1960 pvt = mci->pvt_info;
1962 first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
1973 * FIXME: On some memory configurations (mirror, lockstep), the
1974 * Memory Controller can't point the error to a single DIMM. The
1975 * EDAC core should be handling the channel mask, in order to point
1976 * to the group of DIMMs where the error may be happening.
1978 if (!pvt->is_lockstep && !pvt->is_mirrored && !pvt->is_close_pg)
1979 channel = first_channel;
1981 snprintf(msg, sizeof(msg),
1982 "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
1983 overflow ? " OVERFLOW" : "",
1984 (uncorrected_error && recoverable) ? " recoverable" : "",
1991 edac_dbg(0, "%s\n", msg);
1993 /* FIXME: need support for channel mask */
1995 if (channel == CHANNEL_UNSPECIFIED)
1998 /* Call the helper to output message */
1999 edac_mc_handle_error(tp_event, mci, core_err_cnt,
2000 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
2005 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
2012 * sbridge_check_error Retrieve and process errors reported by the
2013 * hardware. Called by the Core module.
2015 static void sbridge_check_error(struct mem_ctl_info *mci)
2017 struct sbridge_pvt *pvt = mci->pvt_info;
2023 * MCE first step: Copy all mce errors into a temporary buffer
2024 * We use double buffering here to reduce the risk of
2028 count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
2033 m = pvt->mce_outentry;
2034 if (pvt->mce_in + count > MCE_LOG_LEN) {
2035 unsigned l = MCE_LOG_LEN - pvt->mce_in;
2037 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
2043 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
2045 pvt->mce_in += count;
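/*
 * Ring-buffer sketch (hypothetical indices, assuming for illustration that
 * MCE_LOG_LEN is 32): with mce_in = 30 and mce_out = 2 there are
 * 2 + 32 - 30 = 4 pending entries; the first memcpy above drains the two
 * entries at the end of mce_entry[], mce_in wraps to 0, and the second
 * memcpy copies the remaining two from the start of the array.
 */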
2048 if (pvt->mce_overrun) {
2049 sbridge_printk(KERN_ERR, "Lost %d memory errors\n",
2052 pvt->mce_overrun = 0;
2056 * MCE second step: parse errors and display
2058 for (i = 0; i < count; i++)
2059 sbridge_mce_output_error(mci, &pvt->mce_outentry[i]);
2063 * sbridge_mce_check_error Replicates mcelog routine to get errors
2064 * This routine simply queues mcelog errors, and
2065 * returns. The error itself should be handled later
2066 * by sbridge_check_error.
2067 * WARNING: As this routine should be called at NMI time, extra care should
2068 * be taken to avoid deadlocks, and to be as fast as possible.
2070 static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
2073 struct mce *mce = (struct mce *)data;
2074 struct mem_ctl_info *mci;
2075 struct sbridge_pvt *pvt;
2078 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
2081 mci = get_mci_for_node_id(mce->socketid);
2084 pvt = mci->pvt_info;
2087 * Just let mcelog handle it if the error is
2088 * outside the memory controller. A memory error
2089 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
2090 * Bit 12 has a special meaning.
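*
* Sketch (hypothetical status value): MCACOD 0x009f has bit 7 set, bits
* 8-11 and 13-15 clear and channel field 0xf (CHANNEL_UNSPECIFIED), so
* (0x009f & 0xefff) >> 7 == 1 and the event is queued in the ring buffer
* below; a cache-hierarchy code such as 0x0151 fails the test and is
* left to mcelog.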
2092 if ((mce->status & 0xefff) >> 7 != 1)
2095 if (mce->mcgstatus & MCG_STATUS_MCIP)
2100 sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
2102 sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
2103 "Bank %d: %016Lx\n", mce->extcpu, type,
2104 mce->mcgstatus, mce->bank, mce->status);
2105 sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
2106 sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
2107 sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
2109 sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
2110 "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
2111 mce->time, mce->socketid, mce->apicid);
2114 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
2120 /* Copy the memory error into the ring buffer */
2121 memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
2123 pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
2125 /* Handle fatal errors immediately */
2126 if (mce->mcgstatus & 1)
2127 sbridge_check_error(mci);
2129 /* Advise mcelog that the error was handled */
2133 static struct notifier_block sbridge_mce_dec = {
2134 .notifier_call = sbridge_mce_check_error,
2137 /****************************************************************************
2138 EDAC register/unregister logic
2139 ****************************************************************************/
2141 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
2143 struct mem_ctl_info *mci = sbridge_dev->mci;
2144 struct sbridge_pvt *pvt;
2146 if (unlikely(!mci || !mci->pvt_info)) {
2147 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
2149 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
2153 pvt = mci->pvt_info;
2155 edac_dbg(0, "MC: mci = %p, dev = %p\n",
2156 mci, &sbridge_dev->pdev[0]->dev);
2158 /* Remove MC sysfs nodes */
2159 edac_mc_del_mc(mci->pdev);
2161 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
2162 kfree(mci->ctl_name);
2164 sbridge_dev->mci = NULL;
2167 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
2169 struct mem_ctl_info *mci;
2170 struct edac_mc_layer layers[2];
2171 struct sbridge_pvt *pvt;
2172 struct pci_dev *pdev = sbridge_dev->pdev[0];
2175 /* Check whether ECC is active before registering this MC */
2176 rc = check_if_ecc_is_active(sbridge_dev->bus, type);
2177 if (unlikely(rc < 0))
2180 /* allocate a new MC control structure */
2181 layers[0].type = EDAC_MC_LAYER_CHANNEL;
2182 layers[0].size = NUM_CHANNELS;
2183 layers[0].is_virt_csrow = false;
2184 layers[1].type = EDAC_MC_LAYER_SLOT;
2185 layers[1].size = MAX_DIMMS;
2186 layers[1].is_virt_csrow = true;
2187 mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
2193 edac_dbg(0, "MC: mci = %p, dev = %p\n",
2196 pvt = mci->pvt_info;
2197 memset(pvt, 0, sizeof(*pvt));
2199 /* Associate sbridge_dev and mci for future usage */
2200 pvt->sbridge_dev = sbridge_dev;
2201 sbridge_dev->mci = mci;
2203 mci->mtype_cap = MEM_FLAG_DDR3;
2204 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2205 mci->edac_cap = EDAC_FLAG_NONE;
2206 mci->mod_name = "sbridge_edac.c";
2207 mci->mod_ver = SBRIDGE_REVISION;
2208 mci->dev_name = pci_name(pdev);
2209 mci->ctl_page_to_phys = NULL;
2211 /* Set the function pointer to an actual operation function */
2212 mci->edac_check = sbridge_check_error;
2214 pvt->info.type = type;
2217 pvt->info.rankcfgr = IB_RANK_CFG_A;
2218 pvt->info.get_tolm = ibridge_get_tolm;
2219 pvt->info.get_tohm = ibridge_get_tohm;
2220 pvt->info.dram_rule = ibridge_dram_rule;
2221 pvt->info.get_memory_type = get_memory_type;
2222 pvt->info.get_node_id = get_node_id;
2223 pvt->info.rir_limit = rir_limit;
2224 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
2225 pvt->info.interleave_list = ibridge_interleave_list;
2226 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
2227 pvt->info.interleave_pkg = ibridge_interleave_pkg;
2228 mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);
2230 /* Store pci devices at mci for faster access */
2231 rc = ibridge_mci_bind_devs(mci, sbridge_dev);
2232 if (unlikely(rc < 0))
2236 pvt->info.rankcfgr = SB_RANK_CFG_A;
2237 pvt->info.get_tolm = sbridge_get_tolm;
2238 pvt->info.get_tohm = sbridge_get_tohm;
2239 pvt->info.dram_rule = sbridge_dram_rule;
2240 pvt->info.get_memory_type = get_memory_type;
2241 pvt->info.get_node_id = get_node_id;
2242 pvt->info.rir_limit = rir_limit;
2243 pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
2244 pvt->info.interleave_list = sbridge_interleave_list;
2245 pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
2246 pvt->info.interleave_pkg = sbridge_interleave_pkg;
2247 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
2249 /* Store pci devices at mci for faster access */
2250 rc = sbridge_mci_bind_devs(mci, sbridge_dev);
2251 if (unlikely(rc < 0))
2255 /* rankcfgr isn't used */
2256 pvt->info.get_tolm = haswell_get_tolm;
2257 pvt->info.get_tohm = haswell_get_tohm;
2258 pvt->info.dram_rule = ibridge_dram_rule;
2259 pvt->info.get_memory_type = haswell_get_memory_type;
2260 pvt->info.get_node_id = haswell_get_node_id;
2261 pvt->info.rir_limit = haswell_rir_limit;
2262 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
2263 pvt->info.interleave_list = ibridge_interleave_list;
2264 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
2265 pvt->info.interleave_pkg = ibridge_interleave_pkg;
2266 mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx);
2268 /* Store pci devices at mci for faster access */
2269 rc = haswell_mci_bind_devs(mci, sbridge_dev);
2270 if (unlikely(rc < 0))
2275 /* Get dimm basic config and the memory layout */
2276 get_dimm_config(mci);
2277 get_memory_layout(mci);
2279 /* record ptr to the generic device */
2280 mci->pdev = &pdev->dev;
2282 /* add this new MC control structure to EDAC's list of MCs */
2283 if (unlikely(edac_mc_add_mc(mci))) {
2284 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
2292 kfree(mci->ctl_name);
2294 sbridge_dev->mci = NULL;
2299 * sbridge_probe Probe for ONE instance of device to see if it is
2302 * 0 for FOUND a device
2303 * < 0 for error code
2306 static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2310 struct sbridge_dev *sbridge_dev;
2311 enum type type = SANDY_BRIDGE;
2313 /* get the pci devices we want to reserve for our use */
2314 mutex_lock(&sbridge_edac_lock);
2317 * All memory controllers are allocated at the first pass.
2319 if (unlikely(probed >= 1)) {
2320 mutex_unlock(&sbridge_edac_lock);
2325 switch (pdev->device) {
2326 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
2327 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table);
2330 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
2331 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table);
2332 type = SANDY_BRIDGE;
2334 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
2335 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_haswell_table);
2339 if (unlikely(rc < 0))
2343 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
2344 edac_dbg(0, "Registering MC#%d (%d of %d)\n",
2345 mc, mc + 1, num_mc);
2347 sbridge_dev->mc = mc++;
2348 rc = sbridge_register_mci(sbridge_dev, type);
2349 if (unlikely(rc < 0))
2353 sbridge_printk(KERN_INFO, "Driver loaded.\n");
2355 mutex_unlock(&sbridge_edac_lock);
2359 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
2360 sbridge_unregister_mci(sbridge_dev);
2362 sbridge_put_all_devices();
2364 mutex_unlock(&sbridge_edac_lock);
2369 * sbridge_remove destructor for one instance of device
2372 static void sbridge_remove(struct pci_dev *pdev)
2374 struct sbridge_dev *sbridge_dev;
2379 * we have a problem here: the pdev value for removal will be wrong, since
2380 * it will point to the X58 register used to detect that the machine
2381 * is a Nehalem or newer design. However, due to the way several PCI
2382 * devices are grouped together to provide MC functionality, we need
2383 * to use a different method for releasing the devices
2386 mutex_lock(&sbridge_edac_lock);
2388 if (unlikely(!probed)) {
2389 mutex_unlock(&sbridge_edac_lock);
2393 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
2394 sbridge_unregister_mci(sbridge_dev);
2396 /* Release PCI resources */
2397 sbridge_put_all_devices();
2401 mutex_unlock(&sbridge_edac_lock);
2404 MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);
2407 * sbridge_driver pci_driver structure for this module
2410 static struct pci_driver sbridge_driver = {
2411 .name = "sbridge_edac",
2412 .probe = sbridge_probe,
2413 .remove = sbridge_remove,
2414 .id_table = sbridge_pci_tbl,
2418 * sbridge_init Module entry function
2419 * Try to initialize this module for its devices
2421 static int __init sbridge_init(void)
2427 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2430 pci_rc = pci_register_driver(&sbridge_driver);
2432 mce_register_decode_chain(&sbridge_mce_dec);
2433 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
2434 sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
2438 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
2445 * sbridge_exit() Module exit function
2446 * Unregister the driver
2448 static void __exit sbridge_exit(void)
2451 pci_unregister_driver(&sbridge_driver);
2452 mce_unregister_decode_chain(&sbridge_mce_dec);
2455 module_init(sbridge_init);
2456 module_exit(sbridge_exit);
2458 module_param(edac_op_state, int, 0444);
2459 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
2461 MODULE_LICENSE("GPL");
2462 MODULE_AUTHOR("Mauro Carvalho Chehab");
2463 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
2464 MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "