1 #include <linux/module.h>
2 #include "edac_mce_amd.h"
/* GART TLB error decoding is off by default; toggled via amd_report_gart_errors(). */
static bool report_gart_errors;
/* Optional callback an ECC driver registers to further decode NB bus errors. */
static void (*nb_bus_decoder)(int node_id, struct err_regs *regs);
7 void amd_report_gart_errors(bool v)
9 report_gart_errors = v;
11 EXPORT_SYMBOL_GPL(amd_report_gart_errors);
13 void amd_register_ecc_decoder(void (*f)(int, struct err_regs *))
17 EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
19 void amd_unregister_ecc_decoder(void (*f)(int, struct err_regs *))
22 WARN_ON(nb_bus_decoder != f);
24 nb_bus_decoder = NULL;
27 EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
/*
 * string representation for the different MCA reported error types, see F3x48
 * or MSR0000_0411.
 */
/* transaction type */
const char *tt_msgs[] = {
	"INSN", "DATA", "GEN", "RESV"
};
EXPORT_SYMBOL_GPL(tt_msgs);
/* cache level */
const char *ll_msgs[] = {
	"RESV", "L1", "L2", "L3/GEN"
};
EXPORT_SYMBOL_GPL(ll_msgs);
/* memory transaction type */
const char *rrrr_msgs[] = {
	"GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
EXPORT_SYMBOL_GPL(rrrr_msgs);
/* participating processor */
const char *pp_msgs[] = {
	"SRC", "RES", "OBS", "GEN"
};
EXPORT_SYMBOL_GPL(pp_msgs);
/* request timeout */
const char *to_msgs[] = {
	"no timeout", "timed out"
};
EXPORT_SYMBOL_GPL(to_msgs);
/* memory or i/o */
const char *ii_msgs[] = {
	"MEM", "RESV", "IO", "GEN"
};
EXPORT_SYMBOL_GPL(ii_msgs);
/*
 * Map the 4 or 5 (family-specific) bits of Extended Error code to the
 * string table.
 *
 * FIX: a missing comma after "GART/DEV Table Walk Data error" caused it to
 * be concatenated with "Res 0x100 error" (adjacent C string literals merge),
 * shifting every entry from index 16 onward and leaving the table one
 * element short. The comma is restored so each index maps to its own string.
 */
const char *ext_msgs[] = {
	"K8 ECC error",					/* 0_0000b */
	"CRC error on link",				/* 0_0001b */
	"Sync error packets on link",			/* 0_0010b */
	"Master Abort during link operation",		/* 0_0011b */
	"Target Abort during link operation",		/* 0_0100b */
	"Invalid GART PTE entry during table walk",	/* 0_0101b */
	"Unsupported atomic RMW command received",	/* 0_0110b */
	"WDT error: NB transaction timeout",		/* 0_0111b */
	"ECC/ChipKill ECC error",			/* 0_1000b */
	"SVM DEV Error",				/* 0_1001b */
	"Link Data error",				/* 0_1010b */
	"Link/L3/Probe Filter Protocol error",		/* 0_1011b */
	"NB Internal Arrays Parity error",		/* 0_1100b */
	"DRAM Address/Control Parity error",		/* 0_1101b */
	"Link Transmission error",			/* 0_1110b */
	"GART/DEV Table Walk Data error",		/* 0_1111b */
	"Res 0x100 error",				/* 1_0000b */
	"Res 0x101 error",				/* 1_0001b */
	"Res 0x102 error",				/* 1_0010b */
	"Res 0x103 error",				/* 1_0011b */
	"Res 0x104 error",				/* 1_0100b */
	"Res 0x105 error",				/* 1_0101b */
	"Res 0x106 error",				/* 1_0110b */
	"Res 0x107 error",				/* 1_0111b */
	"Res 0x108 error",				/* 1_1000b */
	"Res 0x109 error",				/* 1_1001b */
	"Res 0x10A error",				/* 1_1010b */
	"Res 0x10B error",				/* 1_1011b */
	"ECC error in L3 Cache Data",			/* 1_1100b */
	"L3 Cache Tag error",				/* 1_1101b */
	"L3 Cache LRU Parity error",			/* 1_1110b */
	"Probe Filter error"				/* 1_1111b */
};
EXPORT_SYMBOL_GPL(ext_msgs);
100 static void amd_decode_dc_mce(u64 mc0_status)
102 u32 ec = mc0_status & 0xffff;
103 u32 xec = (mc0_status >> 16) & 0xf;
105 pr_emerg(HW_ERR "Data Cache Error: ");
107 if (xec == 1 && TLB_ERROR(ec))
108 pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
110 if (mc0_status & (1ULL << 40))
111 pr_cont(" during Data Scrub.\n");
112 else if (TLB_ERROR(ec))
113 pr_cont(": %s TLB parity error.\n", LL_MSG(ec));
114 else if (MEM_ERROR(ec)) {
116 u8 tt = (ec >> 2) & 0x3;
117 u8 rrrr = (ec >> 4) & 0xf;
119 /* see F10h BKDG (31116), Table 92. */
124 pr_cont(": Data/Tag %s error.\n", RRRR_MSG(ec));
126 } else if (ll == 0x2 && rrrr == 0x3)
127 pr_cont(" during L1 linefill from L2.\n");
130 } else if (BUS_ERROR(ec) && boot_cpu_data.x86 == 0xf)
131 pr_cont(" during system linefill.\n");
140 pr_emerg(HW_ERR "Corrupted DC MCE info?\n");
143 static void amd_decode_ic_mce(u64 mc1_status)
145 u32 ec = mc1_status & 0xffff;
146 u32 xec = (mc1_status >> 16) & 0xf;
148 pr_emerg(HW_ERR "Instruction Cache Error");
150 if (xec == 1 && TLB_ERROR(ec))
151 pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
154 pr_cont(": %s TLB Parity error.\n", LL_MSG(ec));
155 else if (BUS_ERROR(ec)) {
156 if (boot_cpu_data.x86 == 0xf &&
157 (mc1_status & (1ULL << 58)))
158 pr_cont(" during system linefill.\n");
160 pr_cont(" during attempted NB data read.\n");
161 } else if (MEM_ERROR(ec)) {
163 u8 rrrr = (ec >> 4) & 0xf;
166 pr_cont(" during a linefill from L2.\n");
167 else if (ll == 0x1) {
171 pr_cont(": Parity error during "
176 pr_cont(": Copyback Parity/Victim"
181 pr_cont(": Tag Snoop error.\n");
197 pr_emerg(HW_ERR "Corrupted IC MCE info?\n");
200 static void amd_decode_bu_mce(u64 mc2_status)
202 u32 ec = mc2_status & 0xffff;
203 u32 xec = (mc2_status >> 16) & 0xf;
205 pr_emerg(HW_ERR "Bus Unit Error");
208 pr_cont(" in the write data buffers.\n");
210 pr_cont(" in the victim data buffers.\n");
211 else if (xec == 0x2 && MEM_ERROR(ec))
212 pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
213 else if (xec == 0x0) {
215 pr_cont(": %s error in a Page Descriptor Cache or "
216 "Guest TLB.\n", TT_MSG(ec));
217 else if (BUS_ERROR(ec))
218 pr_cont(": %s/ECC error in data read from NB: %s.\n",
219 RRRR_MSG(ec), PP_MSG(ec));
220 else if (MEM_ERROR(ec)) {
221 u8 rrrr = (ec >> 4) & 0xf;
224 pr_cont(": %s error during data copyback.\n",
226 else if (rrrr <= 0x1)
227 pr_cont(": %s parity/ECC error during data "
228 "access from L2.\n", RRRR_MSG(ec));
239 pr_emerg(HW_ERR "Corrupted BU MCE info?\n");
242 static void amd_decode_ls_mce(u64 mc3_status)
244 u32 ec = mc3_status & 0xffff;
245 u32 xec = (mc3_status >> 16) & 0xf;
247 pr_emerg(HW_ERR "Load Store Error");
250 u8 rrrr = (ec >> 4) & 0xf;
252 if (!BUS_ERROR(ec) || (rrrr != 0x3 && rrrr != 0x4))
255 pr_cont(" during %s.\n", RRRR_MSG(ec));
260 pr_emerg(HW_ERR "Corrupted LS MCE info?\n");
263 void amd_decode_nb_mce(int node_id, struct err_regs *regs)
265 u32 ec = ERROR_CODE(regs->nbsl);
268 * GART TLB error reporting is disabled by default. Bail out early.
270 if (TLB_ERROR(ec) && !report_gart_errors)
273 pr_emerg(HW_ERR "Northbridge Error, node %d", node_id);
276 * F10h, revD can disable ErrCpu[3:0] so check that first and also the
277 * value encoding has changed so interpret those differently
279 if ((boot_cpu_data.x86 == 0x10) &&
280 (boot_cpu_data.x86_model > 7)) {
281 if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
282 pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
284 u8 assoc_cpus = regs->nbsh & 0xf;
287 pr_cont(", core: %d", fls(assoc_cpus) - 1);
292 pr_emerg(HW_ERR "%s.\n", EXT_ERR_MSG(regs->nbsl));
294 if (BUS_ERROR(ec) && nb_bus_decoder)
295 nb_bus_decoder(node_id, regs);
297 EXPORT_SYMBOL_GPL(amd_decode_nb_mce);
299 static void amd_decode_fr_mce(u64 mc5_status)
301 /* we have only one error signature so match all fields at once. */
302 if ((mc5_status & 0xffff) == 0x0f0f)
303 pr_emerg(HW_ERR " FR Error: CPU Watchdog timer expire.\n");
305 pr_emerg(HW_ERR "Corrupted FR MCE info?\n");
308 static inline void amd_decode_err_code(u16 ec)
311 pr_emerg(HW_ERR "Transaction: %s, Cache Level: %s\n",
312 TT_MSG(ec), LL_MSG(ec));
313 } else if (MEM_ERROR(ec)) {
314 pr_emerg(HW_ERR "Transaction: %s, Type: %s, Cache Level: %s\n",
315 RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
316 } else if (BUS_ERROR(ec)) {
317 pr_emerg(HW_ERR "Transaction: %s (%s), %s, Cache Level: %s, "
318 "Participating Processor: %s\n",
319 RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
322 pr_emerg(HW_ERR "Huh? Unknown MCE error 0x%x\n", ec);
325 static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
328 struct mce *m = (struct mce *)data;
329 struct err_regs regs;
332 pr_emerg(HW_ERR "MC%d_STATUS: ", m->bank);
334 pr_cont("%sorrected error, other errors lost: %s, "
335 "CPU context corrupt: %s",
336 ((m->status & MCI_STATUS_UC) ? "Unc" : "C"),
337 ((m->status & MCI_STATUS_OVER) ? "yes" : "no"),
338 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
340 /* do the two bits[14:13] together */
341 ecc = (m->status >> 45) & 0x3;
343 pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
349 amd_decode_dc_mce(m->status);
353 amd_decode_ic_mce(m->status);
357 amd_decode_bu_mce(m->status);
361 amd_decode_ls_mce(m->status);
365 regs.nbsl = (u32) m->status;
366 regs.nbsh = (u32)(m->status >> 32);
367 regs.nbeal = (u32) m->addr;
368 regs.nbeah = (u32)(m->addr >> 32);
369 node = amd_get_nb_id(m->extcpu);
371 amd_decode_nb_mce(node, ®s);
375 amd_decode_fr_mce(m->status);
382 amd_decode_err_code(m->status & 0xffff);
387 static struct notifier_block amd_mce_dec_nb = {
388 .notifier_call = amd_decode_mce,
391 static int __init mce_amd_init(void)
394 * We can decode MCEs for K8, F10h and F11h CPUs:
396 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
399 if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
402 atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
406 early_initcall(mce_amd_init);
409 static void __exit mce_amd_exit(void)
411 atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
/* Module metadata; module_exit() unhooks the decoder on unload. */
MODULE_DESCRIPTION("AMD MCE decoder");
MODULE_ALIAS("edac-mce-amd");
MODULE_LICENSE("GPL");
module_exit(mce_amd_exit);