Use a more common debugging style: convert the debugfX() calls to
edac_dbg(X, ...). Remove __FILE__ uses, add missing newlines,
coalesce format strings and align arguments.
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
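An illustrative before/after sketch of the conversion, taken from one of the
hunks below (the edac_dbg() macro takes the verbosity level as its first
argument, as shown in the header hunk further down):

  /* Before: level encoded in the macro name, format string split across lines */
  debugf2("using DHAR to translate SysAddr 0x%lx to "
          "DramAddr 0x%lx\n",
          (unsigned long)sys_addr, (unsigned long)dram_addr);

  /* After: single edac_dbg() macro, level as first argument, format coalesced */
  edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
           (unsigned long)sys_addr, (unsigned long)dram_addr);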
return edac_mc_find((int)node_id);
err_no_match:
- debugf2("sys_addr 0x%lx doesn't match any node\n",
- (unsigned long)sys_addr);
+ edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
+ (unsigned long)sys_addr);
return NULL;
}
mask = ~mask;
if ((input_addr & mask) == (base & mask)) {
- debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
- (unsigned long)input_addr, csrow,
- pvt->mc_node_id);
+ edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
+ (unsigned long)input_addr, csrow,
+ pvt->mc_node_id);
return csrow;
}
}
- debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
- (unsigned long)input_addr, pvt->mc_node_id);
+ edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
+ (unsigned long)input_addr, pvt->mc_node_id);
return -1;
}
/* only revE and later have the DRAM Hole Address Register */
if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
- debugf1(" revision %d for node %d does not support DHAR\n",
- pvt->ext_model, pvt->mc_node_id);
+ edac_dbg(1, " revision %d for node %d does not support DHAR\n",
+ pvt->ext_model, pvt->mc_node_id);
return 1;
}
/* valid for Fam10h and above */
if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
- debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
+ edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
return 1;
}
if (!dhar_valid(pvt)) {
- debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
- pvt->mc_node_id);
+ edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
+ pvt->mc_node_id);
return 1;
}
else
*hole_offset = k8_dhar_offset(pvt);
- debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
- pvt->mc_node_id, (unsigned long)*hole_base,
- (unsigned long)*hole_offset, (unsigned long)*hole_size);
+ edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
+ pvt->mc_node_id, (unsigned long)*hole_base,
+ (unsigned long)*hole_offset, (unsigned long)*hole_size);
return 0;
}
/* use DHAR to translate SysAddr to DramAddr */
dram_addr = sys_addr - hole_offset;
- debugf2("using DHAR to translate SysAddr 0x%lx to "
- "DramAddr 0x%lx\n",
- (unsigned long)sys_addr,
- (unsigned long)dram_addr);
+ edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
+ (unsigned long)sys_addr,
+ (unsigned long)dram_addr);
return dram_addr;
}
*/
dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
- debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
- "DramAddr 0x%lx\n", (unsigned long)sys_addr,
- (unsigned long)dram_addr);
+ edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
+ (unsigned long)sys_addr, (unsigned long)dram_addr);
return dram_addr;
}
input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
(dram_addr & 0xfff);
- debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
- intlv_shift, (unsigned long)dram_addr,
- (unsigned long)input_addr);
+ edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
+ intlv_shift, (unsigned long)dram_addr,
+ (unsigned long)input_addr);
return input_addr;
}
input_addr =
dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
- debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
- (unsigned long)sys_addr, (unsigned long)input_addr);
+ edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
+ (unsigned long)sys_addr, (unsigned long)input_addr);
return input_addr;
}
intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
if (intlv_shift == 0) {
- debugf1(" InputAddr 0x%lx translates to DramAddr of "
- "same value\n", (unsigned long)input_addr);
+ edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
+ (unsigned long)input_addr);
return input_addr;
}
intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
dram_addr = bits + (intlv_sel << 12);
- debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
- "(%d node interleave bits)\n", (unsigned long)input_addr,
- (unsigned long)dram_addr, intlv_shift);
+ edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
+ (unsigned long)input_addr,
+ (unsigned long)dram_addr, intlv_shift);
return dram_addr;
}
(dram_addr < (hole_base + hole_size))) {
sys_addr = dram_addr + hole_offset;
- debugf1("using DHAR to translate DramAddr 0x%lx to "
- "SysAddr 0x%lx\n", (unsigned long)dram_addr,
- (unsigned long)sys_addr);
+ edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
+ (unsigned long)dram_addr,
+ (unsigned long)sys_addr);
return sys_addr;
}
*/
sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
- debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
- pvt->mc_node_id, (unsigned long)dram_addr,
- (unsigned long)sys_addr);
+ edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
+ pvt->mc_node_id, (unsigned long)dram_addr,
+ (unsigned long)sys_addr);
return sys_addr;
}
static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
- debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
+ edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
- debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
- (dclr & BIT(16)) ? "un" : "",
- (dclr & BIT(19)) ? "yes" : "no");
+ edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
+ (dclr & BIT(16)) ? "un" : "",
+ (dclr & BIT(19)) ? "yes" : "no");
- debugf1(" PAR/ERR parity: %s\n",
- (dclr & BIT(8)) ? "enabled" : "disabled");
+ edac_dbg(1, " PAR/ERR parity: %s\n",
+ (dclr & BIT(8)) ? "enabled" : "disabled");
if (boot_cpu_data.x86 == 0x10)
- debugf1(" DCT 128bit mode width: %s\n",
- (dclr & BIT(11)) ? "128b" : "64b");
+ edac_dbg(1, " DCT 128bit mode width: %s\n",
+ (dclr & BIT(11)) ? "128b" : "64b");
- debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
- (dclr & BIT(12)) ? "yes" : "no",
- (dclr & BIT(13)) ? "yes" : "no",
- (dclr & BIT(14)) ? "yes" : "no",
- (dclr & BIT(15)) ? "yes" : "no");
+ edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
+ (dclr & BIT(12)) ? "yes" : "no",
+ (dclr & BIT(13)) ? "yes" : "no",
+ (dclr & BIT(14)) ? "yes" : "no",
+ (dclr & BIT(15)) ? "yes" : "no");
}
/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
- debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
+ edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
- debugf1(" NB two channel DRAM capable: %s\n",
- (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
+ edac_dbg(1, " NB two channel DRAM capable: %s\n",
+ (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
- debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
+ edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
+ (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
+ (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
amd64_dump_dramcfg_low(pvt->dclr0, 0);
- debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
+ edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
- debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
- "offset: 0x%08x\n",
- pvt->dhar, dhar_base(pvt),
- (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
- : f10_dhar_offset(pvt));
+ edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
+ pvt->dhar, dhar_base(pvt),
+ (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
+ : f10_dhar_offset(pvt));
- debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
+ edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
amd64_debug_display_dimm_sizes(pvt, 0);
u32 *base1 = &pvt->csels[1].csbases[cs];
if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
- debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, *base0, reg0);
+ edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
+ cs, *base0, reg0);
if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
continue;
if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
- debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, *base1, reg1);
+ edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, reg1);
}
for_each_chip_select_mask(cs, 0, pvt) {
u32 *mask1 = &pvt->csels[1].csmasks[cs];
if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
- debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, *mask0, reg0);
+ edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask0, reg0);
if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
continue;
if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
- debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, *mask1, reg1);
+ edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, reg1);
}
}
* Need to check DCT0[0] and DCT1[0] to see if only one of them has
* their CSEnable bit on. If so, then SINGLE DIMM case.
*/
- debugf0("Data width is not 128 bits - need more decoding\n");
+ edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
/*
* Check DRAM Bank Address Mapping values for each DIMM to see if there
return;
if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
- debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
- pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
+ edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
+ pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
- debugf0(" DCTs operate in %s mode.\n",
- (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
+ edac_dbg(0, " DCTs operate in %s mode\n",
+ (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
if (!dct_ganging_enabled(pvt))
- debugf0(" Address range split per DCT: %s\n",
- (dct_high_range_enabled(pvt) ? "yes" : "no"));
+ edac_dbg(0, " Address range split per DCT: %s\n",
+ (dct_high_range_enabled(pvt) ? "yes" : "no"));
- debugf0(" data interleave for ECC: %s, "
- "DRAM cleared since last warm reset: %s\n",
- (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
- (dct_memory_cleared(pvt) ? "yes" : "no"));
+ edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
+ (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
+ (dct_memory_cleared(pvt) ? "yes" : "no"));
- debugf0(" channel interleave: %s, "
- "interleave bits selector: 0x%x\n",
- (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
- dct_sel_interleave_addr(pvt));
+ edac_dbg(0, " channel interleave: %s, interleave bits selector: 0x%x\n",
+ (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
+ dct_sel_interleave_addr(pvt));
}
amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
pvt = mci->pvt_info;
- debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
+ edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
for_each_chip_select(csrow, dct, pvt) {
if (!csrow_enabled(csrow, dct, pvt))
get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
- debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
- csrow, cs_base, cs_mask);
+ edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
+ csrow, cs_base, cs_mask);
cs_mask = ~cs_mask;
- debugf1(" (InputAddr & ~CSMask)=0x%llx "
- "(CSBase & ~CSMask)=0x%llx\n",
- (in_addr & cs_mask), (cs_base & cs_mask));
+ edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
+ (in_addr & cs_mask), (cs_base & cs_mask));
if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
cs_found = f10_process_possible_spare(pvt, dct, csrow);
- debugf1(" MATCH csrow=%d\n", cs_found);
+ edac_dbg(1, " MATCH csrow=%d\n", cs_found);
break;
}
}
u8 intlv_en = dram_intlv_en(pvt, range);
u32 intlv_sel = dram_intlv_sel(pvt, range);
- debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
- range, sys_addr, get_dram_limit(pvt, range));
+ edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
+ range, sys_addr, get_dram_limit(pvt, range));
if (dhar_valid(pvt) &&
dhar_base(pvt) <= sys_addr &&
(chan_addr & 0xfff);
}
- debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
+ edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
: pvt->csels[0].csbases;
- debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
+ edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
+ ctrl, dbam);
edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
}
}
- debugf0("syndrome(%x) not found\n", syndrome);
+ edac_dbg(0, "syndrome(%x) not found\n", syndrome);
return -1;
}
return -ENODEV;
}
- debugf1("F1: %s\n", pci_name(pvt->F1));
- debugf1("F2: %s\n", pci_name(pvt->F2));
- debugf1("F3: %s\n", pci_name(pvt->F3));
+ edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
+ edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
+ edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
return 0;
}
* those are Read-As-Zero
*/
rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
- debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
+ edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
/* check first whether TOP_MEM2 is enabled */
rdmsrl(MSR_K8_SYSCFG, msr_val);
if (msr_val & (1U << 21)) {
rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
- debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
+ edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
} else
- debugf0(" TOP_MEM2 disabled.\n");
+ edac_dbg(0, " TOP_MEM2 disabled\n");
amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
if (!rw)
continue;
- debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
- range,
- get_dram_base(pvt, range),
- get_dram_limit(pvt, range));
+ edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
+ range,
+ get_dram_base(pvt, range),
+ get_dram_limit(pvt, range));
- debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
- dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
- (rw & 0x1) ? "R" : "-",
- (rw & 0x2) ? "W" : "-",
- dram_intlv_sel(pvt, range),
- dram_dst_node(pvt, range));
+ edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
+ dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
+ (rw & 0x1) ? "R" : "-",
+ (rw & 0x2) ? "W" : "-",
+ dram_intlv_sel(pvt, range),
+ dram_dst_node(pvt, range));
}
read_dct_base_mask(pvt);
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
- debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
- debugf0(" nr_pages/channel= %u channel-count = %d\n",
- nr_pages, pvt->channel_count);
+ edac_dbg(0, " (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
+ edac_dbg(0, " nr_pages/channel= %u channel-count = %d\n",
+ nr_pages, pvt->channel_count);
return nr_pages;
}
pvt->nbcfg = val;
- debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- pvt->mc_node_id, val,
- !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
+ edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+ pvt->mc_node_id, val,
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
for_each_chip_select(i, 0, pvt) {
csrow = mci->csrows[i];
if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
- debugf1("----CSROW %d EMPTY for node %d\n", i,
- pvt->mc_node_id);
+ edac_dbg(1, "----CSROW %d EMPTY for node %d\n",
+ i, pvt->mc_node_id);
continue;
}
- debugf1("----CSROW %d VALID for MC node %d\n",
- i, pvt->mc_node_id);
-
empty = 0;
if (csrow_enabled(i, 0, pvt))
nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
mtype = amd64_determine_memory_type(pvt, i);
- debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
- debugf1(" nr_pages: %u\n", nr_pages * pvt->channel_count);
+ edac_dbg(1, " for MC node %d csrow %d:\n", pvt->mc_node_id, i);
+ edac_dbg(1, " nr_pages: %u\n",
+ nr_pages * pvt->channel_count);
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
struct msr *reg = per_cpu_ptr(msrs, cpu);
nbe = reg->l & MSR_MCGCTL_NBE;
- debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, reg->q,
- (nbe ? "enabled" : "disabled"));
+ edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+ cpu, reg->q,
+ (nbe ? "enabled" : "disabled"));
if (!nbe)
goto out;
amd64_read_pci_cfg(F3, NBCFG, &value);
- debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
- nid, value, !!(value & NBCFG_ECC_ENABLE));
+ edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("DRAM ECC disabled on this node, enabling...\n");
s->flags.nb_ecc_prev = 1;
}
- debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
- nid, value, !!(value & NBCFG_ECC_ENABLE));
+ edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
return ret;
}
ret = -ENODEV;
if (edac_mc_add_mc(mci)) {
- debugf1("failed edac_mc_add_mc()\n");
+ edac_dbg(1, "failed edac_mc_add_mc()\n");
goto err_add_mc;
}
if (set_mc_sysfs_attrs(mci)) {
- debugf1("failed edac_mc_add_mc()\n");
+ edac_dbg(1, "failed edac_mc_add_mc()\n");
goto err_add_sysfs;
}
ret = pci_enable_device(pdev);
if (ret < 0) {
- debugf0("ret=%d\n", ret);
+ edac_dbg(0, "ret=%d\n", ret);
return -EIO;
}
/* Issue 'word' and 'bit' along with the READ request */
amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
- debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
+ edac_dbg(0, "section=0x%x word_bits=0x%x\n",
+ section, word_bits);
return count;
}
/* Issue 'word' and 'bit' along with the READ request */
amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
- debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
+ edac_dbg(0, "section=0x%x word_bits=0x%x\n",
+ section, word_bits);
return count;
}
static void amd76x_check(struct mem_ctl_info *mci)
{
struct amd76x_error_info info;
- debugf3("\n");
+ edac_dbg(3, "\n");
amd76x_get_error_info(mci, &info);
amd76x_process_error_info(mci, &info, 1);
}
u32 ems_mode;
struct amd76x_error_info discard;
- debugf0("\n");
+ edac_dbg(0, "\n");
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
ems_mode = (ems >> 10) & 0x3;
if (mci == NULL)
return -ENOMEM;
- debugf0("mci = %p\n", mci);
+ edac_dbg(0, "mci = %p\n", mci);
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
- debugf3("failed edac_mc_add_mc()\n");
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
}
/* get this far and it's successful */
- debugf3("success\n");
+ edac_dbg(3, "success\n");
return 0;
fail:
static int __devinit amd76x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
/* don't need to call pci_enable_device() */
return amd76x_probe1(pdev, ent->driver_data);
{
struct mem_ctl_info *mci;
- debugf0("\n");
+ edac_dbg(0, "\n");
if (amd76x_pci)
edac_pci_release_generic_ctl(amd76x_pci);
reg += aw;
size = of_read_number(reg, sw);
reg += sw;
- debugf1("start 0x%lx, size 0x%lx\n", start, size);
+ edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size);
pdata->total_mem += size;
} while (reg < reg_end);
of_node_put(np);
- debugf0("total_mem 0x%lx\n", pdata->total_mem);
+ edac_dbg(0, "total_mem 0x%lx\n", pdata->total_mem);
}
static void cpc925_init_csrows(struct mem_ctl_info *mci)
*offset = pa & (PAGE_SIZE - 1);
*pfn = pa >> PAGE_SHIFT;
- debugf0("ECC physical address 0x%lx\n", pa);
+ edac_dbg(0, "ECC physical address 0x%lx\n", pa);
}
static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
goto err2;
}
- debugf0("Successfully added edac device for %s\n",
- dev_info->ctl_name);
+ edac_dbg(0, "Successfully added edac device for %s\n",
+ dev_info->ctl_name);
continue;
if (dev_info->exit)
dev_info->exit(dev_info);
- debugf0("Successfully deleted edac device for %s\n",
- dev_info->ctl_name);
+ edac_dbg(0, "Successfully deleted edac device for %s\n",
+ dev_info->ctl_name);
}
}
mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
- debugf0("Mem Scrub Ctrl Register 0x%x\n", mscr);
+ edac_dbg(0, "Mem Scrub Ctrl Register 0x%x\n", mscr);
if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
(si == 0)) {
((mbcr & MBCR_64BITBUS_MASK) == 0))
dual = 1;
- debugf0("%s channel\n", (dual > 0) ? "Dual" : "Single");
+ edac_dbg(0, "%s channel\n", (dual > 0) ? "Dual" : "Single");
return dual;
}
struct resource *r;
int res = 0, nr_channels;
- debugf0("%s platform device found!\n", pdev->name);
+ edac_dbg(0, "%s platform device found!\n", pdev->name);
if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
res = -ENOMEM;
cpc925_add_edac_devices(vbase);
/* get this far and it's successful */
- debugf0("success\n");
+ edac_dbg(0, "success\n");
res = 0;
goto out;
u32 remap;
struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
- debugf3("\n");
+ edac_dbg(3, "\n");
if (page < pvt->tolm)
return page;
int i;
struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
- debugf3("\n");
+ edac_dbg(3, "\n");
/* convert the addr to 4k page */
page = sec1_add >> (PAGE_SHIFT - 4);
int row;
struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
- debugf3("\n");
+ edac_dbg(3, "\n");
if (error_one & 0x0202) {
error_2b = ded_add;
if (!handle_error)
return;
- debugf3("\n");
+ edac_dbg(3, "\n");
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
-1, -1, -1,
"e752x UE log memory write", "", NULL);
{
struct e752x_error_info info;
- debugf3("\n");
+ edac_dbg(3, "\n");
e752x_get_error_info(mci, &info);
e752x_process_error_info(mci, &info, 1);
}
pci_read_config_byte(pdev, E752X_DRB + index, &value);
/* convert a 128 or 64 MiB DRB to a page size. */
cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
- debugf3("(%d) cumul_size 0x%x\n", index,
- cumul_size);
+ edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
for (i = 0; i < csrow->nr_channels; i++) {
struct dimm_info *dimm = csrow->channels[i]->dimm;
- debugf3("Initializing rank at (%i,%i)\n", index, i);
+ edac_dbg(3, "Initializing rank at (%i,%i)\n", index, i);
dimm->nr_pages = nr_pages / csrow->nr_channels;
dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
dimm->mtype = MEM_RDDR; /* only one type supported */
int drc_chan; /* Number of channels 0=1chan,1=2chan */
struct e752x_error_info discard;
- debugf0("mci\n");
- debugf0("Starting Probe1\n");
+ edac_dbg(0, "mci\n");
+ edac_dbg(0, "Starting Probe1\n");
/* check to see if device 0 function 1 is enabled; if it isn't, we
* assume the BIOS has reserved it for a reason and is expecting
if (mci == NULL)
return -ENOMEM;
- debugf3("init mci\n");
+ edac_dbg(3, "init mci\n");
mci->mtype_cap = MEM_FLAG_RDDR;
/* 3100 IMCH supports SECDEC only */
mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
mci->mod_ver = E752X_REVISION;
mci->pdev = &pdev->dev;
- debugf3("init pvt\n");
+ edac_dbg(3, "init pvt\n");
pvt = (struct e752x_pvt *)mci->pvt_info;
pvt->dev_info = &e752x_devs[dev_idx];
pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
return -ENODEV;
}
- debugf3("more mci init\n");
+ edac_dbg(3, "more mci init\n");
mci->ctl_name = pvt->dev_info->ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = e752x_check;
mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
else
mci->edac_cap |= EDAC_FLAG_NONE;
- debugf3("tolm, remapbase, remaplimit\n");
+ edac_dbg(3, "tolm, remapbase, remaplimit\n");
/* load the top of low memory, remap base, and remap limit vars */
pci_read_config_word(pdev, E752X_TOLM, &pci_data);
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
- debugf3("failed edac_mc_add_mc()\n");
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
}
/* get this far and it's successful */
- debugf3("success\n");
+ edac_dbg(3, "success\n");
return 0;
fail:
static int __devinit e752x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
/* wake up and enable device */
if (pci_enable_device(pdev) < 0)
struct mem_ctl_info *mci;
struct e752x_pvt *pvt;
- debugf0("\n");
+ edac_dbg(0, "\n");
if (e752x_pci)
edac_pci_release_generic_ctl(e752x_pci);
{
int pci_rc;
- debugf3("\n");
+ edac_dbg(3, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
static void __exit e752x_exit(void)
{
- debugf3("\n");
+ edac_dbg(3, "\n");
pci_unregister_driver(&e752x_driver);
}
/* FIXME - is this valid for both SECDED and S4ECD4ED? */
static inline int e7xxx_find_channel(u16 syndrome)
{
- debugf3("\n");
+ edac_dbg(3, "\n");
if ((syndrome & 0xff00) == 0)
return 0;
u32 remap;
struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
- debugf3("\n");
+ edac_dbg(3, "\n");
if ((page < pvt->tolm) ||
((page >= 0x100000) && (page < pvt->remapbase)))
int row;
int channel;
- debugf3("\n");
+ edac_dbg(3, "\n");
/* read the error address */
error_1b = info->dram_celog_add;
/* FIXME - should use PAGE_SHIFT */
static void process_ce_no_info(struct mem_ctl_info *mci)
{
- debugf3("\n");
+ edac_dbg(3, "\n");
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
"e7xxx CE log register overflow", "", NULL);
}
u32 error_2b, block_page;
int row;
- debugf3("\n");
+ edac_dbg(3, "\n");
/* read the error address */
error_2b = info->dram_uelog_add;
/* FIXME - should use PAGE_SHIFT */
static void process_ue_no_info(struct mem_ctl_info *mci)
{
- debugf3("\n");
+ edac_dbg(3, "\n");
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
"e7xxx UE log register overflow", "", NULL);
{
struct e7xxx_error_info info;
- debugf3("\n");
+ edac_dbg(3, "\n");
e7xxx_get_error_info(mci, &info);
e7xxx_process_error_info(mci, &info, 1);
}
pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
/* convert a 64 or 32 MiB DRB to a page size. */
cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
- debugf3("(%d) cumul_size 0x%x\n", index,
- cumul_size);
+ edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
int drc_chan;
struct e7xxx_error_info discard;
- debugf0("mci\n");
+ edac_dbg(0, "mci\n");
pci_read_config_dword(pdev, E7XXX_DRC, &drc);
if (mci == NULL)
return -ENOMEM;
- debugf3("init mci\n");
+ edac_dbg(3, "init mci\n");
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
EDAC_FLAG_S4ECD4ED;
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = E7XXX_REVISION;
mci->pdev = &pdev->dev;
- debugf3("init pvt\n");
+ edac_dbg(3, "init pvt\n");
pvt = (struct e7xxx_pvt *)mci->pvt_info;
pvt->dev_info = &e7xxx_devs[dev_idx];
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
goto fail0;
}
- debugf3("more mci init\n");
+ edac_dbg(3, "more mci init\n");
mci->ctl_name = pvt->dev_info->ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = e7xxx_check;
mci->ctl_page_to_phys = ctl_page_to_phys;
e7xxx_init_csrows(mci, pdev, dev_idx, drc);
mci->edac_cap |= EDAC_FLAG_NONE;
- debugf3("tolm, remapbase, remaplimit\n");
+ edac_dbg(3, "tolm, remapbase, remaplimit\n");
/* load the top of low memory, remap base, and remap limit vars */
pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
pvt->tolm = ((u32) pci_data) << 4;
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
- debugf3("failed edac_mc_add_mc()\n");
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail1;
}
}
/* get this far and it's successful */
- debugf3("success\n");
+ edac_dbg(3, "success\n");
return 0;
fail1:
static int __devinit e7xxx_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
/* wake up and enable device */
return pci_enable_device(pdev) ?
struct mem_ctl_info *mci;
struct e7xxx_pvt *pvt;
- debugf0("\n");
+ edac_dbg(0, "\n");
if (e7xxx_pci)
edac_pci_release_generic_ctl(e7xxx_pci);
#ifdef CONFIG_EDAC_DEBUG
extern int edac_debug_level;
-#define edac_debug_printk(level, fmt, ...) \
+#define edac_dbg(level, fmt, ...) \
do { \
if (level <= edac_debug_level) \
edac_printk(KERN_DEBUG, EDAC_DEBUG, \
#else /* !CONFIG_EDAC_DEBUG */
-#define edac_debug_printk(level, fmt, ...) \
+#define edac_dbg(level, fmt, ...) \
do { \
if (0) \
edac_printk(KERN_DEBUG, EDAC_DEBUG, \
#endif /* !CONFIG_EDAC_DEBUG */
-#define debugf0(fmt, ...) edac_debug_printk(0, fmt, ##__VA_ARGS__)
-#define debugf1(fmt, ...) edac_debug_printk(1, fmt, ##__VA_ARGS__)
-#define debugf2(fmt, ...) edac_debug_printk(2, fmt, ##__VA_ARGS__)
-#define debugf3(fmt, ...) edac_debug_printk(3, fmt, ##__VA_ARGS__)
-#define debugf4(fmt, ...) edac_debug_printk(4, fmt, ##__VA_ARGS__)
-
#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
PCI_DEVICE_ID_ ## vend ## _ ## dev
#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
- debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx);
- debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
- debugf3("\tdev = %p\n", edac_dev->dev);
- debugf3("\tmod_name:ctl_name = %s:%s\n",
- edac_dev->mod_name, edac_dev->ctl_name);
- debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info);
+ edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n",
+ edac_dev, edac_dev->dev_idx);
+ edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
+ edac_dbg(3, "\tdev = %p\n", edac_dev->dev);
+ edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
+ edac_dev->mod_name, edac_dev->ctl_name);
+ edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info);
}
#endif /* CONFIG_EDAC_DEBUG */
void *pvt, *p;
int err;
- debugf4("instances=%d blocks=%d\n",
- nr_instances, nr_blocks);
+ edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks);
/* Calculate the size of memory we need to allocate AND
* determine the offsets of the various item arrays
/* Name of this edac device */
snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name);
- debugf4("edac_dev=%p next after end=%p\n",
- dev_ctl, pvt + sz_private );
+ edac_dbg(4, "edac_dev=%p next after end=%p\n",
+ dev_ctl, pvt + sz_private);
/* Initialize every Instance */
for (instance = 0; instance < nr_instances; instance++) {
snprintf(blk->name, sizeof(blk->name),
"%s%d", edac_block_name, block+offset_value);
- debugf4("instance=%d inst_p=%p block=#%d "
- "block_p=%p name='%s'\n",
- instance, inst, block,
- blk, blk->name);
+ edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
+ instance, inst, block, blk, blk->name);
/* if there are NO attributes OR no attribute pointer
* then continue on to next block iteration
attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
blk->block_attributes = attrib_p;
- debugf4("THIS BLOCK_ATTRIB=%p\n",
- blk->block_attributes);
+ edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n",
+ blk->block_attributes);
/* Initialize every user specified attribute in this
* block with the data the caller passed in
attrib->block = blk; /* up link */
- debugf4("alloc-attrib=%p attrib_name='%s' "
- "attrib-spec=%p spec-name=%s\n",
- attrib, attrib->attr.name,
- &attrib_spec[attr],
- attrib_spec[attr].attr.name
+ edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n",
+ attrib, attrib->attr.name,
+ &attrib_spec[attr],
+ attrib_spec[attr].attr.name
);
}
}
struct edac_device_ctl_info *edac_dev;
struct list_head *item;
- debugf0("\n");
+ edac_dbg(0, "\n");
list_for_each(item, &edac_device_list) {
edac_dev = list_entry(item, struct edac_device_ctl_info, link);
void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
unsigned msec)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
/* take the arg 'msec' and set it into the control structure
* to used in the time period calculation
*/
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG
if (edac_debug_level >= 3)
{
struct edac_device_ctl_info *edac_dev;
- debugf0("\n");
+ edac_dbg(0, "\n");
mutex_lock(&device_ctls_mutex);
{
struct edac_device_ctl_info *edac_dev = to_edacdev(kobj);
- debugf4("control index=%d\n", edac_dev->dev_idx);
+ edac_dbg(4, "control index=%d\n", edac_dev->dev_idx);
/* decrement the EDAC CORE module ref count */
module_put(edac_dev->owner);
struct bus_type *edac_subsys;
int err;
- debugf1("\n");
+ edac_dbg(1, "\n");
/* get the /sys/devices/system/edac reference */
edac_subsys = edac_get_sysfs_subsys();
if (edac_subsys == NULL) {
- debugf1("no edac_subsys error\n");
+ edac_dbg(1, "no edac_subsys error\n");
err = -ENODEV;
goto err_out;
}
&edac_subsys->dev_root->kobj,
"%s", edac_dev->name);
if (err) {
- debugf1("Failed to register '.../edac/%s'\n",
- edac_dev->name);
+ edac_dbg(1, "Failed to register '.../edac/%s'\n",
+ edac_dev->name);
goto err_kobj_reg;
}
kobject_uevent(&edac_dev->kobj, KOBJ_ADD);
* edac_device_unregister_sysfs_main_kobj() must be used
*/
- debugf4("Registered '.../edac/%s' kobject\n",
- edac_dev->name);
+ edac_dbg(4, "Registered '.../edac/%s' kobject\n", edac_dev->name);
return 0;
*/
void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev)
{
- debugf0("\n");
- debugf4("name of kobject is: %s\n",
- kobject_name(&dev->kobj));
+ edac_dbg(0, "\n");
+ edac_dbg(4, "name of kobject is: %s\n", kobject_name(&dev->kobj));
/*
* Unregister the edac device's kobject and
{
struct edac_device_instance *instance;
- debugf1("\n");
+ edac_dbg(1, "\n");
/* map from this kobj to the main control struct
* and then dec the main kobj count
{
struct edac_device_block *block;
- debugf1("\n");
+ edac_dbg(1, "\n");
/* get the container of the kobj */
block = to_block(kobj);
struct edac_dev_sysfs_block_attribute *sysfs_attrib;
struct kobject *main_kobj;
- debugf4("Instance '%s' inst_p=%p block '%s' block_p=%p\n",
- instance->name, instance, block->name, block);
- debugf4("block kobj=%p block kobj->parent=%p\n",
- &block->kobj, &block->kobj.parent);
+ edac_dbg(4, "Instance '%s' inst_p=%p block '%s' block_p=%p\n",
+ instance->name, instance, block->name, block);
+ edac_dbg(4, "block kobj=%p block kobj->parent=%p\n",
+ &block->kobj, &block->kobj.parent);
/* init this block's kobject */
memset(&block->kobj, 0, sizeof(struct kobject));
&instance->kobj,
"%s", block->name);
if (err) {
- debugf1("Failed to register instance '%s'\n",
- block->name);
+ edac_dbg(1, "Failed to register instance '%s'\n", block->name);
kobject_put(main_kobj);
err = -ENODEV;
goto err_out;
if (sysfs_attrib && block->nr_attribs) {
for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
- debugf4("creating block attrib='%s' "
- "attrib->%p to kobj=%p\n",
- sysfs_attrib->attr.name,
- sysfs_attrib, &block->kobj);
+ edac_dbg(4, "creating block attrib='%s' attrib->%p to kobj=%p\n",
+ sysfs_attrib->attr.name,
+ sysfs_attrib, &block->kobj);
/* Create each block_attribute file */
err = sysfs_create_file(&block->kobj,
err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl,
&edac_dev->kobj, "%s", instance->name);
if (err != 0) {
- debugf2("Failed to register instance '%s'\n",
- instance->name);
+ edac_dbg(2, "Failed to register instance '%s'\n",
+ instance->name);
kobject_put(main_kobj);
goto err_out;
}
- debugf4("now register '%d' blocks for instance %d\n",
- instance->nr_blocks, idx);
+ edac_dbg(4, "now register '%d' blocks for instance %d\n",
+ instance->nr_blocks, idx);
/* register all blocks of this instance */
for (i = 0; i < instance->nr_blocks; i++) {
}
kobject_uevent(&instance->kobj, KOBJ_ADD);
- debugf4("Registered instance %d '%s' kobject\n",
- idx, instance->name);
+ edac_dbg(4, "Registered instance %d '%s' kobject\n",
+ idx, instance->name);
return 0;
int i, j;
int err;
- debugf0("\n");
+ edac_dbg(0, "\n");
/* iterate over creation of the instances */
for (i = 0; i < edac_dev->nr_instances; i++) {
int err;
struct kobject *edac_kobj = &edac_dev->kobj;
- debugf0("idx=%d\n", edac_dev->dev_idx);
+ edac_dbg(0, "idx=%d\n", edac_dev->dev_idx);
/* go create any main attributes callers wants */
err = edac_device_add_main_sysfs_attributes(edac_dev);
if (err) {
- debugf0("failed to add sysfs attribs\n");
+ edac_dbg(0, "failed to add sysfs attribs\n");
goto err_out;
}
err = sysfs_create_link(edac_kobj,
&edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK);
if (err) {
- debugf0("sysfs_create_link() returned err= %d\n",
- err);
+ edac_dbg(0, "sysfs_create_link() returned err= %d\n", err);
goto err_remove_main_attribs;
}
*/
err = edac_device_create_instances(edac_dev);
if (err) {
- debugf0("edac_device_create_instances() "
- "returned err= %d\n", err);
+ edac_dbg(0, "edac_device_create_instances() returned err= %d\n",
+ err);
goto err_remove_link;
}
- debugf4("create-instances done, idx=%d\n",
- edac_dev->dev_idx);
+ edac_dbg(4, "create-instances done, idx=%d\n", edac_dev->dev_idx);
return 0;
*/
void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
/* remove any main attributes for this device */
edac_device_remove_main_sysfs_attributes(edac_dev);
static void edac_mc_dump_channel(struct rank_info *chan)
{
- debugf4("\tchannel = %p\n", chan);
- debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
- debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
- debugf4("\tchannel->dimm = %p\n", chan->dimm);
+ edac_dbg(4, "\tchannel = %p\n", chan);
+ edac_dbg(4, "\tchannel->chan_idx = %d\n", chan->chan_idx);
+ edac_dbg(4, "\tchannel->csrow = %p\n", chan->csrow);
+ edac_dbg(4, "\tchannel->dimm = %p\n", chan->dimm);
}
static void edac_mc_dump_dimm(struct dimm_info *dimm)
{
int i;
- debugf4("\tdimm = %p\n", dimm);
- debugf4("\tdimm->label = '%s'\n", dimm->label);
- debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
- debugf4("\tdimm location ");
+ edac_dbg(4, "\tdimm = %p\n", dimm);
+ edac_dbg(4, "\tdimm->label = '%s'\n", dimm->label);
+ edac_dbg(4, "\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
+ edac_dbg(4, "\tdimm location ");
for (i = 0; i < dimm->mci->n_layers; i++) {
printk(KERN_CONT "%d", dimm->location[i]);
if (i < dimm->mci->n_layers - 1)
printk(KERN_CONT ".");
}
printk(KERN_CONT "\n");
- debugf4("\tdimm->grain = %d\n", dimm->grain);
- debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
+ edac_dbg(4, "\tdimm->grain = %d\n", dimm->grain);
+ edac_dbg(4, "\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
}
static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
- debugf4("\tcsrow = %p\n", csrow);
- debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
- debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
- debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
- debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
- debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
- debugf4("\tcsrow->channels = %p\n", csrow->channels);
- debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
+ edac_dbg(4, "\tcsrow = %p\n", csrow);
+ edac_dbg(4, "\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
+ edac_dbg(4, "\tcsrow->first_page = 0x%lx\n", csrow->first_page);
+ edac_dbg(4, "\tcsrow->last_page = 0x%lx\n", csrow->last_page);
+ edac_dbg(4, "\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
+ edac_dbg(4, "\tcsrow->nr_channels = %d\n", csrow->nr_channels);
+ edac_dbg(4, "\tcsrow->channels = %p\n", csrow->channels);
+ edac_dbg(4, "\tcsrow->mci = %p\n", csrow->mci);
}
static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
- debugf3("\tmci = %p\n", mci);
- debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
- debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
- debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
- debugf4("\tmci->edac_check = %p\n", mci->edac_check);
- debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
- mci->nr_csrows, mci->csrows);
- debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
- mci->tot_dimms, mci->dimms);
- debugf3("\tdev = %p\n", mci->pdev);
- debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
- debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
+ edac_dbg(3, "\tmci = %p\n", mci);
+ edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
+ edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
+ edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
+ edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
+ edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
+ mci->nr_csrows, mci->csrows);
+ edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
+ mci->tot_dimms, mci->dimms);
+ edac_dbg(3, "\tdev = %p\n", mci->pdev);
+ edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
+ mci->mod_name, mci->ctl_name);
+ edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}
#endif /* CONFIG_EDAC_DEBUG */
layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
for (i = 0; i < n_layers; i++) {
count *= layers[i].size;
- debugf4("errcount layer %d size %d\n", i, count);
+ edac_dbg(4, "errcount layer %d size %d\n", i, count);
ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
tot_errcount += 2 * count;
}
- debugf4("allocating %d error counters\n", tot_errcount);
+ edac_dbg(4, "allocating %d error counters\n", tot_errcount);
pvt = edac_align_ptr(&ptr, sz_pvt, 1);
size = ((unsigned long)pvt) + sz_pvt;
- debugf1("allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
- size,
- tot_dimms,
- per_rank ? "ranks" : "dimms",
- tot_csrows * tot_channels);
+ edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
+ size,
+ tot_dimms,
+ per_rank ? "ranks" : "dimms",
+ tot_csrows * tot_channels);
mci = kzalloc(size, GFP_KERNEL);
if (mci == NULL)
memset(&pos, 0, sizeof(pos));
row = 0;
chn = 0;
- debugf4("initializing %d %s\n", tot_dimms,
- per_rank ? "ranks" : "dimms");
+ edac_dbg(4, "initializing %d %s\n",
+ tot_dimms, per_rank ? "ranks" : "dimms");
for (i = 0; i < tot_dimms; i++) {
chan = mci->csrows[row]->channels[chn];
off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
mci->dimms[off] = dimm;
dimm->mci = mci;
- debugf2("%d: %s%i (%d:%d:%d): row %d, chan %d\n", i,
- per_rank ? "rank" : "dimm", off,
- pos[0], pos[1], pos[2], row, chn);
+ edac_dbg(2, "%d: %s%i (%d:%d:%d): row %d, chan %d\n",
+ i, per_rank ? "rank" : "dimm", off,
+ pos[0], pos[1], pos[2], row, chn);
/*
* Copy DIMM location and initialize it.
*/
void edac_mc_free(struct mem_ctl_info *mci)
{
- debugf1("\n");
+ edac_dbg(1, "\n");
/* the mci instance is freed here, when the sysfs object is dropped */
edac_unregister_sysfs(mci);
struct mem_ctl_info *mci;
struct list_head *item;
- debugf3("\n");
+ edac_dbg(3, "\n");
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
*/
static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
/* if this instance is not in the POLL state, then simply return */
if (mci->op_state != OP_RUNNING_POLL)
status = cancel_delayed_work(&mci->work);
if (status == 0) {
- debugf0("not canceled, flush the queue\n");
+ edac_dbg(0, "not canceled, flush the queue\n");
/* workq instance might be running, wait for it */
flush_workqueue(edac_workqueue);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG
if (edac_debug_level >= 3)
{
struct mem_ctl_info *mci;
- debugf0("\n");
+ edac_dbg(0, "\n");
mutex_lock(&mem_ctls_mutex);
void *virt_addr;
unsigned long flags = 0;
- debugf3("\n");
+ edac_dbg(3, "\n");
/* ECC error page was not in our memory. Ignore it. */
if (!pfn_valid(page))
struct csrow_info **csrows = mci->csrows;
int row, i, j, n;
- debugf1("MC%d: 0x%lx\n", mci->mc_idx, page);
+ edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
row = -1;
for (i = 0; i < mci->nr_csrows; i++) {
if (n == 0)
continue;
- debugf3("MC%d: first(0x%lx) page(0x%lx) last(0x%lx) "
- "mask(0x%lx)\n", mci->mc_idx,
- csrow->first_page, page, csrow->last_page,
- csrow->page_mask);
+ edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
+ mci->mc_idx,
+ csrow->first_page, page, csrow->last_page,
+ csrow->page_mask);
if ((page >= csrow->first_page) &&
(page <= csrow->last_page) &&
u16 error_count; /* FIXME: make it a parameter */
u8 grain_bits;
- debugf3("MC%d\n", mci->mc_idx);
+ edac_dbg(3, "MC%d\n", mci->mc_idx);
/*
* Check if the event report is consistent and if the memory
* get csrow/channel of the DIMM, in order to allow
* incrementing the compat API counters
*/
- debugf4("%s csrows map: (%d,%d)\n",
- mci->mem_is_per_rank ? "rank" : "dimm",
- dimm->csrow, dimm->cschannel);
-
+ edac_dbg(4, "%s csrows map: (%d,%d)\n",
+ mci->mem_is_per_rank ? "rank" : "dimm",
+ dimm->csrow, dimm->cschannel);
if (row == -1)
row = dimm->csrow;
else if (row >= 0 && row != dimm->csrow)
if (!enable_per_layer_report) {
strcpy(label, "any memory");
} else {
- debugf4("csrow/channel to increment: (%d,%d)\n",
- row, chan);
+ edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
if (p == label)
strcpy(label, "unknown memory");
if (type == HW_EVENT_ERR_CORRECTED) {
{
struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
- debugf1("Releasing csrow device %s\n", dev_name(dev));
+ edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
kfree(csrow);
}
dev_set_name(&csrow->dev, "csrow%d", index);
dev_set_drvdata(&csrow->dev, csrow);
- debugf0("creating (virtual) csrow node %s\n", dev_name(&csrow->dev));
+ edac_dbg(0, "creating (virtual) csrow node %s\n",
+ dev_name(&csrow->dev));
err = device_add(&csrow->dev);
if (err < 0)
for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
if (!csrow->channels[chan]->dimm->nr_pages)
continue;
- debugf1("Removing csrow %d channel %d sysfs nodes\n",
- i, chan);
+ edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n",
+ i, chan);
device_remove_file(&csrow->dev,
dynamic_csrow_dimm_attr[chan]);
device_remove_file(&csrow->dev,
{
struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
- debugf1("Releasing dimm device %s\n", dev_name(dev));
+ edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
kfree(dimm);
}
err = device_add(&dimm->dev);
- debugf0("creating rank/dimm device %s\n", dev_name(&dimm->dev));
+ edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));
return err;
}
{
struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
- debugf1("Releasing csrow device %s\n", dev_name(dev));
+ edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
kfree(mci);
}
mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
if (!mci->bus.name)
return -ENOMEM;
- debugf0("creating bus %s\n",mci->bus.name);
+ edac_dbg(0, "creating bus %s\n", mci->bus.name);
err = bus_register(&mci->bus);
if (err < 0)
return err;
dev_set_drvdata(&mci->dev, mci);
pm_runtime_forbid(&mci->dev);
- debugf0("creating device %s\n", dev_name(&mci->dev));
+ edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
err = device_add(&mci->dev);
if (err < 0) {
bus_unregister(&mci->bus);
if (dimm->nr_pages == 0)
continue;
#ifdef CONFIG_EDAC_DEBUG
- debugf1("creating dimm%d, located at ",
- i);
+ edac_dbg(1, "creating dimm%d, located at ", i);
if (edac_debug_level >= 1) {
int lay;
for (lay = 0; lay < mci->n_layers; lay++)
#endif
err = edac_create_dimm_object(mci, dimm, i);
if (err) {
- debugf1("failure: create dimm %d obj\n",
- i);
+ edac_dbg(1, "failure: create dimm %d obj\n", i);
goto fail;
}
}
{
int i;
- debugf0("\n");
+ edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG
debugfs_remove(mci->debugfs);
struct dimm_info *dimm = mci->dimms[i];
if (dimm->nr_pages == 0)
continue;
- debugf0("removing device %s\n", dev_name(&dimm->dev));
+ edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
put_device(&dimm->dev);
device_del(&dimm->dev);
}
void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
- debugf1("Unregistering device %s\n", dev_name(&mci->dev));
+ edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
put_device(&mci->dev);
device_del(&mci->dev);
bus_unregister(&mci->bus);
* parent device, used to create the /sys/devices/mc sysfs node.
* So, there are no attributes on it.
*/
- debugf1("Releasing device %s\n", dev_name(dev));
+ edac_dbg(1, "Releasing device %s\n", dev_name(dev));
kfree(dev);
}
/* get the /sys/devices/system/edac subsys reference */
edac_subsys = edac_get_sysfs_subsys();
if (edac_subsys == NULL) {
- debugf1("no edac_subsys\n");
+ edac_dbg(1, "no edac_subsys\n");
return -EINVAL;
}
if (err < 0)
return err;
- debugf0("device %s created\n", dev_name(mci_pdev));
+ edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
return 0;
}
*/
static void __exit edac_exit(void)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
/* tear down the various subsystems */
edac_workqueue_teardown();
void *p = NULL, *pvt;
unsigned int size;
- debugf1("\n");
+ edac_dbg(1, "\n");
pci = edac_align_ptr(&p, sizeof(*pci), 1);
pvt = edac_align_ptr(&p, 1, sz_pvt);
*/
void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
{
- debugf1("\n");
+ edac_dbg(1, "\n");
edac_pci_remove_sysfs(pci);
}
struct edac_pci_ctl_info *pci;
struct list_head *item;
- debugf1("\n");
+ edac_dbg(1, "\n");
list_for_each(item, &edac_pci_list) {
pci = list_entry(item, struct edac_pci_ctl_info, link);
struct list_head *item, *insert_before;
struct edac_pci_ctl_info *rover;
- debugf1("\n");
+ edac_dbg(1, "\n");
insert_before = &edac_pci_list;
int msec;
unsigned long delay;
- debugf3("checking\n");
+ edac_dbg(3, "checking\n");
mutex_lock(&edac_pci_ctls_mutex);
static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
unsigned int msec)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
queue_delayed_work(edac_workqueue, &pci->work,
{
int status;
- debugf0("\n");
+ edac_dbg(0, "\n");
status = cancel_delayed_work(&pci->work);
if (status == 0)
void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
unsigned long value)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
edac_pci_workq_teardown(pci);
*/
int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
pci->pci_idx = edac_idx;
pci->start_time = jiffies;
{
struct edac_pci_ctl_info *pci;
- debugf0("\n");
+ edac_dbg(0, "\n");
mutex_lock(&edac_pci_ctls_mutex);
*/
static void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
{
- debugf4("\n");
+ edac_dbg(4, "\n");
edac_pci_do_parity_check();
}
pdata->edac_idx = edac_pci_idx++;
if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
- debugf3("failed edac_pci_add_device()\n");
+ edac_dbg(3, "failed edac_pci_add_device()\n");
edac_pci_free_ctl_info(pci);
return NULL;
}
*/
void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
{
- debugf0("pci mod=%s\n", pci->mod_name);
+ edac_dbg(0, "pci mod=%s\n", pci->mod_name);
edac_pci_del_device(pci->dev);
edac_pci_free_ctl_info(pci);
{
struct edac_pci_ctl_info *pci;
- debugf0("\n");
+ edac_dbg(0, "\n");
/* Form pointer to containing struct, the pci control struct */
pci = to_instance(kobj);
struct kobject *main_kobj;
int err;
- debugf0("\n");
+ edac_dbg(0, "\n");
/* First bump the ref count on the top main kobj, which will
* track the number of PCI instances we have, and thus nest
err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance,
edac_pci_top_main_kobj, "pci%d", idx);
if (err != 0) {
- debugf2("failed to register instance pci%d\n",
- idx);
+ edac_dbg(2, "failed to register instance pci%d\n", idx);
kobject_put(edac_pci_top_main_kobj);
goto error_out;
}
kobject_uevent(&pci->kobj, KOBJ_ADD);
- debugf1("Register instance 'pci%d' kobject\n", idx);
+ edac_dbg(1, "Register instance 'pci%d' kobject\n", idx);
return 0;
static void edac_pci_unregister_sysfs_instance_kobj(
struct edac_pci_ctl_info *pci)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
/* Unregister the instance kobject and allow its release
* function release the main reference count and then
*/
static void edac_pci_release_main_kobj(struct kobject *kobj)
{
- debugf0("here to module_put(THIS_MODULE)\n");
+ edac_dbg(0, "here to module_put(THIS_MODULE)\n");
kfree(kobj);
int err;
struct bus_type *edac_subsys;
- debugf0("\n");
+ edac_dbg(0, "\n");
/* check and count if we have already created the main kobject */
if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1)
*/
edac_subsys = edac_get_sysfs_subsys();
if (edac_subsys == NULL) {
- debugf1("no edac_subsys\n");
+ edac_dbg(1, "no edac_subsys\n");
err = -ENODEV;
goto decrement_count_fail;
}
* level main kobj for EDAC PCI
*/
if (!try_module_get(THIS_MODULE)) {
- debugf1("try_module_get() failed\n");
+ edac_dbg(1, "try_module_get() failed\n");
err = -ENODEV;
goto mod_get_fail;
}
edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (!edac_pci_top_main_kobj) {
- debugf1("Failed to allocate\n");
+ edac_dbg(1, "Failed to allocate\n");
err = -ENOMEM;
goto kzalloc_fail;
}
&ktype_edac_pci_main_kobj,
&edac_subsys->dev_root->kobj, "pci");
if (err) {
- debugf1("Failed to register '.../edac/pci'\n");
+ edac_dbg(1, "Failed to register '.../edac/pci'\n");
goto kobject_init_and_add_fail;
}
* must be used, for resources to be cleaned up properly
*/
kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD);
- debugf1("Registered '.../edac/pci' kobject\n");
+ edac_dbg(1, "Registered '.../edac/pci' kobject\n");
return 0;
*/
static void edac_pci_main_kobj_teardown(void)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
/* Decrement the count and only if no more controller instances
* are connected perform the unregisteration of the top level
* main kobj
*/
if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
- debugf0("called kobject_put on main kobj\n");
+ edac_dbg(0, "called kobject_put on main kobj\n");
kobject_put(edac_pci_top_main_kobj);
}
edac_put_sysfs_subsys();
int err;
struct kobject *edac_kobj = &pci->kobj;
- debugf0("idx=%d\n", pci->pci_idx);
+ edac_dbg(0, "idx=%d\n", pci->pci_idx);
/* create the top main EDAC PCI kobject, IF needed */
err = edac_pci_main_kobj_setup();
err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK);
if (err) {
- debugf0("sysfs_create_link() returned err= %d\n",
- err);
+ edac_dbg(0, "sysfs_create_link() returned err= %d\n", err);
goto symlink_fail;
}
*/
void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
{
- debugf0("index=%d\n", pci->pci_idx);
+ edac_dbg(0, "index=%d\n", pci->pci_idx);
/* Remove the symlink */
sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK);
* if this 'pci' is the last instance.
* If it is, the main kobject will be unregistered as a result
*/
- debugf0("calling edac_pci_main_kobj_teardown()\n");
+ edac_dbg(0, "calling edac_pci_main_kobj_teardown()\n");
edac_pci_main_kobj_teardown();
}
local_irq_restore(flags);
- debugf4("PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
+ edac_dbg(4, "PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
/* check the status reg for errors on boards NOT marked as broken
* if broken, we cannot trust any of the status bits
}
- debugf4("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev_name(&dev->dev));
+ edac_dbg(4, "PCI HEADER TYPE= 0x%02x %s\n",
+ header_type, dev_name(&dev->dev));
if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
/* On bridges, need to examine secondary status register */
status = get_pci_parity_status(dev, 1);
- debugf4("PCI SEC_STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
+ edac_dbg(4, "PCI SEC_STATUS= 0x%04x %s\n",
+ status, dev_name(&dev->dev));
/* check the secondary status reg for errors,
* on NOT broken boards
{
int before_count;
- debugf3("\n");
+ edac_dbg(3, "\n");
/* if policy has PCI check off, leave now */
if (!check_pci_errors)
{
struct i3000_error_info info;
- debugf1("MC%d\n", mci->mc_idx);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
i3000_get_error_info(mci, &info);
i3000_process_error_info(mci, &info, 1);
}
unsigned long mchbar;
void __iomem *window;
- debugf0("MC:\n");
+ edac_dbg(0, "MC:\n");
pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar);
mchbar &= I3000_MCHBAR_MASK;
if (!mci)
return -ENOMEM;
- debugf3("MC: init mci\n");
+ edac_dbg(3, "MC: init mci\n");
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
if (interleaved)
cumul_size <<= 1;
- debugf3("MC: (%d) cumul_size 0x%x\n",
- i, cumul_size);
+ edac_dbg(3, "MC: (%d) cumul_size 0x%x\n", i, cumul_size);
if (cumul_size == last_cumul_size)
continue;
rc = -ENODEV;
if (edac_mc_add_mc(mci)) {
- debugf3("MC: failed edac_mc_add_mc()\n");
+ edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
goto fail;
}
}
/* get this far and it's successful */
- debugf3("MC: success\n");
+ edac_dbg(3, "MC: success\n");
return 0;
fail:
{
int rc;
- debugf0("MC:\n");
+ edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
{
struct mem_ctl_info *mci;
- debugf0("\n");
+ edac_dbg(0, "\n");
if (i3000_pci)
edac_pci_release_generic_ctl(i3000_pci);
{
int pci_rc;
- debugf3("MC:\n");
+ edac_dbg(3, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_3000_HB, NULL);
if (!mci_pdev) {
- debugf0("i3000 pci_get_device fail\n");
+ edac_dbg(0, "i3000 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
if (pci_rc < 0) {
- debugf0("i3000 init fail\n");
+ edac_dbg(0, "i3000 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
static void __exit i3000_exit(void)
{
- debugf3("MC:\n");
+ edac_dbg(3, "MC:\n");
pci_unregister_driver(&i3000_driver);
if (!i3000_registered) {
pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
- debugf0("In single channel mode.\n");
+ edac_dbg(0, "In single channel mode\n");
return 1;
} else {
- debugf0("In dual channel mode.\n");
+ edac_dbg(0, "In dual channel mode\n");
return 2;
}
}
{
struct i3200_error_info info;
- debugf1("MC%d\n", mci->mc_idx);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
i3200_get_and_clear_error_info(mci, &info);
i3200_process_error_info(mci, &info);
}
void __iomem *window;
struct i3200_priv *priv;
- debugf0("MC:\n");
+ edac_dbg(0, "MC:\n");
window = i3200_map_mchbar(pdev);
if (!window)
if (!mci)
return -ENOMEM;
- debugf3("MC: init mci\n");
+ edac_dbg(3, "MC: init mci\n");
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
rc = -ENODEV;
if (edac_mc_add_mc(mci)) {
- debugf3("MC: failed edac_mc_add_mc()\n");
+ edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
goto fail;
}
/* get this far and it's successful */
- debugf3("MC: success\n");
+ edac_dbg(3, "MC: success\n");
return 0;
fail:
{
int rc;
- debugf0("MC:\n");
+ edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
struct mem_ctl_info *mci;
struct i3200_priv *priv;
- debugf0("\n");
+ edac_dbg(0, "\n");
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
{
int pci_rc;
- debugf3("MC:\n");
+ edac_dbg(3, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_3200_HB, NULL);
if (!mci_pdev) {
- debugf0("i3200 pci_get_device fail\n");
+ edac_dbg(0, "i3200 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
if (pci_rc < 0) {
- debugf0("i3200 init fail\n");
+ edac_dbg(0, "i3200 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
static void __exit i3200_exit(void)
{
- debugf3("MC:\n");
+ edac_dbg(3, "MC:\n");
pci_unregister_driver(&i3200_driver);
if (!i3200_registered) {
ras = NREC_RAS(info->nrecmemb);
cas = NREC_CAS(info->nrecmemb);
- debugf0("\t\tCSROW= %d Channel= %d "
- "(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, bank,
- rdwr ? "Write" : "Read", ras, cas);
+ edac_dbg(0, "\t\tCSROW= %d Channel= %d (DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, bank,
+ rdwr ? "Write" : "Read", ras, cas);
/* Only 1 bit will be on */
switch (allErrors) {
/* ONLY ONE of the possible error bits will be set, as per the docs */
ue_errors = allErrors & FERR_NF_UNCORRECTABLE;
if (ue_errors) {
- debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
+ edac_dbg(0, "\tUncorrected bits= 0x%x\n", ue_errors);
branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
ras = NREC_RAS(info->nrecmemb);
cas = NREC_CAS(info->nrecmemb);
- debugf0
- ("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
- rdwr ? "Write" : "Read", ras, cas);
+ edac_dbg(0, "\t\tCSROW= %d Channels= %d,%d (Branch= %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, channel + 1, branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas);
switch (ue_errors) {
case FERR_NF_M12ERR:
/* Check correctable errors */
ce_errors = allErrors & FERR_NF_CORRECTABLE;
if (ce_errors) {
- debugf0("\tCorrected bits= 0x%x\n", ce_errors);
+ edac_dbg(0, "\tCorrected bits= 0x%x\n", ce_errors);
branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
ras = REC_RAS(info->recmemb);
cas = REC_CAS(info->recmemb);
- debugf0("\t\tCSROW= %d Channel= %d (Branch %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, branch >> 1, bank,
- rdwr ? "Write" : "Read", ras, cas);
+ edac_dbg(0, "\t\tCSROW= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas);
switch (ce_errors) {
case FERR_NF_M17ERR:
static void i5000_check_error(struct mem_ctl_info *mci)
{
struct i5000_error_info info;
- debugf4("MC%d\n", mci->mc_idx);
+ edac_dbg(4, "MC%d\n", mci->mc_idx);
i5000_get_error_info(mci, &info);
i5000_process_error_info(mci, &info, 1);
}
pvt->fsb_error_regs = pdev;
- debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->system_address),
- pvt->system_address->vendor, pvt->system_address->device);
- debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->branchmap_werrors),
- pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
- debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->fsb_error_regs),
- pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
+ edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->system_address),
+ pvt->system_address->vendor, pvt->system_address->device);
+ edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->branchmap_werrors),
+ pvt->branchmap_werrors->vendor,
+ pvt->branchmap_werrors->device);
+ edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->fsb_error_regs),
+ pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
pdev = NULL;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
ans = MTR_DIMMS_PRESENT(mtr);
- debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr,
- ans ? "Present" : "NOT Present");
+ edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n",
+ slot_row, mtr, ans ? "" : "NOT ");
if (!ans)
return;
- debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
- debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
- debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
- debugf2("\t\tNUMROW: %s\n",
- MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
- MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
- MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
- "reserved");
- debugf2("\t\tNUMCOL: %s\n",
- MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
- MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
- MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
- "reserved");
+ edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
+ edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
+ edac_dbg(2, "\t\tNUMRANK: %s\n",
+ MTR_DIMM_RANK(mtr) ? "double" : "single");
+ edac_dbg(2, "\t\tNUMROW: %s\n",
+ MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
+ MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
+ MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
+ "reserved");
+ edac_dbg(2, "\t\tNUMCOL: %s\n",
+ MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
+ MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
+ MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
+ "reserved");
}
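The "DIMMs are %sPresent" rewrites above fold the two alternative strings into one grep-able format, selecting an empty or "NOT " prefix with the ternary. A tiny stand-alone illustration of the pattern (hypothetical helper, userspace printf for brevity):

/* Illustrative only: the "%sPresent" prefix trick used in decode_mtr(). */
#include <stdio.h>

static void report_dimm(int slot, int present)
{
	/* one format string instead of two full alternatives */
	printf("MTR%d: DIMMs are %sPresent\n", slot, present ? "" : "NOT ");
}

int main(void)
{
	report_dimm(0, 1);	/* MTR0: DIMMs are Present     */
	report_dimm(1, 0);	/* MTR1: DIMMs are NOT Present */
	return 0;
}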
static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
"--------------------------------");
p += n;
space -= n;
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
}
p += n;
space -= n;
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
"--------------------------------");
p += n;
space -= n;
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
p += n;
space -= n;
}
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
/* output the last message and free buffer */
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
kfree(mem_buffer);
}
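The hunks above keep the driver's existing accumulate-and-flush scheme: table cells are snprintf()'d into a scratch buffer and each completed line is emitted with a single debug call, so the multi-column DIMM tables stay readable in the log. A minimal sketch of that pattern follows, with illustrative names and a fixed buffer size standing in for the driver's PAGE_SIZE buffer.

/*
 * Sketch of the buffered table-dump pattern; names and sizes are
 * illustrative, not the driver's actual helpers.
 */
#include <linux/slab.h>
#include <linux/kernel.h>

#define LINE_SZ	256

static void dump_row(const int *sizes, int n)
{
	char *buf;
	int i, len = 0;

	buf = kmalloc(LINE_SZ, GFP_KERNEL);
	if (!buf)
		return;

	for (i = 0; i < n; i++)
		len += scnprintf(buf + len, LINE_SZ - len, " %4d", sizes[i]);

	pr_debug("%s\n", buf);		/* flush one assembled line */
	kfree(buf);
}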
maxdimmperch = pvt->maxdimmperch;
maxch = pvt->maxch;
- debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
- (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
+ edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
+ (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
/* Get the Branch Map regs */
pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
pvt->tolm >>= 12;
- debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
- pvt->tolm);
+ edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
+ pvt->tolm, pvt->tolm);
actual_tolm = pvt->tolm << 28;
- debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, actual_tolm);
+ edac_dbg(2, "Actual TOLM byte addr=%u (0x%x)\n",
+ actual_tolm, actual_tolm);
pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
limit = (pvt->mir0 >> 4) & 0x0FFF;
way0 = pvt->mir0 & 0x1;
way1 = pvt->mir0 & 0x2;
- debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n",
+ limit, way1, way0);
limit = (pvt->mir1 >> 4) & 0x0FFF;
way0 = pvt->mir1 & 0x1;
way1 = pvt->mir1 & 0x2;
- debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n",
+ limit, way1, way0);
limit = (pvt->mir2 >> 4) & 0x0FFF;
way0 = pvt->mir2 & 0x1;
way1 = pvt->mir2 & 0x2;
- debugf2("MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ edac_dbg(2, "MIR2: limit= 0x%x WAY1= %u WAY0= %x\n",
+ limit, way1, way0);
/* Get the MTR[0-3] regs */
for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
pci_read_config_word(pvt->branch_0, where,
&pvt->b0_mtr[slot_row]);
- debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
- pvt->b0_mtr[slot_row]);
+ edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n",
+ slot_row, where, pvt->b0_mtr[slot_row]);
if (pvt->maxch >= CHANNELS_PER_BRANCH) {
pci_read_config_word(pvt->branch_1, where,
&pvt->b1_mtr[slot_row]);
- debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row,
- where, pvt->b1_mtr[slot_row]);
+ edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n",
+ slot_row, where, pvt->b1_mtr[slot_row]);
} else {
pvt->b1_mtr[slot_row] = 0;
}
}
/* Read and dump branch 0's MTRs */
- debugf2("\nMemory Technology Registers:\n");
- debugf2(" Branch 0:\n");
+ edac_dbg(2, "Memory Technology Registers:\n");
+ edac_dbg(2, " Branch 0:\n");
for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
}
pci_read_config_word(pvt->branch_0, AMB_PRESENT_0,
&pvt->b0_ambpresent0);
- debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
+ edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
pci_read_config_word(pvt->branch_0, AMB_PRESENT_1,
&pvt->b0_ambpresent1);
- debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
+ edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
/* Only if we have 2 branchs (4 channels) */
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_ambpresent1 = 0;
} else {
/* Read and dump branch 1's MTRs */
- debugf2(" Branch 1:\n");
+ edac_dbg(2, " Branch 1:\n");
for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
}
pci_read_config_word(pvt->branch_1, AMB_PRESENT_0,
&pvt->b1_ambpresent0);
- debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
- pvt->b1_ambpresent0);
+ edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n",
+ pvt->b1_ambpresent0);
pci_read_config_word(pvt->branch_1, AMB_PRESENT_1,
&pvt->b1_ambpresent1);
- debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
- pvt->b1_ambpresent1);
+ edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n",
+ pvt->b1_ambpresent1);
}
/* Go and determine the size of each DIMM and place in an
int num_channels;
int num_dimms_per_channel;
- debugf0("MC: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __FILE__, pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
/* We only are looking for func 0 of the set */
if (PCI_FUNC(pdev->devfn) != 0)
i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
&num_channels);
- debugf0("MC: Number of Branches=2 Channels= %d DIMMS= %d\n",
- num_channels, num_dimms_per_channel);
+ edac_dbg(0, "MC: Number of Branches=2 Channels= %d DIMMS= %d\n",
+ num_channels, num_dimms_per_channel);
/* allocate a new MC control structure */
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: %s(): mci = %p\n", __FILE__, mci);
+ edac_dbg(0, "MC: mci = %p\n", mci);
mci->pdev = &pdev->dev; /* record ptr to the generic device */
/* initialize the MC control structure 'csrows' table
* with the mapping and control information */
if (i5000_init_csrows(mci)) {
- debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i5000_init_csrows() returned nonzero "
- "value\n");
+ edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5000_init_csrows() returned nonzero value\n");
mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
} else {
- debugf1("MC: Enable error reporting now\n");
+ edac_dbg(1, "MC: Enable error reporting now\n");
i5000_enable_error_reporting(mci);
}
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: %s(): failed edac_mc_add_mc()\n",
- __FILE__);
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
{
int rc;
- debugf0("MC: %s()\n", __FILE__);
+ edac_dbg(0, "MC:\n");
/* wake up device */
rc = pci_enable_device(pdev);
{
struct mem_ctl_info *mci;
- debugf0("%s()\n", __FILE__);
+ edac_dbg(0, "\n");
if (i5000_pci)
edac_pci_release_generic_ctl(i5000_pci);
{
int pci_rc;
- debugf2("MC: %s()\n", __FILE__);
+ edac_dbg(2, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
*/
static void __exit i5000_exit(void)
{
- debugf2("MC: %s()\n", __FILE__);
+ edac_dbg(2, "MC:\n");
pci_unregister_driver(&i5000_driver);
}
i5100_rank_to_slot(mci, chan, rank));
}
- debugf2("dimm channel %d, rank %d, size %ld\n",
- chan, rank, (long)PAGES_TO_MiB(npages));
+ edac_dbg(2, "dimm channel %d, rank %d, size %ld\n",
+ chan, rank, (long)PAGES_TO_MiB(npages));
}
}
ras = nrec_ras(info);
cas = nrec_cas(info);
- debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d "
- "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
- buf_id, rdwr_str(rdwr), ras, cas);
+ edac_dbg(0, "\t\tDIMM= %d Channels= %d,%d (Branch= %d DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, channel + 1, branch >> 1, bank,
+ buf_id, rdwr_str(rdwr), ras, cas);
/* Only 1 bit will be on */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
/* Correctable errors */
if (allErrors & ERROR_NF_CORRECTABLE) {
- debugf0("\tCorrected bits= 0x%lx\n", allErrors);
+ edac_dbg(0, "\tCorrected bits= 0x%lx\n", allErrors);
branch = extract_fbdchan_indx(info->ferr_nf_fbd);
/* Only 1 bit will be on */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
- debugf0("\t\tDIMM= %d Channel= %d (Branch %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, branch >> 1, bank,
- rdwr_str(rdwr), ras, cas);
+ edac_dbg(0, "\t\tDIMM= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, branch >> 1, bank,
+ rdwr_str(rdwr), ras, cas);
/* Form out message */
snprintf(msg, sizeof(msg),
static void i5400_check_error(struct mem_ctl_info *mci)
{
struct i5400_error_info info;
- debugf4("MC%d\n", mci->mc_idx);
+ edac_dbg(4, "MC%d\n", mci->mc_idx);
i5400_get_error_info(mci, &info);
i5400_process_error_info(mci, &info);
}
}
pvt->fsb_error_regs = pdev;
- debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->system_address),
- pvt->system_address->vendor, pvt->system_address->device);
- debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->branchmap_werrors),
- pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
- debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->fsb_error_regs),
- pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
+ edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->system_address),
+ pvt->system_address->vendor, pvt->system_address->device);
+ edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->branchmap_werrors),
+ pvt->branchmap_werrors->vendor,
+ pvt->branchmap_werrors->device);
+ edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->fsb_error_regs),
+ pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_FBD0, NULL);
n = dimm;
if (n >= DIMMS_PER_CHANNEL) {
- debugf0("ERROR: trying to access an invalid dimm: %d\n",
- dimm);
+ edac_dbg(0, "ERROR: trying to access an invalid dimm: %d\n",
+ dimm);
return 0;
}
ans = MTR_DIMMS_PRESENT(mtr);
- debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr,
- ans ? "Present" : "NOT Present");
+ edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n",
+ slot_row, mtr, ans ? "" : "NOT ");
if (!ans)
return;
- debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
-
- debugf2("\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
-
- debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
- debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
- debugf2("\t\tNUMROW: %s\n",
- MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
- MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
- MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
- "65,536 - 16 rows");
- debugf2("\t\tNUMCOL: %s\n",
- MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
- MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
- MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
- "reserved");
+ edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
+
+ edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
+ MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+
+ edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
+ edac_dbg(2, "\t\tNUMRANK: %s\n",
+ MTR_DIMM_RANK(mtr) ? "double" : "single");
+ edac_dbg(2, "\t\tNUMROW: %s\n",
+ MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
+ MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
+ MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
+ "65,536 - 16 rows");
+ edac_dbg(2, "\t\tNUMCOL: %s\n",
+ MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
+ MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
+ MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
+ "reserved");
}
static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
"-------------------------------");
p += n;
space -= n;
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
p += n;
space -= n;
}
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
"-------------------------------");
p += n;
space -= n;
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
space -= n;
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
/* output the last message and free buffer */
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
kfree(mem_buffer);
}
maxdimmperch = pvt->maxdimmperch;
maxch = pvt->maxch;
- debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
- (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
+ edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
+ (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
/* Get the Branch Map regs */
pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
pvt->tolm >>= 12;
- debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
- pvt->tolm);
+ edac_dbg(2, "\nTOLM (number of 256M regions) =%u (0x%x)\n",
+ pvt->tolm, pvt->tolm);
actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
- debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
- actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
+ edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
+ actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
limit = (pvt->mir0 >> 4) & 0x0fff;
way0 = pvt->mir0 & 0x1;
way1 = pvt->mir0 & 0x2;
- debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n",
+ limit, way1, way0);
limit = (pvt->mir1 >> 4) & 0xfff;
way0 = pvt->mir1 & 0x1;
way1 = pvt->mir1 & 0x2;
- debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n",
+ limit, way1, way0);
/* Get the set of MTR[0-3] regs by each branch */
for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
pci_read_config_word(pvt->branch_0, where,
&pvt->b0_mtr[slot_row]);
- debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
- pvt->b0_mtr[slot_row]);
+ edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n",
+ slot_row, where, pvt->b0_mtr[slot_row]);
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_mtr[slot_row] = 0;
/* Branch 1 set of MTR registers */
pci_read_config_word(pvt->branch_1, where,
&pvt->b1_mtr[slot_row]);
- debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where,
- pvt->b1_mtr[slot_row]);
+ edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n",
+ slot_row, where, pvt->b1_mtr[slot_row]);
}
/* Read and dump branch 0's MTRs */
- debugf2("\nMemory Technology Registers:\n");
- debugf2(" Branch 0:\n");
+ edac_dbg(2, "Memory Technology Registers:\n");
+ edac_dbg(2, " Branch 0:\n");
for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
&pvt->b0_ambpresent0);
- debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
+ edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
pci_read_config_word(pvt->branch_0, AMBPRESENT_1,
&pvt->b0_ambpresent1);
- debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
+ edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
/* Only if we have 2 branchs (4 channels) */
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_ambpresent1 = 0;
} else {
/* Read and dump branch 1's MTRs */
- debugf2(" Branch 1:\n");
+ edac_dbg(2, " Branch 1:\n");
for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
&pvt->b1_ambpresent0);
- debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
- pvt->b1_ambpresent0);
+ edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n",
+ pvt->b1_ambpresent0);
pci_read_config_word(pvt->branch_1, AMBPRESENT_1,
&pvt->b1_ambpresent1);
- debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
- pvt->b1_ambpresent1);
+ edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n",
+ pvt->b1_ambpresent1);
}
/* Go and determine the size of each DIMM and place in an
size_mb = pvt->dimm_info[slot][channel].megabytes;
- debugf2("dimm (branch %d channel %d slot %d): %d.%03d GB\n",
- channel / 2, channel % 2, slot,
- size_mb / 1000, size_mb % 1000);
+ edac_dbg(2, "dimm (branch %d channel %d slot %d): %d.%03d GB\n",
+ channel / 2, channel % 2, slot,
+ size_mb / 1000, size_mb % 1000);
dimm->nr_pages = size_mb << 8;
dimm->grain = 8;
if (dev_idx >= ARRAY_SIZE(i5400_devs))
return -EINVAL;
- debugf0("MC: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __FILE__, pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
/* We only are looking for func 0 of the set */
if (PCI_FUNC(pdev->devfn) != 0)
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: %s(): mci = %p\n", __FILE__, mci);
+ edac_dbg(0, "MC: mci = %p\n", mci);
mci->pdev = &pdev->dev; /* record ptr to the generic device */
/* initialize the MC control structure 'dimms' table
* with the mapping and control information */
if (i5400_init_dimms(mci)) {
- debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i5400_init_dimms() returned nonzero "
- "value\n");
+ edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5400_init_dimms() returned nonzero value\n");
mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
} else {
- debugf1("MC: Enable error reporting now\n");
+ edac_dbg(1, "MC: Enable error reporting now\n");
i5400_enable_error_reporting(mci);
}
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: %s(): failed edac_mc_add_mc()\n",
- __FILE__);
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
{
int rc;
- debugf0("MC: %s()\n", __FILE__);
+ edac_dbg(0, "MC:\n");
/* wake up device */
rc = pci_enable_device(pdev);
{
struct mem_ctl_info *mci;
- debugf0("%s()\n", __FILE__);
+ edac_dbg(0, "\n");
if (i5400_pci)
edac_pci_release_generic_ctl(i5400_pci);
{
int pci_rc;
- debugf2("MC: %s()\n", __FILE__);
+ edac_dbg(2, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
*/
static void __exit i5400_exit(void)
{
- debugf2("MC: %s()\n", __FILE__);
+ edac_dbg(2, "MC:\n");
pci_unregister_driver(&i5400_driver);
}
mtr = pvt->mtr[slot][branch];
ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;
- debugf2("\tMTR%d CH%d: DIMMs are %s (mtr)\n",
- slot, channel,
- ans ? "Present" : "NOT Present");
+ edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
+ slot, channel, ans ? "" : "NOT ");
/* Determine if there is a DIMM present in this DIMM slot */
if (!ans)
dinfo->megabytes = 1 << addrBits;
- debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
-
- debugf2("\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
-
- debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
- debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANKS(mtr) ? "double" : "single");
- debugf2("\t\tNUMROW: %s\n",
- MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
- MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
- MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
- "65,536 - 16 rows");
- debugf2("\t\tNUMCOL: %s\n",
- MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
- MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
- MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
- "reserved");
- debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes);
+ edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
+
+ edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
+ MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+
+ edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
+ edac_dbg(2, "\t\tNUMRANK: %s\n",
+ MTR_DIMM_RANKS(mtr) ? "double" : "single");
+ edac_dbg(2, "\t\tNUMROW: %s\n",
+ MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
+ MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
+ MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
+ "65,536 - 16 rows");
+ edac_dbg(2, "\t\tNUMCOL: %s\n",
+ MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
+ MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
+ MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
+ "reserved");
+ edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);
/*
* The type of error detection actually depends of the
dimm->mtype = MEM_FB_DDR2;
if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
dimm->edac_mode = EDAC_SECDED;
- debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
+ edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
} else {
- debugf2("\t\tECC code is on Lockstep mode\n");
+ edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
if (MTR_DRAM_WIDTH(mtr) == 8)
dimm->edac_mode = EDAC_S8ECD8ED;
else
/* ask what device type on this row */
if (MTR_DRAM_WIDTH(mtr) == 8) {
- debugf2("\t\tScrub algorithm for x8 is on %s mode\n",
- IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
- "enhanced" : "normal");
+ edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
+ IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
+ "enhanced" : "normal");
dimm->dtype = DEV_X8;
} else
p += n;
space -= n;
}
- debugf2("%s\n", pvt->tmp_prt_buffer);
+ edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
p = pvt->tmp_prt_buffer;
space = PAGE_SIZE;
n = snprintf(p, space, "-------------------------------"
"------------------------------");
p += n;
space -= n;
- debugf2("%s\n", pvt->tmp_prt_buffer);
+ edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
p = pvt->tmp_prt_buffer;
space = PAGE_SIZE;
space -= n;
}
- debugf2("%s\n", pvt->tmp_prt_buffer);
+ edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
p = pvt->tmp_prt_buffer;
space = PAGE_SIZE;
}
"------------------------------");
p += n;
space -= n;
- debugf2("%s\n", pvt->tmp_prt_buffer);
+ edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
p = pvt->tmp_prt_buffer;
space = PAGE_SIZE;
#endif
pvt = mci->pvt_info;
- debugf2("Memory Technology Registers:\n");
+ edac_dbg(2, "Memory Technology Registers:\n");
/* Get the AMB present registers for the four channels */
for (branch = 0; branch < MAX_BRANCHES; branch++) {
pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
AMBPRESENT_0,
&pvt->ambpresent[channel]);
- debugf2("\t\tAMB-present CH%d = 0x%x:\n",
- channel, pvt->ambpresent[channel]);
+ edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
+ channel, pvt->ambpresent[channel]);
channel = to_channel(1, branch);
pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
AMBPRESENT_1,
&pvt->ambpresent[channel]);
- debugf2("\t\tAMB-present CH%d = 0x%x:\n",
- channel, pvt->ambpresent[channel]);
+ edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
+ channel, pvt->ambpresent[channel]);
}
/* Get the set of MTR[0-7] regs by each branch */
static void decode_mir(int mir_no, u16 mir[MAX_MIR])
{
if (mir[mir_no] & 3)
- debugf2("MIR%d: limit= 0x%x Branch(es) that participate:"
- " %s %s\n",
- mir_no,
- (mir[mir_no] >> 4) & 0xfff,
- (mir[mir_no] & 1) ? "B0" : "",
- (mir[mir_no] & 2) ? "B1" : "");
+ edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
+ mir_no,
+ (mir[mir_no] >> 4) & 0xfff,
+ (mir[mir_no] & 1) ? "B0" : "",
+ (mir[mir_no] & 2) ? "B1" : "");
}
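decode_mir() above prints the fields that the coalesced format now keeps on one line: a 12-bit limit in bits 15:4 and two branch-participation bits in 1:0. A hedged illustration of that unpacking, using the same masks as the debug output (the field layout is inferred from the code, not from a datasheet):

/* Illustrative MIR-style decode; layout inferred from the masks above. */
#include <linux/types.h>
#include <linux/kernel.h>

static void show_mir(int mir_no, u16 mir)
{
	pr_debug("MIR%d: limit=0x%x B0=%d B1=%d\n",
		 mir_no, (mir >> 4) & 0xfff,
		 !!(mir & 1), !!(mir & 2));
}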
/**
pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
(u32 *) &pvt->ambase);
- debugf2("AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
+ edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
/* Get the Branch Map regs */
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
pvt->tolm >>= 12;
- debugf2("TOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
- pvt->tolm);
+ edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
+ pvt->tolm, pvt->tolm);
actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
- debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
- actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
+ edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
+ actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
/* Get memory controller settings */
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
&pvt->mc_settings_a);
if (IS_SINGLE_MODE(pvt->mc_settings_a))
- debugf0("Memory controller operating on single mode\n");
+ edac_dbg(0, "Memory controller operating on single mode\n");
else
- debugf0("Memory controller operating on %s mode\n",
- IS_MIRRORED(pvt->mc_settings) ? "mirrored" : "non-mirrored");
+ edac_dbg(0, "Memory controller operating on %smirrored mode\n",
+ IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
- debugf0("Error detection is %s\n",
- IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
- debugf0("Retry is %s\n",
- IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ edac_dbg(0, "Error detection is %s\n",
+ IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ edac_dbg(0, "Retry is %s\n",
+ IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
/* Get Memory Interleave Range registers */
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
}
}
- debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->pci_dev_16_0_fsb_ctlr),
- pvt->pci_dev_16_0_fsb_ctlr->vendor,
- pvt->pci_dev_16_0_fsb_ctlr->device);
- debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->pci_dev_16_1_fsb_addr_map),
- pvt->pci_dev_16_1_fsb_addr_map->vendor,
- pvt->pci_dev_16_1_fsb_addr_map->device);
- debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->pci_dev_16_2_fsb_err_regs),
- pvt->pci_dev_16_2_fsb_err_regs->vendor,
- pvt->pci_dev_16_2_fsb_err_regs->device);
+ edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->pci_dev_16_0_fsb_ctlr),
+ pvt->pci_dev_16_0_fsb_ctlr->vendor,
+ pvt->pci_dev_16_0_fsb_ctlr->device);
+ edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->pci_dev_16_1_fsb_addr_map),
+ pvt->pci_dev_16_1_fsb_addr_map->vendor,
+ pvt->pci_dev_16_1_fsb_addr_map->device);
+ edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->pci_dev_16_2_fsb_err_regs),
+ pvt->pci_dev_16_2_fsb_err_regs->vendor,
+ pvt->pci_dev_16_2_fsb_err_regs->device);
pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
if (rc == -EIO)
return rc;
- debugf0("MC: pdev bus %u dev=0x%x fn=0x%x\n",
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
/* We only are looking for func 0 of the set */
if (PCI_FUNC(pdev->devfn) != 0)
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: mci = %p\n", mci);
+ edac_dbg(0, "MC: mci = %p\n", mci);
mci->pdev = &pdev->dev; /* record ptr to the generic device */
/* initialize the MC control structure 'csrows' table
* with the mapping and control information */
if (i7300_get_mc_regs(mci)) {
- debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i7300_init_csrows() returned nonzero "
- "value\n");
+ edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
} else {
- debugf1("MC: Enable error reporting now\n");
+ edac_dbg(1, "MC: Enable error reporting now\n");
i7300_enable_error_reporting(mci);
}
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: failed edac_mc_add_mc()\n");
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
struct mem_ctl_info *mci;
char *tmp;
- debugf0("\n");
+ edac_dbg(0, "\n");
if (i7300_pci)
edac_pci_release_generic_ctl(i7300_pci);
{
int pci_rc;
- debugf2("\n");
+ edac_dbg(2, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
*/
static void __exit i7300_exit(void)
{
- debugf2("\n");
+ edac_dbg(2, "\n");
pci_unregister_driver(&i7300_driver);
}
pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
- debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
- pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
- pvt->info.max_dod, pvt->info.ch_map);
+ edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
+ pvt->i7core_dev->socket, pvt->info.mc_control,
+ pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map);
if (ECC_ENABLED(pvt)) {
- debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
+ edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
if (ECCx8(pvt))
mode = EDAC_S8ECD8ED;
else
mode = EDAC_S4ECD4ED;
} else {
- debugf0("ECC disabled\n");
+ edac_dbg(0, "ECC disabled\n");
mode = EDAC_NONE;
}
/* FIXME: need to handle the error codes */
- debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
- "x%x x 0x%x\n",
- numdimms(pvt->info.max_dod),
- numrank(pvt->info.max_dod >> 2),
- numbank(pvt->info.max_dod >> 4),
- numrow(pvt->info.max_dod >> 6),
- numcol(pvt->info.max_dod >> 9));
+ edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n",
+ numdimms(pvt->info.max_dod),
+ numrank(pvt->info.max_dod >> 2),
+ numbank(pvt->info.max_dod >> 4),
+ numrow(pvt->info.max_dod >> 6),
+ numcol(pvt->info.max_dod >> 9));
for (i = 0; i < NUM_CHANS; i++) {
u32 data, dimm_dod[3], value[8];
continue;
if (!CH_ACTIVE(pvt, i)) {
- debugf0("Channel %i is not active\n", i);
+ edac_dbg(0, "Channel %i is not active\n", i);
continue;
}
if (CH_DISABLED(pvt, i)) {
- debugf0("Channel %i is disabled\n", i);
+ edac_dbg(0, "Channel %i is disabled\n", i);
continue;
}
pci_read_config_dword(pvt->pci_ch[i][1],
MC_DOD_CH_DIMM2, &dimm_dod[2]);
- debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
- "%s%s%s%cDIMMs\n",
- i,
- RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
- data,
- pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
- pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
- pvt->channel[i].has_4rank ? "HAS_4R " : "",
- (data & REGISTERED_DIMM) ? 'R' : 'U');
+ edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n",
+ i,
+ RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
+ data,
+ pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
+ pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
+ pvt->channel[i].has_4rank ? "HAS_4R " : "",
+ (data & REGISTERED_DIMM) ? 'R' : 'U');
for (j = 0; j < 3; j++) {
u32 banks, ranks, rows, cols;
/* DDR3 has 8 I/O banks */
size = (rows * cols * banks * ranks) >> (20 - 3);
- debugf0("\tdimm %d %d Mb offset: %x, "
- "bank: %d, rank: %d, row: %#x, col: %#x\n",
- j, size,
- RANKOFFSET(dimm_dod[j]),
- banks, ranks, rows, cols);
+ edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
+ j, size,
+ RANKOFFSET(dimm_dod[j]),
+ banks, ranks, rows, cols);
npages = MiB_TO_PAGES(size);
pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
- debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
+ edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
for (j = 0; j < 8; j++)
- debugf1("\t\t%#x\t%#x\t%#x\n",
- (value[j] >> 27) & 0x1,
- (value[j] >> 24) & 0x7,
- (value[j] & ((1 << 24) - 1)));
+ edac_dbg(1, "\t\t%#x\t%#x\t%#x\n",
+ (value[j] >> 27) & 0x1,
+ (value[j] >> 24) & 0x7,
+ (value[j] & ((1 << 24) - 1)));
}
return 0;
long value; \
int rc; \
\
- debugf1("\n"); \
+ edac_dbg(1, "\n"); \
pvt = mci->pvt_info; \
\
if (pvt->inject.enable) \
struct i7core_pvt *pvt; \
\
pvt = mci->pvt_info; \
- debugf1("pvt=%p\n", pvt); \
+ edac_dbg(1, "pvt=%p\n", pvt); \
if (pvt->inject.param < 0) \
return sprintf(data, "any\n"); \
else \
u32 read;
int count;
- debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
- dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
- where, val);
+ edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n",
+ dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
+ where, val);
for (count = 0; count < 10; count++) {
if (count)
pci_write_config_dword(pvt->pci_noncore,
MC_CFG_CONTROL, 8);
- debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
- " inject 0x%08x\n",
- mask, pvt->inject.eccmask, injectmask);
+ edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n",
+ mask, pvt->inject.eccmask, injectmask);
return count;
pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
MC_CHANNEL_ERROR_INJECT, &injectmask);
- debugf0("Inject error read: 0x%018x\n", injectmask);
+ edac_dbg(0, "Inject error read: 0x%018x\n", injectmask);
if (injectmask & 0x0c)
pvt->inject.enable = 1;
struct mem_ctl_info *mci = to_mci(dev); \
struct i7core_pvt *pvt = mci->pvt_info; \
\
- debugf1("\n"); \
+ edac_dbg(1, "\n"); \
if (!pvt->ce_count_available || (pvt->is_registered)) \
return sprintf(data, "data unavailable\n"); \
return sprintf(data, "%lu\n", \
static void addrmatch_release(struct device *device)
{
- debugf1("Releasing device %s\n", dev_name(device));
+ edac_dbg(1, "Releasing device %s\n", dev_name(device));
kfree(device);
}
static void all_channel_counts_release(struct device *device)
{
- debugf1("Releasing device %s\n", dev_name(device));
+ edac_dbg(1, "Releasing device %s\n", dev_name(device));
kfree(device);
}
dev_set_name(pvt->addrmatch_dev, "inject_addrmatch");
dev_set_drvdata(pvt->addrmatch_dev, mci);
- debugf1("creating %s\n", dev_name(pvt->addrmatch_dev));
+ edac_dbg(1, "creating %s\n", dev_name(pvt->addrmatch_dev));
rc = device_add(pvt->addrmatch_dev);
if (rc < 0)
dev_set_name(pvt->chancounts_dev, "all_channel_counts");
dev_set_drvdata(pvt->chancounts_dev, mci);
- debugf1("creating %s\n", dev_name(pvt->chancounts_dev));
+ edac_dbg(1, "creating %s\n", dev_name(pvt->chancounts_dev));
rc = device_add(pvt->chancounts_dev);
if (rc < 0)
{
struct i7core_pvt *pvt = mci->pvt_info;
- debugf1("\n");
+ edac_dbg(1, "\n");
device_remove_file(&mci->dev, &dev_attr_inject_section);
device_remove_file(&mci->dev, &dev_attr_inject_type);
{
int i;
- debugf0("\n");
+ edac_dbg(0, "\n");
for (i = 0; i < i7core_dev->n_devs; i++) {
struct pci_dev *pdev = i7core_dev->pdev[i];
if (!pdev)
continue;
- debugf0("Removing dev %02x:%02x.%d\n",
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ edac_dbg(0, "Removing dev %02x:%02x.%d\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
pci_dev_put(pdev);
}
}
while ((b = pci_find_next_bus(b)) != NULL) {
bus = b->number;
- debugf0("Found bus %d\n", bus);
+ edac_dbg(0, "Found bus %d\n", bus);
if (bus > last_bus)
last_bus = bus;
}
- debugf0("Last bus %d\n", last_bus);
+ edac_dbg(0, "Last bus %d\n", last_bus);
return last_bus;
}
return -ENODEV;
}
- debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
- socket, bus, dev_descr->dev,
- dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ edac_dbg(0, "Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
+ socket, bus, dev_descr->dev,
+ dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
/*
* As stated on drivers/pci/search.c, the reference count for
family = "unknown";
pvt->enable_scrub = false;
}
- debugf0("Detected a processor type %s\n", family);
+ edac_dbg(0, "Detected a processor type %s\n", family);
} else
goto error;
- debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- pdev, i7core_dev->socket);
+ edac_dbg(0, "Associated fn %d.%d, dev = %p, socket %d\n",
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ pdev, i7core_dev->socket);
if (PCI_SLOT(pdev->devfn) == 3 &&
PCI_FUNC(pdev->devfn) == 2)
pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
&rcv[2][1]);
for (i = 0 ; i < 3; i++) {
- debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
- (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
+ edac_dbg(3, "MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
+ (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
/*if the channel has 3 dimms*/
if (pvt->channel[i].dimms > 2) {
new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
int new0, new1, new2;
if (!pvt->pci_mcr[4]) {
- debugf0("MCR registers not found\n");
+ edac_dbg(0, "MCR registers not found\n");
return;
}
struct i7core_pvt *pvt;
if (unlikely(!mci || !mci->pvt_info)) {
- debugf0("MC: dev = %p\n", &i7core_dev->pdev[0]->dev);
+ edac_dbg(0, "MC: dev = %p\n", &i7core_dev->pdev[0]->dev);
i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
return;
pvt = mci->pvt_info;
- debugf0("MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);
+ edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);
/* Disable scrubrate setting */
if (pvt->enable_scrub)
i7core_delete_sysfs_devices(mci);
edac_mc_del_mc(mci->pdev);
- debugf1("%s: free mci struct\n", mci->ctl_name);
+ edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
kfree(mci->ctl_name);
edac_mc_free(mci);
i7core_dev->mci = NULL;
if (unlikely(!mci))
return -ENOMEM;
- debugf0("MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);
+ edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
/* add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
- debugf0("MC: failed edac_mc_add_mc()\n");
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
goto fail0;
}
if (i7core_create_sysfs_devices(mci)) {
- debugf0("MC: failed to create sysfs nodes\n");
+ edac_dbg(0, "MC: failed to create sysfs nodes\n");
edac_mc_del_mc(mci->pdev);
rc = -EINVAL;
goto fail0;
{
struct i7core_dev *i7core_dev;
- debugf0("\n");
+ edac_dbg(0, "\n");
/*
* we have a trouble here: pdev value for removal will be wrong, since
{
int pci_rc;
- debugf2("\n");
+ edac_dbg(2, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
*/
static void __exit i7core_exit(void)
{
- debugf2("\n");
+ edac_dbg(2, "\n");
pci_unregister_driver(&i7core_driver);
}
{
struct i82443bxgx_edacmc_error_info info;
- debugf1("MC%d\n", mci->mc_idx);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
i82443bxgx_edacmc_get_error_info(mci, &info);
i82443bxgx_edacmc_process_error_info(mci, &info, 1);
}
dimm = csrow->channels[0]->dimm;
pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
- debugf1("MC%d: Row=%d DRB = %#0x\n",
- mci->mc_idx,index, drbar);
+ edac_dbg(1, "MC%d: Row=%d DRB = %#0x\n",
+ mci->mc_idx, index, drbar);
row_high_limit = ((u32) drbar << 23);
/* find the DRAM Chip Select Base address and mask */
- debugf1("MC%d: Row=%d, "
- "Boundary Address=%#0x, Last = %#0x\n",
- mci->mc_idx, index, row_high_limit,
- row_high_limit_last);
+ edac_dbg(1, "MC%d: Row=%d, Boundary Address=%#0x, Last = %#0x\n",
+ mci->mc_idx, index, row_high_limit,
+ row_high_limit_last);
/* 440GX goes to 2GB, represented with a DRB of 0. */
if (row_high_limit_last && !row_high_limit)
enum mem_type mtype;
enum edac_type edac_mode;
- debugf0("MC: %s()\n", __FILE__);
+ edac_dbg(0, "MC:\n");
/* Something is really hosed if PCI config space reads from
* the MC aren't working.
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: %s(): mci = %p\n", __FILE__, mci);
+ edac_dbg(0, "MC: mci = %p\n", mci);
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mtype = MEM_RDR;
break;
default:
- debugf0("Unknown/reserved DRAM type value "
- "in DRAMC register!\n");
+ edac_dbg(0, "Unknown/reserved DRAM type value in DRAMC register!\n");
mtype = -MEM_UNKNOWN;
}
edac_mode = EDAC_SECDED;
break;
default:
- debugf0("Unknown/reserved ECC state "
- "in NBXCFG register!\n");
+ edac_dbg(0, "Unknown/reserved ECC state in NBXCFG register!\n");
edac_mode = EDAC_UNKNOWN;
break;
}
mci->ctl_page_to_phys = NULL;
if (edac_mc_add_mc(mci)) {
- debugf3("failed edac_mc_add_mc()\n");
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
__func__);
}
- debugf3("MC: %s(): success\n", __FILE__);
+ edac_dbg(3, "MC: success\n");
return 0;
fail:
{
int rc;
- debugf0("MC: %s()\n", __FILE__);
+ edac_dbg(0, "MC:\n");
/* don't need to call pci_enable_device() */
rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
{
struct mem_ctl_info *mci;
- debugf0("%s()\n", __FILE__);
+ edac_dbg(0, "\n");
if (i82443bxgx_pci)
edac_pci_release_generic_ctl(i82443bxgx_pci);
id = &i82443bxgx_pci_tbl[i];
}
if (!mci_pdev) {
- debugf0("i82443bxgx pci_get_device fail\n");
+ edac_dbg(0, "i82443bxgx pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl);
if (pci_rc < 0) {
- debugf0("i82443bxgx init fail\n");
+ edac_dbg(0, "i82443bxgx init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
{
struct i82860_error_info info;
- debugf1("MC%d\n", mci->mc_idx);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
i82860_get_error_info(mci, &info);
i82860_process_error_info(mci, &info, 1);
}
pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
cumul_size = (value & I82860_GBA_MASK) <<
(I82860_GBA_SHIFT - PAGE_SHIFT);
- debugf3("(%d) cumul_size 0x%x\n", index,
- cumul_size);
+ edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
if (!mci)
return -ENOMEM;
- debugf3("init mci\n");
+ edac_dbg(3, "init mci\n");
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
- debugf3("failed edac_mc_add_mc()\n");
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
}
/* get this far and it's successful */
- debugf3("success\n");
+ edac_dbg(3, "success\n");
return 0;
{
int rc;
- debugf0("\n");
+ edac_dbg(0, "\n");
i82860_printk(KERN_INFO, "i82860 init one\n");
if (pci_enable_device(pdev) < 0)
{
struct mem_ctl_info *mci;
- debugf0("\n");
+ edac_dbg(0, "\n");
if (i82860_pci)
edac_pci_release_generic_ctl(i82860_pci);
{
int pci_rc;
- debugf3("\n");
+ edac_dbg(3, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
PCI_DEVICE_ID_INTEL_82860_0, NULL);
if (mci_pdev == NULL) {
- debugf0("860 pci_get_device fail\n");
+ edac_dbg(0, "860 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
if (pci_rc < 0) {
- debugf0("860 init fail\n");
+ edac_dbg(0, "860 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
static void __exit i82860_exit(void)
{
- debugf3("\n");
+ edac_dbg(3, "\n");
pci_unregister_driver(&i82860_driver);
{
struct i82875p_error_info info;
- debugf1("MC%d\n", mci->mc_idx);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
i82875p_get_error_info(mci, &info);
i82875p_process_error_info(mci, &info, 1);
}
value = readb(ovrfl_window + I82875P_DRB + index);
cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
- debugf3("(%d) cumul_size 0x%x\n", index,
- cumul_size);
+ edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
u32 nr_chans;
struct i82875p_error_info discard;
- debugf0("\n");
+ edac_dbg(0, "\n");
ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
goto fail0;
}
- debugf3("init mci\n");
+ edac_dbg(3, "init mci\n");
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->dev_name = pci_name(pdev);
mci->edac_check = i82875p_check;
mci->ctl_page_to_phys = NULL;
- debugf3("init pvt\n");
+ edac_dbg(3, "init pvt\n");
pvt = (struct i82875p_pvt *)mci->pvt_info;
pvt->ovrfl_pdev = ovrfl_pdev;
pvt->ovrfl_window = ovrfl_window;
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
- debugf3("failed edac_mc_add_mc()\n");
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail1;
}
}
/* get this far and it's successful */
- debugf3("success\n");
+ edac_dbg(3, "success\n");
return 0;
fail1:
{
int rc;
- debugf0("\n");
+ edac_dbg(0, "\n");
i82875p_printk(KERN_INFO, "i82875p init one\n");
if (pci_enable_device(pdev) < 0)
struct mem_ctl_info *mci;
struct i82875p_pvt *pvt = NULL;
- debugf0("\n");
+ edac_dbg(0, "\n");
if (i82875p_pci)
edac_pci_release_generic_ctl(i82875p_pci);
{
int pci_rc;
- debugf3("\n");
+ edac_dbg(3, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
PCI_DEVICE_ID_INTEL_82875_0, NULL);
if (!mci_pdev) {
- debugf0("875p pci_get_device fail\n");
+ edac_dbg(0, "875p pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
if (pci_rc < 0) {
- debugf0("875p init fail\n");
+ edac_dbg(0, "875p init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
static void __exit i82875p_exit(void)
{
- debugf3("\n");
+ edac_dbg(3, "\n");
i82875p_remove_one(mci_pdev);
pci_dev_put(mci_pdev);
{
struct i82975x_error_info info;
- debugf1("MC%d\n", mci->mc_idx);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
i82975x_get_error_info(mci, &info);
i82975x_process_error_info(mci, &info, 1);
}
*/
if (csrow->nr_channels > 1)
cumul_size <<= 1;
- debugf3("(%d) cumul_size 0x%x\n", index,
- cumul_size);
+ edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
nr_pages = cumul_size - last_cumul_size;
if (!nr_pages)
u8 c1drb[4];
#endif
- debugf0("\n");
+ edac_dbg(0, "\n");
pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar);
if (!(mchbar & 1)) {
- debugf3("failed, MCHBAR disabled!\n");
+ edac_dbg(3, "failed, MCHBAR disabled!\n");
goto fail0;
}
mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
goto fail1;
}
- debugf3("init mci\n");
+ edac_dbg(3, "init mci\n");
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->dev_name = pci_name(pdev);
mci->edac_check = i82975x_check;
mci->ctl_page_to_phys = NULL;
- debugf3("init pvt\n");
+ edac_dbg(3, "init pvt\n");
pvt = (struct i82975x_pvt *) mci->pvt_info;
pvt->mch_window = mch_window;
i82975x_init_csrows(mci, pdev, mch_window);
/* finalize this instance of memory controller with edac core */
if (edac_mc_add_mc(mci)) {
- debugf3("failed edac_mc_add_mc()\n");
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail2;
}
/* get this far and it's successful */
- debugf3("success\n");
+ edac_dbg(3, "success\n");
return 0;
fail2:
{
int rc;
- debugf0("\n");
+ edac_dbg(0, "\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
struct mem_ctl_info *mci;
struct i82975x_pvt *pvt;
- debugf0("\n");
+ edac_dbg(0, "\n");
mci = edac_mc_del_mc(&pdev->dev);
if (mci == NULL)
{
int pci_rc;
- debugf3("\n");
+ edac_dbg(3, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
PCI_DEVICE_ID_INTEL_82975_0, NULL);
if (!mci_pdev) {
- debugf0("i82975x pci_get_device fail\n");
+ edac_dbg(0, "i82975x pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl);
if (pci_rc < 0) {
- debugf0("i82975x init fail\n");
+ edac_dbg(0, "i82975x init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
static void __exit i82975x_exit(void)
{
- debugf3("\n");
+ edac_dbg(3, "\n");
pci_unregister_driver(&i82975x_driver);
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
- debugf3("failed edac_pci_add_device()\n");
+ edac_dbg(3, "failed edac_pci_add_device()\n");
goto err;
}
}
devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
- debugf3("success\n");
+ edac_dbg(3, "success\n");
printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
return 0;
struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
- debugf0("\n");
+ edac_dbg(0, "\n");
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
orig_pci_err_cap_dr);
pdata->edac_idx = edac_dev_idx++;
if (edac_device_add_device(edac_dev) > 0) {
- debugf3("failed edac_device_add_device()\n");
+ edac_dbg(3, "failed edac_device_add_device()\n");
goto err;
}
devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
- debugf3("success\n");
+ edac_dbg(3, "success\n");
printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
return 0;
struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
- debugf0("\n");
+ edac_dbg(0, "\n");
if (edac_op_state == EDAC_OPSTATE_INT) {
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
goto err;
}
- debugf3("init mci\n");
+ edac_dbg(3, "init mci\n");
mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
MEM_FLAG_DDR | MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
if (edac_mc_add_mc(mci)) {
- debugf3("failed edac_mc_add_mc()\n");
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto err;
}
if (mpc85xx_create_sysfs_attributes(mci)) {
edac_mc_del_mc(mci->pdev);
- debugf3("failed edac_mc_add_mc()\n");
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto err;
}
}
devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
- debugf3("success\n");
+ edac_dbg(3, "success\n");
printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
return 0;
struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- debugf0("\n");
+ edac_dbg(0, "\n");
if (edac_op_state == EDAC_OPSTATE_INT) {
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
MV64X60_PCIx_ERR_MASK_VAL);
if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
- debugf3("failed edac_pci_add_device()\n");
+ edac_dbg(3, "failed edac_pci_add_device()\n");
goto err;
}
devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);
/* get this far and it's successful */
- debugf3("success\n");
+ edac_dbg(3, "success\n");
return 0;
{
struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
- debugf0("\n");
+ edac_dbg(0, "\n");
edac_pci_del_device(&pdev->dev);
pdata->edac_idx = edac_dev_idx++;
if (edac_device_add_device(edac_dev) > 0) {
- debugf3("failed edac_device_add_device()\n");
+ edac_dbg(3, "failed edac_device_add_device()\n");
goto err;
}
devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);
/* get this far and it's successful */
- debugf3("success\n");
+ edac_dbg(3, "success\n");
return 0;
{
struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
- debugf0("\n");
+ edac_dbg(0, "\n");
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(edac_dev);
pdata->edac_idx = edac_dev_idx++;
if (edac_device_add_device(edac_dev) > 0) {
- debugf3("failed edac_device_add_device()\n");
+ edac_dbg(3, "failed edac_device_add_device()\n");
goto err;
}
devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);
/* get this far and it's successful */
- debugf3("success\n");
+ edac_dbg(3, "success\n");
return 0;
{
struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
- debugf0("\n");
+ edac_dbg(0, "\n");
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(edac_dev);
goto err2;
}
- debugf3("init mci\n");
+ edac_dbg(3, "init mci\n");
mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
if (edac_mc_add_mc(mci)) {
- debugf3("failed edac_mc_add_mc()\n");
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto err;
}
}
/* get this far and it's successful */
- debugf3("success\n");
+ edac_dbg(3, "success\n");
return 0;
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
- debugf0("\n");
+ edac_dbg(0, "\n");
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
{
struct r82600_error_info info;
- debugf1("MC%d\n", mci->mc_idx);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
r82600_get_error_info(mci, &info);
r82600_process_error_info(mci, &info, 1);
}
/* find the DRAM Chip Select Base address and mask */
pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
- debugf1("Row=%d DRBA = %#0x\n", index, drbar);
+ edac_dbg(1, "Row=%d DRBA = %#0x\n", index, drbar);
row_high_limit = ((u32) drbar << 24);
/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
- debugf1("Row=%d, Boundary Address=%#0x, Last = %#0x\n",
- index, row_high_limit, row_high_limit_last);
+ edac_dbg(1, "Row=%d, Boundary Address=%#0x, Last = %#0x\n",
+ index, row_high_limit, row_high_limit_last);
/* Empty row [p.57] */
if (row_high_limit == row_high_limit_last)
u32 sdram_refresh_rate;
struct r82600_error_info discard;
- debugf0("\n");
+ edac_dbg(0, "\n");
pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
pci_read_config_dword(pdev, R82600_EAP, &eapr);
scrub_disabled = eapr & BIT(31);
sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
- debugf2("sdram refresh rate = %#0x\n", sdram_refresh_rate);
- debugf2("DRAMC register = %#0x\n", dramcr);
+ edac_dbg(2, "sdram refresh rate = %#0x\n", sdram_refresh_rate);
+ edac_dbg(2, "DRAMC register = %#0x\n", dramcr);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = R82600_NR_CSROWS;
layers[0].is_virt_csrow = true;
if (mci == NULL)
return -ENOMEM;
- debugf0("mci = %p\n", mci);
+ edac_dbg(0, "mci = %p\n", mci);
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
if (ecc_enabled(dramcr)) {
if (scrub_disabled)
- debugf3("mci = %p - Scrubbing disabled! EAP: "
- "%#0x\n", mci, eapr);
+ edac_dbg(3, "mci = %p - Scrubbing disabled! EAP: %#0x\n",
+ mci, eapr);
} else
mci->edac_cap = EDAC_FLAG_NONE;
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
- debugf3("failed edac_mc_add_mc()\n");
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
/* get this far and it's successful */
if (disable_hardware_scrub) {
- debugf3("Disabling Hardware Scrub (scrub on error)\n");
+ edac_dbg(3, "Disabling Hardware Scrub (scrub on error)\n");
pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
}
__func__);
}
- debugf3("success\n");
+ edac_dbg(3, "success\n");
return 0;
fail:
static int __devinit r82600_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- debugf0("\n");
+ edac_dbg(0, "\n");
/* don't need to call pci_enable_device() */
return r82600_probe1(pdev, ent->driver_data);
{
struct mem_ctl_info *mci;
- debugf0("\n");
+ edac_dbg(0, "\n");
if (r82600_pci)
edac_pci_release_generic_ctl(r82600_pci);
int ranks = (1 << RANK_CNT_BITS(mtr));
if (ranks > 4) {
- debugf0("Invalid number of ranks: %d (max = 4) raw value = %x (%04x)",
- ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr);
+ edac_dbg(0, "Invalid number of ranks: %d (max = 4) raw value = %x (%04x)\n",
+ ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr);
return -EINVAL;
}
int rows = (RANK_WIDTH_BITS(mtr) + 12);
if (rows < 13 || rows > 18) {
- debugf0("Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)",
- rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
+ edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n",
+ rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
return -EINVAL;
}
int cols = (COL_WIDTH_BITS(mtr) + 10);
if (cols > 12) {
- debugf0("Invalid number of cols: %d (max = 4) raw value = %x (%04x)",
- cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
+ edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n",
+ cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
return -EINVAL;
}
if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot &&
PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) {
- debugf1("Associated %02x.%02x.%d with %p\n",
- bus, slot, func, sbridge_dev->pdev[i]);
+ edac_dbg(1, "Associated %02x.%02x.%d with %p\n",
+ bus, slot, func, sbridge_dev->pdev[i]);
return sbridge_dev->pdev[i];
}
}
pci_read_config_dword(pvt->pci_br, SAD_CONTROL, &reg);
pvt->sbridge_dev->node_id = NODE_ID(reg);
- debugf0("mc#%d: Node ID: %d, source ID: %d\n",
- pvt->sbridge_dev->mc,
- pvt->sbridge_dev->node_id,
- pvt->sbridge_dev->source_id);
+ edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
+ pvt->sbridge_dev->mc,
+ pvt->sbridge_dev->node_id,
+ pvt->sbridge_dev->source_id);
pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
if (IS_MIRROR_ENABLED(reg)) {
- debugf0("Memory mirror is enabled\n");
+ edac_dbg(0, "Memory mirror is enabled\n");
pvt->is_mirrored = true;
} else {
- debugf0("Memory mirror is disabled\n");
+ edac_dbg(0, "Memory mirror is disabled\n");
pvt->is_mirrored = false;
}
pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
- debugf0("Lockstep is enabled\n");
+ edac_dbg(0, "Lockstep is enabled\n");
mode = EDAC_S8ECD8ED;
pvt->is_lockstep = true;
} else {
- debugf0("Lockstep is disabled\n");
+ edac_dbg(0, "Lockstep is disabled\n");
mode = EDAC_S4ECD4ED;
pvt->is_lockstep = false;
}
if (IS_CLOSE_PG(pvt->info.mcmtr)) {
- debugf0("address map is on closed page mode\n");
+ edac_dbg(0, "address map is on closed page mode\n");
pvt->is_close_pg = true;
} else {
- debugf0("address map is on open page mode\n");
+ edac_dbg(0, "address map is on open page mode\n");
pvt->is_close_pg = false;
}
pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, &reg);
if (IS_RDIMM_ENABLED(reg)) {
/* FIXME: Can also be LRDIMM */
- debugf0("Memory is registered\n");
+ edac_dbg(0, "Memory is registered\n");
mtype = MEM_RDDR3;
} else {
- debugf0("Memory is unregistered\n");
+ edac_dbg(0, "Memory is unregistered\n");
mtype = MEM_DDR3;
}
i, j, 0);
pci_read_config_dword(pvt->pci_tad[i],
mtr_regs[j], &mtr);
- debugf4("Channel #%d MTR%d = %x\n", i, j, mtr);
+ edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
if (IS_DIMM_PRESENT(mtr)) {
pvt->channel[i].dimms++;
size = (rows * cols * banks * ranks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
- debugf0("mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
- pvt->sbridge_dev->mc, i, j,
- size, npages,
- banks, ranks, rows, cols);
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ pvt->sbridge_dev->mc, i, j,
+ size, npages,
+ banks, ranks, rows, cols);
dimm->nr_pages = npages;
dimm->grain = 32;
tmp_mb = (1 + pvt->tolm) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("TOLM: %u.%03u GB (0x%016Lx)\n",
- mb, kb, (u64)pvt->tolm);
+ edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
/* Address range is already 45:25 */
pci_read_config_dword(pvt->pci_sad1, TOHM,
tmp_mb = (1 + pvt->tohm) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("TOHM: %u.%03u GB (0x%016Lx)",
- mb, kb, (u64)pvt->tohm);
+ edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)", mb, kb, (u64)pvt->tohm);
/*
* Step 2) Get SAD range and SAD Interleave list
tmp_mb = (limit + 1) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("SAD#%d %s up to %u.%03u GB (0x%016Lx) %s reg=0x%08x\n",
- n_sads,
- get_dram_attr(reg),
- mb, kb,
- ((u64)tmp_mb) << 20L,
- INTERLEAVE_MODE(reg) ? "Interleave: 8:6" : "Interleave: [8:6]XOR[18:16]",
- reg);
+ edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
+ n_sads,
+ get_dram_attr(reg),
+ mb, kb,
+ ((u64)tmp_mb) << 20L,
+ INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
+ reg);
prv = limit;
pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
if (j > 0 && sad_interl == sad_pkg(reg, j))
break;
- debugf0("SAD#%d, interleave #%d: %d\n",
- n_sads, j, sad_pkg(reg, j));
+ edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
+ n_sads, j, sad_pkg(reg, j));
}
}
tmp_mb = (limit + 1) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
- n_tads, mb, kb,
- ((u64)tmp_mb) << 20L,
- (u32)TAD_SOCK(reg),
- (u32)TAD_CH(reg),
- (u32)TAD_TGT0(reg),
- (u32)TAD_TGT1(reg),
- (u32)TAD_TGT2(reg),
- (u32)TAD_TGT3(reg),
- reg);
+ edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
+ n_tads, mb, kb,
+ ((u64)tmp_mb) << 20L,
+ (u32)TAD_SOCK(reg),
+ (u32)TAD_CH(reg),
+ (u32)TAD_TGT0(reg),
+ (u32)TAD_TGT1(reg),
+ (u32)TAD_TGT2(reg),
+ (u32)TAD_TGT3(reg),
+ reg);
prv = limit;
}
&reg);
tmp_mb = TAD_OFFSET(reg) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
- i, j,
- mb, kb,
- ((u64)tmp_mb) << 20L,
- reg);
+ edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
+ i, j,
+ mb, kb,
+ ((u64)tmp_mb) << 20L,
+ reg);
}
}
tmp_mb = RIR_LIMIT(reg) >> 20;
rir_way = 1 << RIR_WAY(reg);
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
- i, j,
- mb, kb,
- ((u64)tmp_mb) << 20L,
- rir_way,
- reg);
+ edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
+ i, j,
+ mb, kb,
+ ((u64)tmp_mb) << 20L,
+ rir_way,
+ reg);
for (k = 0; k < rir_way; k++) {
pci_read_config_dword(pvt->pci_tad[i],
tmp_mb = RIR_OFFSET(reg) << 6;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
- i, j, k,
- mb, kb,
- ((u64)tmp_mb) << 20L,
- (u32)RIR_RNK_TGT(reg),
- reg);
+ edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
+ i, j, k,
+ mb, kb,
+ ((u64)tmp_mb) << 20L,
+ (u32)RIR_RNK_TGT(reg),
+ reg);
}
}
}
if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way))
break;
sad_interleave[sad_way] = sad_pkg(reg, sad_way);
- debugf0("SAD interleave #%d: %d\n",
- sad_way, sad_interleave[sad_way]);
+ edac_dbg(0, "SAD interleave #%d: %d\n",
+ sad_way, sad_interleave[sad_way]);
}
- debugf0("mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
- pvt->sbridge_dev->mc,
- n_sads,
- addr,
- limit,
- sad_way + 7,
- interleave_mode ? "" : "XOR[18:16]");
+ edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
+ pvt->sbridge_dev->mc,
+ n_sads,
+ addr,
+ limit,
+ sad_way + 7,
+ interleave_mode ? "" : "XOR[18:16]");
if (interleave_mode)
idx = ((addr >> 6) ^ (addr >> 16)) & 7;
else
return -EINVAL;
}
*socket = sad_interleave[idx];
- debugf0("SAD interleave index: %d (wayness %d) = CPU socket %d\n",
- idx, sad_way, *socket);
+ edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
+ idx, sad_way, *socket);
/*
* Move to the proper node structure, in order to access the
offset = TAD_OFFSET(tad_offset);
- debugf0("TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
- n_tads,
- addr,
- limit,
- (u32)TAD_SOCK(reg),
- ch_way,
- offset,
- idx,
- base_ch,
- *channel_mask);
+ edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
+ n_tads,
+ addr,
+ limit,
+ (u32)TAD_SOCK(reg),
+ ch_way,
+ offset,
+ idx,
+ base_ch,
+ *channel_mask);
/* Calculate channel address */
/* Remove the TAD offset */
limit = RIR_LIMIT(reg);
mb = div_u64_rem(limit >> 20, 1000, &kb);
- debugf0("RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
- n_rir,
- mb, kb,
- limit,
- 1 << RIR_WAY(reg));
+ edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
+ n_rir,
+ mb, kb,
+ limit,
+ 1 << RIR_WAY(reg));
if (ch_addr <= limit)
break;
}
&reg);
*rank = RIR_RNK_TGT(reg);
- debugf0("RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
- n_rir,
- ch_addr,
- limit,
- rir_way,
- idx);
+ edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
+ n_rir,
+ ch_addr,
+ limit,
+ rir_way,
+ idx);
return 0;
}
{
int i;
- debugf0("\n");
+ edac_dbg(0, "\n");
for (i = 0; i < sbridge_dev->n_devs; i++) {
struct pci_dev *pdev = sbridge_dev->pdev[i];
if (!pdev)
continue;
- debugf0("Removing dev %02x:%02x.%d\n",
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ edac_dbg(0, "Removing dev %02x:%02x.%d\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
pci_dev_put(pdev);
}
}
return -ENODEV;
}
- debugf0("Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
- bus, dev_descr->dev,
- dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ edac_dbg(0, "Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
+ bus, dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
/*
* As stated on drivers/pci/search.c, the reference count for
goto error;
}
- debugf0("Associated PCI %02x.%02d.%d with dev = %p\n",
- sbridge_dev->bus,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- pdev);
+ edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
+ sbridge_dev->bus,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ pdev);
}
/* Check if everything were registered */
channel_mask,
rank);
- debugf0("%s", msg);
+ edac_dbg(0, "%s\n", msg);
/* FIXME: need support for channel mask */
struct sbridge_pvt *pvt;
if (unlikely(!mci || !mci->pvt_info)) {
- debugf0("MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
+ edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
return;
pvt = mci->pvt_info;
- debugf0("MC: mci = %p, dev = %p\n",
- mci, &sbridge_dev->pdev[0]->dev);
+ edac_dbg(0, "MC: mci = %p, dev = %p\n",
+ mci, &sbridge_dev->pdev[0]->dev);
mce_unregister_decode_chain(&sbridge_mce_dec);
/* Remove MC sysfs nodes */
edac_mc_del_mc(mci->pdev);
- debugf1("%s: free mci struct\n", mci->ctl_name);
+ edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
kfree(mci->ctl_name);
edac_mc_free(mci);
sbridge_dev->mci = NULL;
if (unlikely(!mci))
return -ENOMEM;
- debugf0("MC: mci = %p, dev = %p\n",
- mci, &sbridge_dev->pdev[0]->dev);
+ edac_dbg(0, "MC: mci = %p, dev = %p\n",
+ mci, &sbridge_dev->pdev[0]->dev);
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
/* add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
- debugf0("MC: failed edac_mc_add_mc()\n");
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
rc = -EINVAL;
goto fail0;
}
mc = 0;
list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
- debugf0("Registering MC#%d (%d of %d)\n", mc, mc + 1, num_mc);
+ edac_dbg(0, "Registering MC#%d (%d of %d)\n",
+ mc, mc + 1, num_mc);
sbridge_dev->mc = mc++;
rc = sbridge_register_mci(sbridge_dev);
if (unlikely(rc < 0))
{
struct sbridge_dev *sbridge_dev;
- debugf0("\n");
+ edac_dbg(0, "\n");
/*
* we have a trouble here: pdev value for removal will be wrong, since
{
int pci_rc;
- debugf2("\n");
+ edac_dbg(2, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
*/
static void __exit sbridge_exit(void)
{
- debugf2("\n");
+ edac_dbg(2, "\n");
pci_unregister_driver(&sbridge_driver);
}
pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
- debugf0("In single channel mode.\n");
+ edac_dbg(0, "In single channel mode\n");
x38_channel_num = 1;
} else {
- debugf0("In dual channel mode.\n");
+ edac_dbg(0, "In dual channel mode\n");
x38_channel_num = 2;
}
{
struct x38_error_info info;
- debugf1("MC%d\n", mci->mc_idx);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
x38_get_and_clear_error_info(mci, &info);
x38_process_error_info(mci, &info);
}
bool stacked;
void __iomem *window;
- debugf0("MC:\n");
+ edac_dbg(0, "MC:\n");
window = x38_map_mchbar(pdev);
if (!window)
if (!mci)
return -ENOMEM;
- debugf3("MC: init mci\n");
+ edac_dbg(3, "MC: init mci\n");
mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
rc = -ENODEV;
if (edac_mc_add_mc(mci)) {
- debugf3("MC: failed edac_mc_add_mc()\n");
+ edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
goto fail;
}
/* get this far and it's successful */
- debugf3("MC: success\n");
+ edac_dbg(3, "MC: success\n");
return 0;
fail:
{
int rc;
- debugf0("MC:\n");
+ edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
{
struct mem_ctl_info *mci;
- debugf0("\n");
+ edac_dbg(0, "\n");
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
{
int pci_rc;
- debugf3("MC:\n");
+ edac_dbg(3, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_X38_HB, NULL);
if (!mci_pdev) {
- debugf0("x38 pci_get_device fail\n");
+ edac_dbg(0, "x38 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
if (pci_rc < 0) {
- debugf0("x38 init fail\n");
+ edac_dbg(0, "x38 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
static void __exit x38_exit(void)
{
- debugf3("MC:\n");
+ edac_dbg(3, "MC:\n");
pci_unregister_driver(&x38_driver);
if (!x38_registered) {