diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8b6a0343c2208121cf858c6a22aea1816f427f16..3c9e4e98c65111a069dcbcc5916cd2e061052938 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -123,7 +123,7 @@ static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
        u32 reg = 0;
 
        amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
-       reg &= 0xfffffffe;
+       reg &= (pvt->model >= 0x30) ? ~3 : ~1;
        reg |= dct;
        amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
 }
@@ -133,8 +133,9 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
 {
        u8 dct  = 0;
 
+       /* For F15h M30h, the second DCT is DCT 3; refer to BKDG Section 2.10. */
        if (addr >= 0x140 && addr <= 0x1a0) {
-               dct   = 1;
+               dct   = (pvt->model >= 0x30) ? 3 : 1;
                addr -= 0x100;
        }
 
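Taken together, the two hunks above widen DCT selection on F15h models 30h and later: DCT_CFG_SEL now needs two select bits because these parts have four DCTs, and the second DCT register window at F2x140..F2x1A0 belongs to DCT 3 rather than DCT 1. A minimal standalone sketch of that selection logic, derived only from this patch; the helper names and main() below are mine, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal sketch; not the driver functions themselves. */
    static uint32_t dct_cfg_sel_value(uint32_t reg, uint8_t model, uint8_t dct)
    {
            /* Models 30h+ have four DCTs, so bits [1:0] select; older F15h uses bit 0. */
            reg &= (model >= 0x30) ? ~3u : ~1u;
            return reg | dct;
    }

    static void remap_second_dct(uint8_t model, unsigned int *addr, uint8_t *dct)
    {
            /* F2x140..F2x1A0 is the second DCT's window: DCT 3 on M30h+, DCT 1 before. */
            if (*addr >= 0x140 && *addr <= 0x1a0) {
                    *dct = (model >= 0x30) ? 3 : 1;
                    *addr -= 0x100;
            }
    }

    int main(void)
    {
            unsigned int addr = 0x140;
            uint8_t dct = 0;

            remap_second_dct(0x30, &addr, &dct);
            /* Prints "addr=0x40 dct=3 sel=0x3" for an M30h part. */
            printf("addr=0x%x dct=%u sel=0x%x\n", addr, (unsigned)dct,
                   dct_cfg_sel_value(0, 0x30, dct));
            return 0;
    }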
@@ -202,11 +203,11 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
        struct amd64_pvt *pvt = mci->pvt_info;
        u32 min_scrubrate = 0x5;
 
-       if (boot_cpu_data.x86 == 0xf)
+       if (pvt->fam == 0xf)
                min_scrubrate = 0x0;
 
-       /* F15h Erratum #505 */
-       if (boot_cpu_data.x86 == 0x15)
+       /* Erratum #505 */
+       if (pvt->fam == 0x15 && pvt->model < 0x10)
                f15h_select_dct(pvt, 0);
 
        return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
@@ -218,8 +219,8 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
        u32 scrubval = 0;
        int i, retval = -EINVAL;
 
-       /* F15h Erratum #505 */
-       if (boot_cpu_data.x86 == 0x15)
+       /* Erratum #505 */
+       if (pvt->fam == 0x15 && pvt->model < 0x10)
                f15h_select_dct(pvt, 0);
 
        amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
@@ -335,7 +336,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
        u64 csbase, csmask, base_bits, mask_bits;
        u8 addr_shift;
 
-       if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+       if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
                csbase          = pvt->csels[dct].csbases[csrow];
                csmask          = pvt->csels[dct].csmasks[csrow];
                base_bits       = GENMASK(21, 31) | GENMASK(9, 15);
@@ -343,10 +344,11 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
                addr_shift      = 4;
 
        /*
-       * F16h needs two addr_shift values: 8 for high and 6 for low
-       * (cf. F16h BKDG).
-       */
-       } else if (boot_cpu_data.x86 == 0x16) {
+        * F16h and F15h, models 30h and later need two addr_shift values:
+        * 8 for high and 6 for low (cf. F16h BKDG).
+        */
+       } else if (pvt->fam == 0x16 ||
+                 (pvt->fam == 0x15 && pvt->model >= 0x30)) {
                csbase          = pvt->csels[dct].csbases[csrow];
                csmask          = pvt->csels[dct].csmasks[csrow >> 1];
 
@@ -367,7 +369,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
                csmask          = pvt->csels[dct].csmasks[csrow >> 1];
                addr_shift      = 8;
 
-               if (boot_cpu_data.x86 == 0x15)
+               if (pvt->fam == 0x15)
                        base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
                else
                        base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
@@ -447,14 +449,14 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
        struct amd64_pvt *pvt = mci->pvt_info;
 
        /* only revE and later have the DRAM Hole Address Register */
-       if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
+       if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
                edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
                         pvt->ext_model, pvt->mc_node_id);
                return 1;
        }
 
        /* valid for Fam10h and above */
-       if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
+       if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
                edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
                return 1;
        }
@@ -486,10 +488,8 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
        *hole_base = dhar_base(pvt);
        *hole_size = (1ULL << 32) - *hole_base;
 
-       if (boot_cpu_data.x86 > 0xf)
-               *hole_offset = f10_dhar_offset(pvt);
-       else
-               *hole_offset = k8_dhar_offset(pvt);
+       *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
+                                       : k8_dhar_offset(pvt);
 
        edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
                 pvt->mc_node_id, (unsigned long)*hole_base,
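The hunk above only folds the per-family offset choice into a ternary, but the surrounding arithmetic is worth spelling out: the DRAM hole always ends at 4 GB, so its size is 4 GB minus the base read from DHAR, and the hoisting offset comes from the family-specific helper the ternary picks. A tiny sketch with a made-up base value (the real one comes from dhar_base()):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical base; the driver reads it from DHAR via dhar_base(). */
            uint64_t hole_base = 0xc0000000ULL;
            uint64_t hole_size = (1ULL << 32) - hole_base;  /* same formula as above */

            printf("hole base 0x%llx, size 0x%llx\n",
                   (unsigned long long)hole_base, (unsigned long long)hole_size);
            return 0;
    }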
@@ -663,7 +663,7 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
        u8 bit;
        unsigned long edac_cap = EDAC_FLAG_NONE;
 
-       bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
+       bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
                ? 19
                : 17;
 
@@ -675,7 +675,7 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
 
 static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
 
-static void amd64_dump_dramcfg_low(u32 dclr, int chan)
+static void amd64_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
 {
        edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
 
@@ -686,7 +686,7 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
        edac_dbg(1, "  PAR/ERR parity: %s\n",
                 (dclr & BIT(8)) ?  "enabled" : "disabled");
 
-       if (boot_cpu_data.x86 == 0x10)
+       if (pvt->fam == 0x10)
                edac_dbg(1, "  DCT 128bit mode width: %s\n",
                         (dclr & BIT(11)) ?  "128b" : "64b");
 
@@ -709,21 +709,21 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
                 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
                 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
 
-       amd64_dump_dramcfg_low(pvt->dclr0, 0);
+       amd64_dump_dramcfg_low(pvt, pvt->dclr0, 0);
 
        edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
 
        edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
                 pvt->dhar, dhar_base(pvt),
-                (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
-                : f10_dhar_offset(pvt));
+                (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
+                                  : f10_dhar_offset(pvt));
 
        edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
 
        amd64_debug_display_dimm_sizes(pvt, 0);
 
        /* everything below this point is Fam10h and above */
-       if (boot_cpu_data.x86 == 0xf)
+       if (pvt->fam == 0xf)
                return;
 
        amd64_debug_display_dimm_sizes(pvt, 1);
@@ -732,17 +732,20 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
 
        /* Only if NOT ganged does dclr1 have valid info */
        if (!dct_ganging_enabled(pvt))
-               amd64_dump_dramcfg_low(pvt->dclr1, 1);
+               amd64_dump_dramcfg_low(pvt, pvt->dclr1, 1);
 }
 
 /*
- * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
+ * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
  */
 static void prep_chip_selects(struct amd64_pvt *pvt)
 {
-       if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+       if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
                pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
                pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
+       } else if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+               pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
+               pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
        } else {
                pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
                pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
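For reference, the per-family chip-select register counts set up in prep_chip_selects() above, in one place: K8 parts older than rev F expose 8 base and 8 mask registers per DCT, F15h models 30h and later expose 4 bases and 2 masks, and everything else 8 bases and 4 masks. A small hypothetical helper restating that table (the struct and function names are mine, and the K8 rev-F test is reduced to a flag):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical restatement of prep_chip_selects(); names are mine. */
    struct cs_counts { uint8_t bases, masks; };

    static struct cs_counts chip_select_counts(uint8_t fam, uint8_t model, bool k8_pre_rev_f)
    {
            if (fam == 0xf && k8_pre_rev_f)
                    return (struct cs_counts){ .bases = 8, .masks = 8 }; /* K8 < rev F */
            if (fam == 0x15 && model >= 0x30)
                    return (struct cs_counts){ .bases = 4, .masks = 2 }; /* F15h M30h+ */
            return (struct cs_counts){ .bases = 8, .masks = 4 };         /* all others */
    }

    int main(void)
    {
            struct cs_counts c = chip_select_counts(0x15, 0x30, false);

            /* Prints "4 bases, 2 masks per DCT". */
            printf("%u bases, %u masks per DCT\n", (unsigned)c.bases, (unsigned)c.masks);
            return 0;
    }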
@@ -768,7 +771,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
                        edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
                                 cs, *base0, reg0);
 
-               if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+               if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
                        continue;
 
                if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
@@ -786,7 +789,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
                        edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
                                 cs, *mask0, reg0);
 
-               if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+               if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
                        continue;
 
                if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
@@ -800,9 +803,9 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
        enum mem_type type;
 
        /* F15h supports only DDR3 */
-       if (boot_cpu_data.x86 >= 0x15)
+       if (pvt->fam >= 0x15)
                type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
-       else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
+       else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) {
                if (pvt->dchr0 & DDR3_MODE)
                        type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
                else
@@ -835,14 +838,13 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 }
 
 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
-static u64 get_error_address(struct mce *m)
+static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
 {
-       struct cpuinfo_x86 *c = &boot_cpu_data;
        u64 addr;
        u8 start_bit = 1;
        u8 end_bit   = 47;
 
-       if (c->x86 == 0xf) {
+       if (pvt->fam == 0xf) {
                start_bit = 3;
                end_bit   = 39;
        }
@@ -852,7 +854,7 @@ static u64 get_error_address(struct mce *m)
        /*
         * Erratum 637 workaround
         */
-       if (c->x86 == 0x15) {
+       if (pvt->fam == 0x15) {
                struct amd64_pvt *pvt;
                u64 cc6_base, tmp_addr;
                u32 tmp;
@@ -916,15 +918,15 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
 {
        struct amd_northbridge *nb;
-       struct pci_dev *misc, *f1 = NULL;
-       struct cpuinfo_x86 *c = &boot_cpu_data;
+       struct pci_dev *f1 = NULL;
+       unsigned int pci_func;
        int off = range << 3;
        u32 llim;
 
        amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
        amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
 
-       if (c->x86 == 0xf)
+       if (pvt->fam == 0xf)
                return;
 
        if (!dram_rw(pvt, range))
@@ -934,15 +936,17 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
        amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
 
        /* F15h: factor in CC6 save area by reading dst node's limit reg */
-       if (c->x86 != 0x15)
+       if (pvt->fam != 0x15)
                return;
 
        nb = node_to_amd_nb(dram_dst_node(pvt, range));
        if (WARN_ON(!nb))
                return;
 
-       misc = nb->misc;
-       f1 = pci_get_related_function(misc->vendor, PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
+       pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
+                                       : PCI_DEVICE_ID_AMD_15H_NB_F1;
+
+       f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
        if (WARN_ON(!f1))
                return;
 
@@ -1089,7 +1093,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
        int i, j, channels = 0;
 
        /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
-       if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
+       if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
                return 2;
 
        /*
@@ -1173,7 +1177,7 @@ static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
 }
 
 /*
- * F16h has only limited cs_modes
+ * F16h and F15h model 30h have only limited cs_modes.
  */
 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
                                unsigned cs_mode)
@@ -1190,7 +1194,7 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
 static void read_dram_ctl_register(struct amd64_pvt *pvt)
 {
 
-       if (boot_cpu_data.x86 == 0xf)
+       if (pvt->fam == 0xf)
                return;
 
        if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
@@ -1217,6 +1221,29 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
        amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
 }
 
+/*
+ * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
+ * 2.10.12 Memory Interleaving Modes).
+ */
+static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+                                    u8 intlv_en, int num_dcts_intlv,
+                                    u32 dct_sel)
+{
+       u8 channel = 0;
+       u8 select;
+
+       if (!(intlv_en))
+               return (u8)(dct_sel);
+
+       if (num_dcts_intlv == 2) {
+               select = (sys_addr >> 8) & 0x3;
+               channel = select ? 0x3 : 0;
+       } else if (num_dcts_intlv == 4)
+               channel = (sys_addr >> 8) & 0x7;
+
+       return channel;
+}
+
 /*
  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
  * Interleaving Modes.
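A worked example of the channel selection added in f15_m30h_determine_channel() above: with no interleaving the DctSel field decides; with two DCTs interleaved, a non-zero value in address bits [9:8] selects DCT 3 and zero selects DCT 0 (the two functional DCTs on these parts); with four DCTs, bits [10:8] give the channel directly. A standalone restatement, derived from the hunk; the wrapper and main() are mine, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    /* Standalone restatement of f15_m30h_determine_channel(); wrapper name is mine. */
    static uint8_t m30h_channel(uint64_t sys_addr, uint8_t intlv_en,
                                int num_dcts_intlv, uint32_t dct_sel)
    {
            if (!intlv_en)
                    return (uint8_t)dct_sel;        /* no interleave: DctSel decides */

            if (num_dcts_intlv == 2)                /* bits [9:8]: 0 -> DCT0, else DCT3 */
                    return ((sys_addr >> 8) & 0x3) ? 0x3 : 0;

            if (num_dcts_intlv == 4)                /* bits [10:8] give the channel */
                    return (sys_addr >> 8) & 0x7;

            return 0;
    }

    int main(void)
    {
            /* 2-way interleave: bit 8 set -> DCT3, clear -> DCT0. Prints "3 0". */
            printf("%u %u\n", (unsigned)m30h_channel(0x1100, 1, 2, 0),
                              (unsigned)m30h_channel(0x1000, 1, 2, 0));
            return 0;
    }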
@@ -1366,6 +1393,10 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
                         (in_addr & cs_mask), (cs_base & cs_mask));
 
                if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
+                       if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+                               cs_found =  csrow;
+                               break;
+                       }
                        cs_found = f10_process_possible_spare(pvt, dct, csrow);
 
                        edac_dbg(1, " MATCH csrow=%d\n", cs_found);
@@ -1384,11 +1415,9 @@ static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
 {
        u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
 
-       if (boot_cpu_data.x86 == 0x10) {
+       if (pvt->fam == 0x10) {
                /* only revC3 and revE have that feature */
-               if (boot_cpu_data.x86_model < 4 ||
-                   (boot_cpu_data.x86_model < 0xa &&
-                    boot_cpu_data.x86_mask < 3))
+               if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
                        return sys_addr;
        }
 
@@ -1492,20 +1521,143 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
        return cs_found;
 }
 
-static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
-                                      int *chan_sel)
+static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
+                                       u64 sys_addr, int *chan_sel)
+{
+       int cs_found = -EINVAL;
+       int num_dcts_intlv = 0;
+       u64 chan_addr, chan_offset;
+       u64 dct_base, dct_limit;
+       u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
+       u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
+
+       u64 dhar_offset         = f10_dhar_offset(pvt);
+       u8 intlv_addr           = dct_sel_interleave_addr(pvt);
+       u8 node_id              = dram_dst_node(pvt, range);
+       u8 intlv_en             = dram_intlv_en(pvt, range);
+
+       amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
+       amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
+
+       dct_offset_en           = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
+       dct_sel                 = (u8) ((dct_cont_base_reg >> 4) & 0x7);
+
+       edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
+                range, sys_addr, get_dram_limit(pvt, range));
+
+       if (!(get_dram_base(pvt, range)  <= sys_addr) &&
+           !(get_dram_limit(pvt, range) >= sys_addr))
+               return -EINVAL;
+
+       if (dhar_valid(pvt) &&
+           dhar_base(pvt) <= sys_addr &&
+           sys_addr < BIT_64(32)) {
+               amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
+                           sys_addr);
+               return -EINVAL;
+       }
+
+       /* Verify sys_addr is within DCT Range. */
+       dct_base = (u64) dct_sel_baseaddr(pvt);
+       dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
+
+       if (!(dct_cont_base_reg & BIT(0)) &&
+           !(dct_base <= (sys_addr >> 27) &&
+             dct_limit >= (sys_addr >> 27)))
+               return -EINVAL;
+
+       /* Verify the number of DCTs that participate in channel interleaving. */
+       num_dcts_intlv = (int) hweight8(intlv_en);
+
+       if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
+               return -EINVAL;
+
+       channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
+                                            num_dcts_intlv, dct_sel);
+
+       /* Verify we stay within the MAX number of channels allowed */
+       if (channel > 4 || channel < 0)
+               return -EINVAL;
+
+       leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
+
+       /* Get normalized DCT addr */
+       if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
+               chan_offset = dhar_offset;
+       else
+               chan_offset = dct_base << 27;
+
+       chan_addr = sys_addr - chan_offset;
+
+       /* remove channel interleave */
+       if (num_dcts_intlv == 2) {
+               if (intlv_addr == 0x4)
+                       chan_addr = ((chan_addr >> 9) << 8) |
+                                               (chan_addr & 0xff);
+               else if (intlv_addr == 0x5)
+                       chan_addr = ((chan_addr >> 10) << 9) |
+                                               (chan_addr & 0x1ff);
+               else
+                       return -EINVAL;
+
+       } else if (num_dcts_intlv == 4) {
+               if (intlv_addr == 0x4)
+                       chan_addr = ((chan_addr >> 10) << 8) |
+                                                       (chan_addr & 0xff);
+               else if (intlv_addr == 0x5)
+                       chan_addr = ((chan_addr >> 11) << 9) |
+                                                       (chan_addr & 0x1ff);
+               else
+                       return -EINVAL;
+       }
+
+       if (dct_offset_en) {
+               amd64_read_pci_cfg(pvt->F1,
+                                  DRAM_CONT_HIGH_OFF + (int) channel * 4,
+                                  &tmp);
+               chan_addr +=  (u64) ((tmp >> 11) & 0xfff) << 27;
+       }
+
+       f15h_select_dct(pvt, channel);
+
+       edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);
+
+       /*
+        * Find the chip select:
+        * If channel == 3, alias it to 1. F15h M30h supports 4 DCTs, but only
+        * two (DCT0 and DCT3) are currently functional, and all of DCT3's
+        * registers have been read into pvt->csels[1], so use '1' here to get
+        * the correct info.
+        * Refer to the F15h M30h BKDG, Sections 2.10 and 2.10.3.
+        */
+       alias_channel =  (channel == 3) ? 1 : channel;
+
+       cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
+
+       if (cs_found >= 0)
+               *chan_sel = alias_channel;
+
+       return cs_found;
+}
+
+static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
+                                       u64 sys_addr,
+                                       int *chan_sel)
 {
        int cs_found = -EINVAL;
        unsigned range;
 
        for (range = 0; range < DRAM_RANGES; range++) {
-
                if (!dram_rw(pvt, range))
                        continue;
 
-               if ((get_dram_base(pvt, range)  <= sys_addr) &&
-                   (get_dram_limit(pvt, range) >= sys_addr)) {
+               if (pvt->fam == 0x15 && pvt->model >= 0x30)
+                       cs_found = f15_m30h_match_to_this_node(pvt, range,
+                                                              sys_addr,
+                                                              chan_sel);
 
+               else if ((get_dram_base(pvt, range)  <= sys_addr) &&
+                        (get_dram_limit(pvt, range) >= sys_addr)) {
                        cs_found = f1x_match_to_this_node(pvt, range,
                                                          sys_addr, chan_sel);
                        if (cs_found >= 0)
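To make the interleave-removal arithmetic in f15_m30h_match_to_this_node() above concrete: once the low address bits have chosen a DCT, those selector bits are squeezed back out of chan_addr so each DCT sees a dense, normalized address space; e.g. for two interleaved DCTs with intlv_addr == 0x4, bit 8 is dropped and everything above it shifts down by one. A minimal sketch of just that step, with my own function name and a made-up address:

    #include <stdint.h>
    #include <stdio.h>

    /* Drop the DCT-select bits out of chan_addr, as done after channel selection above. */
    static uint64_t remove_dct_interleave(uint64_t chan_addr, int num_dcts_intlv,
                                          uint8_t intlv_addr)
    {
            if (num_dcts_intlv == 2) {
                    if (intlv_addr == 0x4)          /* squeeze out bit 8 */
                            return ((chan_addr >> 9) << 8) | (chan_addr & 0xff);
                    if (intlv_addr == 0x5)          /* squeeze out bit 9 */
                            return ((chan_addr >> 10) << 9) | (chan_addr & 0x1ff);
            } else if (num_dcts_intlv == 4) {
                    if (intlv_addr == 0x4)          /* squeeze out bits [9:8] */
                            return ((chan_addr >> 10) << 8) | (chan_addr & 0xff);
                    if (intlv_addr == 0x5)          /* squeeze out bits [10:9] */
                            return ((chan_addr >> 11) << 9) | (chan_addr & 0x1ff);
            }
            return chan_addr;
    }

    int main(void)
    {
            /* 2 DCTs, intlv_addr 0x4: 0x1234 -> (0x9 << 8) | 0x34 = 0x934. */
            printf("0x%llx\n",
                   (unsigned long long)remove_dct_interleave(0x1234, 2, 0x4));
            return 0;
    }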
@@ -1554,7 +1706,7 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
        u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
        u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
 
-       if (boot_cpu_data.x86 == 0xf) {
+       if (pvt->fam == 0xf) {
                /* K8 families < revF not supported yet */
               if (pvt->ext_model < K8_REV_F)
                        return;
@@ -1624,6 +1776,17 @@ static struct amd64_family_type amd64_family_types[] = {
                        .read_dct_pci_cfg       = f15_read_dct_pci_cfg,
                }
        },
+       [F15_M30H_CPUS] = {
+               .ctl_name = "F15h_M30h",
+               .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
+               .f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
+               .ops = {
+                       .early_channel_count    = f1x_early_channel_count,
+                       .map_sysaddr_to_csrow   = f1x_map_sysaddr_to_csrow,
+                       .dbam_to_cs             = f16_dbam_to_chip_select,
+                       .read_dct_pci_cfg       = f15_read_dct_pci_cfg,
+               }
+       },
        [F16_CPUS] = {
                .ctl_name = "F16h",
                .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
@@ -1860,7 +2023,7 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
 
        memset(&err, 0, sizeof(err));
 
-       sys_addr = get_error_address(m);
+       sys_addr = get_error_address(pvt, m);
 
        if (ecc_type == 2)
                err.syndrome = extract_syndrome(m->status);
@@ -1921,10 +2084,9 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
  */
 static void read_mc_regs(struct amd64_pvt *pvt)
 {
-       struct cpuinfo_x86 *c = &boot_cpu_data;
+       unsigned range;
        u64 msr_val;
        u32 tmp;
-       unsigned range;
 
        /*
         * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
@@ -1985,14 +2147,14 @@ static void read_mc_regs(struct amd64_pvt *pvt)
 
        pvt->ecc_sym_sz = 4;
 
-       if (c->x86 >= 0x10) {
+       if (pvt->fam >= 0x10) {
                amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
-               if (c->x86 != 0x16)
+               if (pvt->fam != 0x16)
                        /* F16h has only DCT0 */
                        amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
 
                /* F10h, revD and later can do x8 ECC too */
-               if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
+               if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
                        pvt->ecc_sym_sz = 8;
        }
        dump_misc_regs(pvt);
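Restating the ECC symbol-size selection in read_mc_regs() above: families 10h and newer read EXT_NB_MCA_CFG, and any part newer than F10h, or F10h from revision D (model 8) onward, uses x8 symbols when bit 25 of that register is set; otherwise the driver keeps the default of x4. A compact hypothetical restatement (the function name and the register value passed in are assumptions):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical restatement of the ecc_sym_sz selection above; name is mine. */
    static int ecc_symbol_size(uint8_t fam, uint8_t model, uint32_t ext_nb_mca_cfg)
    {
            /* F10h rev D (model 8) and later, and all newer families, can do x8 symbols. */
            if (fam >= 0x10 && (fam > 0x10 || model > 7) && (ext_nb_mca_cfg & (1u << 25)))
                    return 8;
            return 4;       /* default: x4 symbols */
    }

    int main(void)
    {
            printf("%d\n", ecc_symbol_size(0x15, 0x30, 1u << 25));  /* prints 8 */
            return 0;
    }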
@@ -2086,7 +2248,7 @@ static int init_csrows(struct mem_ctl_info *mci)
                bool row_dct0 = !!csrow_enabled(i, 0, pvt);
                bool row_dct1 = false;
 
-               if (boot_cpu_data.x86 != 0xf)
+               if (pvt->fam != 0xf)
                        row_dct1 = !!csrow_enabled(i, 1, pvt);
 
                if (!row_dct0 && !row_dct1)
@@ -2104,7 +2266,7 @@ static int init_csrows(struct mem_ctl_info *mci)
                }
 
                /* K8 has only one DCT */
-               if (boot_cpu_data.x86 != 0xf && row_dct1) {
+               if (pvt->fam != 0xf && row_dct1) {
                        int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i);
 
                        csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
@@ -2333,13 +2495,14 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
 
 static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
 {
+       struct amd64_pvt *pvt = mci->pvt_info;
        int rc;
 
        rc = amd64_create_sysfs_dbg_files(mci);
        if (rc < 0)
                return rc;
 
-       if (boot_cpu_data.x86 >= 0x10) {
+       if (pvt->fam >= 0x10) {
                rc = amd64_create_sysfs_inject_files(mci);
                if (rc < 0)
                        return rc;
@@ -2350,9 +2513,11 @@ static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
 
 static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
 {
+       struct amd64_pvt *pvt = mci->pvt_info;
+
        amd64_remove_sysfs_dbg_files(mci);
 
-       if (boot_cpu_data.x86 >= 0x10)
+       if (pvt->fam >= 0x10)
                amd64_remove_sysfs_inject_files(mci);
 }
 
@@ -2387,10 +2552,14 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
  */
 static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
 {
-       u8 fam = boot_cpu_data.x86;
        struct amd64_family_type *fam_type = NULL;
 
-       switch (fam) {
+       pvt->ext_model  = boot_cpu_data.x86_model >> 4;
+       pvt->stepping   = boot_cpu_data.x86_mask;
+       pvt->model      = boot_cpu_data.x86_model;
+       pvt->fam        = boot_cpu_data.x86;
+
+       switch (pvt->fam) {
        case 0xf:
                fam_type                = &amd64_family_types[K8_CPUS];
                pvt->ops                = &amd64_family_types[K8_CPUS].ops;
@@ -2402,6 +2571,12 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
                break;
 
        case 0x15:
+               if (pvt->model == 0x30) {
+                       fam_type        = &amd64_family_types[F15_M30H_CPUS];
+                       pvt->ops        = &amd64_family_types[F15_M30H_CPUS].ops;
+                       break;
+               }
+
                fam_type                = &amd64_family_types[F15_CPUS];
                pvt->ops                = &amd64_family_types[F15_CPUS].ops;
                break;
@@ -2416,10 +2591,8 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
                return NULL;
        }
 
-       pvt->ext_model = boot_cpu_data.x86_model >> 4;
-
        amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
-                    (fam == 0xf ?
+                    (pvt->fam == 0xf ?
                                (pvt->ext_model >= K8_REV_F  ? "revF or later "
                                                             : "revE or earlier ")
                                 : ""), pvt->mc_node_id);
@@ -2470,8 +2643,15 @@ static int amd64_init_one_instance(struct pci_dev *F2)
        layers[0].size = pvt->csels[0].b_cnt;
        layers[0].is_virt_csrow = true;
        layers[1].type = EDAC_MC_LAYER_CHANNEL;
-       layers[1].size = pvt->channel_count;
+
+       /*
+        * Always allocate two channels since we can have setups with DIMMs on
+        * only one channel. Also, this simplifies handling later for the price
+        * of a couple of KBs tops.
+        */
+       layers[1].size = 2;
        layers[1].is_virt_csrow = false;
+
        mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
        if (!mci)
                goto err_siblings;
@@ -2579,6 +2759,8 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
        struct ecc_settings *s = ecc_stngs[nid];
 
        mci = find_mci_by_dev(&pdev->dev);
+       WARN_ON(!mci);
+
        del_mc_sysfs_attrs(mci);
        /* Remove from EDAC CORE tracking list */
        mci = edac_mc_del_mc(&pdev->dev);
@@ -2636,6 +2818,14 @@ static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
                .class          = 0,
                .class_mask     = 0,
        },
+       {
+               .vendor         = PCI_VENDOR_ID_AMD,
+               .device         = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .class          = 0,
+               .class_mask     = 0,
+       },
        {
                .vendor         = PCI_VENDOR_ID_AMD,
                .device         = PCI_DEVICE_ID_AMD_16H_NB_F2,