cassini/niu/sun*: Move the Sun drivers
author: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Fri, 13 May 2011 06:04:46 +0000 (23:04 -0700)
committer: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Thu, 11 Aug 2011 09:33:43 +0000 (02:33 -0700)
Moves the Sun drivers into drivers/net/ethernet/sun/ and makes
the necessary Kconfig and Makefile changes.

Oliver Hartkopp <socketcan@hartkopp.net> suggested removing the
sun* prefix on the driver names.  This type of change I will
leave up to the driver maintainers.

CC: Sam Creasey <sammy@sammy.net>
CC: Adrian Sun <asun@darksunrising.com>
CC: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
40 files changed:
MAINTAINERS
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/cassini.c [deleted file]
drivers/net/cassini.h [deleted file]
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/sun/Kconfig [new file with mode: 0644]
drivers/net/ethernet/sun/Makefile [new file with mode: 0644]
drivers/net/ethernet/sun/cassini.c [new file with mode: 0644]
drivers/net/ethernet/sun/cassini.h [new file with mode: 0644]
drivers/net/ethernet/sun/niu.c [new file with mode: 0644]
drivers/net/ethernet/sun/niu.h [new file with mode: 0644]
drivers/net/ethernet/sun/sunbmac.c [new file with mode: 0644]
drivers/net/ethernet/sun/sunbmac.h [new file with mode: 0644]
drivers/net/ethernet/sun/sungem.c [new file with mode: 0644]
drivers/net/ethernet/sun/sungem.h [new file with mode: 0644]
drivers/net/ethernet/sun/sungem_phy.c [new file with mode: 0644]
drivers/net/ethernet/sun/sungem_phy.h [new file with mode: 0644]
drivers/net/ethernet/sun/sunhme.c [new file with mode: 0644]
drivers/net/ethernet/sun/sunhme.h [new file with mode: 0644]
drivers/net/ethernet/sun/sunqe.c [new file with mode: 0644]
drivers/net/ethernet/sun/sunqe.h [new file with mode: 0644]
drivers/net/ethernet/sun/sunvnet.c [new file with mode: 0644]
drivers/net/ethernet/sun/sunvnet.h [new file with mode: 0644]
drivers/net/niu.c [deleted file]
drivers/net/niu.h [deleted file]
drivers/net/spider_net.h
drivers/net/sunbmac.c [deleted file]
drivers/net/sunbmac.h [deleted file]
drivers/net/sungem.c [deleted file]
drivers/net/sungem.h [deleted file]
drivers/net/sungem_phy.c [deleted file]
drivers/net/sungem_phy.h [deleted file]
drivers/net/sunhme.c [deleted file]
drivers/net/sunhme.h [deleted file]
drivers/net/sunqe.c [deleted file]
drivers/net/sunqe.h [deleted file]
drivers/net/sunvnet.c [deleted file]
drivers/net/sunvnet.h [deleted file]

index 19bd60deb13505ccd6bc110ad6b26e70cd37b4c6..1cb72da76e043bfa38136668ebc6fa79ca739fe4 100644 (file)
@@ -6272,6 +6272,7 @@ S:        Maintained
 F:     arch/m68k/kernel/*sun3*
 F:     arch/m68k/sun3*/
 F:     arch/m68k/include/asm/sun3*
+F:     drivers/net/ethernet/i825xx/sun3*
 
 SUPERH
 M:     Paul Mundt <lethal@linux-sh.org>
index b76de822b52fdd0a8b604255f587cc5b7a6f635e..7977002fc81db3470fb2b991c7132c80026df193 100644 (file)
@@ -363,64 +363,6 @@ config SH_ETH
          This driver supporting CPUs are:
                - SH7710, SH7712, SH7763, SH7619, SH7724, and SH7757.
 
-config HAPPYMEAL
-       tristate "Sun Happy Meal 10/100baseT support"
-       depends on SBUS || PCI
-       select CRC32
-       help
-         This driver supports the "hme" interface present on most Ultra
-         systems and as an option on older Sbus systems. This driver supports
-         both PCI and Sbus devices. This driver also supports the "qfe" quad
-         100baseT device available in both PCI and Sbus configurations.
-
-         To compile this driver as a module, choose M here: the module
-         will be called sunhme.
-
-config SUNBMAC
-       tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)"
-       depends on SBUS && EXPERIMENTAL
-       select CRC32
-       help
-         This driver supports the "be" interface available as an Sbus option.
-         This is Sun's older 100baseT Ethernet device.
-
-         To compile this driver as a module, choose M here: the module
-         will be called sunbmac.
-
-config SUNQE
-       tristate "Sun QuadEthernet support"
-       depends on SBUS
-       select CRC32
-       help
-         This driver supports the "qe" 10baseT Ethernet device, available as
-         an Sbus option. Note that this is not the same as Quad FastEthernet
-         "qfe" which is supported by the Happy Meal driver instead.
-
-         To compile this driver as a module, choose M here: the module
-         will be called sunqe.
-
-config SUNGEM
-       tristate "Sun GEM support"
-       depends on PCI
-       select CRC32
-       help
-         Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0.  See also
-         <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.
-
-config CASSINI
-       tristate "Sun Cassini support"
-       depends on PCI
-       select CRC32
-       help
-         Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
-         <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
-
-config SUNVNET
-       tristate "Sun Virtual Network support"
-       depends on SUN_LDOMS
-       help
-         Support for virtual network devices under Sun Logical Domains.
-
 config BFIN_MAC
        tristate "Blackfin on-chip MAC support"
        depends on NET_ETHERNET && (BF516 || BF518 || BF526 || BF527 || BF536 || BF537)
@@ -1603,14 +1545,6 @@ config MYRI10GE_DCA
          driver.  DCA is a method for warming the CPU cache before data
          is used, with the intent of lessening the impact of cache misses.
 
-config NIU
-       tristate "Sun Neptune 10Gbit Ethernet support"
-       depends on PCI
-       select CRC32
-       help
-         This enables support for cards based upon Sun's
-         Neptune chipset.
-
 config PASEMI_MAC
        tristate "PA Semi 1/10Gbit MAC"
        depends on PPC_PASEMI && PCI && INET
index e641f703bbad8e859bca3014a079b45a1cad8ba5..cfbb060c0c69a00114654648ef83673ee83e37a2 100644 (file)
@@ -44,13 +44,6 @@ obj-$(CONFIG_PLIP) += plip.o
 
 obj-$(CONFIG_ROADRUNNER) += rrunner.o
 
-obj-$(CONFIG_HAPPYMEAL) += sunhme.o
-obj-$(CONFIG_SUNQE) += sunqe.o
-obj-$(CONFIG_SUNBMAC) += sunbmac.o
-obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o
-obj-$(CONFIG_CASSINI) += cassini.o
-obj-$(CONFIG_SUNVNET) += sunvnet.o
-
 obj-$(CONFIG_MACE) += mace.o
 obj-$(CONFIG_BMAC) += bmac.o
 
@@ -64,7 +57,7 @@ obj-$(CONFIG_NATSEMI) += natsemi.o
 obj-$(CONFIG_NS83820) += ns83820.o
 obj-$(CONFIG_FEALNX) += fealnx.o
 spidernet-y += spider_net.o spider_net_ethtool.o
-obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o
+obj-$(CONFIG_SPIDER_NET) += spidernet.o ethernet/sun/sungem_phy.o
 obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
 gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
 ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
@@ -207,7 +200,6 @@ obj-$(CONFIG_NETCONSOLE) += netconsole.o
 
 obj-$(CONFIG_FS_ENET) += fs_enet/
 
-obj-$(CONFIG_NIU) += niu.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_SFC) += sfc/
 
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
deleted file mode 100644 (file)
index 646c86b..0000000
+++ /dev/null
@@ -1,5305 +0,0 @@
-/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
- *
- * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- * 02111-1307, USA.
- *
- * This driver uses the sungem driver (c) David Miller
- * (davem@redhat.com) as its basis.
- *
- * The cassini chip has a number of features that distinguish it from
- * the gem chip:
- *  4 transmit descriptor rings that are used for either QoS (VLAN) or
- *      load balancing (non-VLAN mode)
- *  batching of multiple packets
- *  multiple CPU dispatching
- *  page-based RX descriptor engine with separate completion rings
- *  Gigabit support (GMII and PCS interface)
- *  MIF link up/down detection works
- *
- * RX is handled by page sized buffers that are attached as fragments to
- * the skb. here's what's done:
- *  -- driver allocates pages at a time and keeps reference counts
- *     on them.
- *  -- the upper protocol layers assume that the header is in the skb
- *     itself. as a result, cassini will copy a small amount (64 bytes)
- *     to make them happy.
- *  -- driver appends the rest of the data pages as frags to skbuffs
- *     and increments the reference count
- *  -- on page reclamation, the driver swaps the page with a spare page.
- *     if that page is still in use, it frees its reference to that page,
- *     and allocates a new page for use. otherwise, it just recycles the
- *     the page.
- *
- * NOTE: cassini can parse the header. however, it's not worth it
- *       as long as the network stack requires a header copy.
- *
- * TX has 4 queues. currently these queues are used in a round-robin
- * fashion for load balancing. They can also be used for QoS. for that
- * to work, however, QoS information needs to be exposed down to the driver
- * level so that subqueues get targeted to particular transmit rings.
- * alternatively, the queues can be configured via use of the all-purpose
- * ioctl.
- *
- * RX DATA: the rx completion ring has all the info, but the rx desc
- * ring has all of the data. RX can conceivably come in under multiple
- * interrupts, but the INT# assignment needs to be set up properly by
- * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
- * that. also, the two descriptor rings are designed to distinguish between
- * encrypted and non-encrypted packets, but we use them for buffering
- * instead.
- *
- * by default, the selective clear mask is set up to process rx packets.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/compiler.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/vmalloc.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/list.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/ethtool.h>
-#include <linux/crc32.h>
-#include <linux/random.h>
-#include <linux/mii.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/mutex.h>
-#include <linux/firmware.h>
-
-#include <net/checksum.h>
-
-#include <linux/atomic.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/byteorder.h>
-#include <asm/uaccess.h>
-
-#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
-#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
-#define CAS_NCPUS            num_online_cpus()
-
-#define cas_skb_release(x)  netif_rx(x)
-
-/* select which firmware to use */
-#define USE_HP_WORKAROUND
-#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
-#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */
-
-#include "cassini.h"
-
-#define USE_TX_COMPWB      /* use completion writeback registers */
-#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
-#define USE_RX_BLANK       /* hw interrupt mitigation */
-#undef USE_ENTROPY_DEV     /* don't test for entropy device */
-
-/* NOTE: these aren't useable unless PCI interrupts can be assigned.
- * also, we need to make cp->lock finer-grained.
- */
-#undef  USE_PCI_INTB
-#undef  USE_PCI_INTC
-#undef  USE_PCI_INTD
-#undef  USE_QOS
-
-#undef  USE_VPD_DEBUG       /* debug vpd information if defined */
-
-/* rx processing options */
-#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
-#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
-#define RX_COPY_ALWAYS 0    /* if 0, use frags */
-#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
-#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
-
-#define DRV_MODULE_NAME                "cassini"
-#define DRV_MODULE_VERSION     "1.6"
-#define DRV_MODULE_RELDATE     "21 May 2008"
-
-#define CAS_DEF_MSG_ENABLE       \
-       (NETIF_MSG_DRV          | \
-        NETIF_MSG_PROBE        | \
-        NETIF_MSG_LINK         | \
-        NETIF_MSG_TIMER        | \
-        NETIF_MSG_IFDOWN       | \
-        NETIF_MSG_IFUP         | \
-        NETIF_MSG_RX_ERR       | \
-        NETIF_MSG_TX_ERR)
-
-/* length of time before we decide the hardware is borked,
- * and dev->tx_timeout() should be called to fix the problem
- */
-#define CAS_TX_TIMEOUT                 (HZ)
-#define CAS_LINK_TIMEOUT                (22*HZ/10)
-#define CAS_LINK_FAST_TIMEOUT           (1)
-
-/* timeout values for state changing. these specify the number
- * of 10us delays to be used before giving up.
- */
-#define STOP_TRIES_PHY 1000
-#define STOP_TRIES     5000
-
-/* specify a minimum frame size to deal with some fifo issues
- * max mtu == 2 * page size - ethernet header - 64 - swivel =
- *            2 * page_size - 0x50
- */
-#define CAS_MIN_FRAME                  97
-#define CAS_1000MB_MIN_FRAME            255
-#define CAS_MIN_MTU                     60
-#define CAS_MAX_MTU                     min(((cp->page_size << 1) - 0x50), 9000)
-
-#if 1
-/*
- * Eliminate these and use separate atomic counters for each, to
- * avoid a race condition.
- */
-#else
-#define CAS_RESET_MTU                   1
-#define CAS_RESET_ALL                   2
-#define CAS_RESET_SPARE                 3
-#endif
-
-static char version[] __devinitdata =
-       DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
-static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
-static int link_mode;
-
-MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
-MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("sun/cassini.bin");
-module_param(cassini_debug, int, 0);
-MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
-module_param(link_mode, int, 0);
-MODULE_PARM_DESC(link_mode, "default link mode");
-
-/*
- * Work around for a PCS bug in which the link goes down due to the chip
- * being confused and never showing a link status of "up."
- */
-#define DEFAULT_LINKDOWN_TIMEOUT 5
-/*
- * Value in seconds, for user input.
- */
-static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
-module_param(linkdown_timeout, int, 0);
-MODULE_PARM_DESC(linkdown_timeout,
-"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
-
-/*
- * value in 'ticks' (units used by jiffies). Set when we init the
- * module because 'HZ' in actually a function call on some flavors of
- * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
- */
-static int link_transition_timeout;
-
-
-
-static u16 link_modes[] __devinitdata = {
-       BMCR_ANENABLE,                   /* 0 : autoneg */
-       0,                               /* 1 : 10bt half duplex */
-       BMCR_SPEED100,                   /* 2 : 100bt half duplex */
-       BMCR_FULLDPLX,                   /* 3 : 10bt full duplex */
-       BMCR_SPEED100|BMCR_FULLDPLX,     /* 4 : 100bt full duplex */
-       CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
-};
-
-static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
-       { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
-
-static void cas_set_link_modes(struct cas *cp);
-
-static inline void cas_lock_tx(struct cas *cp)
-{
-       int i;
-
-       for (i = 0; i < N_TX_RINGS; i++)
-               spin_lock(&cp->tx_lock[i]);
-}
-
-static inline void cas_lock_all(struct cas *cp)
-{
-       spin_lock_irq(&cp->lock);
-       cas_lock_tx(cp);
-}
-
-/* WTZ: QA was finding deadlock problems with the previous
- * versions after long test runs with multiple cards per machine.
- * See if replacing cas_lock_all with safer versions helps. The
- * symptoms QA is reporting match those we'd expect if interrupts
- * aren't being properly restored, and we fixed a previous deadlock
- * with similar symptoms by using save/restore versions in other
- * places.
- */
-#define cas_lock_all_save(cp, flags) \
-do { \
-       struct cas *xxxcp = (cp); \
-       spin_lock_irqsave(&xxxcp->lock, flags); \
-       cas_lock_tx(xxxcp); \
-} while (0)
-
-static inline void cas_unlock_tx(struct cas *cp)
-{
-       int i;
-
-       for (i = N_TX_RINGS; i > 0; i--)
-               spin_unlock(&cp->tx_lock[i - 1]);
-}
-
-static inline void cas_unlock_all(struct cas *cp)
-{
-       cas_unlock_tx(cp);
-       spin_unlock_irq(&cp->lock);
-}
-
-#define cas_unlock_all_restore(cp, flags) \
-do { \
-       struct cas *xxxcp = (cp); \
-       cas_unlock_tx(xxxcp); \
-       spin_unlock_irqrestore(&xxxcp->lock, flags); \
-} while (0)
-
-static void cas_disable_irq(struct cas *cp, const int ring)
-{
-       /* Make sure we won't get any more interrupts */
-       if (ring == 0) {
-               writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
-               return;
-       }
-
-       /* disable completion interrupts and selectively mask */
-       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
-               switch (ring) {
-#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
-#ifdef USE_PCI_INTB
-               case 1:
-#endif
-#ifdef USE_PCI_INTC
-               case 2:
-#endif
-#ifdef USE_PCI_INTD
-               case 3:
-#endif
-                       writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
-                              cp->regs + REG_PLUS_INTRN_MASK(ring));
-                       break;
-#endif
-               default:
-                       writel(INTRN_MASK_CLEAR_ALL, cp->regs +
-                              REG_PLUS_INTRN_MASK(ring));
-                       break;
-               }
-       }
-}
-
-static inline void cas_mask_intr(struct cas *cp)
-{
-       int i;
-
-       for (i = 0; i < N_RX_COMP_RINGS; i++)
-               cas_disable_irq(cp, i);
-}
-
-static void cas_enable_irq(struct cas *cp, const int ring)
-{
-       if (ring == 0) { /* all but TX_DONE */
-               writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
-               return;
-       }
-
-       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
-               switch (ring) {
-#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
-#ifdef USE_PCI_INTB
-               case 1:
-#endif
-#ifdef USE_PCI_INTC
-               case 2:
-#endif
-#ifdef USE_PCI_INTD
-               case 3:
-#endif
-                       writel(INTRN_MASK_RX_EN, cp->regs +
-                              REG_PLUS_INTRN_MASK(ring));
-                       break;
-#endif
-               default:
-                       break;
-               }
-       }
-}
-
-static inline void cas_unmask_intr(struct cas *cp)
-{
-       int i;
-
-       for (i = 0; i < N_RX_COMP_RINGS; i++)
-               cas_enable_irq(cp, i);
-}
-
-static inline void cas_entropy_gather(struct cas *cp)
-{
-#ifdef USE_ENTROPY_DEV
-       if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
-               return;
-
-       batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
-                           readl(cp->regs + REG_ENTROPY_IV),
-                           sizeof(uint64_t)*8);
-#endif
-}
-
-static inline void cas_entropy_reset(struct cas *cp)
-{
-#ifdef USE_ENTROPY_DEV
-       if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
-               return;
-
-       writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
-              cp->regs + REG_BIM_LOCAL_DEV_EN);
-       writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
-       writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
-
-       /* if we read back 0x0, we don't have an entropy device */
-       if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
-               cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
-#endif
-}
-
-/* access to the phy. the following assumes that we've initialized the MIF to
- * be in frame rather than bit-bang mode
- */
-static u16 cas_phy_read(struct cas *cp, int reg)
-{
-       u32 cmd;
-       int limit = STOP_TRIES_PHY;
-
-       cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
-       cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
-       cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
-       cmd |= MIF_FRAME_TURN_AROUND_MSB;
-       writel(cmd, cp->regs + REG_MIF_FRAME);
-
-       /* poll for completion */
-       while (limit-- > 0) {
-               udelay(10);
-               cmd = readl(cp->regs + REG_MIF_FRAME);
-               if (cmd & MIF_FRAME_TURN_AROUND_LSB)
-                       return cmd & MIF_FRAME_DATA_MASK;
-       }
-       return 0xFFFF; /* -1 */
-}
-
-static int cas_phy_write(struct cas *cp, int reg, u16 val)
-{
-       int limit = STOP_TRIES_PHY;
-       u32 cmd;
-
-       cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
-       cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
-       cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
-       cmd |= MIF_FRAME_TURN_AROUND_MSB;
-       cmd |= val & MIF_FRAME_DATA_MASK;
-       writel(cmd, cp->regs + REG_MIF_FRAME);
-
-       /* poll for completion */
-       while (limit-- > 0) {
-               udelay(10);
-               cmd = readl(cp->regs + REG_MIF_FRAME);
-               if (cmd & MIF_FRAME_TURN_AROUND_LSB)
-                       return 0;
-       }
-       return -1;
-}
-
-static void cas_phy_powerup(struct cas *cp)
-{
-       u16 ctl = cas_phy_read(cp, MII_BMCR);
-
-       if ((ctl & BMCR_PDOWN) == 0)
-               return;
-       ctl &= ~BMCR_PDOWN;
-       cas_phy_write(cp, MII_BMCR, ctl);
-}
-
-static void cas_phy_powerdown(struct cas *cp)
-{
-       u16 ctl = cas_phy_read(cp, MII_BMCR);
-
-       if (ctl & BMCR_PDOWN)
-               return;
-       ctl |= BMCR_PDOWN;
-       cas_phy_write(cp, MII_BMCR, ctl);
-}
-
-/* cp->lock held. note: the last put_page will free the buffer */
-static int cas_page_free(struct cas *cp, cas_page_t *page)
-{
-       pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
-                      PCI_DMA_FROMDEVICE);
-       __free_pages(page->buffer, cp->page_order);
-       kfree(page);
-       return 0;
-}
-
-#ifdef RX_COUNT_BUFFERS
-#define RX_USED_ADD(x, y)       ((x)->used += (y))
-#define RX_USED_SET(x, y)       ((x)->used  = (y))
-#else
-#define RX_USED_ADD(x, y)
-#define RX_USED_SET(x, y)
-#endif
-
-/* local page allocation routines for the receive buffers. jumbo pages
- * require at least 8K contiguous and 8K aligned buffers.
- */
-static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
-{
-       cas_page_t *page;
-
-       page = kmalloc(sizeof(cas_page_t), flags);
-       if (!page)
-               return NULL;
-
-       INIT_LIST_HEAD(&page->list);
-       RX_USED_SET(page, 0);
-       page->buffer = alloc_pages(flags, cp->page_order);
-       if (!page->buffer)
-               goto page_err;
-       page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
-                                     cp->page_size, PCI_DMA_FROMDEVICE);
-       return page;
-
-page_err:
-       kfree(page);
-       return NULL;
-}
-
-/* initialize spare pool of rx buffers, but allocate during the open */
-static void cas_spare_init(struct cas *cp)
-{
-       spin_lock(&cp->rx_inuse_lock);
-       INIT_LIST_HEAD(&cp->rx_inuse_list);
-       spin_unlock(&cp->rx_inuse_lock);
-
-       spin_lock(&cp->rx_spare_lock);
-       INIT_LIST_HEAD(&cp->rx_spare_list);
-       cp->rx_spares_needed = RX_SPARE_COUNT;
-       spin_unlock(&cp->rx_spare_lock);
-}
-
-/* used on close. free all the spare buffers. */
-static void cas_spare_free(struct cas *cp)
-{
-       struct list_head list, *elem, *tmp;
-
-       /* free spare buffers */
-       INIT_LIST_HEAD(&list);
-       spin_lock(&cp->rx_spare_lock);
-       list_splice_init(&cp->rx_spare_list, &list);
-       spin_unlock(&cp->rx_spare_lock);
-       list_for_each_safe(elem, tmp, &list) {
-               cas_page_free(cp, list_entry(elem, cas_page_t, list));
-       }
-
-       INIT_LIST_HEAD(&list);
-#if 1
-       /*
-        * Looks like Adrian had protected this with a different
-        * lock than used everywhere else to manipulate this list.
-        */
-       spin_lock(&cp->rx_inuse_lock);
-       list_splice_init(&cp->rx_inuse_list, &list);
-       spin_unlock(&cp->rx_inuse_lock);
-#else
-       spin_lock(&cp->rx_spare_lock);
-       list_splice_init(&cp->rx_inuse_list, &list);
-       spin_unlock(&cp->rx_spare_lock);
-#endif
-       list_for_each_safe(elem, tmp, &list) {
-               cas_page_free(cp, list_entry(elem, cas_page_t, list));
-       }
-}
-
-/* replenish spares if needed */
-static void cas_spare_recover(struct cas *cp, const gfp_t flags)
-{
-       struct list_head list, *elem, *tmp;
-       int needed, i;
-
-       /* check inuse list. if we don't need any more free buffers,
-        * just free it
-        */
-
-       /* make a local copy of the list */
-       INIT_LIST_HEAD(&list);
-       spin_lock(&cp->rx_inuse_lock);
-       list_splice_init(&cp->rx_inuse_list, &list);
-       spin_unlock(&cp->rx_inuse_lock);
-
-       list_for_each_safe(elem, tmp, &list) {
-               cas_page_t *page = list_entry(elem, cas_page_t, list);
-
-               /*
-                * With the lockless pagecache, cassini buffering scheme gets
-                * slightly less accurate: we might find that a page has an
-                * elevated reference count here, due to a speculative ref,
-                * and skip it as in-use. Ideally we would be able to reclaim
-                * it. However this would be such a rare case, it doesn't
-                * matter too much as we should pick it up the next time round.
-                *
-                * Importantly, if we find that the page has a refcount of 1
-                * here (our refcount), then we know it is definitely not inuse
-                * so we can reuse it.
-                */
-               if (page_count(page->buffer) > 1)
-                       continue;
-
-               list_del(elem);
-               spin_lock(&cp->rx_spare_lock);
-               if (cp->rx_spares_needed > 0) {
-                       list_add(elem, &cp->rx_spare_list);
-                       cp->rx_spares_needed--;
-                       spin_unlock(&cp->rx_spare_lock);
-               } else {
-                       spin_unlock(&cp->rx_spare_lock);
-                       cas_page_free(cp, page);
-               }
-       }
-
-       /* put any inuse buffers back on the list */
-       if (!list_empty(&list)) {
-               spin_lock(&cp->rx_inuse_lock);
-               list_splice(&list, &cp->rx_inuse_list);
-               spin_unlock(&cp->rx_inuse_lock);
-       }
-
-       spin_lock(&cp->rx_spare_lock);
-       needed = cp->rx_spares_needed;
-       spin_unlock(&cp->rx_spare_lock);
-       if (!needed)
-               return;
-
-       /* we still need spares, so try to allocate some */
-       INIT_LIST_HEAD(&list);
-       i = 0;
-       while (i < needed) {
-               cas_page_t *spare = cas_page_alloc(cp, flags);
-               if (!spare)
-                       break;
-               list_add(&spare->list, &list);
-               i++;
-       }
-
-       spin_lock(&cp->rx_spare_lock);
-       list_splice(&list, &cp->rx_spare_list);
-       cp->rx_spares_needed -= i;
-       spin_unlock(&cp->rx_spare_lock);
-}
-
-/* pull a page from the list. */
-static cas_page_t *cas_page_dequeue(struct cas *cp)
-{
-       struct list_head *entry;
-       int recover;
-
-       spin_lock(&cp->rx_spare_lock);
-       if (list_empty(&cp->rx_spare_list)) {
-               /* try to do a quick recovery */
-               spin_unlock(&cp->rx_spare_lock);
-               cas_spare_recover(cp, GFP_ATOMIC);
-               spin_lock(&cp->rx_spare_lock);
-               if (list_empty(&cp->rx_spare_list)) {
-                       netif_err(cp, rx_err, cp->dev,
-                                 "no spare buffers available\n");
-                       spin_unlock(&cp->rx_spare_lock);
-                       return NULL;
-               }
-       }
-
-       entry = cp->rx_spare_list.next;
-       list_del(entry);
-       recover = ++cp->rx_spares_needed;
-       spin_unlock(&cp->rx_spare_lock);
-
-       /* trigger the timer to do the recovery */
-       if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
-#if 1
-               atomic_inc(&cp->reset_task_pending);
-               atomic_inc(&cp->reset_task_pending_spare);
-               schedule_work(&cp->reset_task);
-#else
-               atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
-               schedule_work(&cp->reset_task);
-#endif
-       }
-       return list_entry(entry, cas_page_t, list);
-}
-
-
-static void cas_mif_poll(struct cas *cp, const int enable)
-{
-       u32 cfg;
-
-       cfg  = readl(cp->regs + REG_MIF_CFG);
-       cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
-
-       if (cp->phy_type & CAS_PHY_MII_MDIO1)
-               cfg |= MIF_CFG_PHY_SELECT;
-
-       /* poll and interrupt on link status change. */
-       if (enable) {
-               cfg |= MIF_CFG_POLL_EN;
-               cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
-               cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
-       }
-       writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
-              cp->regs + REG_MIF_MASK);
-       writel(cfg, cp->regs + REG_MIF_CFG);
-}
-
-/* Begin autonegotiation (or forced-mode link bring-up) on either the
- * PCS/SERDES or the MII PHY.  @ep, when non-NULL, supplies new ethtool
- * link settings; when NULL, negotiation is simply restarted with the
- * current cp->link_cntl settings.  Must be invoked under cp->lock.
- */
-static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
-{
-       u16 ctl;
-#if 1
-       int lcntl;
-       int changed = 0;
-       int oldstate = cp->lstate;
-       int link_was_not_down = !(oldstate == link_down);
-#endif
-       /* Setup link parameters */
-       if (!ep)
-               goto start_aneg;
-       lcntl = cp->link_cntl;
-       if (ep->autoneg == AUTONEG_ENABLE)
-               cp->link_cntl = BMCR_ANENABLE;
-       else {
-               u32 speed = ethtool_cmd_speed(ep);
-               cp->link_cntl = 0;
-               if (speed == SPEED_100)
-                       cp->link_cntl |= BMCR_SPEED100;
-               else if (speed == SPEED_1000)
-                       cp->link_cntl |= CAS_BMCR_SPEED1000;
-               if (ep->duplex == DUPLEX_FULL)
-                       cp->link_cntl |= BMCR_FULLDPLX;
-       }
-#if 1
-       changed = (lcntl != cp->link_cntl);
-#endif
-start_aneg:
-       if (cp->lstate == link_up) {
-               netdev_info(cp->dev, "PCS link down\n");
-       } else {
-               if (changed) {
-                       netdev_info(cp->dev, "link configuration changed\n");
-               }
-       }
-       /* the link is (re)configured from scratch below, so treat it as
-        * down until the link timer / interrupts report otherwise.
-        */
-       cp->lstate = link_down;
-       cp->link_transition = LINK_TRANSITION_LINK_DOWN;
-       if (!cp->hw_running)
-               return;
-#if 1
-       /*
-        * WTZ: If the old state was link_up, we turn off the carrier
-        * to replicate everything we do elsewhere on a link-down
-        * event when we were already in a link-up state..
-        */
-       if (oldstate == link_up)
-               netif_carrier_off(cp->dev);
-       if (changed  && link_was_not_down) {
-               /*
-                * WTZ: This branch will simply schedule a full reset after
-                * we explicitly changed link modes in an ioctl. See if this
-                * fixes the link-problems we were having for forced mode.
-                */
-               atomic_inc(&cp->reset_task_pending);
-               atomic_inc(&cp->reset_task_pending_all);
-               schedule_work(&cp->reset_task);
-               cp->timer_ticks = 0;
-               mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
-               return;
-       }
-#endif
-       /* SERDES: program the internal PCS MII control register directly */
-       if (cp->phy_type & CAS_PHY_SERDES) {
-               u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
-
-               if (cp->link_cntl & BMCR_ANENABLE) {
-                       val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
-                       cp->lstate = link_aneg;
-               } else {
-                       if (cp->link_cntl & BMCR_FULLDPLX)
-                               val |= PCS_MII_CTRL_DUPLEX;
-                       val &= ~PCS_MII_AUTONEG_EN;
-                       cp->lstate = link_force_ok;
-               }
-               cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
-               writel(val, cp->regs + REG_PCS_MII_CTRL);
-
-       } else {
-               /* MII PHY: rewrite BMCR with the requested speed/duplex/aneg
-                * bits; MIF polling is paused around the MDIO accesses.
-                */
-               cas_mif_poll(cp, 0);
-               ctl = cas_phy_read(cp, MII_BMCR);
-               ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
-                        CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
-               ctl |= cp->link_cntl;
-               if (ctl & BMCR_ANENABLE) {
-                       ctl |= BMCR_ANRESTART;
-                       cp->lstate = link_aneg;
-               } else {
-                       cp->lstate = link_force_ok;
-               }
-               cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
-               cas_phy_write(cp, MII_BMCR, ctl);
-               cas_mif_poll(cp, 1);
-       }
-
-       cp->timer_ticks = 0;
-       mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
-}
-
-/* Soft-reset the MII PHY via BMCR and poll until the reset bit
- * self-clears.  Returns non-zero if the PHY never left reset within
- * STOP_TRIES_PHY polls.  Must be invoked under cp->lock.
- */
-static int cas_reset_mii_phy(struct cas *cp)
-{
-       int limit = STOP_TRIES_PHY;
-       u16 val;
-
-       cas_phy_write(cp, MII_BMCR, BMCR_RESET);
-       udelay(100);
-       while (--limit) {
-               val = cas_phy_read(cp, MII_BMCR);
-               if ((val & BMCR_RESET) == 0)
-                       break;
-               udelay(10);
-       }
-       /* limit reaches 0 only on timeout */
-       return limit <= 0;
-}
-
-/* Fetch the Saturn PHY firmware image ("sun/cassini.bin") and cache a
- * copy in cp->fw_data for later download by cas_saturn_firmware_load().
- * Image layout: bytes 0-1 are the little-endian load address, the rest
- * is the firmware payload.  Only applies to the NS DP83065 PHY; returns
- * 0 immediately for any other PHY.  Returns 0 on success or a negative
- * errno on failure.
- */
-static int cas_saturn_firmware_init(struct cas *cp)
-{
-       const struct firmware *fw;
-       const char fw_name[] = "sun/cassini.bin";
-       int err;
-
-       if (PHY_NS_DP83065 != cp->phy_id)
-               return 0;
-
-       err = request_firmware(&fw, fw_name, &cp->pdev->dev);
-       if (err) {
-               pr_err("Failed to load firmware \"%s\"\n",
-                      fw_name);
-               return err;
-       }
-       /* need at least the 2-byte load address header */
-       if (fw->size < 2) {
-               pr_err("bogus length %zu in \"%s\"\n",
-                      fw->size, fw_name);
-               err = -EINVAL;
-               goto out;
-       }
-       cp->fw_load_addr= fw->data[1] << 8 | fw->data[0];
-       cp->fw_size = fw->size - 2;
-       cp->fw_data = vmalloc(cp->fw_size);
-       if (!cp->fw_data) {
-               err = -ENOMEM;
-               pr_err("\"%s\" Failed %d\n", fw_name, err);
-               goto out;
-       }
-       memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
-out:
-       release_firmware(fw);
-       return err;
-}
-
-/* Download the firmware cached by cas_saturn_firmware_init() into the
- * DP83065 PHY over MDIO (REGE selects the address, REGD carries data),
- * then enable it.  The PHY is powered down for the duration of the
- * download.
- */
-static void cas_saturn_firmware_load(struct cas *cp)
-{
-       int i;
-
-       cas_phy_powerdown(cp);
-
-       /* expanded memory access mode */
-       cas_phy_write(cp, DP83065_MII_MEM, 0x0);
-
-       /* pointer configuration for new firmware */
-       cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
-       cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
-       cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
-       cas_phy_write(cp, DP83065_MII_REGD, 0x82);
-       cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
-       cas_phy_write(cp, DP83065_MII_REGD, 0x0);
-       cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
-       cas_phy_write(cp, DP83065_MII_REGD, 0x39);
-
-       /* download new firmware */
-       cas_phy_write(cp, DP83065_MII_MEM, 0x1);
-       cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
-       for (i = 0; i < cp->fw_size; i++)
-               cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
-
-       /* enable firmware */
-       cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
-       cas_phy_write(cp, DP83065_MII_REGD, 0x1);
-}
-
-
-/* PHY initialization: set up the datapath (MII/GMII vs SERDES), apply
- * per-PHY-model workarounds, and advertise capabilities.  Half-duplex
- * gigabit is never advertised to avoid a chip issue.
- */
-static void cas_phy_init(struct cas *cp)
-{
-       u16 val;
-
-       /* if we're in MII/GMII mode, set up phy */
-       if (CAS_PHY_MII(cp->phy_type)) {
-               writel(PCS_DATAPATH_MODE_MII,
-                      cp->regs + REG_PCS_DATAPATH_MODE);
-
-               cas_mif_poll(cp, 0);
-               cas_reset_mii_phy(cp); /* take out of isolate mode */
-
-               if (PHY_LUCENT_B0 == cp->phy_id) {
-                       /* workaround link up/down issue with lucent */
-                       cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
-                       cas_phy_write(cp, MII_BMCR, 0x00f1);
-                       cas_phy_write(cp, LUCENT_MII_REG, 0x0);
-
-               } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
-                       /* workarounds for broadcom phy */
-                       cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
-                       cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
-                       cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
-                       cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
-                       cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
-                       cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
-                       cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
-                       cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
-                       cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
-                       cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
-                       cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
-
-               } else if (PHY_BROADCOM_5411 == cp->phy_id) {
-                       /* NOTE(review): register read twice -- presumably the
-                        * first read returns a stale/latched value; confirm
-                        * against BCM5411 errata.
-                        */
-                       val = cas_phy_read(cp, BROADCOM_MII_REG4);
-                       val = cas_phy_read(cp, BROADCOM_MII_REG4);
-                       if (val & 0x0080) {
-                               /* link workaround */
-                               cas_phy_write(cp, BROADCOM_MII_REG4,
-                                             val & ~0x0080);
-                       }
-
-               } else if (cp->cas_flags & CAS_FLAG_SATURN) {
-                       writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
-                              SATURN_PCFG_FSI : 0x0,
-                              cp->regs + REG_SATURN_PCFG);
-
-                       /* load firmware to address 10Mbps auto-negotiation
-                        * issue. NOTE: this will need to be changed if the
-                        * default firmware gets fixed.
-                        */
-                       if (PHY_NS_DP83065 == cp->phy_id) {
-                               cas_saturn_firmware_load(cp);
-                       }
-                       cas_phy_powerup(cp);
-               }
-
-               /* advertise capabilities */
-               val = cas_phy_read(cp, MII_BMCR);
-               val &= ~BMCR_ANENABLE;
-               cas_phy_write(cp, MII_BMCR, val);
-               udelay(10);
-
-               cas_phy_write(cp, MII_ADVERTISE,
-                             cas_phy_read(cp, MII_ADVERTISE) |
-                             (ADVERTISE_10HALF | ADVERTISE_10FULL |
-                              ADVERTISE_100HALF | ADVERTISE_100FULL |
-                              CAS_ADVERTISE_PAUSE |
-                              CAS_ADVERTISE_ASYM_PAUSE));
-
-               if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
-                       /* make sure that we don't advertise half
-                        * duplex to avoid a chip issue
-                        */
-                       val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
-                       val &= ~CAS_ADVERTISE_1000HALF;
-                       val |= CAS_ADVERTISE_1000FULL;
-                       cas_phy_write(cp, CAS_MII_1000_CTRL, val);
-               }
-
-       } else {
-               /* reset pcs for serdes */
-               u32 val;
-               int limit;
-
-               writel(PCS_DATAPATH_MODE_SERDES,
-                      cp->regs + REG_PCS_DATAPATH_MODE);
-
-               /* enable serdes pins on saturn */
-               if (cp->cas_flags & CAS_FLAG_SATURN)
-                       writel(0, cp->regs + REG_SATURN_PCFG);
-
-               /* Reset PCS unit. */
-               val = readl(cp->regs + REG_PCS_MII_CTRL);
-               val |= PCS_MII_RESET;
-               writel(val, cp->regs + REG_PCS_MII_CTRL);
-
-               limit = STOP_TRIES;
-               while (--limit > 0) {
-                       udelay(10);
-                       if ((readl(cp->regs + REG_PCS_MII_CTRL) &
-                            PCS_MII_RESET) == 0)
-                               break;
-               }
-               if (limit <= 0)
-                       netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
-                                   readl(cp->regs + REG_PCS_STATE_MACHINE));
-
-               /* Make sure PCS is disabled while changing advertisement
-                * configuration.
-                */
-               writel(0x0, cp->regs + REG_PCS_CFG);
-
-               /* Advertise all capabilities except half-duplex. */
-               val  = readl(cp->regs + REG_PCS_MII_ADVERT);
-               val &= ~PCS_MII_ADVERT_HD;
-               val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
-                       PCS_MII_ADVERT_ASYM_PAUSE);
-               writel(val, cp->regs + REG_PCS_MII_ADVERT);
-
-               /* enable PCS */
-               writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
-
-               /* pcs workaround: enable sync detect */
-               writel(PCS_SERDES_CTRL_SYNCD_EN,
-                      cp->regs + REG_PCS_SERDES_CTRL);
-       }
-}
-
-
-/* Evaluate PCS link status and update driver link state accordingly
- * (carrier on/off, link-transition bookkeeping).  Returns non-zero if
- * the caller should schedule a chip reset as a link-failure workaround.
- */
-static int cas_pcs_link_check(struct cas *cp)
-{
-       u32 stat, state_machine;
-       int retval = 0;
-
-       /* The link status bit latches on zero, so you must
-        * read it twice in such a case to see a transition
-        * to the link being up.
-        */
-       stat = readl(cp->regs + REG_PCS_MII_STATUS);
-       if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
-               stat = readl(cp->regs + REG_PCS_MII_STATUS);
-
-       /* The remote-fault indication is only valid
-        * when autoneg has completed.
-        */
-       if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
-                    PCS_MII_STATUS_REMOTE_FAULT)) ==
-           (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
-               netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
-
-       /* work around link detection issue by querying the PCS state
-        * machine directly.
-        */
-       state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
-       if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
-               stat &= ~PCS_MII_STATUS_LINK_STATUS;
-       } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
-               stat |= PCS_MII_STATUS_LINK_STATUS;
-       }
-
-       if (stat & PCS_MII_STATUS_LINK_STATUS) {
-               if (cp->lstate != link_up) {
-                       if (cp->opened) {
-                               cp->lstate = link_up;
-                               cp->link_transition = LINK_TRANSITION_LINK_UP;
-
-                               cas_set_link_modes(cp);
-                               netif_carrier_on(cp->dev);
-                       }
-               }
-       } else if (cp->lstate == link_up) {
-               cp->lstate = link_down;
-               if (link_transition_timeout != 0 &&
-                   cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
-                   !cp->link_transition_jiffies_valid) {
-                       /*
-                        * force a reset, as a workaround for the
-                        * link-failure problem. May want to move this to a
-                        * point a bit earlier in the sequence. If we had
-                        * generated a reset a short time ago, we'll wait for
-                        * the link timer to check the status until a
-                        * timer expires (link_transition_jiffies_valid is
-                        * true when the timer is running.)  Instead of using
-                        * a system timer, we just do a check whenever the
-                        * link timer is running - this clears the flag after
-                        * a suitable delay.
-                        */
-                       retval = 1;
-                       cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
-                       cp->link_transition_jiffies = jiffies;
-                       cp->link_transition_jiffies_valid = 1;
-               } else {
-                       cp->link_transition = LINK_TRANSITION_ON_FAILURE;
-               }
-               netif_carrier_off(cp->dev);
-               if (cp->opened)
-                       netif_info(cp, link, cp->dev, "PCS link down\n");
-
-               /* Cassini only: if you force a mode, there can be
-                * sync problems on link down. to fix that, the following
-                * things need to be checked:
-                * 1) read serialink state register
-                * 2) read pcs status register to verify link down.
-                * 3) if link down and serial link == 0x03, then you need
-                *    to global reset the chip.
-                */
-               if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
-                       /* should check to see if we're in a forced mode */
-                       stat = readl(cp->regs + REG_PCS_SERDES_STATE);
-                       if (stat == 0x03)
-                               return 1;
-               }
-       } else if (cp->lstate == link_down) {
-               if (link_transition_timeout != 0 &&
-                   cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
-                   !cp->link_transition_jiffies_valid) {
-                       /* force a reset, as a workaround for the
-                        * link-failure problem.  May want to move
-                        * this to a point a bit earlier in the
-                        * sequence.
-                        */
-                       retval = 1;
-                       cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
-                       cp->link_transition_jiffies = jiffies;
-                       cp->link_transition_jiffies_valid = 1;
-               } else {
-                       cp->link_transition = LINK_TRANSITION_STILL_FAILED;
-               }
-       }
-
-       return retval;
-}
-
-/* PCS interrupt handler: only link-change events are acted on; the
- * status read also clears the interrupt condition.  Return value is
- * that of cas_pcs_link_check() (non-zero requests a chip reset).
- */
-static int cas_pcs_interrupt(struct net_device *dev,
-                            struct cas *cp, u32 status)
-{
-       u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
-
-       if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
-               return 0;
-       return cas_pcs_link_check(cp);
-}
-
-/* TX MAC interrupt handler: account underruns, oversize-packet errors
- * and 16-bit collision-counter rollovers into net_stats[0] under
- * stat_lock[0].  Always returns 0 (never requests a reset).
- */
-static int cas_txmac_interrupt(struct net_device *dev,
-                              struct cas *cp, u32 status)
-{
-       u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
-
-       if (!txmac_stat)
-               return 0;
-
-       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
-                    "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
-
-       /* Defer timer expiration is quite normal,
-        * don't even log the event.
-        */
-       if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
-           !(txmac_stat & ~MAC_TX_DEFER_TIMER))
-               return 0;
-
-       spin_lock(&cp->stat_lock[0]);
-       if (txmac_stat & MAC_TX_UNDERRUN) {
-               netdev_err(dev, "TX MAC xmit underrun\n");
-               cp->net_stats[0].tx_fifo_errors++;
-       }
-
-       if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
-               netdev_err(dev, "TX MAC max packet size error\n");
-               cp->net_stats[0].tx_errors++;
-       }
-
-       /* The rest are all cases of one of the 16-bit TX
-        * counters expiring.
-        */
-       if (txmac_stat & MAC_TX_COLL_NORMAL)
-               cp->net_stats[0].collisions += 0x10000;
-
-       if (txmac_stat & MAC_TX_COLL_EXCESS) {
-               cp->net_stats[0].tx_aborted_errors += 0x10000;
-               cp->net_stats[0].collisions += 0x10000;
-       }
-
-       if (txmac_stat & MAC_TX_COLL_LATE) {
-               cp->net_stats[0].tx_aborted_errors += 0x10000;
-               cp->net_stats[0].collisions += 0x10000;
-       }
-       spin_unlock(&cp->stat_lock[0]);
-
-       /* We do not keep track of MAC_TX_COLL_FIRST and
-        * MAC_TX_PEAK_ATTEMPTS events.
-        */
-       return 0;
-}
-
-/* Write a header-parser (HP) microcode program into the chip's
- * instruction RAM.  Each cas_hp_inst_t is packed into three 32-bit
- * words (HI/MID/LOW) at sequential RAM addresses; the table is
- * terminated by an entry with a NULL ->note.
- */
-static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
-{
-       cas_hp_inst_t *inst;
-       u32 val;
-       int i;
-
-       i = 0;
-       /* note: assignment in the loop condition; firmware is advanced
-        * at the bottom of the loop body.
-        */
-       while ((inst = firmware) && inst->note) {
-               writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
-
-               val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
-               val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
-               writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
-
-               val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
-               val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
-               val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
-               val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
-               val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
-               val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
-               val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
-               writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
-
-               val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
-               val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
-               val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
-               val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
-               writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
-               ++firmware;
-               ++i;
-       }
-}
-
-/* Program the RX DMA engine: descriptor/completion ring base addresses,
- * kick registers, pause thresholds, interrupt mitigation, page sizes,
- * and (when enabled) the header parser.
- */
-static void cas_init_rx_dma(struct cas *cp)
-{
-       u64 desc_dma = cp->block_dvma;
-       u32 val;
-       int i, size;
-
-       /* rx free descriptors */
-       val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
-       val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
-       val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
-       if ((N_RX_DESC_RINGS > 1) &&
-           (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
-               val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
-       writel(val, cp->regs + REG_RX_CFG);
-
-       val = (unsigned long) cp->init_rxds[0] -
-               (unsigned long) cp->init_block;
-       writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
-       writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
-       writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
-
-       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
-               /* rx desc 2 is for IPSEC packets. however,
-                * we don't use it for that purpose.
-                */
-               val = (unsigned long) cp->init_rxds[1] -
-                       (unsigned long) cp->init_block;
-               writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
-               writel((desc_dma + val) & 0xffffffff, cp->regs +
-                      REG_PLUS_RX_DB1_LOW);
-               writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
-                      REG_PLUS_RX_KICK1);
-       }
-
-       /* rx completion registers */
-       val = (unsigned long) cp->init_rxcs[0] -
-               (unsigned long) cp->init_block;
-       writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
-       writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
-
-       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
-               /* rx comp 2-4 */
-               for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
-                       val = (unsigned long) cp->init_rxcs[i] -
-                               (unsigned long) cp->init_block;
-                       writel((desc_dma + val) >> 32, cp->regs +
-                              REG_PLUS_RX_CBN_HI(i));
-                       writel((desc_dma + val) & 0xffffffff, cp->regs +
-                              REG_PLUS_RX_CBN_LOW(i));
-               }
-       }
-
-       /* read selective clear regs to prevent spurious interrupts
-        * on reset because complete == kick.
-        * selective clear set up to prevent interrupts on resets
-        */
-       readl(cp->regs + REG_INTR_STATUS_ALIAS);
-       writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
-       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
-               for (i = 1; i < N_RX_COMP_RINGS; i++)
-                       readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
-
-               /* 2 is different from 3 and 4 */
-               if (N_RX_COMP_RINGS > 1)
-                       writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
-                              cp->regs + REG_PLUS_ALIASN_CLEAR(1));
-
-               for (i = 2; i < N_RX_COMP_RINGS; i++)
-                       writel(INTR_RX_DONE_ALT,
-                              cp->regs + REG_PLUS_ALIASN_CLEAR(i));
-       }
-
-       /* set up pause thresholds */
-       val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
-                       cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
-       val |= CAS_BASE(RX_PAUSE_THRESH_ON,
-                       cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
-       writel(val, cp->regs + REG_RX_PAUSE_THRESH);
-
-       /* zero out dma reassembly buffers */
-       for (i = 0; i < 64; i++) {
-               writel(i, cp->regs + REG_RX_TABLE_ADDR);
-               writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
-               writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
-               writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
-       }
-
-       /* make sure address register is 0 for normal operation */
-       writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
-       writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
-
-       /* interrupt mitigation */
-#ifdef USE_RX_BLANK
-       val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
-       val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
-       writel(val, cp->regs + REG_RX_BLANK);
-#else
-       writel(0x0, cp->regs + REG_RX_BLANK);
-#endif
-
-       /* interrupt generation as a function of low water marks for
-        * free desc and completion entries. these are used to trigger
-        * housekeeping for rx descs. we don't use the free interrupt
-        * as it's not very useful
-        */
-       /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
-       val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
-       writel(val, cp->regs + REG_RX_AE_THRESH);
-       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
-               val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
-               writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
-       }
-
-       /* Random early detect registers. useful for congestion avoidance.
-        * this should be tunable.
-        */
-       writel(0x0, cp->regs + REG_RX_RED);
-
-       /* receive page sizes. default == 2K (0x800) */
-       val = 0;
-       if (cp->page_size == 0x1000)
-               val = 0x1;
-       else if (cp->page_size == 0x2000)
-               val = 0x2;
-       else if (cp->page_size == 0x4000)
-               val = 0x3;
-
-       /* round mtu + offset. constrain to page size. */
-       size = cp->dev->mtu + 64;
-       if (size > cp->page_size)
-               size = cp->page_size;
-
-       if (size <= 0x400)
-               i = 0x0;
-       else if (size <= 0x800)
-               i = 0x1;
-       else if (size <= 0x1000)
-               i = 0x2;
-       else
-               i = 0x3;
-
-       /* mtu_stride = 2^(i + 10): the per-packet stride within a page */
-       cp->mtu_stride = 1 << (i + 10);
-       val  = CAS_BASE(RX_PAGE_SIZE, val);
-       val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
-       val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
-       val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
-       writel(val, cp->regs + REG_RX_PAGE_SIZE);
-
-       /* enable the header parser if desired */
-       if (CAS_HP_FIRMWARE == cas_prog_null)
-               return;
-
-       val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
-       val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
-       val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
-       writel(val, cp->regs + REG_HP_CFG);
-}
-
-/* Reset an RX completion descriptor to its ownership-released state. */
-static inline void cas_rxc_init(struct cas_rx_comp *rxc)
-{
-       memset(rxc, 0, sizeof(*rxc));
-       rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
-}
-
-/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
- * flipping is protected by the fact that the chip will not
- * hand back the same page index while it's being processed.
- */
-/* Return a free spare page for @index: the existing spare if it is no
- * longer referenced, otherwise a fresh page from the spare queue (the
- * in-use page is then parked on rx_inuse_list for later reclaim).
- * Returns NULL if no spare page could be obtained.
- */
-static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
-{
-       cas_page_t *page = cp->rx_pages[1][index];
-       cas_page_t *new;
-
-       /* page_count == 1 means only the driver holds a reference */
-       if (page_count(page->buffer) == 1)
-               return page;
-
-       new = cas_page_dequeue(cp);
-       if (new) {
-               spin_lock(&cp->rx_inuse_lock);
-               list_add(&page->list, &cp->rx_inuse_list);
-               spin_unlock(&cp->rx_inuse_lock);
-       }
-       return new;
-}
-
-/* this needs to be changed if we actually use the ENC RX DESC ring */
-/* Hand back a usable page for ring slot (@ring, @index), swapping in a
- * spare if the current page is still referenced elsewhere.  The slot's
- * used counter is reset before returning.
- */
-static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
-                                const int index)
-{
-       cas_page_t **page0 = cp->rx_pages[0];
-       cas_page_t **page1 = cp->rx_pages[1];
-
-       /* swap if buffer is in use */
-       if (page_count(page0[index]->buffer) > 1) {
-               cas_page_t *new = cas_page_spare(cp, index);
-               if (new) {
-                       page1[index] = page0[index];
-                       page0[index] = new;
-               }
-       }
-       RX_USED_SET(page0[index], 0);
-       return page0[index];
-}
-
-/* Reinitialize RX descriptor ring 0: drop any queued flow skbs and
- * repopulate every descriptor with a clean page.
- */
-static void cas_clean_rxds(struct cas *cp)
-{
-       /* only clean ring 0 as ring 1 is used for spare buffers */
-        struct cas_rx_desc *rxd = cp->init_rxds[0];
-       int i, size;
-
-       /* release all rx flows */
-       for (i = 0; i < N_RX_FLOWS; i++) {
-               struct sk_buff *skb;
-               while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
-                       cas_skb_release(skb);
-               }
-       }
-
-       /* initialize descriptors */
-       size = RX_DESC_RINGN_SIZE(0);
-       for (i = 0; i < size; i++) {
-               cas_page_t *page = cas_page_swap(cp, 0, i);
-               rxd[i].buffer = cpu_to_le64(page->dma_addr);
-               rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
-                                           CAS_BASE(RX_INDEX_RING, 0));
-       }
-
-       /* leave a 4-entry gap between producer and consumer, matching the
-        * initial REG_RX_KICK programming in cas_init_rx_dma()
-        */
-       cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
-       cp->rx_last[0] = 0;
-       cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
-}
-
-/* Reset all RX completion rings and their cursor indices so the driver
- * owns every completion descriptor again.
- */
-static void cas_clean_rxcs(struct cas *cp)
-{
-       int i, j;
-
-       /* take ownership of rx comp descriptors */
-       memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
-       memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
-       for (i = 0; i < N_RX_COMP_RINGS; i++) {
-               struct cas_rx_comp *rxc = cp->init_rxcs[i];
-               for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
-                       cas_rxc_init(rxc + j);
-               }
-       }
-}
-
-#if 0
-/* When we get a RX fifo overflow, the RX unit is probably hung
- * so we do the following.
- *
- * If any part of the reset goes wrong, we return 1 and that causes the
- * whole chip to be reset.
- *
- * NOTE: currently compiled out (#if 0); kept as reference for a
- * partial RX-only recovery path.
- */
-static int cas_rxmac_reset(struct cas *cp)
-{
-       struct net_device *dev = cp->dev;
-       int limit;
-       u32 val;
-
-       /* First, reset MAC RX. */
-       writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
-       for (limit = 0; limit < STOP_TRIES; limit++) {
-               if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
-                       break;
-               udelay(10);
-       }
-       if (limit == STOP_TRIES) {
-               netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
-               return 1;
-       }
-
-       /* Second, disable RX DMA. */
-       writel(0, cp->regs + REG_RX_CFG);
-       for (limit = 0; limit < STOP_TRIES; limit++) {
-               if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
-                       break;
-               udelay(10);
-       }
-       if (limit == STOP_TRIES) {
-               netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
-               return 1;
-       }
-
-       mdelay(5);
-
-       /* Execute RX reset command. */
-       writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
-       for (limit = 0; limit < STOP_TRIES; limit++) {
-               if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
-                       break;
-               udelay(10);
-       }
-       if (limit == STOP_TRIES) {
-               netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
-               return 1;
-       }
-
-       /* reset driver rx state */
-       cas_clean_rxds(cp);
-       cas_clean_rxcs(cp);
-
-       /* Now, reprogram the rest of RX unit. */
-       cas_init_rx_dma(cp);
-
-       /* re-enable */
-       val = readl(cp->regs + REG_RX_CFG);
-       writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
-       writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
-       val = readl(cp->regs + REG_MAC_RX_CFG);
-       writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
-       return 0;
-}
-#endif
-
-/* RX MAC interrupt handler: fold 16-bit hardware error-counter
- * rollovers (alignment/CRC/length) and FIFO overflows into
- * net_stats[0] under stat_lock[0].  Always returns 0.
- */
-static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
-                              u32 status)
-{
-       u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
-
-       if (!stat)
-               return 0;
-
-       netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
-
-       /* these are all rollovers */
-       spin_lock(&cp->stat_lock[0]);
-       if (stat & MAC_RX_ALIGN_ERR)
-               cp->net_stats[0].rx_frame_errors += 0x10000;
-
-       if (stat & MAC_RX_CRC_ERR)
-               cp->net_stats[0].rx_crc_errors += 0x10000;
-
-       if (stat & MAC_RX_LEN_ERR)
-               cp->net_stats[0].rx_length_errors += 0x10000;
-
-       if (stat & MAC_RX_OVERFLOW) {
-               cp->net_stats[0].rx_over_errors++;
-               cp->net_stats[0].rx_fifo_errors++;
-       }
-
-       /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
-        * events.
-        */
-       spin_unlock(&cp->stat_lock[0]);
-       return 0;
-}
-
-/* MAC control interrupt handler: records pause-frame activity for
- * diagnostics only.  Always returns 0.
- */
-static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
-                            u32 status)
-{
-       u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
-
-       if (!stat)
-               return 0;
-
-       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
-                    "mac interrupt, stat: 0x%x\n", stat);
-
-       /* This interrupt is just for pause frame and pause
-        * tracking.  It is useful for diagnostics and debug
-        * but probably by default we will mask these events.
-        */
-       if (stat & MAC_CTRL_PAUSE_STATE)
-               cp->pause_entered++;
-
-       /* upper 16 bits of the status hold the last pause time received */
-       if (stat & MAC_CTRL_PAUSE_RECEIVED)
-               cp->pause_last_time_recvd = (stat >> 16);
-
-       return 0;
-}
-
-
-/* Handle autonegotiation failure on an MII PHY by stepping through
- * forced modes: 1000FD -> 100FD -> 100HD -> 10.  Must be invoked under
- * cp->lock.  Always returns 0.
- */
-static inline int cas_mdio_link_not_up(struct cas *cp)
-{
-       u16 val;
-
-       switch (cp->lstate) {
-       case link_force_ret:
-               netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
-               cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
-               cp->timer_ticks = 5;
-               cp->lstate = link_force_ok;
-               cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
-               break;
-
-       case link_aneg:
-               val = cas_phy_read(cp, MII_BMCR);
-
-               /* Try forced modes. we try things in the following order:
-                * 1000 full -> 100 full/half -> 10 half
-                */
-               val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
-               val |= BMCR_FULLDPLX;
-               val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
-                       CAS_BMCR_SPEED1000 : BMCR_SPEED100;
-               cas_phy_write(cp, MII_BMCR, val);
-               cp->timer_ticks = 5;
-               cp->lstate = link_force_try;
-               cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
-               break;
-
-       case link_force_try:
-               /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
-               val = cas_phy_read(cp, MII_BMCR);
-               cp->timer_ticks = 5;
-               if (val & CAS_BMCR_SPEED1000) { /* gigabit */
-                       val &= ~CAS_BMCR_SPEED1000;
-                       val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
-                       cas_phy_write(cp, MII_BMCR, val);
-                       break;
-               }
-
-               if (val & BMCR_SPEED100) {
-                       if (val & BMCR_FULLDPLX) /* fd failed */
-                               val &= ~BMCR_FULLDPLX;
-                       else { /* 100Mbps failed */
-                               val &= ~BMCR_SPEED100;
-                       }
-                       cas_phy_write(cp, MII_BMCR, val);
-                       break;
-               }
-               /* already at the slowest speed: fall through */
-       default:
-               break;
-       }
-       return 0;
-}
-
-
-/* must be invoked with cp->lock held */
-static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
-{
-       int restart;
-
-       if (bmsr & BMSR_LSTATUS) {
-               /* Ok, here we got a link. If we had it due to a forced
-                * fallback, and we were configured for autoneg, we
-                * retry a short autoneg pass. If you know your hub is
-                * broken, use ethtool ;)
-                */
-               if ((cp->lstate == link_force_try) &&
-                   (cp->link_cntl & BMCR_ANENABLE)) {
-                       cp->lstate = link_force_ret;
-                       cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
-                       cas_mif_poll(cp, 0);
-                       cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
-                       cp->timer_ticks = 5;
-                       if (cp->opened)
-                               netif_info(cp, link, cp->dev,
-                                          "Got link after fallback, retrying autoneg once...\n");
-                       cas_phy_write(cp, MII_BMCR,
-                                     cp->link_fcntl | BMCR_ANENABLE |
-                                     BMCR_ANRESTART);
-                       cas_mif_poll(cp, 1);
-
-               } else if (cp->lstate != link_up) {
-                       cp->lstate = link_up;
-                       cp->link_transition = LINK_TRANSITION_LINK_UP;
-
-                       if (cp->opened) {
-                               cas_set_link_modes(cp);
-                               netif_carrier_on(cp->dev);
-                       }
-               }
-               return 0;
-       }
-
-       /* link not up. if the link was previously up, we restart the
-        * whole process
-        */
-       restart = 0;
-       if (cp->lstate == link_up) {
-               cp->lstate = link_down;
-               cp->link_transition = LINK_TRANSITION_LINK_DOWN;
-
-               netif_carrier_off(cp->dev);
-               if (cp->opened)
-                       netif_info(cp, link, cp->dev, "Link down\n");
-               restart = 1;
-
-       } else if (++cp->timer_ticks > 10)
-               cas_mdio_link_not_up(cp);
-
-       return restart;
-}
-
-static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
-                            u32 status)
-{
-       u32 stat = readl(cp->regs + REG_MIF_STATUS);
-       u16 bmsr;
-
-       /* check for a link change */
-       if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
-               return 0;
-
-       bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
-       return cas_mii_link_check(cp, bmsr);
-}
-
-static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
-                            u32 status)
-{
-       u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
-
-       if (!stat)
-               return 0;
-
-       netdev_err(dev, "PCI error [%04x:%04x]",
-                  stat, readl(cp->regs + REG_BIM_DIAG));
-
-       /* cassini+ has this reserved */
-       if ((stat & PCI_ERR_BADACK) &&
-           ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
-               pr_cont(" <No ACK64# during ABS64 cycle>");
-
-       if (stat & PCI_ERR_DTRTO)
-               pr_cont(" <Delayed transaction timeout>");
-       if (stat & PCI_ERR_OTHER)
-               pr_cont(" <other>");
-       if (stat & PCI_ERR_BIM_DMA_WRITE)
-               pr_cont(" <BIM DMA 0 write req>");
-       if (stat & PCI_ERR_BIM_DMA_READ)
-               pr_cont(" <BIM DMA 0 read req>");
-       pr_cont("\n");
-
-       if (stat & PCI_ERR_OTHER) {
-               u16 cfg;
-
-               /* Interrogate PCI config space for the
-                * true cause.
-                */
-               pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
-               netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
-               if (cfg & PCI_STATUS_PARITY)
-                       netdev_err(dev, "PCI parity error detected\n");
-               if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
-                       netdev_err(dev, "PCI target abort\n");
-               if (cfg & PCI_STATUS_REC_TARGET_ABORT)
-                       netdev_err(dev, "PCI master acks target abort\n");
-               if (cfg & PCI_STATUS_REC_MASTER_ABORT)
-                       netdev_err(dev, "PCI master abort\n");
-               if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
-                       netdev_err(dev, "PCI system error SERR#\n");
-               if (cfg & PCI_STATUS_DETECTED_PARITY)
-                       netdev_err(dev, "PCI parity error\n");
-
-               /* Write the error bits back to clear them. */
-               cfg &= (PCI_STATUS_PARITY |
-                       PCI_STATUS_SIG_TARGET_ABORT |
-                       PCI_STATUS_REC_TARGET_ABORT |
-                       PCI_STATUS_REC_MASTER_ABORT |
-                       PCI_STATUS_SIG_SYSTEM_ERROR |
-                       PCI_STATUS_DETECTED_PARITY);
-               pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
-       }
-
-       /* For all PCI errors, we should reset the chip. */
-       return 1;
-}
-
-/* All non-normal interrupt conditions get serviced here.
- * Returns non-zero if we should just exit the interrupt
- * handler right now (ie. if we reset the card which invalidates
- * all of the other original irq status bits).
- */
-static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
-                           u32 status)
-{
-       if (status & INTR_RX_TAG_ERROR) {
-               /* corrupt RX tag framing */
-               netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
-                            "corrupt rx tag framing\n");
-               spin_lock(&cp->stat_lock[0]);
-               cp->net_stats[0].rx_errors++;
-               spin_unlock(&cp->stat_lock[0]);
-               goto do_reset;
-       }
-
-       if (status & INTR_RX_LEN_MISMATCH) {
-               /* length mismatch. */
-               netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
-                            "length mismatch for rx frame\n");
-               spin_lock(&cp->stat_lock[0]);
-               cp->net_stats[0].rx_errors++;
-               spin_unlock(&cp->stat_lock[0]);
-               goto do_reset;
-       }
-
-       if (status & INTR_PCS_STATUS) {
-               if (cas_pcs_interrupt(dev, cp, status))
-                       goto do_reset;
-       }
-
-       if (status & INTR_TX_MAC_STATUS) {
-               if (cas_txmac_interrupt(dev, cp, status))
-                       goto do_reset;
-       }
-
-       if (status & INTR_RX_MAC_STATUS) {
-               if (cas_rxmac_interrupt(dev, cp, status))
-                       goto do_reset;
-       }
-
-       if (status & INTR_MAC_CTRL_STATUS) {
-               if (cas_mac_interrupt(dev, cp, status))
-                       goto do_reset;
-       }
-
-       if (status & INTR_MIF_STATUS) {
-               if (cas_mif_interrupt(dev, cp, status))
-                       goto do_reset;
-       }
-
-       if (status & INTR_PCI_ERROR_STATUS) {
-               if (cas_pci_interrupt(dev, cp, status))
-                       goto do_reset;
-       }
-       return 0;
-
-do_reset:
-#if 1
-       atomic_inc(&cp->reset_task_pending);
-       atomic_inc(&cp->reset_task_pending_all);
-       netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
-       schedule_work(&cp->reset_task);
-#else
-       atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
-       netdev_err(dev, "reset called in cas_abnormal_irq\n");
-       schedule_work(&cp->reset_task);
-#endif
-       return 1;
-}
-
-/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
- *       determining whether to do a netif_stop/wakeup
- */
-#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
-#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
-static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
-                                 const int len)
-{
-       unsigned long off = addr + len;
-
-       if (CAS_TABORT(cp) == 1)
-               return 0;
-       if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
-               return 0;
-       return TX_TARGET_ABORT_LEN;
-}
-
-static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
-{
-       struct cas_tx_desc *txds;
-       struct sk_buff **skbs;
-       struct net_device *dev = cp->dev;
-       int entry, count;
-
-       spin_lock(&cp->tx_lock[ring]);
-       txds = cp->init_txds[ring];
-       skbs = cp->tx_skbs[ring];
-       entry = cp->tx_old[ring];
-
-       count = TX_BUFF_COUNT(ring, entry, limit);
-       while (entry != limit) {
-               struct sk_buff *skb = skbs[entry];
-               dma_addr_t daddr;
-               u32 dlen;
-               int frag;
-
-               if (!skb) {
-                       /* this should never occur */
-                       entry = TX_DESC_NEXT(ring, entry);
-                       continue;
-               }
-
-               /* however, we might get only a partial skb release. */
-               count -= skb_shinfo(skb)->nr_frags +
-                       + cp->tx_tiny_use[ring][entry].nbufs + 1;
-               if (count < 0)
-                       break;
-
-               netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
-                            "tx[%d] done, slot %d\n", ring, entry);
-
-               skbs[entry] = NULL;
-               cp->tx_tiny_use[ring][entry].nbufs = 0;
-
-               for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
-                       struct cas_tx_desc *txd = txds + entry;
-
-                       daddr = le64_to_cpu(txd->buffer);
-                       dlen = CAS_VAL(TX_DESC_BUFLEN,
-                                      le64_to_cpu(txd->control));
-                       pci_unmap_page(cp->pdev, daddr, dlen,
-                                      PCI_DMA_TODEVICE);
-                       entry = TX_DESC_NEXT(ring, entry);
-
-                       /* tiny buffer may follow */
-                       if (cp->tx_tiny_use[ring][entry].used) {
-                               cp->tx_tiny_use[ring][entry].used = 0;
-                               entry = TX_DESC_NEXT(ring, entry);
-                       }
-               }
-
-               spin_lock(&cp->stat_lock[ring]);
-               cp->net_stats[ring].tx_packets++;
-               cp->net_stats[ring].tx_bytes += skb->len;
-               spin_unlock(&cp->stat_lock[ring]);
-               dev_kfree_skb_irq(skb);
-       }
-       cp->tx_old[ring] = entry;
-
-       /* this is wrong for multiple tx rings. the net device needs
-        * multiple queues for this to do the right thing.  we wait
-        * for 2*packets to be available when using tiny buffers
-        */
-       if (netif_queue_stopped(dev) &&
-           (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
-               netif_wake_queue(dev);
-       spin_unlock(&cp->tx_lock[ring]);
-}
-
-static void cas_tx(struct net_device *dev, struct cas *cp,
-                  u32 status)
-{
-        int limit, ring;
-#ifdef USE_TX_COMPWB
-       u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
-#endif
-       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
-                    "tx interrupt, status: 0x%x, %llx\n",
-                    status, (unsigned long long)compwb);
-       /* process all the rings */
-       for (ring = 0; ring < N_TX_RINGS; ring++) {
-#ifdef USE_TX_COMPWB
-               /* use the completion writeback registers */
-               limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
-                       CAS_VAL(TX_COMPWB_LSB, compwb);
-               compwb = TX_COMPWB_NEXT(compwb);
-#else
-               limit = readl(cp->regs + REG_TX_COMPN(ring));
-#endif
-               if (cp->tx_old[ring] != limit)
-                       cas_tx_ringN(cp, ring, limit);
-       }
-}
-
-
-static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
-                             int entry, const u64 *words,
-                             struct sk_buff **skbref)
-{
-       int dlen, hlen, len, i, alloclen;
-       int off, swivel = RX_SWIVEL_OFF_VAL;
-       struct cas_page *page;
-       struct sk_buff *skb;
-       void *addr, *crcaddr;
-       __sum16 csum;
-       char *p;
-
-       hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
-       dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
-       len  = hlen + dlen;
-
-       if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
-               alloclen = len;
-       else
-               alloclen = max(hlen, RX_COPY_MIN);
-
-       skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
-       if (skb == NULL)
-               return -1;
-
-       *skbref = skb;
-       skb_reserve(skb, swivel);
-
-       p = skb->data;
-       addr = crcaddr = NULL;
-       if (hlen) { /* always copy header pages */
-               i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
-               page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-               off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
-                       swivel;
-
-               i = hlen;
-               if (!dlen) /* attach FCS */
-                       i += cp->crc_size;
-               pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-                                   PCI_DMA_FROMDEVICE);
-               addr = cas_page_map(page->buffer);
-               memcpy(p, addr + off, i);
-               pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-                                   PCI_DMA_FROMDEVICE);
-               cas_page_unmap(addr);
-               RX_USED_ADD(page, 0x100);
-               p += hlen;
-               swivel = 0;
-       }
-
-
-       if (alloclen < (hlen + dlen)) {
-               skb_frag_t *frag = skb_shinfo(skb)->frags;
-
-               /* normal or jumbo packets. we use frags */
-               i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
-               page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-               off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
-
-               hlen = min(cp->page_size - off, dlen);
-               if (hlen < 0) {
-                       netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
-                                    "rx page overflow: %d\n", hlen);
-                       dev_kfree_skb_irq(skb);
-                       return -1;
-               }
-               i = hlen;
-               if (i == dlen)  /* attach FCS */
-                       i += cp->crc_size;
-               pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-                                   PCI_DMA_FROMDEVICE);
-
-               /* make sure we always copy a header */
-               swivel = 0;
-               if (p == (char *) skb->data) { /* not split */
-                       addr = cas_page_map(page->buffer);
-                       memcpy(p, addr + off, RX_COPY_MIN);
-                       pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-                                       PCI_DMA_FROMDEVICE);
-                       cas_page_unmap(addr);
-                       off += RX_COPY_MIN;
-                       swivel = RX_COPY_MIN;
-                       RX_USED_ADD(page, cp->mtu_stride);
-               } else {
-                       RX_USED_ADD(page, hlen);
-               }
-               skb_put(skb, alloclen);
-
-               skb_shinfo(skb)->nr_frags++;
-               skb->data_len += hlen - swivel;
-               skb->truesize += hlen - swivel;
-               skb->len      += hlen - swivel;
-
-               get_page(page->buffer);
-               frag->page = page->buffer;
-               frag->page_offset = off;
-               frag->size = hlen - swivel;
-
-               /* any more data? */
-               if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
-                       hlen = dlen;
-                       off = 0;
-
-                       i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
-                       page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-                       pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
-                                           hlen + cp->crc_size,
-                                           PCI_DMA_FROMDEVICE);
-                       pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
-                                           hlen + cp->crc_size,
-                                           PCI_DMA_FROMDEVICE);
-
-                       skb_shinfo(skb)->nr_frags++;
-                       skb->data_len += hlen;
-                       skb->len      += hlen;
-                       frag++;
-
-                       get_page(page->buffer);
-                       frag->page = page->buffer;
-                       frag->page_offset = 0;
-                       frag->size = hlen;
-                       RX_USED_ADD(page, hlen + cp->crc_size);
-               }
-
-               if (cp->crc_size) {
-                       addr = cas_page_map(page->buffer);
-                       crcaddr  = addr + off + hlen;
-               }
-
-       } else {
-               /* copying packet */
-               if (!dlen)
-                       goto end_copy_pkt;
-
-               i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
-               page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-               off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
-               hlen = min(cp->page_size - off, dlen);
-               if (hlen < 0) {
-                       netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
-                                    "rx page overflow: %d\n", hlen);
-                       dev_kfree_skb_irq(skb);
-                       return -1;
-               }
-               i = hlen;
-               if (i == dlen) /* attach FCS */
-                       i += cp->crc_size;
-               pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-                                   PCI_DMA_FROMDEVICE);
-               addr = cas_page_map(page->buffer);
-               memcpy(p, addr + off, i);
-               pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-                                   PCI_DMA_FROMDEVICE);
-               cas_page_unmap(addr);
-               if (p == (char *) skb->data) /* not split */
-                       RX_USED_ADD(page, cp->mtu_stride);
-               else
-                       RX_USED_ADD(page, i);
-
-               /* any more data? */
-               if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
-                       p += hlen;
-                       i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
-                       page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-                       pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
-                                           dlen + cp->crc_size,
-                                           PCI_DMA_FROMDEVICE);
-                       addr = cas_page_map(page->buffer);
-                       memcpy(p, addr, dlen + cp->crc_size);
-                       pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
-                                           dlen + cp->crc_size,
-                                           PCI_DMA_FROMDEVICE);
-                       cas_page_unmap(addr);
-                       RX_USED_ADD(page, dlen + cp->crc_size);
-               }
-end_copy_pkt:
-               if (cp->crc_size) {
-                       addr    = NULL;
-                       crcaddr = skb->data + alloclen;
-               }
-               skb_put(skb, alloclen);
-       }
-
-       csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
-       if (cp->crc_size) {
-               /* checksum includes FCS. strip it out. */
-               csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
-                                             csum_unfold(csum)));
-               if (addr)
-                       cas_page_unmap(addr);
-       }
-       skb->protocol = eth_type_trans(skb, cp->dev);
-       if (skb->protocol == htons(ETH_P_IP)) {
-               skb->csum = csum_unfold(~csum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
-       } else
-               skb_checksum_none_assert(skb);
-       return len;
-}
-
-
-/* we can handle up to 64 rx flows at a time. we do the same thing
- * as nonreassm except that we batch up the buffers.
- * NOTE: we currently just treat each flow as a bunch of packets that
- *       we pass up. a better way would be to coalesce the packets
- *       into a jumbo packet. to do that, we need to do the following:
- *       1) the first packet will have a clean split between header and
- *          data. save both.
- *       2) each time the next flow packet comes in, extend the
- *          data length and merge the checksums.
- *       3) on flow release, fix up the header.
- *       4) make sure the higher layer doesn't care.
- * because packets get coalesced, we shouldn't run into fragment count
- * issues.
- */
-static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
-                                  struct sk_buff *skb)
-{
-       int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
-       struct sk_buff_head *flow = &cp->rx_flows[flowid];
-
-       /* this is protected at a higher layer, so no need to
-        * do any additional locking here. stick the buffer
-        * at the end.
-        */
-       __skb_queue_tail(flow, skb);
-       if (words[0] & RX_COMP1_RELEASE_FLOW) {
-               while ((skb = __skb_dequeue(flow))) {
-                       cas_skb_release(skb);
-               }
-       }
-}
-
-/* put rx descriptor back on ring. if a buffer is in use by a higher
- * layer, this will need to put in a replacement.
- */
-static void cas_post_page(struct cas *cp, const int ring, const int index)
-{
-       cas_page_t *new;
-       int entry;
-
-       entry = cp->rx_old[ring];
-
-       new = cas_page_swap(cp, ring, index);
-       cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
-       cp->init_rxds[ring][entry].index  =
-               cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
-                           CAS_BASE(RX_INDEX_RING, ring));
-
-       entry = RX_DESC_ENTRY(ring, entry + 1);
-       cp->rx_old[ring] = entry;
-
-       if (entry % 4)
-               return;
-
-       if (ring == 0)
-               writel(entry, cp->regs + REG_RX_KICK);
-       else if ((N_RX_DESC_RINGS > 1) &&
-                (cp->cas_flags & CAS_FLAG_REG_PLUS))
-               writel(entry, cp->regs + REG_PLUS_RX_KICK1);
-}
-
-
-/* only when things are bad */
-static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
-{
-       unsigned int entry, last, count, released;
-       int cluster;
-       cas_page_t **page = cp->rx_pages[ring];
-
-       entry = cp->rx_old[ring];
-
-       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
-                    "rxd[%d] interrupt, done: %d\n", ring, entry);
-
-       cluster = -1;
-       count = entry & 0x3;
-       last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
-       released = 0;
-       while (entry != last) {
-               /* make a new buffer if it's still in use */
-               if (page_count(page[entry]->buffer) > 1) {
-                       cas_page_t *new = cas_page_dequeue(cp);
-                       if (!new) {
-                               /* let the timer know that we need to
-                                * do this again
-                                */
-                               cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
-                               if (!timer_pending(&cp->link_timer))
-                                       mod_timer(&cp->link_timer, jiffies +
-                                                 CAS_LINK_FAST_TIMEOUT);
-                               cp->rx_old[ring]  = entry;
-                               cp->rx_last[ring] = num ? num - released : 0;
-                               return -ENOMEM;
-                       }
-                       spin_lock(&cp->rx_inuse_lock);
-                       list_add(&page[entry]->list, &cp->rx_inuse_list);
-                       spin_unlock(&cp->rx_inuse_lock);
-                       cp->init_rxds[ring][entry].buffer =
-                               cpu_to_le64(new->dma_addr);
-                       page[entry] = new;
-
-               }
-
-               if (++count == 4) {
-                       cluster = entry;
-                       count = 0;
-               }
-               released++;
-               entry = RX_DESC_ENTRY(ring, entry + 1);
-       }
-       cp->rx_old[ring] = entry;
-
-       if (cluster < 0)
-               return 0;
-
-       if (ring == 0)
-               writel(cluster, cp->regs + REG_RX_KICK);
-       else if ((N_RX_DESC_RINGS > 1) &&
-                (cp->cas_flags & CAS_FLAG_REG_PLUS))
-               writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
-       return 0;
-}
-
-
-/* process a completion ring. packets are set up in three basic ways:
- * small packets: should be copied header + data in single buffer.
- * large packets: header and data in a single buffer.
- * split packets: header in a separate buffer from data.
- *                data may be in multiple pages. data may be > 256
- *                bytes but in a single page.
- *
- * NOTE: RX page posting is done in this routine as well. while there's
- *       the capability of using multiple RX completion rings, it isn't
- *       really worthwhile due to the fact that the page posting will
- *       force serialization on the single descriptor ring.
- */
-static int cas_rx_ringN(struct cas *cp, int ring, int budget)
-{
-       struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
-       int entry, drops;
-       int npackets = 0;
-
-       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
-                    "rx[%d] interrupt, done: %d/%d\n",
-                    ring,
-                    readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
-
-       entry = cp->rx_new[ring];
-       drops = 0;
-       while (1) {
-               struct cas_rx_comp *rxc = rxcs + entry;
-               struct sk_buff *uninitialized_var(skb);
-               int type, len;
-               u64 words[4];
-               int i, dring;
-
-               words[0] = le64_to_cpu(rxc->word1);
-               words[1] = le64_to_cpu(rxc->word2);
-               words[2] = le64_to_cpu(rxc->word3);
-               words[3] = le64_to_cpu(rxc->word4);
-
-               /* don't touch if still owned by hw */
-               type = CAS_VAL(RX_COMP1_TYPE, words[0]);
-               if (type == 0)
-                       break;
-
-               /* hw hasn't cleared the zero bit yet */
-               if (words[3] & RX_COMP4_ZERO) {
-                       break;
-               }
-
-               /* get info on the packet */
-               if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
-                       spin_lock(&cp->stat_lock[ring]);
-                       cp->net_stats[ring].rx_errors++;
-                       if (words[3] & RX_COMP4_LEN_MISMATCH)
-                               cp->net_stats[ring].rx_length_errors++;
-                       if (words[3] & RX_COMP4_BAD)
-                               cp->net_stats[ring].rx_crc_errors++;
-                       spin_unlock(&cp->stat_lock[ring]);
-
-                       /* We'll just return it to Cassini. */
-               drop_it:
-                       spin_lock(&cp->stat_lock[ring]);
-                       ++cp->net_stats[ring].rx_dropped;
-                       spin_unlock(&cp->stat_lock[ring]);
-                       goto next;
-               }
-
-               len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
-               if (len < 0) {
-                       ++drops;
-                       goto drop_it;
-               }
-
-               /* see if it's a flow re-assembly or not. the driver
-                * itself handles release back up.
-                */
-               if (RX_DONT_BATCH || (type == 0x2)) {
-                       /* non-reassm: these always get released */
-                       cas_skb_release(skb);
-               } else {
-                       cas_rx_flow_pkt(cp, words, skb);
-               }
-
-               spin_lock(&cp->stat_lock[ring]);
-               cp->net_stats[ring].rx_packets++;
-               cp->net_stats[ring].rx_bytes += len;
-               spin_unlock(&cp->stat_lock[ring]);
-
-       next:
-               npackets++;
-
-               /* should it be released? */
-               if (words[0] & RX_COMP1_RELEASE_HDR) {
-                       i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
-                       dring = CAS_VAL(RX_INDEX_RING, i);
-                       i = CAS_VAL(RX_INDEX_NUM, i);
-                       cas_post_page(cp, dring, i);
-               }
-
-               if (words[0] & RX_COMP1_RELEASE_DATA) {
-                       i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
-                       dring = CAS_VAL(RX_INDEX_RING, i);
-                       i = CAS_VAL(RX_INDEX_NUM, i);
-                       cas_post_page(cp, dring, i);
-               }
-
-               if (words[0] & RX_COMP1_RELEASE_NEXT) {
-                       i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
-                       dring = CAS_VAL(RX_INDEX_RING, i);
-                       i = CAS_VAL(RX_INDEX_NUM, i);
-                       cas_post_page(cp, dring, i);
-               }
-
-               /* skip to the next entry */
-               entry = RX_COMP_ENTRY(ring, entry + 1 +
-                                     CAS_VAL(RX_COMP1_SKIP, words[0]));
-#ifdef USE_NAPI
-               if (budget && (npackets >= budget))
-                       break;
-#endif
-       }
-       cp->rx_new[ring] = entry;
-
-       if (drops)
-               netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
-       return npackets;
-}
-
-
-/* put completion entries back on the ring */
-static void cas_post_rxcs_ringN(struct net_device *dev,
-                               struct cas *cp, int ring)
-{
-       struct cas_rx_comp *rxc = cp->init_rxcs[ring];
-       int last, entry;
-
-       last = cp->rx_cur[ring];
-       entry = cp->rx_new[ring];
-       netif_printk(cp, intr, KERN_DEBUG, dev,
-                    "rxc[%d] interrupt, done: %d/%d\n",
-                    ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
-
-       /* zero and re-mark descriptors */
-       while (last != entry) {
-               cas_rxc_init(rxc + last);
-               last = RX_COMP_ENTRY(ring, last + 1);
-       }
-       cp->rx_cur[ring] = last;
-
-       if (ring == 0)
-               writel(last, cp->regs + REG_RX_COMP_TAIL);
-       else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
-               writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
-}
-
-
-
-/* cassini can use all four PCI interrupts for the completion ring.
- * rings 3 and 4 are identical
- */
/* cassini can use all four PCI interrupts for the completion ring.
 * rings 3 and 4 are identical
 */
#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
/* Non-RX-done housekeeping for completion rings 2/3: when the
 * completion ring is full (or almost full), give entries back to hw.
 */
static inline void cas_handle_irqN(struct net_device *dev,
				   struct cas *cp, const u32 status,
				   const int ring)
{
	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
		cas_post_rxcs_ringN(dev, cp, ring);
}

/* Interrupt handler for PCI INTC/INTD (completion rings 2 and 3). */
static irqreturn_t cas_interruptN(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	/* BUGFIX: the ring number must be derived from the irq line
	 * *before* it is used to select the status register.  The old
	 * code read REG_PLUS_INTRN_STATUS(ring) while 'ring' was still
	 * uninitialized, which is undefined behavior.
	 */
	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));

	/* check for shared irq */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, ring, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}

	if (status)
		cas_handle_irqN(dev, cp, status, ring);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
#endif
-
#ifdef USE_PCI_INTB
/* everything but rx packets: replenish descriptor/completion ring 1
 * on buffer-unavailable / almost-empty / completion-full events.
 */
static inline void cas_handle_irq1(struct cas *cp, const u32 status)
{
	if (status & INTR_RX_BUF_UNAVAIL_1) {
		/* Frame arrived, no free RX buffers available.
		 * NOTE: we can get this on a link transition. */
		cas_post_rxds_ringN(cp, 1, 0);
		spin_lock(&cp->stat_lock[1]);
		cp->net_stats[1].rx_dropped++;
		spin_unlock(&cp->stat_lock[1]);
	}

	/* almost-empty: post enough descriptors to get above the
	 * almost-empty watermark again
	 */
	if (status & INTR_RX_BUF_AE_1)
		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
				    RX_AE_FREEN_VAL(1));

	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
		cas_post_rxcs_ringN(cp, 1);
}

/* ring 2 handles a few more events than 3 and 4 */
static irqreturn_t cas_interrupt1(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));

	/* check for shared interrupt */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		/* defer rx work to the NAPI poll loop */
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, 1, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}
	if (status)
		cas_handle_irq1(cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
#endif
-
/* Housekeeping for the primary interrupt (ring 0): error conditions,
 * RX buffer replenishment and completion-ring replenishment.
 * Called with cp->lock held by the interrupt/poll paths.
 */
static inline void cas_handle_irq(struct net_device *dev,
				  struct cas *cp, const u32 status)
{
	/* housekeeping interrupts */
	if (status & INTR_ERROR_MASK)
		cas_abnormal_irq(dev, cp, status);

	if (status & INTR_RX_BUF_UNAVAIL) {
		/* Frame arrived, no free RX buffers available.
		 * NOTE: we can get this on a link transition.
		 */
		cas_post_rxds_ringN(cp, 0, 0);
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_dropped++;
		spin_unlock(&cp->stat_lock[0]);
	} else if (status & INTR_RX_BUF_AE) {
		/* almost-empty: top the descriptor ring back up */
		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
				    RX_AE_FREEN_VAL(0));
	}

	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
		cas_post_rxcs_ringN(dev, cp, 0);
}
-
/* Primary interrupt handler (PCI INTA): TX completions, RX-done
 * (directly or via NAPI), then remaining housekeeping events.
 */
static irqreturn_t cas_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_INTR_STATUS);

	/* zero status => interrupt belongs to a sharing device */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
		cas_tx(dev, cp, status);
		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
	}

	if (status & INTR_RX_DONE) {
#ifdef USE_NAPI
		/* defer rx work to the NAPI poll loop */
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, 0, 0);
#endif
		status &= ~INTR_RX_DONE;
	}

	if (status)
		cas_handle_irq(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
-
-
#ifdef USE_NAPI
/* NAPI poll: reap TX completions, spread the RX budget fairly across
 * all completion rings, then process any pending housekeeping status
 * before re-enabling interrupts.
 */
static int cas_poll(struct napi_struct *napi, int budget)
{
	struct cas *cp = container_of(napi, struct cas, napi);
	struct net_device *dev = cp->dev;
	int i, enable_intr, credits;
	u32 status = readl(cp->regs + REG_INTR_STATUS);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cas_tx(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);

	/* NAPI rx packets. we spread the credits across all of the
	 * rxc rings
	 *
	 * to make sure we're fair with the work we loop through each
	 * ring N_RX_COMP_RING times with a request of
	 * budget / N_RX_COMP_RINGS
	 */
	enable_intr = 1;
	credits = 0;
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		int j;
		for (j = 0; j < N_RX_COMP_RINGS; j++) {
			credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
			if (credits >= budget) {
				/* budget exhausted: stay in polling mode */
				enable_intr = 0;
				goto rx_comp;
			}
		}
	}

rx_comp:
	/* final rx completion */
	spin_lock_irqsave(&cp->lock, flags);
	if (status)
		cas_handle_irq(dev, cp, status);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
		if (status)
			/* BUGFIX: cas_handle_irq1() takes (cp, status);
			 * the old call passed a bogus leading 'dev'
			 * argument and failed to compile whenever
			 * USE_NAPI and USE_PCI_INTB were both defined.
			 */
			cas_handle_irq1(cp, status);
	}
#endif

#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
		if (status)
			cas_handle_irqN(dev, cp, status, 2);
	}
#endif

#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
		if (status)
			cas_handle_irqN(dev, cp, status, 3);
	}
#endif
	spin_unlock_irqrestore(&cp->lock, flags);
	if (enable_intr) {
		napi_complete(napi);
		cas_unmask_intr(cp);
	}
	return credits;
}
#endif
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polled-mode entry point (netconsole etc.): run the primary interrupt
 * handler with interrupt 0 masked.  Only INTA is serviced here; the
 * per-ring handlers for INTB/C/D are left as stubs below.
 */
static void cas_netpoll(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	cas_disable_irq(cp, 0);
	cas_interrupt(cp->pdev->irq, dev);
	cas_enable_irq(cp, 0);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		/* cas_interrupt1(); */
	}
#endif
#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		/* cas_interruptN(); */
	}
#endif
#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		/* cas_interruptN(); */
	}
#endif
}
#endif
-
/* TX watchdog callback: dump MIF/MAC/TX/RX/header-parser state machine
 * registers for diagnosis, then schedule a full chip reset via the
 * reset workqueue task.
 */
static void cas_tx_timeout(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");
	if (!cp->hw_running) {
		netdev_err(dev, "hrm.. hw not running!\n");
		return;
	}

	netdev_err(dev, "MIF_STATE[%08x]\n",
		   readl(cp->regs + REG_MIF_STATE_MACHINE));

	netdev_err(dev, "MAC_STATE[%08x]\n",
		   readl(cp->regs + REG_MAC_STATE_MACHINE));

	netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
		   readl(cp->regs + REG_TX_CFG),
		   readl(cp->regs + REG_MAC_TX_STATUS),
		   readl(cp->regs + REG_MAC_TX_CFG),
		   readl(cp->regs + REG_TX_FIFO_PKT_CNT),
		   readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
		   readl(cp->regs + REG_TX_FIFO_READ_PTR),
		   readl(cp->regs + REG_TX_SM_1),
		   readl(cp->regs + REG_TX_SM_2));

	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
		   readl(cp->regs + REG_RX_CFG),
		   readl(cp->regs + REG_MAC_RX_STATUS),
		   readl(cp->regs + REG_MAC_RX_CFG));

	netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
		   readl(cp->regs + REG_HP_STATE_MACHINE),
		   readl(cp->regs + REG_HP_STATUS0),
		   readl(cp->regs + REG_HP_STATUS1),
		   readl(cp->regs + REG_HP_STATUS2));

#if 1
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	schedule_work(&cp->reset_task);
#endif
}
-
/* Decide whether the TX descriptor at @entry should request an
 * interrupt.  Algorithm: raise an IRQ once every half ring.
 */
static inline int cas_intme(int ring, int entry)
{
	int half_ring_mask = (TX_DESC_RINGN_SIZE(ring) >> 1) - 1;

	return (entry & half_ring_mask) == 0;
}
-
-
-static void cas_write_txd(struct cas *cp, int ring, int entry,
-                         dma_addr_t mapping, int len, u64 ctrl, int last)
-{
-       struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
-
-       ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
-       if (cas_intme(ring, entry))
-               ctrl |= TX_DESC_INTME;
-       if (last)
-               ctrl |= TX_DESC_EOF;
-       txd->control = cpu_to_le64(ctrl);
-       txd->buffer = cpu_to_le64(mapping);
-}
-
-static inline void *tx_tiny_buf(struct cas *cp, const int ring,
-                               const int entry)
-{
-       return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
-}
-
-static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
-                                    const int entry, const int tentry)
-{
-       cp->tx_tiny_use[ring][tentry].nbufs++;
-       cp->tx_tiny_use[ring][entry].used = 1;
-       return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
-}
-
/* Queue one skb on TX ring @ring.  Maps the linear part and each
 * fragment for DMA, working around a hw limitation by bouncing the
 * tail of a buffer through a "tiny" buffer when cas_calc_tabort()
 * says the buffer crosses a problematic boundary.
 * Returns 0 on success, 1 if the ring was unexpectedly full.
 */
static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
				    struct sk_buff *skb)
{
	struct net_device *dev = cp->dev;
	int entry, nr_frags, frag, tabort, tentry;
	dma_addr_t mapping;
	unsigned long flags;
	u64 ctrl;
	u32 len;

	spin_lock_irqsave(&cp->tx_lock[ring], flags);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp, ring) <=
	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		return 1;
	}

	/* request hw checksum insertion when the stack asked for it */
	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_checksum_start_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl =  TX_DESC_CSUM_EN |
			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
	}

	entry = cp->tx_new[ring];
	cp->tx_skbs[ring][entry] = skb;

	/* map the linear portion first */
	nr_frags = skb_shinfo(skb)->nr_frags;
	len = skb_headlen(skb);
	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
			       offset_in_page(skb->data), len,
			       PCI_DMA_TODEVICE);

	tentry = entry;
	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
	if (unlikely(tabort)) {
		/* NOTE: len is always >  tabort */
		/* split: direct descriptor for the head, tiny-buffer
		 * bounce for the last 'tabort' bytes
		 */
		cas_write_txd(cp, ring, entry, mapping, len - tabort,
			      ctrl | TX_DESC_SOF, 0);
		entry = TX_DESC_NEXT(ring, entry);

		skb_copy_from_linear_data_offset(skb, len - tabort,
			      tx_tiny_buf(cp, ring, entry), tabort);
		mapping = tx_tiny_map(cp, ring, entry, tentry);
		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
			      (nr_frags == 0));
	} else {
		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
			      TX_DESC_SOF, (nr_frags == 0));
	}
	entry = TX_DESC_NEXT(ring, entry);

	/* then each page fragment, with the same bounce workaround */
	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		len = fragp->size;
		mapping = pci_map_page(cp->pdev, fragp->page,
				       fragp->page_offset, len,
				       PCI_DMA_TODEVICE);

		tabort = cas_calc_tabort(cp, fragp->page_offset, len);
		if (unlikely(tabort)) {
			void *addr;

			/* NOTE: len is always > tabort */
			cas_write_txd(cp, ring, entry, mapping, len - tabort,
				      ctrl, 0);
			entry = TX_DESC_NEXT(ring, entry);

			addr = cas_page_map(fragp->page);
			memcpy(tx_tiny_buf(cp, ring, entry),
			       addr + fragp->page_offset + len - tabort,
			       tabort);
			cas_page_unmap(addr);
			mapping = tx_tiny_map(cp, ring, entry, tentry);
			len     = tabort;
		}

		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
			      (frag + 1 == nr_frags));
		entry = TX_DESC_NEXT(ring, entry);
	}

	cp->tx_new[ring] = entry;
	/* stop early: leave room for a worst-case (fully bounced) skb */
	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
	writel(entry, cp->regs + REG_TX_KICKN(ring));
	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
	return 0;
}
-
/* ndo_start_xmit: pad short frames to the hw minimum and round-robin
 * skbs across the TX rings.
 */
static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	/* this is only used as a load-balancing hint, so it doesn't
	 * need to be SMP safe
	 */
	static int ring;

	if (skb_padto(skb, cp->min_frame_size))
		return NETDEV_TX_OK;	/* skb already freed by skb_padto */

	/* XXX: we need some higher-level QoS hooks to steer packets to
	 *      individual queues.
	 */
	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
		return NETDEV_TX_BUSY;
	return NETDEV_TX_OK;
}
-
/* Program the TX DMA engine: completion-writeback address, per-ring
 * descriptor bases, TX config and burst sizes.
 * Called from cas_init_dma() under cp->lock.
 */
static void cas_init_tx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	unsigned long off;
	u32 val;
	int i;

	/* set up tx completion writeback registers. must be 8-byte aligned */
#ifdef USE_TX_COMPWB
	off = offsetof(struct cas_init_block, tx_compwb);
	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
#endif

	/* enable completion writebacks, enable paced mode,
	 * disable read pipe, and disable pre-interrupt compwbs
	 */
	val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
		TX_CFG_INTR_COMPWB_DIS;

	/* write out tx ring info and tx desc bases */
	for (i = 0; i < MAX_TX_RINGS; i++) {
		/* descriptor base = init-block DMA base + offset of
		 * this ring within the init block
		 */
		off = (unsigned long) cp->init_txds[i] -
			(unsigned long) cp->init_block;

		val |= CAS_TX_RINGN_BASE(i);
		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
		writel((desc_dma + off) & 0xffffffff, cp->regs +
		       REG_TX_DBN_LOW(i));
		/* don't zero out the kick register here as the system
		 * will wedge
		 */
	}
	writel(val, cp->regs + REG_TX_CFG);

	/* program max burst sizes. these numbers should be different
	 * if doing QoS.
	 */
#ifdef USE_QOS
	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
#else
	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
#endif
}
-
/* Initialize both DMA engines (TX first, then RX).
 * Must be invoked under cp->lock. */
static inline void cas_init_dma(struct cas *cp)
{
	cas_init_tx_dma(cp);
	cas_init_rx_dma(cp);
}
-
/* Program the multicast filters from the device's mc list: the first
 * CAS_MC_EXACT_MATCH_SIZE addresses go into the alternate exact-match
 * MAC address registers, any remainder goes into the 16x16-bit hw
 * hash table.
 */
static void cas_process_mc_list(struct cas *cp)
{
	u16 hash_table[16];
	u32 crc;
	struct netdev_hw_addr *ha;
	int i = 1;	/* slot 0 holds the primary MAC address */

	memset(hash_table, 0, sizeof(hash_table));
	netdev_for_each_mc_addr(ha, cp->dev) {
		if (i <= CAS_MC_EXACT_MATCH_SIZE) {
			/* use the alternate mac address registers for the
			 * first 15 multicast addresses
			 */
			/* each address occupies three 16-bit registers,
			 * written low half first
			 */
			writel((ha->addr[4] << 8) | ha->addr[5],
			       cp->regs + REG_MAC_ADDRN(i*3 + 0));
			writel((ha->addr[2] << 8) | ha->addr[3],
			       cp->regs + REG_MAC_ADDRN(i*3 + 1));
			writel((ha->addr[0] << 8) | ha->addr[1],
			       cp->regs + REG_MAC_ADDRN(i*3 + 2));
			i++;
		}
		else {
			/* use hw hash table for the next series of
			 * multicast addresses
			 */
			/* top 8 bits of the little-endian CRC select
			 * one bit in the 256-bit hash table
			 */
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
	}
	for (i = 0; i < 16; i++)
		writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
}
-
-/* Must be invoked under cp->lock. */
-static u32 cas_setup_multicast(struct cas *cp)
-{
-       u32 rxcfg = 0;
-       int i;
-
-       if (cp->dev->flags & IFF_PROMISC) {
-               rxcfg |= MAC_RX_CFG_PROMISC_EN;
-
-       } else if (cp->dev->flags & IFF_ALLMULTI) {
-               for (i=0; i < 16; i++)
-                       writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
-               rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
-
-       } else {
-               cas_process_mc_list(cp);
-               rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
-       }
-
-       return rxcfg;
-}
-
-/* must be invoked under cp->stat_lock[N_TX_RINGS] */
-static void cas_clear_mac_err(struct cas *cp)
-{
-       writel(0, cp->regs + REG_MAC_COLL_NORMAL);
-       writel(0, cp->regs + REG_MAC_COLL_FIRST);
-       writel(0, cp->regs + REG_MAC_COLL_EXCESS);
-       writel(0, cp->regs + REG_MAC_COLL_LATE);
-       writel(0, cp->regs + REG_MAC_TIMER_DEFER);
-       writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
-       writel(0, cp->regs + REG_MAC_RECV_FRAME);
-       writel(0, cp->regs + REG_MAC_LEN_ERR);
-       writel(0, cp->regs + REG_MAC_ALIGN_ERR);
-       writel(0, cp->regs + REG_MAC_FCS_ERR);
-       writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
-}
-
-
-static void cas_mac_reset(struct cas *cp)
-{
-       int i;
-
-       /* do both TX and RX reset */
-       writel(0x1, cp->regs + REG_MAC_TX_RESET);
-       writel(0x1, cp->regs + REG_MAC_RX_RESET);
-
-       /* wait for TX */
-       i = STOP_TRIES;
-       while (i-- > 0) {
-               if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
-                       break;
-               udelay(10);
-       }
-
-       /* wait for RX */
-       i = STOP_TRIES;
-       while (i-- > 0) {
-               if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
-                       break;
-               udelay(10);
-       }
-
-       if (readl(cp->regs + REG_MAC_TX_RESET) |
-           readl(cp->regs + REG_MAC_RX_RESET))
-               netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
-                          readl(cp->regs + REG_MAC_TX_RESET),
-                          readl(cp->regs + REG_MAC_RX_RESET),
-                          readl(cp->regs + REG_MAC_STATE_MACHINE));
-}
-
-
/* Full MAC block initialization: reset, inter-packet gaps, frame size
 * limits, station address, filters, multicast setup and interrupt
 * masks.  Must be invoked under cp->lock. */
static void cas_init_mac(struct cas *cp)
{
	unsigned char *e = &cp->dev->dev_addr[0];
	int i;
	cas_mac_reset(cp);

	/* setup core arbitration weight register */
	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);

	/* XXX Use pci_dma_burst_advice() */
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	/* set the infinite burst register for chips that don't have
	 * pci issues.
	 */
	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
#endif

	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);

	/* inter-packet gap timings */
	writel(0x00, cp->regs + REG_MAC_IPG0);
	writel(0x08, cp->regs + REG_MAC_IPG1);
	writel(0x04, cp->regs + REG_MAC_IPG2);

	/* change later for 802.3z */
	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);

	/* min frame + FCS */
	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);

	/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
	 * specify the maximum frame size to prevent RX tag errors on
	 * oversized frames.
	 */
	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
	       cp->regs + REG_MAC_FRAMESIZE_MAX);

	/* NOTE: crc_size is used as a surrogate for half-duplex.
	 * workaround saturn half-duplex issue by increasing preamble
	 * size to 65 bytes.
	 */
	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
	else
		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);

	/* seed the backoff generator from the low bits of the address */
	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);

	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);

	/* setup mac address in perfect filter array */
	for (i = 0; i < 45; i++)
		writel(0x0, cp->regs + REG_MAC_ADDRN(i));

	/* station address: three 16-bit registers, low half first */
	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));

	/* slots 42-44: presumably the 802.3x PAUSE multicast address
	 * 01:80:c2:00:00:01 — TODO confirm against the chip docs
	 */
	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));

	cp->mac_rx_cfg = cas_setup_multicast(cp);

	spin_lock(&cp->stat_lock[N_TX_RINGS]);
	cas_clear_mac_err(cp);
	spin_unlock(&cp->stat_lock[N_TX_RINGS]);

	/* Setup MAC interrupts.  We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
}
-
-/* Must be invoked under cp->lock. */
-static void cas_init_pause_thresholds(struct cas *cp)
-{
-       /* Calculate pause thresholds.  Setting the OFF threshold to the
-        * full RX fifo size effectively disables PAUSE generation
-        */
-       if (cp->rx_fifo_size <= (2 * 1024)) {
-               cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
-       } else {
-               int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
-               if (max_frame * 3 > cp->rx_fifo_size) {
-                       cp->rx_pause_off = 7104;
-                       cp->rx_pause_on  = 960;
-               } else {
-                       int off = (cp->rx_fifo_size - (max_frame * 2));
-                       int on = off - max_frame;
-                       cp->rx_pause_off = off;
-                       cp->rx_pause_on = on;
-               }
-       }
-}
-
-static int cas_vpd_match(const void __iomem *p, const char *str)
-{
-       int len = strlen(str) + 1;
-       int i;
-
-       for (i = 0; i < len; i++) {
-               if (readb(p + i) != str[i])
-                       return 0;
-       }
-       return 1;
-}
-
-
-/* get the mac address by reading the vpd information in the rom.
- * also get the phy type and determine if there's an entropy generator.
- * NOTE: this is a bit convoluted for the following reasons:
- *  1) vpd info has order-dependent mac addresses for multinic cards
- *  2) the only way to determine the nic order is to use the slot
- *     number.
- *  3) fiber cards don't have bridges, so their slot numbers don't
- *     mean anything.
- *  4) we don't actually know we have a fiber card until after
- *     the mac addresses are parsed.
- */
-static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
-                           const int offset)
-{
-       void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
-       void __iomem *base, *kstart;
-       int i, len;
-       int found = 0;
-#define VPD_FOUND_MAC        0x01
-#define VPD_FOUND_PHY        0x02
-
-       int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
-       int mac_off  = 0;
-
-#if defined(CONFIG_SPARC)
-       const unsigned char *addr;
-#endif
-
-       /* give us access to the PROM */
-       writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
-              cp->regs + REG_BIM_LOCAL_DEV_EN);
-
-       /* check for an expansion rom */
-       if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
-               goto use_random_mac_addr;
-
-       /* search for beginning of vpd */
-       base = NULL;
-       for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
-               /* check for PCIR */
-               if ((readb(p + i + 0) == 0x50) &&
-                   (readb(p + i + 1) == 0x43) &&
-                   (readb(p + i + 2) == 0x49) &&
-                   (readb(p + i + 3) == 0x52)) {
-                       base = p + (readb(p + i + 8) |
-                                   (readb(p + i + 9) << 8));
-                       break;
-               }
-       }
-
-       if (!base || (readb(base) != 0x82))
-               goto use_random_mac_addr;
-
-       i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
-       while (i < EXPANSION_ROM_SIZE) {
-               if (readb(base + i) != 0x90) /* no vpd found */
-                       goto use_random_mac_addr;
-
-               /* found a vpd field */
-               len = readb(base + i + 1) | (readb(base + i + 2) << 8);
-
-               /* extract keywords */
-               kstart = base + i + 3;
-               p = kstart;
-               while ((p - kstart) < len) {
-                       int klen = readb(p + 2);
-                       int j;
-                       char type;
-
-                       p += 3;
-
-                       /* look for the following things:
-                        * -- correct length == 29
-                        * 3 (type) + 2 (size) +
-                        * 18 (strlen("local-mac-address") + 1) +
-                        * 6 (mac addr)
-                        * -- VPD Instance 'I'
-                        * -- VPD Type Bytes 'B'
-                        * -- VPD data length == 6
-                        * -- property string == local-mac-address
-                        *
-                        * -- correct length == 24
-                        * 3 (type) + 2 (size) +
-                        * 12 (strlen("entropy-dev") + 1) +
-                        * 7 (strlen("vms110") + 1)
-                        * -- VPD Instance 'I'
-                        * -- VPD Type String 'B'
-                        * -- VPD data length == 7
-                        * -- property string == entropy-dev
-                        *
-                        * -- correct length == 18
-                        * 3 (type) + 2 (size) +
-                        * 9 (strlen("phy-type") + 1) +
-                        * 4 (strlen("pcs") + 1)
-                        * -- VPD Instance 'I'
-                        * -- VPD Type String 'S'
-                        * -- VPD data length == 4
-                        * -- property string == phy-type
-                        *
-                        * -- correct length == 23
-                        * 3 (type) + 2 (size) +
-                        * 14 (strlen("phy-interface") + 1) +
-                        * 4 (strlen("pcs") + 1)
-                        * -- VPD Instance 'I'
-                        * -- VPD Type String 'S'
-                        * -- VPD data length == 4
-                        * -- property string == phy-interface
-                        */
-                       if (readb(p) != 'I')
-                               goto next;
-
-                       /* finally, check string and length */
-                       type = readb(p + 3);
-                       if (type == 'B') {
-                               if ((klen == 29) && readb(p + 4) == 6 &&
-                                   cas_vpd_match(p + 5,
-                                                 "local-mac-address")) {
-                                       if (mac_off++ > offset)
-                                               goto next;
-
-                                       /* set mac address */
-                                       for (j = 0; j < 6; j++)
-                                               dev_addr[j] =
-                                                       readb(p + 23 + j);
-                                       goto found_mac;
-                               }
-                       }
-
-                       if (type != 'S')
-                               goto next;
-
-#ifdef USE_ENTROPY_DEV
-                       if ((klen == 24) &&
-                           cas_vpd_match(p + 5, "entropy-dev") &&
-                           cas_vpd_match(p + 17, "vms110")) {
-                               cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
-                               goto next;
-                       }
-#endif
-
-                       if (found & VPD_FOUND_PHY)
-                               goto next;
-
-                       if ((klen == 18) && readb(p + 4) == 4 &&
-                           cas_vpd_match(p + 5, "phy-type")) {
-                               if (cas_vpd_match(p + 14, "pcs")) {
-                                       phy_type = CAS_PHY_SERDES;
-                                       goto found_phy;
-                               }
-                       }
-
-                       if ((klen == 23) && readb(p + 4) == 4 &&
-                           cas_vpd_match(p + 5, "phy-interface")) {
-                               if (cas_vpd_match(p + 19, "pcs")) {
-                                       phy_type = CAS_PHY_SERDES;
-                                       goto found_phy;
-                               }
-                       }
-found_mac:
-                       found |= VPD_FOUND_MAC;
-                       goto next;
-
-found_phy:
-                       found |= VPD_FOUND_PHY;
-
-next:
-                       p += klen;
-               }
-               i += len + 3;
-       }
-
-use_random_mac_addr:
-       if (found & VPD_FOUND_MAC)
-               goto done;
-
-#if defined(CONFIG_SPARC)
-       addr = of_get_property(cp->of_node, "local-mac-address", NULL);
-       if (addr != NULL) {
-               memcpy(dev_addr, addr, 6);
-               goto done;
-       }
-#endif
-
-       /* Sun MAC prefix then 3 random bytes. */
-       pr_info("MAC address not found in ROM VPD\n");
-       dev_addr[0] = 0x08;
-       dev_addr[1] = 0x00;
-       dev_addr[2] = 0x20;
-       get_random_bytes(dev_addr + 3, 3);
-
-done:
-       writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
-       return phy_type;
-}
-
-/* check pci invariants */
-static void cas_check_pci_invariants(struct cas *cp)
-{
-       struct pci_dev *pdev = cp->pdev;
-
-       cp->cas_flags = 0;
-       if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
-           (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
-               if (pdev->revision >= CAS_ID_REVPLUS)
-                       cp->cas_flags |= CAS_FLAG_REG_PLUS;
-               if (pdev->revision < CAS_ID_REVPLUS02u)
-                       cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
-
-               /* Original Cassini supports HW CSUM, but it's not
-                * enabled by default as it can trigger TX hangs.
-                */
-               if (pdev->revision < CAS_ID_REV2)
-                       cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
-       } else {
-               /* Only sun has original cassini chips.  */
-               cp->cas_flags |= CAS_FLAG_REG_PLUS;
-
-               /* We use a flag because the same phy might be externally
-                * connected.
-                */
-               if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
-                   (pdev->device == PCI_DEVICE_ID_NS_SATURN))
-                       cp->cas_flags |= CAS_FLAG_SATURN;
-       }
-}
-
-
-static int cas_check_invariants(struct cas *cp)
-{
-       struct pci_dev *pdev = cp->pdev;
-       u32 cfg;
-       int i;
-
-       /* get page size for rx buffers. */
-       cp->page_order = 0;
-#ifdef USE_PAGE_ORDER
-       if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
-               /* see if we can allocate larger pages */
-               struct page *page = alloc_pages(GFP_ATOMIC,
-                                               CAS_JUMBO_PAGE_SHIFT -
-                                               PAGE_SHIFT);
-               if (page) {
-                       __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
-                       cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
-               } else {
-                       printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
-               }
-       }
-#endif
-       cp->page_size = (PAGE_SIZE << cp->page_order);
-
-       /* Fetch the FIFO configurations. */
-       cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
-       cp->rx_fifo_size = RX_FIFO_SIZE;
-
-       /* finish phy determination. MDIO1 takes precedence over MDIO0 if
-        * they're both connected.
-        */
-       cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
-                                       PCI_SLOT(pdev->devfn));
-       if (cp->phy_type & CAS_PHY_SERDES) {
-               cp->cas_flags |= CAS_FLAG_1000MB_CAP;
-               return 0; /* no more checking needed */
-       }
-
-       /* MII */
-       cfg = readl(cp->regs + REG_MIF_CFG);
-       if (cfg & MIF_CFG_MDIO_1) {
-               cp->phy_type = CAS_PHY_MII_MDIO1;
-       } else if (cfg & MIF_CFG_MDIO_0) {
-               cp->phy_type = CAS_PHY_MII_MDIO0;
-       }
-
-       cas_mif_poll(cp, 0);
-       writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
-
-       for (i = 0; i < 32; i++) {
-               u32 phy_id;
-               int j;
-
-               for (j = 0; j < 3; j++) {
-                       cp->phy_addr = i;
-                       phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
-                       phy_id |= cas_phy_read(cp, MII_PHYSID2);
-                       if (phy_id && (phy_id != 0xFFFFFFFF)) {
-                               cp->phy_id = phy_id;
-                               goto done;
-                       }
-               }
-       }
-       pr_err("MII phy did not respond [%08x]\n",
-              readl(cp->regs + REG_MIF_STATE_MACHINE));
-       return -1;
-
-done:
-       /* see if we can do gigabit */
-       cfg = cas_phy_read(cp, MII_BMSR);
-       if ((cfg & CAS_BMSR_1000_EXTEND) &&
-           cas_phy_read(cp, CAS_MII_1000_EXTEND))
-               cp->cas_flags |= CAS_FLAG_1000MB_CAP;
-       return 0;
-}
-
-/* Must be invoked under cp->lock. */
-static inline void cas_start_dma(struct cas *cp)
-{
-       int i;
-       u32 val;
-       int txfailed = 0;
-
-       /* enable dma */
-       val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
-       writel(val, cp->regs + REG_TX_CFG);
-       val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
-       writel(val, cp->regs + REG_RX_CFG);
-
-       /* enable the mac */
-       val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
-       writel(val, cp->regs + REG_MAC_TX_CFG);
-       val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
-       writel(val, cp->regs + REG_MAC_RX_CFG);
-
-       i = STOP_TRIES;
-       while (i-- > 0) {
-               val = readl(cp->regs + REG_MAC_TX_CFG);
-               if ((val & MAC_TX_CFG_EN))
-                       break;
-               udelay(10);
-       }
-       if (i < 0) txfailed = 1;
-       i = STOP_TRIES;
-       while (i-- > 0) {
-               val = readl(cp->regs + REG_MAC_RX_CFG);
-               if ((val & MAC_RX_CFG_EN)) {
-                       if (txfailed) {
-                               netdev_err(cp->dev,
-                                          "enabling mac failed [tx:%08x:%08x]\n",
-                                          readl(cp->regs + REG_MIF_STATE_MACHINE),
-                                          readl(cp->regs + REG_MAC_STATE_MACHINE));
-                       }
-                       goto enable_rx_done;
-               }
-               udelay(10);
-       }
-       netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
-                  (txfailed ? "tx,rx" : "rx"),
-                  readl(cp->regs + REG_MIF_STATE_MACHINE),
-                  readl(cp->regs + REG_MAC_STATE_MACHINE));
-
-enable_rx_done:
-       cas_unmask_intr(cp); /* enable interrupts */
-       writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
-       writel(0, cp->regs + REG_RX_COMP_TAIL);
-
-       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
-               if (N_RX_DESC_RINGS > 1)
-                       writel(RX_DESC_RINGN_SIZE(1) - 4,
-                              cp->regs + REG_PLUS_RX_KICK1);
-
-               for (i = 1; i < N_RX_COMP_RINGS; i++)
-                       writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
-       }
-}
-
-/* Must be invoked under cp->lock. */
-static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
-                                  int *pause)
-{
-       u32 val = readl(cp->regs + REG_PCS_MII_LPA);
-       *fd     = (val & PCS_MII_LPA_FD) ? 1 : 0;
-       *pause  = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
-       if (val & PCS_MII_LPA_ASYM_PAUSE)
-               *pause |= 0x10;
-       *spd = 1000;
-}
-
-/* Must be invoked under cp->lock. */
-static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
-                                  int *pause)
-{
-       u32 val;
-
-       *fd = 0;
-       *spd = 10;
-       *pause = 0;
-
-       /* use GMII registers */
-       val = cas_phy_read(cp, MII_LPA);
-       if (val & CAS_LPA_PAUSE)
-               *pause = 0x01;
-
-       if (val & CAS_LPA_ASYM_PAUSE)
-               *pause |= 0x10;
-
-       if (val & LPA_DUPLEX)
-               *fd = 1;
-       if (val & LPA_100)
-               *spd = 100;
-
-       if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
-               val = cas_phy_read(cp, CAS_MII_1000_STATUS);
-               if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
-                       *spd = 1000;
-               if (val & CAS_LPA_1000FULL)
-                       *fd = 1;
-       }
-}
-
-/* A link-up condition has occurred, initialize and enable the
- * rest of the chip.
- *
- * Must be invoked under cp->lock.
- */
-static void cas_set_link_modes(struct cas *cp)
-{
-       u32 val;
-       int full_duplex, speed, pause;
-
-       full_duplex = 0;
-       speed = 10;
-       pause = 0;
-
-       if (CAS_PHY_MII(cp->phy_type)) {
-               cas_mif_poll(cp, 0);
-               val = cas_phy_read(cp, MII_BMCR);
-               if (val & BMCR_ANENABLE) {
-                       cas_read_mii_link_mode(cp, &full_duplex, &speed,
-                                              &pause);
-               } else {
-                       if (val & BMCR_FULLDPLX)
-                               full_duplex = 1;
-
-                       if (val & BMCR_SPEED100)
-                               speed = 100;
-                       else if (val & CAS_BMCR_SPEED1000)
-                               speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
-                                       1000 : 100;
-               }
-               cas_mif_poll(cp, 1);
-
-       } else {
-               val = readl(cp->regs + REG_PCS_MII_CTRL);
-               cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
-               if ((val & PCS_MII_AUTONEG_EN) == 0) {
-                       if (val & PCS_MII_CTRL_DUPLEX)
-                               full_duplex = 1;
-               }
-       }
-
-       netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
-                  speed, full_duplex ? "full" : "half");
-
-       val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
-       if (CAS_PHY_MII(cp->phy_type)) {
-               val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
-               if (!full_duplex)
-                       val |= MAC_XIF_DISABLE_ECHO;
-       }
-       if (full_duplex)
-               val |= MAC_XIF_FDPLX_LED;
-       if (speed == 1000)
-               val |= MAC_XIF_GMII_MODE;
-       writel(val, cp->regs + REG_MAC_XIF_CFG);
-
-       /* deal with carrier and collision detect. */
-       val = MAC_TX_CFG_IPG_EN;
-       if (full_duplex) {
-               val |= MAC_TX_CFG_IGNORE_CARRIER;
-               val |= MAC_TX_CFG_IGNORE_COLL;
-       } else {
-#ifndef USE_CSMA_CD_PROTO
-               val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
-               val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
-#endif
-       }
-       /* val now set up for REG_MAC_TX_CFG */
-
-       /* If gigabit and half-duplex, enable carrier extension
-        * mode.  increase slot time to 512 bytes as well.
-        * else, disable it and make sure slot time is 64 bytes.
-        * also activate checksum bug workaround
-        */
-       if ((speed == 1000) && !full_duplex) {
-               writel(val | MAC_TX_CFG_CARRIER_EXTEND,
-                      cp->regs + REG_MAC_TX_CFG);
-
-               val = readl(cp->regs + REG_MAC_RX_CFG);
-               val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
-               writel(val | MAC_RX_CFG_CARRIER_EXTEND,
-                      cp->regs + REG_MAC_RX_CFG);
-
-               writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
-
-               cp->crc_size = 4;
-               /* minimum size gigabit frame at half duplex */
-               cp->min_frame_size = CAS_1000MB_MIN_FRAME;
-
-       } else {
-               writel(val, cp->regs + REG_MAC_TX_CFG);
-
-               /* checksum bug workaround. don't strip FCS when in
-                * half-duplex mode
-                */
-               val = readl(cp->regs + REG_MAC_RX_CFG);
-               if (full_duplex) {
-                       val |= MAC_RX_CFG_STRIP_FCS;
-                       cp->crc_size = 0;
-                       cp->min_frame_size = CAS_MIN_MTU;
-               } else {
-                       val &= ~MAC_RX_CFG_STRIP_FCS;
-                       cp->crc_size = 4;
-                       cp->min_frame_size = CAS_MIN_FRAME;
-               }
-               writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
-                      cp->regs + REG_MAC_RX_CFG);
-               writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
-       }
-
-       if (netif_msg_link(cp)) {
-               if (pause & 0x01) {
-                       netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
-                                   cp->rx_fifo_size,
-                                   cp->rx_pause_off,
-                                   cp->rx_pause_on);
-               } else if (pause & 0x10) {
-                       netdev_info(cp->dev, "TX pause enabled\n");
-               } else {
-                       netdev_info(cp->dev, "Pause is disabled\n");
-               }
-       }
-
-       val = readl(cp->regs + REG_MAC_CTRL_CFG);
-       val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
-       if (pause) { /* symmetric or asymmetric pause */
-               val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
-               if (pause & 0x01) { /* symmetric pause */
-                       val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
-               }
-       }
-       writel(val, cp->regs + REG_MAC_CTRL_CFG);
-       cas_start_dma(cp);
-}
-
-/* Must be invoked under cp->lock. */
-static void cas_init_hw(struct cas *cp, int restart_link)
-{
-       if (restart_link)
-               cas_phy_init(cp);
-
-       cas_init_pause_thresholds(cp);
-       cas_init_mac(cp);
-       cas_init_dma(cp);
-
-       if (restart_link) {
-               /* Default aneg parameters */
-               cp->timer_ticks = 0;
-               cas_begin_auto_negotiation(cp, NULL);
-       } else if (cp->lstate == link_up) {
-               cas_set_link_modes(cp);
-               netif_carrier_on(cp->dev);
-       }
-}
-
-/* Must be invoked under cp->lock. on earlier cassini boards,
- * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
- * let it settle out, and then restore pci state.
- */
-static void cas_hard_reset(struct cas *cp)
-{
-       writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
-       udelay(20);
-       pci_restore_state(cp->pdev);
-}
-
-
-static void cas_global_reset(struct cas *cp, int blkflag)
-{
-       int limit;
-
-       /* issue a global reset. don't use RSTOUT. */
-       if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
-               /* For PCS, when the blkflag is set, we should set the
-                * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of
-                * the last autonegotiation from being cleared.  We'll
-                * need some special handling if the chip is set into a
-                * loopback mode.
-                */
-               writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
-                      cp->regs + REG_SW_RESET);
-       } else {
-               writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
-       }
-
-       /* need to wait at least 3ms before polling register */
-       mdelay(3);
-
-       limit = STOP_TRIES;
-       while (limit-- > 0) {
-               u32 val = readl(cp->regs + REG_SW_RESET);
-               if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
-                       goto done;
-               udelay(10);
-       }
-       netdev_err(cp->dev, "sw reset failed\n");
-
-done:
-       /* enable various BIM interrupts */
-       writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
-              BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
-
-       /* clear out pci error status mask for handled errors.
-        * we don't deal with DMA counter overflows as they happen
-        * all the time.
-        */
-       writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
-                              PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
-                              PCI_ERR_BIM_DMA_READ), cp->regs +
-              REG_PCI_ERR_STATUS_MASK);
-
-       /* set up for MII by default to address mac rx reset timeout
-        * issue
-        */
-       writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
-}
-
-static void cas_reset(struct cas *cp, int blkflag)
-{
-       u32 val;
-
-       cas_mask_intr(cp);
-       cas_global_reset(cp, blkflag);
-       cas_mac_reset(cp);
-       cas_entropy_reset(cp);
-
-       /* disable dma engines. */
-       val = readl(cp->regs + REG_TX_CFG);
-       val &= ~TX_CFG_DMA_EN;
-       writel(val, cp->regs + REG_TX_CFG);
-
-       val = readl(cp->regs + REG_RX_CFG);
-       val &= ~RX_CFG_DMA_EN;
-       writel(val, cp->regs + REG_RX_CFG);
-
-       /* program header parser */
-       if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
-           (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
-               cas_load_firmware(cp, CAS_HP_FIRMWARE);
-       } else {
-               cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
-       }
-
-       /* clear out error registers */
-       spin_lock(&cp->stat_lock[N_TX_RINGS]);
-       cas_clear_mac_err(cp);
-       spin_unlock(&cp->stat_lock[N_TX_RINGS]);
-}
-
-/* Shut down the chip, must be called with pm_mutex held.  */
-static void cas_shutdown(struct cas *cp)
-{
-       unsigned long flags;
-
-       /* Make us not-running to avoid timers respawning */
-       cp->hw_running = 0;
-
-       del_timer_sync(&cp->link_timer);
-
-       /* Stop the reset task */
-#if 0
-       while (atomic_read(&cp->reset_task_pending_mtu) ||
-              atomic_read(&cp->reset_task_pending_spare) ||
-              atomic_read(&cp->reset_task_pending_all))
-               schedule();
-
-#else
-       while (atomic_read(&cp->reset_task_pending))
-               schedule();
-#endif
-       /* Actually stop the chip */
-       cas_lock_all_save(cp, flags);
-       cas_reset(cp, 0);
-       if (cp->cas_flags & CAS_FLAG_SATURN)
-               cas_phy_powerdown(cp);
-       cas_unlock_all_restore(cp, flags);
-}
-
-static int cas_change_mtu(struct net_device *dev, int new_mtu)
-{
-       struct cas *cp = netdev_priv(dev);
-
-       if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
-               return -EINVAL;
-
-       dev->mtu = new_mtu;
-       if (!netif_running(dev) || !netif_device_present(dev))
-               return 0;
-
-       /* let the reset task handle it */
-#if 1
-       atomic_inc(&cp->reset_task_pending);
-       if ((cp->phy_type & CAS_PHY_SERDES)) {
-               atomic_inc(&cp->reset_task_pending_all);
-       } else {
-               atomic_inc(&cp->reset_task_pending_mtu);
-       }
-       schedule_work(&cp->reset_task);
-#else
-       atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
-                  CAS_RESET_ALL : CAS_RESET_MTU);
-       pr_err("reset called in cas_change_mtu\n");
-       schedule_work(&cp->reset_task);
-#endif
-
-       flush_work_sync(&cp->reset_task);
-       return 0;
-}
-
-static void cas_clean_txd(struct cas *cp, int ring)
-{
-       struct cas_tx_desc *txd = cp->init_txds[ring];
-       struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
-       u64 daddr, dlen;
-       int i, size;
-
-       size = TX_DESC_RINGN_SIZE(ring);
-       for (i = 0; i < size; i++) {
-               int frag;
-
-               if (skbs[i] == NULL)
-                       continue;
-
-               skb = skbs[i];
-               skbs[i] = NULL;
-
-               for (frag = 0; frag <= skb_shinfo(skb)->nr_frags;  frag++) {
-                       int ent = i & (size - 1);
-
-                       /* first buffer is never a tiny buffer and so
-                        * needs to be unmapped.
-                        */
-                       daddr = le64_to_cpu(txd[ent].buffer);
-                       dlen  =  CAS_VAL(TX_DESC_BUFLEN,
-                                        le64_to_cpu(txd[ent].control));
-                       pci_unmap_page(cp->pdev, daddr, dlen,
-                                      PCI_DMA_TODEVICE);
-
-                       if (frag != skb_shinfo(skb)->nr_frags) {
-                               i++;
-
-                               /* next buffer might by a tiny buffer.
-                                * skip past it.
-                                */
-                               ent = i & (size - 1);
-                               if (cp->tx_tiny_use[ring][ent].used)
-                                       i++;
-                       }
-               }
-               dev_kfree_skb_any(skb);
-       }
-
-       /* zero out tiny buf usage */
-       memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
-}
-
-/* freed on close */
-static inline void cas_free_rx_desc(struct cas *cp, int ring)
-{
-       cas_page_t **page = cp->rx_pages[ring];
-       int i, size;
-
-       size = RX_DESC_RINGN_SIZE(ring);
-       for (i = 0; i < size; i++) {
-               if (page[i]) {
-                       cas_page_free(cp, page[i]);
-                       page[i] = NULL;
-               }
-       }
-}
-
-static void cas_free_rxds(struct cas *cp)
-{
-       int i;
-
-       for (i = 0; i < N_RX_DESC_RINGS; i++)
-               cas_free_rx_desc(cp, i);
-}
-
-/* Must be invoked under cp->lock. */
-static void cas_clean_rings(struct cas *cp)
-{
-       int i;
-
-       /* need to clean all tx rings */
-       memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
-       memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
-       for (i = 0; i < N_TX_RINGS; i++)
-               cas_clean_txd(cp, i);
-
-       /* zero out init block */
-       memset(cp->init_block, 0, sizeof(struct cas_init_block));
-       cas_clean_rxds(cp);
-       cas_clean_rxcs(cp);
-}
-
-/* allocated on open */
-static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
-{
-       cas_page_t **page = cp->rx_pages[ring];
-       int size, i = 0;
-
-       size = RX_DESC_RINGN_SIZE(ring);
-       for (i = 0; i < size; i++) {
-               if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
-                       return -1;
-       }
-       return 0;
-}
-
-static int cas_alloc_rxds(struct cas *cp)
-{
-       int i;
-
-       for (i = 0; i < N_RX_DESC_RINGS; i++) {
-               if (cas_alloc_rx_desc(cp, i) < 0) {
-                       cas_free_rxds(cp);
-                       return -1;
-               }
-       }
-       return 0;
-}
-
-static void cas_reset_task(struct work_struct *work)
-{
-       struct cas *cp = container_of(work, struct cas, reset_task);
-#if 0
-       int pending = atomic_read(&cp->reset_task_pending);
-#else
-       int pending_all = atomic_read(&cp->reset_task_pending_all);
-       int pending_spare = atomic_read(&cp->reset_task_pending_spare);
-       int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
-
-       if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
-               /* We can have more tasks scheduled than actually
-                * needed.
-                */
-               atomic_dec(&cp->reset_task_pending);
-               return;
-       }
-#endif
-       /* The link went down, we reset the ring, but keep
-        * DMA stopped. Use this function for reset
-        * on error as well.
-        */
-       if (cp->hw_running) {
-               unsigned long flags;
-
-               /* Make sure we don't get interrupts or tx packets */
-               netif_device_detach(cp->dev);
-               cas_lock_all_save(cp, flags);
-
-               if (cp->opened) {
-                       /* We call cas_spare_recover when we call cas_open.
-                        * but we do not initialize the lists cas_spare_recover
-                        * uses until cas_open is called.
-                        */
-                       cas_spare_recover(cp, GFP_ATOMIC);
-               }
-#if 1
-               /* test => only pending_spare set */
-               if (!pending_all && !pending_mtu)
-                       goto done;
-#else
-               if (pending == CAS_RESET_SPARE)
-                       goto done;
-#endif
-               /* when pending == CAS_RESET_ALL, the following
-                * call to cas_init_hw will restart auto negotiation.
-                * Setting the second argument of cas_reset to
-                * !(pending == CAS_RESET_ALL) will set this argument
-                * to 1 (avoiding reinitializing the PHY for the normal
-                * PCS case) when auto negotiation is not restarted.
-                */
-#if 1
-               cas_reset(cp, !(pending_all > 0));
-               if (cp->opened)
-                       cas_clean_rings(cp);
-               cas_init_hw(cp, (pending_all > 0));
-#else
-               cas_reset(cp, !(pending == CAS_RESET_ALL));
-               if (cp->opened)
-                       cas_clean_rings(cp);
-               cas_init_hw(cp, pending == CAS_RESET_ALL);
-#endif
-
-done:
-               cas_unlock_all_restore(cp, flags);
-               netif_device_attach(cp->dev);
-       }
-#if 1
-       atomic_sub(pending_all, &cp->reset_task_pending_all);
-       atomic_sub(pending_spare, &cp->reset_task_pending_spare);
-       atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
-       atomic_dec(&cp->reset_task_pending);
-#else
-       atomic_set(&cp->reset_task_pending, 0);
-#endif
-}
-
-static void cas_link_timer(unsigned long data)
-{
-       struct cas *cp = (struct cas *) data;
-       int mask, pending = 0, reset = 0;
-       unsigned long flags;
-
-       if (link_transition_timeout != 0 &&
-           cp->link_transition_jiffies_valid &&
-           ((jiffies - cp->link_transition_jiffies) >
-             (link_transition_timeout))) {
-               /* One-second counter so link-down workaround doesn't
-                * cause resets to occur so fast as to fool the switch
-                * into thinking the link is down.
-                */
-               cp->link_transition_jiffies_valid = 0;
-       }
-
-       if (!cp->hw_running)
-               return;
-
-       spin_lock_irqsave(&cp->lock, flags);
-       cas_lock_tx(cp);
-       cas_entropy_gather(cp);
-
-       /* If the link task is still pending, we just
-        * reschedule the link timer
-        */
-#if 1
-       if (atomic_read(&cp->reset_task_pending_all) ||
-           atomic_read(&cp->reset_task_pending_spare) ||
-           atomic_read(&cp->reset_task_pending_mtu))
-               goto done;
-#else
-       if (atomic_read(&cp->reset_task_pending))
-               goto done;
-#endif
-
-       /* check for rx cleaning */
-       if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
-               int i, rmask;
-
-               for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
-                       rmask = CAS_FLAG_RXD_POST(i);
-                       if ((mask & rmask) == 0)
-                               continue;
-
-                       /* post_rxds will do a mod_timer */
-                       if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
-                               pending = 1;
-                               continue;
-                       }
-                       cp->cas_flags &= ~rmask;
-               }
-       }
-
-       if (CAS_PHY_MII(cp->phy_type)) {
-               u16 bmsr;
-               cas_mif_poll(cp, 0);
-               bmsr = cas_phy_read(cp, MII_BMSR);
-               /* WTZ: Solaris driver reads this twice, but that
-                * may be due to the PCS case and the use of a
-                * common implementation. Read it twice here to be
-                * safe.
-                */
-               bmsr = cas_phy_read(cp, MII_BMSR);
-               cas_mif_poll(cp, 1);
-               readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
-               reset = cas_mii_link_check(cp, bmsr);
-       } else {
-               reset = cas_pcs_link_check(cp);
-       }
-
-       if (reset)
-               goto done;
-
-       /* check for tx state machine confusion */
-       if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
-               u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
-               u32 wptr, rptr;
-               int tlm  = CAS_VAL(MAC_SM_TLM, val);
-
-               if (((tlm == 0x5) || (tlm == 0x3)) &&
-                   (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
-                       netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
-                                    "tx err: MAC_STATE[%08x]\n", val);
-                       reset = 1;
-                       goto done;
-               }
-
-               val  = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
-               wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
-               rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
-               if ((val == 0) && (wptr != rptr)) {
-                       netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
-                                    "tx err: TX_FIFO[%08x:%08x:%08x]\n",
-                                    val, wptr, rptr);
-                       reset = 1;
-               }
-
-               if (reset)
-                       cas_hard_reset(cp);
-       }
-
-done:
-       if (reset) {
-#if 1
-               atomic_inc(&cp->reset_task_pending);
-               atomic_inc(&cp->reset_task_pending_all);
-               schedule_work(&cp->reset_task);
-#else
-               atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
-               pr_err("reset called in cas_link_timer\n");
-               schedule_work(&cp->reset_task);
-#endif
-       }
-
-       if (!pending)
-               mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
-       cas_unlock_tx(cp);
-       spin_unlock_irqrestore(&cp->lock, flags);
-}
-
-/* tiny buffers are used to avoid target abort issues with
- * older cassini's
- */
-static void cas_tx_tiny_free(struct cas *cp)
-{
-       struct pci_dev *pdev = cp->pdev;
-       int i;
-
-       for (i = 0; i < N_TX_RINGS; i++) {
-               if (!cp->tx_tiny_bufs[i])
-                       continue;
-
-               pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
-                                   cp->tx_tiny_bufs[i],
-                                   cp->tx_tiny_dvma[i]);
-               cp->tx_tiny_bufs[i] = NULL;
-       }
-}
-
-static int cas_tx_tiny_alloc(struct cas *cp)
-{
-       struct pci_dev *pdev = cp->pdev;
-       int i;
-
-       for (i = 0; i < N_TX_RINGS; i++) {
-               cp->tx_tiny_bufs[i] =
-                       pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
-                                            &cp->tx_tiny_dvma[i]);
-               if (!cp->tx_tiny_bufs[i]) {
-                       cas_tx_tiny_free(cp);
-                       return -1;
-               }
-       }
-       return 0;
-}
-
-
/* net_device open() entry point.
 *
 * Powers the chip up (resetting it) if it was not already running,
 * allocates TX tiny buffers, RX descriptors and spare buffers, requests
 * the shared interrupt and finally initializes the hardware and starts
 * the TX queue.  Failure paths unwind via the goto ladder at the bottom.
 * pm_mutex serializes against close, ioctl and suspend/resume.
 */
static int cas_open(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	int hw_was_up, err;
	unsigned long flags;

	mutex_lock(&cp->pm_mutex);

	hw_was_up = cp->hw_running;

	/* The power-management mutex protects the hw_running
	 * etc. state so it is safe to do this bit without cp->lock
	 */
	if (!cp->hw_running) {
		/* Reset the chip */
		cas_lock_all_save(cp, flags);
		/* We set the second arg to cas_reset to zero
		 * because cas_init_hw below will have its second
		 * argument set to non-zero, which will force
		 * autonegotiation to start.
		 */
		cas_reset(cp, 0);
		cp->hw_running = 1;
		cas_unlock_all_restore(cp, flags);
	}

	err = -ENOMEM;
	if (cas_tx_tiny_alloc(cp) < 0)
		goto err_unlock;

	/* alloc rx descriptors */
	if (cas_alloc_rxds(cp) < 0)
		goto err_tx_tiny;

	/* allocate spares */
	cas_spare_init(cp);
	cas_spare_recover(cp, GFP_KERNEL);

	/* We can now request the interrupt as we know it's masked
	 * on the controller. cassini+ has up to 4 interrupts
	 * that can be used, but you need to do explicit pci interrupt
	 * mapping to expose them
	 */
	if (request_irq(cp->pdev->irq, cas_interrupt,
			IRQF_SHARED, dev->name, (void *) dev)) {
		netdev_err(cp->dev, "failed to request irq !\n");
		err = -EAGAIN;
		goto err_spare;
	}

#ifdef USE_NAPI
	napi_enable(&cp->napi);
#endif
	/* init hw */
	cas_lock_all_save(cp, flags);
	cas_clean_rings(cp);
	/* only restart autoneg when the hardware was just powered up */
	cas_init_hw(cp, !hw_was_up);
	cp->opened = 1;
	cas_unlock_all_restore(cp, flags);

	netif_start_queue(dev);
	mutex_unlock(&cp->pm_mutex);
	return 0;

err_spare:
	cas_spare_free(cp);
	cas_free_rxds(cp);
err_tx_tiny:
	cas_tx_tiny_free(cp);
err_unlock:
	mutex_unlock(&cp->pm_mutex);
	return err;
}
-
/* net_device stop() entry point: quiesce NAPI, stop the TX queue,
 * reset the chip and restart autonegotiation (so the link is ready for
 * the next open), then release the IRQ and all buffer memory.
 * pm_mutex serializes against open, ioctl and suspend/resume.
 */
static int cas_close(struct net_device *dev)
{
	unsigned long flags;
	struct cas *cp = netdev_priv(dev);

#ifdef USE_NAPI
	napi_disable(&cp->napi);
#endif
	/* Make sure we don't get distracted by suspend/resume */
	mutex_lock(&cp->pm_mutex);

	netif_stop_queue(dev);

	/* Stop traffic, mark us closed */
	cas_lock_all_save(cp, flags);
	cp->opened = 0;
	cas_reset(cp, 0);
	cas_phy_init(cp);
	cas_begin_auto_negotiation(cp, NULL);
	cas_clean_rings(cp);
	cas_unlock_all_restore(cp, flags);

	free_irq(cp->pdev->irq, (void *) dev);
	cas_spare_free(cp);
	cas_free_rxds(cp);
	cas_tx_tiny_free(cp);
	mutex_unlock(&cp->pm_mutex);
	return 0;
}
-
/* ethtool stat key strings.  The order of this table MUST match the
 * order of the data[] assignments in cas_get_ethtool_stats().
 */
static struct {
	const char name[ETH_GSTRING_LEN];
} ethtool_cassini_statnames[] = {
	{"collisions"},
	{"rx_bytes"},
	{"rx_crc_errors"},
	{"rx_dropped"},
	{"rx_errors"},
	{"rx_fifo_errors"},
	{"rx_frame_errors"},
	{"rx_length_errors"},
	{"rx_over_errors"},
	{"rx_packets"},
	{"tx_aborted_errors"},
	{"tx_bytes"},
	{"tx_dropped"},
	{"tx_errors"},
	{"tx_fifo_errors"},
	{"tx_packets"}
};
#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
-
/* Register layout for the ethtool register dump (cas_read_regs()).
 * Negative entries are MII PHY register numbers (negated before being
 * passed to cas_phy_read()); non-negative entries are MMIO offsets.
 */
static struct {
	const int offsets;	/* neg. values for 2nd arg to cas_read_phy */
} ethtool_register_table[] = {
	{-MII_BMSR},
	{-MII_BMCR},
	{REG_CAWR},
	{REG_INF_BURST},
	{REG_BIM_CFG},
	{REG_RX_CFG},
	{REG_HP_CFG},
	{REG_MAC_TX_CFG},
	{REG_MAC_RX_CFG},
	{REG_MAC_CTRL_CFG},
	{REG_MAC_XIF_CFG},
	{REG_MIF_CFG},
	{REG_PCS_CFG},
	{REG_SATURN_PCFG},
	{REG_PCS_MII_STATUS},
	{REG_PCS_STATE_MACHINE},
	{REG_MAC_COLL_EXCESS},
	{REG_MAC_COLL_LATE}
};
#define CAS_REG_LEN	ARRAY_SIZE(ethtool_register_table)
#define CAS_MAX_REGS	(sizeof (u32)*CAS_REG_LEN)
-
-static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
-{
-       u8 *p;
-       int i;
-       unsigned long flags;
-
-       spin_lock_irqsave(&cp->lock, flags);
-       for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
-               u16 hval;
-               u32 val;
-               if (ethtool_register_table[i].offsets < 0) {
-                       hval = cas_phy_read(cp,
-                                   -ethtool_register_table[i].offsets);
-                       val = hval;
-               } else {
-                       val= readl(cp->regs+ethtool_register_table[i].offsets);
-               }
-               memcpy(p, (u8 *)&val, sizeof(u32));
-       }
-       spin_unlock_irqrestore(&cp->lock, flags);
-}
-
/* net_device get_stats: fold the hardware MAC error counters and the
 * per-ring software counters into the aggregate slot
 * net_stats[N_TX_RINGS] and return it.  Per-ring counters are zeroed
 * once folded in.  Lock order: stat_lock[N_TX_RINGS] (irqsave) is taken
 * first; the per-ring stat_locks nest inside it.
 */
static struct net_device_stats *cas_get_stats(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	struct net_device_stats *stats = cp->net_stats;
	unsigned long flags;
	int i;
	unsigned long tmp;

	/* we collate all of the stats into net_stats[N_TX_RING] */
	if (!cp->hw_running)
		return stats + N_TX_RINGS;

	/* collect outstanding stats */
	/* WTZ: the Cassini spec gives these as 16 bit counters but
	 * stored in 32-bit words.  Added a mask of 0xffff to be safe,
	 * in case the chip somehow puts any garbage in the other bits.
	 * Also, counter usage didn't seem to mach what Adrian did
	 * in the parts of the code that set these quantities. Made
	 * that consistent.
	 */
	spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
	stats[N_TX_RINGS].rx_crc_errors +=
	  readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
	stats[N_TX_RINGS].rx_frame_errors +=
		readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
	stats[N_TX_RINGS].rx_length_errors +=
		readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
#if 1
	/* excessive + late collisions both count as aborted transmits */
	tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
		(readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
	stats[N_TX_RINGS].tx_aborted_errors += tmp;
	stats[N_TX_RINGS].collisions +=
	  tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
#else
	stats[N_TX_RINGS].tx_aborted_errors +=
		readl(cp->regs + REG_MAC_COLL_EXCESS);
	stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
		readl(cp->regs + REG_MAC_COLL_LATE);
#endif
	/* counters are clear-on-read on the chip side after this */
	cas_clear_mac_err(cp);

	/* saved bits that are unique to ring 0 */
	spin_lock(&cp->stat_lock[0]);
	stats[N_TX_RINGS].collisions        += stats[0].collisions;
	stats[N_TX_RINGS].rx_over_errors    += stats[0].rx_over_errors;
	stats[N_TX_RINGS].rx_frame_errors   += stats[0].rx_frame_errors;
	stats[N_TX_RINGS].rx_fifo_errors    += stats[0].rx_fifo_errors;
	stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
	stats[N_TX_RINGS].tx_fifo_errors    += stats[0].tx_fifo_errors;
	spin_unlock(&cp->stat_lock[0]);

	for (i = 0; i < N_TX_RINGS; i++) {
		spin_lock(&cp->stat_lock[i]);
		stats[N_TX_RINGS].rx_length_errors +=
			stats[i].rx_length_errors;
		stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
		stats[N_TX_RINGS].rx_packets    += stats[i].rx_packets;
		stats[N_TX_RINGS].tx_packets    += stats[i].tx_packets;
		stats[N_TX_RINGS].rx_bytes      += stats[i].rx_bytes;
		stats[N_TX_RINGS].tx_bytes      += stats[i].tx_bytes;
		stats[N_TX_RINGS].rx_errors     += stats[i].rx_errors;
		stats[N_TX_RINGS].tx_errors     += stats[i].tx_errors;
		stats[N_TX_RINGS].rx_dropped    += stats[i].rx_dropped;
		stats[N_TX_RINGS].tx_dropped    += stats[i].tx_dropped;
		memset(stats + i, 0, sizeof(struct net_device_stats));
		spin_unlock(&cp->stat_lock[i]);
	}
	spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
	return stats + N_TX_RINGS;
}
-
-
/* net_device set_multicast_list: reprogram the RX MAC filters.
 * The RX MAC must be disabled (and polled until idle) before the
 * address/hash filters may be rewritten.  cas_setup_multicast()
 * computes the new RX config bits, which are also cached in
 * cp->mac_rx_cfg so later resets reapply the same filtering.
 */
static void cas_set_multicast(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	unsigned long flags;
	int limit = STOP_TRIES;	/* bounded 10us polls; proceed anyway on timeout */

	if (!cp->hw_running)
		return;

	spin_lock_irqsave(&cp->lock, flags);
	rxcfg = readl(cp->regs + REG_MAC_RX_CFG);

	/* disable RX MAC and wait for completion */
	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
		if (!limit--)
			break;
		udelay(10);
	}

	/* disable hash filter and wait for completion */
	limit = STOP_TRIES;
	rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
	writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
		if (!limit--)
			break;
		udelay(10);
	}

	/* program hash filters */
	cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
	rxcfg |= rxcfg_new;
	/* re-enable RX MAC with the new filter configuration */
	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
	spin_unlock_irqrestore(&cp->lock, flags);
}
-
-static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
-       struct cas *cp = netdev_priv(dev);
-       strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
-       strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
-       info->fw_version[0] = '\0';
-       strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
-       info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
-               cp->casreg_len : CAS_MAX_REGS;
-       info->n_stats = CAS_NUM_STAT_KEYS;
-}
-
/* ethtool get_settings: report supported/advertised modes, current
 * speed, duplex and autoneg state.  MII vs PCS (fibre) attachment is
 * decided by cp->phy_type; when the hardware is down we fall back to
 * the software link_cntl configuration.  cp->lock guards the PHY/PCS
 * reads so they are consistent with the link state machine.
 */
static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cas *cp = netdev_priv(dev);
	u16 bmcr;
	int full_duplex, speed, pause;
	unsigned long flags;
	enum link_state linkstate = link_up;

	cmd->advertising = 0;
	cmd->supported = SUPPORTED_Autoneg;
	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
		cmd->supported |= SUPPORTED_1000baseT_Full;
		cmd->advertising |= ADVERTISED_1000baseT_Full;
	}

	/* Record PHY settings if HW is on. */
	spin_lock_irqsave(&cp->lock, flags);
	bmcr = 0;
	linkstate = cp->lstate;
	if (CAS_PHY_MII(cp->phy_type)) {
		cmd->port = PORT_MII;
		cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
			XCVR_INTERNAL : XCVR_EXTERNAL;
		cmd->phy_address = cp->phy_addr;
		cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
			ADVERTISED_10baseT_Half |
			ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half |
			ADVERTISED_100baseT_Full;

		cmd->supported |=
			(SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_TP | SUPPORTED_MII);

		if (cp->hw_running) {
			/* pause MIF polling around the raw PHY access */
			cas_mif_poll(cp, 0);
			bmcr = cas_phy_read(cp, MII_BMCR);
			cas_read_mii_link_mode(cp, &full_duplex,
					       &speed, &pause);
			cas_mif_poll(cp, 1);
		}

	} else {
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_INTERNAL;
		cmd->phy_address = 0;
		cmd->supported   |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;

		if (cp->hw_running) {
			/* pcs uses the same bits as mii */
			bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
			cas_read_pcs_link_mode(cp, &full_duplex,
					       &speed, &pause);
		}
	}
	spin_unlock_irqrestore(&cp->lock, flags);

	if (bmcr & BMCR_ANENABLE) {
		cmd->advertising |= ADVERTISED_Autoneg;
		cmd->autoneg = AUTONEG_ENABLE;
		ethtool_cmd_speed_set(cmd, ((speed == 10) ?
					    SPEED_10 :
					    ((speed == 1000) ?
					     SPEED_1000 : SPEED_100)));
		cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
	} else {
		cmd->autoneg = AUTONEG_DISABLE;
		ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
					    SPEED_1000 :
					    ((bmcr & BMCR_SPEED100) ?
					     SPEED_100 : SPEED_10)));
		cmd->duplex =
			(bmcr & BMCR_FULLDPLX) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}
	if (linkstate != link_up) {
		/* Force these to "unknown" if the link is not up and
		 * autonegotiation is enabled. We can set the link
		 * speed to 0, but not cmd->duplex,
		 * because its legal values are 0 and 1.  Ethtool will
		 * print the value reported in parentheses after the
		 * word "Unknown" for unrecognized values.
		 *
		 * If in forced mode, we report the speed and duplex
		 * settings that we configured.
		 */
		if (cp->link_cntl & BMCR_ANENABLE) {
			ethtool_cmd_speed_set(cmd, 0);
			cmd->duplex = 0xff;
		} else {
			ethtool_cmd_speed_set(cmd, SPEED_10);
			if (cp->link_cntl & BMCR_SPEED100) {
				ethtool_cmd_speed_set(cmd, SPEED_100);
			} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
				ethtool_cmd_speed_set(cmd, SPEED_1000);
			}
			cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
				DUPLEX_FULL : DUPLEX_HALF;
		}
	}
	return 0;
}
-
-static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-       struct cas *cp = netdev_priv(dev);
-       unsigned long flags;
-       u32 speed = ethtool_cmd_speed(cmd);
-
-       /* Verify the settings we care about. */
-       if (cmd->autoneg != AUTONEG_ENABLE &&
-           cmd->autoneg != AUTONEG_DISABLE)
-               return -EINVAL;
-
-       if (cmd->autoneg == AUTONEG_DISABLE &&
-           ((speed != SPEED_1000 &&
-             speed != SPEED_100 &&
-             speed != SPEED_10) ||
-            (cmd->duplex != DUPLEX_HALF &&
-             cmd->duplex != DUPLEX_FULL)))
-               return -EINVAL;
-
-       /* Apply settings and restart link process. */
-       spin_lock_irqsave(&cp->lock, flags);
-       cas_begin_auto_negotiation(cp, cmd);
-       spin_unlock_irqrestore(&cp->lock, flags);
-       return 0;
-}
-
-static int cas_nway_reset(struct net_device *dev)
-{
-       struct cas *cp = netdev_priv(dev);
-       unsigned long flags;
-
-       if ((cp->link_cntl & BMCR_ANENABLE) == 0)
-               return -EINVAL;
-
-       /* Restart link process. */
-       spin_lock_irqsave(&cp->lock, flags);
-       cas_begin_auto_negotiation(cp, NULL);
-       spin_unlock_irqrestore(&cp->lock, flags);
-
-       return 0;
-}
-
-static u32 cas_get_link(struct net_device *dev)
-{
-       struct cas *cp = netdev_priv(dev);
-       return cp->lstate == link_up;
-}
-
-static u32 cas_get_msglevel(struct net_device *dev)
-{
-       struct cas *cp = netdev_priv(dev);
-       return cp->msg_enable;
-}
-
-static void cas_set_msglevel(struct net_device *dev, u32 value)
-{
-       struct cas *cp = netdev_priv(dev);
-       cp->msg_enable = value;
-}
-
-static int cas_get_regs_len(struct net_device *dev)
-{
-       struct cas *cp = netdev_priv(dev);
-       return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
-}
-
/* ethtool get_regs: fill p with regs->len bytes of register state,
 * interpreted as u32 words per ethtool_register_table.
 */
static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct cas *cp = netdev_priv(dev);

	regs->version = 0;
	/* cas_read_regs handles locks (cp->lock).  */
	cas_read_regs(cp, p, regs->len / sizeof(u32));
}
-
-static int cas_get_sset_count(struct net_device *dev, int sset)
-{
-       switch (sset) {
-       case ETH_SS_STATS:
-               return CAS_NUM_STAT_KEYS;
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
-        memcpy(data, &ethtool_cassini_statnames,
-                                        CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
-}
-
-static void cas_get_ethtool_stats(struct net_device *dev,
-                                     struct ethtool_stats *estats, u64 *data)
-{
-       struct cas *cp = netdev_priv(dev);
-       struct net_device_stats *stats = cas_get_stats(cp->dev);
-       int i = 0;
-       data[i++] = stats->collisions;
-       data[i++] = stats->rx_bytes;
-       data[i++] = stats->rx_crc_errors;
-       data[i++] = stats->rx_dropped;
-       data[i++] = stats->rx_errors;
-       data[i++] = stats->rx_fifo_errors;
-       data[i++] = stats->rx_frame_errors;
-       data[i++] = stats->rx_length_errors;
-       data[i++] = stats->rx_over_errors;
-       data[i++] = stats->rx_packets;
-       data[i++] = stats->tx_aborted_errors;
-       data[i++] = stats->tx_bytes;
-       data[i++] = stats->tx_dropped;
-       data[i++] = stats->tx_errors;
-       data[i++] = stats->tx_fifo_errors;
-       data[i++] = stats->tx_packets;
-       BUG_ON(i != CAS_NUM_STAT_KEYS);
-}
-
/* ethtool operations exported by the driver (hooked up in cas_init_one). */
static const struct ethtool_ops cas_ethtool_ops = {
	.get_drvinfo		= cas_get_drvinfo,
	.get_settings		= cas_get_settings,
	.set_settings		= cas_set_settings,
	.nway_reset		= cas_nway_reset,
	.get_link		= cas_get_link,
	.get_msglevel		= cas_get_msglevel,
	.set_msglevel		= cas_set_msglevel,
	.get_regs_len		= cas_get_regs_len,
	.get_regs		= cas_get_regs,
	.get_sset_count		= cas_get_sset_count,
	.get_strings		= cas_get_strings,
	.get_ethtool_stats	= cas_get_ethtool_stats,
};
-
/* net_device ioctl handler: MII PHY register access
 * (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).  Unhandled commands return
 * -EOPNOTSUPP.  MIF polling is paused around each raw PHY access.
 */
static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cas *cp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;
	int rc = -EOPNOTSUPP;

	/* Hold the PM mutex while doing ioctl's or we may collide
	 * with open/close and power management and oops.
	 */
	mutex_lock(&cp->pm_mutex);
	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = cp->phy_addr;
		/* Fallthrough... */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		spin_lock_irqsave(&cp->lock, flags);
		cas_mif_poll(cp, 0);
		rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
		cas_mif_poll(cp, 1);
		spin_unlock_irqrestore(&cp->lock, flags);
		break;
	default:
		break;
	}

	mutex_unlock(&cp->pm_mutex);
	return rc;
}
-
/* When this chip sits underneath an Intel 31154 bridge (vendor 0x8086,
 * device 0x537c), it is the only subordinate device and we can tweak
 * the bridge's arbitration, prefetch and latency settings to reflect
 * that fact.  Silently does nothing for any other (or no) parent.
 */
static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
{
	struct pci_dev *pdev = cas_pdev->bus->self;
	u32 val;

	if (!pdev)
		return;

	if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
		return;

	/* Clear bit 10 (Bus Parking Control) in the Secondary
	 * Arbiter Control/Status Register which lives at offset
	 * 0x41.  Using a 32-bit word read/modify/write at 0x40
	 * is much simpler so that's how we do this.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	val &= ~0x00040000;
	pci_write_config_dword(pdev, 0x40, val);

	/* Max out the Multi-Transaction Timer settings since
	 * Cassini is the only device present.
	 *
	 * The register is 16-bit and lives at 0x50.  When the
	 * settings are enabled, it extends the GRANT# signal
	 * for a requestor after a transaction is complete.  This
	 * allows the next request to run without first needing
	 * to negotiate the GRANT# signal back.
	 *
	 * Bits 12:10 define the grant duration:
	 *
	 *	1	--	16 clocks
	 *	2	--	32 clocks
	 *	3	--	64 clocks
	 *	4	--	128 clocks
	 *	5	--	256 clocks
	 *
	 * All other values are illegal.
	 *
	 * Bits 09:00 define which REQ/GNT signal pairs get the
	 * GRANT# signal treatment.  We set them all.
	 */
	pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);

	/* The Read Prefecth Policy register is 16-bit and sits at
	 * offset 0x52.  It enables a "smart" pre-fetch policy.  We
	 * enable it and max out all of the settings since only one
	 * device is sitting underneath and thus bandwidth sharing is
	 * not an issue.
	 *
	 * The register has several 3 bit fields, which indicates a
	 * multiplier applied to the base amount of prefetching the
	 * chip would do.  These fields are at:
	 *
	 *	15:13	---	ReRead Primary Bus
	 *	12:10	---	FirstRead Primary Bus
	 *	09:07	---	ReRead Secondary Bus
	 *	06:04	---	FirstRead Secondary Bus
	 *
	 * Bits 03:00 control which REQ/GNT pairs the prefetch settings
	 * get enabled on.  Bit 3 is a grouped enabler which controls
	 * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
	 * the individual REQ/GNT pairs [2:0].
	 */
	pci_write_config_word(pdev, 0x52,
			      (0x7 << 13) |
			      (0x7 << 10) |
			      (0x7 <<  7) |
			      (0x7 <<  4) |
			      (0xf <<  0));

	/* Force cacheline size to 0x8 */
	pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);

	/* Force latency timer to maximum setting so Cassini can
	 * sit on the bus as long as it likes.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
}
-
/* net_device callbacks; wired up to the device in cas_init_one(). */
static const struct net_device_ops cas_netdev_ops = {
	.ndo_open		= cas_open,
	.ndo_stop		= cas_close,
	.ndo_start_xmit		= cas_start_xmit,
	.ndo_get_stats		= cas_get_stats,
	.ndo_set_multicast_list	= cas_set_multicast,
	.ndo_do_ioctl		= cas_ioctl,
	.ndo_tx_timeout		= cas_tx_timeout,
	.ndo_change_mtu		= cas_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cas_netpoll,
#endif
};
-
-static int __devinit cas_init_one(struct pci_dev *pdev,
-                                 const struct pci_device_id *ent)
-{
-       static int cas_version_printed = 0;
-       unsigned long casreg_len;
-       struct net_device *dev;
-       struct cas *cp;
-       int i, err, pci_using_dac;
-       u16 pci_cmd;
-       u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
-
-       if (cas_version_printed++ == 0)
-               pr_info("%s", version);
-
-       err = pci_enable_device(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
-               return err;
-       }
-
-       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-               dev_err(&pdev->dev, "Cannot find proper PCI device "
-                      "base address, aborting\n");
-               err = -ENODEV;
-               goto err_out_disable_pdev;
-       }
-
-       dev = alloc_etherdev(sizeof(*cp));
-       if (!dev) {
-               dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
-               err = -ENOMEM;
-               goto err_out_disable_pdev;
-       }
-       SET_NETDEV_DEV(dev, &pdev->dev);
-
-       err = pci_request_regions(pdev, dev->name);
-       if (err) {
-               dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
-               goto err_out_free_netdev;
-       }
-       pci_set_master(pdev);
-
-       /* we must always turn on parity response or else parity
-        * doesn't get generated properly. disable SERR/PERR as well.
-        * in addition, we want to turn MWI on.
-        */
-       pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
-       pci_cmd &= ~PCI_COMMAND_SERR;
-       pci_cmd |= PCI_COMMAND_PARITY;
-       pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
-       if (pci_try_set_mwi(pdev))
-               pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
-
-       cas_program_bridge(pdev);
-
-       /*
-        * On some architectures, the default cache line size set
-        * by pci_try_set_mwi reduces perforamnce.  We have to increase
-        * it for this case.  To start, we'll print some configuration
-        * data.
-        */
-#if 1
-       pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
-                            &orig_cacheline_size);
-       if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
-               cas_cacheline_size =
-                       (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
-                       CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
-               if (pci_write_config_byte(pdev,
-                                         PCI_CACHE_LINE_SIZE,
-                                         cas_cacheline_size)) {
-                       dev_err(&pdev->dev, "Could not set PCI cache "
-                              "line size\n");
-                       goto err_write_cacheline;
-               }
-       }
-#endif
-
-
-       /* Configure DMA attributes. */
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               pci_using_dac = 1;
-               err = pci_set_consistent_dma_mask(pdev,
-                                                 DMA_BIT_MASK(64));
-               if (err < 0) {
-                       dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
-                              "for consistent allocations\n");
-                       goto err_out_free_res;
-               }
-
-       } else {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (err) {
-                       dev_err(&pdev->dev, "No usable DMA configuration, "
-                              "aborting\n");
-                       goto err_out_free_res;
-               }
-               pci_using_dac = 0;
-       }
-
-       casreg_len = pci_resource_len(pdev, 0);
-
-       cp = netdev_priv(dev);
-       cp->pdev = pdev;
-#if 1
-       /* A value of 0 indicates we never explicitly set it */
-       cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
-#endif
-       cp->dev = dev;
-       cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
-         cassini_debug;
-
-#if defined(CONFIG_SPARC)
-       cp->of_node = pci_device_to_OF_node(pdev);
-#endif
-
-       cp->link_transition = LINK_TRANSITION_UNKNOWN;
-       cp->link_transition_jiffies_valid = 0;
-
-       spin_lock_init(&cp->lock);
-       spin_lock_init(&cp->rx_inuse_lock);
-       spin_lock_init(&cp->rx_spare_lock);
-       for (i = 0; i < N_TX_RINGS; i++) {
-               spin_lock_init(&cp->stat_lock[i]);
-               spin_lock_init(&cp->tx_lock[i]);
-       }
-       spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
-       mutex_init(&cp->pm_mutex);
-
-       init_timer(&cp->link_timer);
-       cp->link_timer.function = cas_link_timer;
-       cp->link_timer.data = (unsigned long) cp;
-
-#if 1
-       /* Just in case the implementation of atomic operations
-        * change so that an explicit initialization is necessary.
-        */
-       atomic_set(&cp->reset_task_pending, 0);
-       atomic_set(&cp->reset_task_pending_all, 0);
-       atomic_set(&cp->reset_task_pending_spare, 0);
-       atomic_set(&cp->reset_task_pending_mtu, 0);
-#endif
-       INIT_WORK(&cp->reset_task, cas_reset_task);
-
-       /* Default link parameters */
-       if (link_mode >= 0 && link_mode < 6)
-               cp->link_cntl = link_modes[link_mode];
-       else
-               cp->link_cntl = BMCR_ANENABLE;
-       cp->lstate = link_down;
-       cp->link_transition = LINK_TRANSITION_LINK_DOWN;
-       netif_carrier_off(cp->dev);
-       cp->timer_ticks = 0;
-
-       /* give us access to cassini registers */
-       cp->regs = pci_iomap(pdev, 0, casreg_len);
-       if (!cp->regs) {
-               dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
-               goto err_out_free_res;
-       }
-       cp->casreg_len = casreg_len;
-
-       pci_save_state(pdev);
-       cas_check_pci_invariants(cp);
-       cas_hard_reset(cp);
-       cas_reset(cp, 0);
-       if (cas_check_invariants(cp))
-               goto err_out_iounmap;
-       if (cp->cas_flags & CAS_FLAG_SATURN)
-               if (cas_saturn_firmware_init(cp))
-                       goto err_out_iounmap;
-
-       cp->init_block = (struct cas_init_block *)
-               pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
-                                    &cp->block_dvma);
-       if (!cp->init_block) {
-               dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
-               goto err_out_iounmap;
-       }
-
-       for (i = 0; i < N_TX_RINGS; i++)
-               cp->init_txds[i] = cp->init_block->txds[i];
-
-       for (i = 0; i < N_RX_DESC_RINGS; i++)
-               cp->init_rxds[i] = cp->init_block->rxds[i];
-
-       for (i = 0; i < N_RX_COMP_RINGS; i++)
-               cp->init_rxcs[i] = cp->init_block->rxcs[i];
-
-       for (i = 0; i < N_RX_FLOWS; i++)
-               skb_queue_head_init(&cp->rx_flows[i]);
-
-       dev->netdev_ops = &cas_netdev_ops;
-       dev->ethtool_ops = &cas_ethtool_ops;
-       dev->watchdog_timeo = CAS_TX_TIMEOUT;
-
-#ifdef USE_NAPI
-       netif_napi_add(dev, &cp->napi, cas_poll, 64);
-#endif
-       dev->irq = pdev->irq;
-       dev->dma = 0;
-
-       /* Cassini features. */
-       if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
-               dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
-
-       if (pci_using_dac)
-               dev->features |= NETIF_F_HIGHDMA;
-
-       if (register_netdev(dev)) {
-               dev_err(&pdev->dev, "Cannot register net device, aborting\n");
-               goto err_out_free_consistent;
-       }
-
-       i = readl(cp->regs + REG_BIM_CFG);
-       netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
-                   (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
-                   (i & BIM_CFG_32BIT) ? "32" : "64",
-                   (i & BIM_CFG_66MHZ) ? "66" : "33",
-                   (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
-                   dev->dev_addr);
-
-       pci_set_drvdata(pdev, dev);
-       cp->hw_running = 1;
-       cas_entropy_reset(cp);
-       cas_phy_init(cp);
-       cas_begin_auto_negotiation(cp, NULL);
-       return 0;
-
-err_out_free_consistent:
-       pci_free_consistent(pdev, sizeof(struct cas_init_block),
-                           cp->init_block, cp->block_dvma);
-
-err_out_iounmap:
-       mutex_lock(&cp->pm_mutex);
-       if (cp->hw_running)
-               cas_shutdown(cp);
-       mutex_unlock(&cp->pm_mutex);
-
-       pci_iounmap(pdev, cp->regs);
-
-
-err_out_free_res:
-       pci_release_regions(pdev);
-
-err_write_cacheline:
-       /* Try to restore it in case the error occurred after we
-        * set it.
-        */
-       pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
-
-err_out_free_netdev:
-       free_netdev(dev);
-
-err_out_disable_pdev:
-       pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
-       return -ENODEV;
-}
-
-static void __devexit cas_remove_one(struct pci_dev *pdev)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct cas *cp;
-       if (!dev)
-               return;
-
-       cp = netdev_priv(dev);
-       unregister_netdev(dev);
-
-       if (cp->fw_data)
-               vfree(cp->fw_data);
-
-       mutex_lock(&cp->pm_mutex);
-       cancel_work_sync(&cp->reset_task);
-       if (cp->hw_running)
-               cas_shutdown(cp);
-       mutex_unlock(&cp->pm_mutex);
-
-#if 1
-       if (cp->orig_cacheline_size) {
-               /* Restore the cache line size if we had modified
-                * it.
-                */
-               pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
-                                     cp->orig_cacheline_size);
-       }
-#endif
-       pci_free_consistent(pdev, sizeof(struct cas_init_block),
-                           cp->init_block, cp->block_dvma);
-       pci_iounmap(pdev, cp->regs);
-       free_netdev(dev);
-       pci_release_regions(pdev);
-       pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
-}
-
-#ifdef CONFIG_PM
-static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct cas *cp = netdev_priv(dev);
-       unsigned long flags;
-
-       mutex_lock(&cp->pm_mutex);
-
-       /* If the driver is opened, we stop the DMA */
-       if (cp->opened) {
-               netif_device_detach(dev);
-
-               cas_lock_all_save(cp, flags);
-
-               /* We can set the second arg of cas_reset to 0
-                * because on resume, we'll call cas_init_hw with
-                * its second arg set so that autonegotiation is
-                * restarted.
-                */
-               cas_reset(cp, 0);
-               cas_clean_rings(cp);
-               cas_unlock_all_restore(cp, flags);
-       }
-
-       if (cp->hw_running)
-               cas_shutdown(cp);
-       mutex_unlock(&cp->pm_mutex);
-
-       return 0;
-}
-
-static int cas_resume(struct pci_dev *pdev)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct cas *cp = netdev_priv(dev);
-
-       netdev_info(dev, "resuming\n");
-
-       mutex_lock(&cp->pm_mutex);
-       cas_hard_reset(cp);
-       if (cp->opened) {
-               unsigned long flags;
-               cas_lock_all_save(cp, flags);
-               cas_reset(cp, 0);
-               cp->hw_running = 1;
-               cas_clean_rings(cp);
-               cas_init_hw(cp, 1);
-               cas_unlock_all_restore(cp, flags);
-
-               netif_device_attach(dev);
-       }
-       mutex_unlock(&cp->pm_mutex);
-       return 0;
-}
-#endif /* CONFIG_PM */
-
-static struct pci_driver cas_driver = {
-       .name           = DRV_MODULE_NAME,
-       .id_table       = cas_pci_tbl,
-       .probe          = cas_init_one,
-       .remove         = __devexit_p(cas_remove_one),
-#ifdef CONFIG_PM
-       .suspend        = cas_suspend,
-       .resume         = cas_resume
-#endif
-};
-
-static int __init cas_init(void)
-{
-       if (linkdown_timeout > 0)
-               link_transition_timeout = linkdown_timeout * HZ;
-       else
-               link_transition_timeout = 0;
-
-       return pci_register_driver(&cas_driver);
-}
-
-static void __exit cas_cleanup(void)
-{
-       pci_unregister_driver(&cas_driver);
-}
-
-module_init(cas_init);
-module_exit(cas_cleanup);
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
deleted file mode 100644 (file)
index b361424..0000000
+++ /dev/null
@@ -1,2914 +0,0 @@
-/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
- * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
- *
- * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
- * 02111-1307, USA.
- *
- * vendor id: 0x108E (Sun Microsystems, Inc.)
- * device id: 0xabba (Cassini)
- * revision ids: 0x01 = Cassini
- *               0x02 = Cassini rev 2
- *               0x10 = Cassini+
- *               0x11 = Cassini+ 0.2u
- *
- * vendor id: 0x100b (National Semiconductor)
- * device id: 0x0035 (DP83065/Saturn)
- * revision ids: 0x30 = Saturn B2
- *
- * rings are all offset from 0.
- *
- * there are two clock domains:
- * PCI:  33/66MHz clock
- * chip: 125MHz clock
- */
-
-#ifndef _CASSINI_H
-#define _CASSINI_H
-
-/* cassini register map: 2M memory mapped in 32-bit memory space accessible as
- * 32-bit words. there is no i/o port access. REG_ addresses are
- * shared between cassini and cassini+. REG_PLUS_ addresses only
- * appear in cassini+. REG_MINUS_ addresses only appear in cassini.
- */
-#define CAS_ID_REV2          0x02
-#define CAS_ID_REVPLUS       0x10
-#define CAS_ID_REVPLUS02u    0x11
-#define CAS_ID_REVSATURNB2   0x30
-
-/** global resources **/
-
-/* this register sets the weights for the weighted round robin arbiter. e.g.,
- * if rx weight == 1 and tx weight == 0, rx == 2x tx transfer credit
- * for its next turn to access the pci bus.
- * map: 0x0 = x1, 0x1 = x2, 0x2 = x4, 0x3 = x8
- * DEFAULT: 0x0, SIZE: 5 bits
- */
-#define  REG_CAWR                     0x0004  /* core arbitration weight */
-#define    CAWR_RX_DMA_WEIGHT_SHIFT    0
-#define    CAWR_RX_DMA_WEIGHT_MASK     0x03    /* [0:1] */
-#define    CAWR_TX_DMA_WEIGHT_SHIFT    2
-#define    CAWR_TX_DMA_WEIGHT_MASK     0x0C    /* [3:2] */
-#define    CAWR_RR_DIS                 0x10    /* [4] */
-
-/* if enabled, BIM can send bursts across PCI bus > cacheline size. burst
- * sizes determined by length of packet or descriptor transfer and the
- * max length allowed by the target.
- * DEFAULT: 0x0, SIZE: 1 bit
- */
-#define  REG_INF_BURST                 0x0008  /* infinite burst enable reg */
-#define    INF_BURST_EN                0x1     /* enable */
-
-/* top level interrupts [0-9] are auto-cleared to 0 when the status
- * register is read. second level interrupts [13 - 18] are cleared at
- * the source. tx completion register 3 is replicated in [19 - 31]
- * DEFAULT: 0x00000000, SIZE: 29 bits
- */
-#define  REG_INTR_STATUS               0x000C  /* interrupt status register */
-#define    INTR_TX_INTME               0x00000001  /* frame w/ INT ME desc bit set
-                                                     xferred from host queue to
-                                                     TX FIFO */
-#define    INTR_TX_ALL                 0x00000002  /* all xmit frames xferred into
-                                                     TX FIFO. i.e.,
-                                                     TX Kick == TX complete. if
-                                                     PACED_MODE set, then TX FIFO
-                                                     also empty */
-#define    INTR_TX_DONE                0x00000004  /* any frame xferred into tx
-                                                     FIFO */
-#define    INTR_TX_TAG_ERROR           0x00000008  /* TX FIFO tag framing
-                                                     corrupted. FATAL ERROR */
-#define    INTR_RX_DONE                0x00000010  /* at least 1 frame xferred
-                                                     from RX FIFO to host mem.
-                                                     RX completion reg updated.
-                                                     may be delayed by recv
-                                                     intr blanking. */
-#define    INTR_RX_BUF_UNAVAIL         0x00000020  /* no more receive buffers.
-                                                     RX Kick == RX complete */
-#define    INTR_RX_TAG_ERROR           0x00000040  /* RX FIFO tag framing
-                                                     corrupted. FATAL ERROR */
-#define    INTR_RX_COMP_FULL           0x00000080  /* no more room in completion
-                                                     ring to post descriptors.
-                                                     RX complete head incr to
-                                                     almost reach RX complete
-                                                     tail */
-#define    INTR_RX_BUF_AE              0x00000100  /* less than the
-                                                     programmable threshold #
-                                                     of free descr avail for
-                                                     hw use */
-#define    INTR_RX_COMP_AF             0x00000200  /* less than the
-                                                     programmable threshold #
-                                                     of descr spaces for hw
-                                                     use in completion descr
-                                                     ring */
-#define    INTR_RX_LEN_MISMATCH        0x00000400  /* len field from MAC !=
-                                                     len of non-reassembly pkt
-                                                     from fifo during DMA or
-                                                     header parser provides TCP
-                                                     header and payload size >
-                                                     MAC packet size.
-                                                     FATAL ERROR */
-#define    INTR_SUMMARY                0x00001000  /* summary interrupt bit. this
-                                                     bit will be set if an interrupt
-                                                     generated on the pci bus. useful
-                                                     when driver is polling for
-                                                     interrupts */
-#define    INTR_PCS_STATUS             0x00002000  /* PCS interrupt status register */
-#define    INTR_TX_MAC_STATUS          0x00004000  /* TX MAC status register has at
-                                                     least 1 unmasked interrupt set */
-#define    INTR_RX_MAC_STATUS          0x00008000  /* RX MAC status register has at
-                                                     least 1 unmasked interrupt set */
-#define    INTR_MAC_CTRL_STATUS        0x00010000  /* MAC control status register has
-                                                     at least 1 unmasked interrupt
-                                                     set */
-#define    INTR_MIF_STATUS             0x00020000  /* MIF status register has at least
-                                                     1 unmasked interrupt set */
-#define    INTR_PCI_ERROR_STATUS       0x00040000  /* PCI error status register in the
-                                                     BIF has at least 1 unmasked
-                                                     interrupt set */
-#define    INTR_TX_COMP_3_MASK         0xFFF80000  /* mask for TX completion
-                                                     3 reg data */
-#define    INTR_TX_COMP_3_SHIFT        19
-#define    INTR_ERROR_MASK (INTR_MIF_STATUS | INTR_PCI_ERROR_STATUS | \
-                            INTR_PCS_STATUS | INTR_RX_LEN_MISMATCH | \
-                            INTR_TX_MAC_STATUS | INTR_RX_MAC_STATUS | \
-                            INTR_TX_TAG_ERROR | INTR_RX_TAG_ERROR | \
-                            INTR_MAC_CTRL_STATUS)
-
-/* determines which status events will cause an interrupt. layout same
- * as REG_INTR_STATUS.
- * DEFAULT: 0xFFFFFFFF, SIZE: 16 bits
- */
-#define  REG_INTR_MASK                 0x0010  /* Interrupt mask */
-
-/* top level interrupt bits that are cleared during read of REG_INTR_STATUS_ALIAS.
- * useful when driver is polling for interrupts. layout same as REG_INTR_MASK.
- * DEFAULT: 0x00000000, SIZE: 12 bits
- */
-#define  REG_ALIAS_CLEAR               0x0014  /* alias clear mask
-                                                 (used w/ status alias) */
-/* same as REG_INTR_STATUS except that only bits cleared are those selected by
- * REG_ALIAS_CLEAR
- * DEFAULT: 0x00000000, SIZE: 29 bits
- */
-#define  REG_INTR_STATUS_ALIAS         0x001C  /* interrupt status alias
-                                                 (selective clear) */
-
-/* DEFAULT: 0x0, SIZE: 3 bits */
-#define  REG_PCI_ERR_STATUS            0x1000  /* PCI error status */
-#define    PCI_ERR_BADACK              0x01    /* reserved in Cassini+.
-                                                 set if no ACK64# during ABS64 cycle
-                                                 in Cassini. */
-#define    PCI_ERR_DTRTO               0x02    /* delayed xaction timeout. set if
-                                                 no read retry after 2^15 clocks */
-#define    PCI_ERR_OTHER               0x04    /* other PCI errors */
-#define    PCI_ERR_BIM_DMA_WRITE       0x08    /* BIM received 0 count DMA write req.
-                                                 unused in Cassini. */
-#define    PCI_ERR_BIM_DMA_READ        0x10    /* BIM received 0 count DMA read req.
-                                                 unused in Cassini. */
-#define    PCI_ERR_BIM_DMA_TIMEOUT     0x20    /* BIM received 255 retries during
-                                                 DMA. unused in cassini. */
-
-/* mask for PCI status events that will set PCI_ERR_STATUS. if cleared, event
- * causes an interrupt to be generated.
- * DEFAULT: 0x7, SIZE: 3 bits
- */
-#define  REG_PCI_ERR_STATUS_MASK       0x1004  /* PCI Error status mask */
-
-/* used to configure PCI related parameters that are not in PCI config space.
- * DEFAULT: 0bxx000, SIZE: 5 bits
- */
-#define  REG_BIM_CFG                0x1008  /* BIM Configuration */
-#define    BIM_CFG_RESERVED0        0x001   /* reserved */
-#define    BIM_CFG_RESERVED1        0x002   /* reserved */
-#define    BIM_CFG_64BIT_DISABLE    0x004   /* disable 64-bit mode */
-#define    BIM_CFG_66MHZ            0x008   /* (ro) 1 = 66MHz, 0 = < 66MHz */
-#define    BIM_CFG_32BIT            0x010   /* (ro) 1 = 32-bit slot, 0 = 64-bit */
-#define    BIM_CFG_DPAR_INTR_ENABLE 0x020   /* detected parity err enable */
-#define    BIM_CFG_RMA_INTR_ENABLE  0x040   /* master abort intr enable */
-#define    BIM_CFG_RTA_INTR_ENABLE  0x080   /* target abort intr enable */
-#define    BIM_CFG_RESERVED2        0x100   /* reserved */
-#define    BIM_CFG_BIM_DISABLE      0x200   /* stop BIM DMA. use before global
-                                              reset. reserved in Cassini. */
-#define    BIM_CFG_BIM_STATUS       0x400   /* (ro) 1 = BIM DMA suspended.
-                                                 reserved in Cassini. */
-#define    BIM_CFG_PERROR_BLOCK     0x800  /* block PERR# to pci bus. def: 0.
-                                                reserved in Cassini. */
-
-/* DEFAULT: 0x00000000, SIZE: 32 bits */
-#define  REG_BIM_DIAG                  0x100C  /* BIM Diagnostic */
-#define    BIM_DIAG_MSTR_SM_MASK       0x3FFFFF00 /* PCI master controller state
-                                                    machine bits [21:0] */
-#define    BIM_DIAG_BRST_SM_MASK       0x7F    /* PCI burst controller state
-                                                 machine bits [6:0] */
-
-/* writing to SW_RESET_TX and SW_RESET_RX will issue a global
- * reset. poll until TX and RX read back as 0's for completion.
- */
-#define  REG_SW_RESET                  0x1010  /* Software reset */
-#define    SW_RESET_TX                 0x00000001  /* reset TX DMA engine. poll until
-                                                     cleared to 0.  */
-#define    SW_RESET_RX                 0x00000002  /* reset RX DMA engine. poll until
-                                                     cleared to 0. */
-#define    SW_RESET_RSTOUT             0x00000004  /* force RSTOUT# pin active (low).
-                                                     resets PHY and anything else
-                                                     connected to RSTOUT#. RSTOUT#
-                                                     is also activated by local PCI
-                                                     reset when hot-swap is being
-                                                     done. */
-#define    SW_RESET_BLOCK_PCS_SLINK    0x00000008  /* if a global reset is done with
-                                                     this bit set, PCS and SLINK
-                                                     modules won't be reset.
-                                                     i.e., link won't drop. */
-#define    SW_RESET_BREQ_SM_MASK       0x00007F00  /* breq state machine [6:0] */
-#define    SW_RESET_PCIARB_SM_MASK     0x00070000  /* pci arbitration state bits:
-                                                     0b000: ARB_IDLE1
-                                                     0b001: ARB_IDLE2
-                                                     0b010: ARB_WB_ACK
-                                                     0b011: ARB_WB_WAT
-                                                     0b100: ARB_RB_ACK
-                                                     0b101: ARB_RB_WAT
-                                                     0b110: ARB_RB_END
-                                                     0b111: ARB_WB_END */
-#define    SW_RESET_RDPCI_SM_MASK      0x00300000  /* read pci state bits:
-                                                     0b00: RD_PCI_WAT
-                                                     0b01: RD_PCI_RDY
-                                                     0b11: RD_PCI_ACK */
-#define    SW_RESET_RDARB_SM_MASK      0x00C00000  /* read arbitration state bits:
-                                                     0b00: AD_IDL_RX
-                                                     0b01: AD_ACK_RX
-                                                     0b10: AD_ACK_TX
-                                                     0b11: AD_IDL_TX */
-#define    SW_RESET_WRPCI_SM_MASK      0x06000000  /* write pci state bits
-                                                     0b00: WR_PCI_WAT
-                                                     0b01: WR_PCI_RDY
-                                                     0b11: WR_PCI_ACK */
-#define    SW_RESET_WRARB_SM_MASK      0x38000000  /* write arbitration state bits:
-                                                     0b000: ARB_IDLE1
-                                                     0b001: ARB_IDLE2
-                                                     0b010: ARB_TX_ACK
-                                                     0b011: ARB_TX_WAT
-                                                     0b100: ARB_RX_ACK
-                                                     0b110: ARB_RX_WAT */
-
-/* Cassini only. 64-bit register used to check PCI datapath. when read,
- * value written has both lower and upper 32-bit halves rotated to the right
- * one bit position. e.g., FFFFFFFF FFFFFFFF -> 7FFFFFFF 7FFFFFFF
- */
-#define  REG_MINUS_BIM_DATAPATH_TEST   0x1018  /* Cassini: BIM datapath test
-                                                 Cassini+: reserved */
-
-/* output enables are provided for each device's chip select and for the rest
- * of the outputs from cassini to its local bus devices. two sw programmable
- * bits are connected to general purpus control/status bits.
- * DEFAULT: 0x7
- */
-#define  REG_BIM_LOCAL_DEV_EN          0x1020  /* BIM local device
-                                                 output EN. default: 0x7 */
-#define    BIM_LOCAL_DEV_PAD           0x01    /* address bus, RW signal, and
-                                                 OE signal output enable on the
-                                                 local bus interface. these
-                                                 are shared between both local
-                                                 bus devices. tristate when 0. */
-#define    BIM_LOCAL_DEV_PROM          0x02    /* PROM chip select */
-#define    BIM_LOCAL_DEV_EXT           0x04    /* secondary local bus device chip
-                                                 select output enable */
-#define    BIM_LOCAL_DEV_SOFT_0        0x08    /* sw programmable ctrl bit 0 */
-#define    BIM_LOCAL_DEV_SOFT_1        0x10    /* sw programmable ctrl bit 1 */
-#define    BIM_LOCAL_DEV_HW_RESET      0x20    /* internal hw reset. Cassini+ only. */
-
-/* access 24 entry BIM read and write buffers. put address in REG_BIM_BUFFER_ADDR
- * and read/write from/to it REG_BIM_BUFFER_DATA_LOW and _DATA_HI.
- * _DATA_HI should be the last access of the sequence.
- * DEFAULT: undefined
- */
-#define  REG_BIM_BUFFER_ADDR           0x1024  /* BIM buffer address. for
-                                                 purposes. */
-#define    BIM_BUFFER_ADDR_MASK        0x3F    /* index (0 - 23) of buffer  */
-#define    BIM_BUFFER_WR_SELECT        0x40    /* write buffer access = 1
-                                                 read buffer access = 0 */
-/* DEFAULT: undefined */
-#define  REG_BIM_BUFFER_DATA_LOW       0x1028  /* BIM buffer data low */
-#define  REG_BIM_BUFFER_DATA_HI        0x102C  /* BIM buffer data high */
-
-/* set BIM_RAM_BIST_START to start built-in self test for BIM read buffer.
- * bit auto-clears when done with status read from _SUMMARY and _PASS bits.
- */
-#define  REG_BIM_RAM_BIST              0x102C  /* BIM RAM (read buffer) BIST
-                                                 control/status */
-#define    BIM_RAM_BIST_RD_START       0x01    /* start BIST for BIM read buffer */
-#define    BIM_RAM_BIST_WR_START       0x02    /* start BIST for BIM write buffer.
-                                                 Cassini only. reserved in
-                                                 Cassini+. */
-#define    BIM_RAM_BIST_RD_PASS        0x04    /* summary BIST pass status for read
-                                                 buffer. */
-#define    BIM_RAM_BIST_WR_PASS        0x08    /* summary BIST pass status for write
-                                                 buffer. Cassini only. reserved
-                                                 in Cassini+. */
-#define    BIM_RAM_BIST_RD_LOW_PASS    0x10    /* read low bank passes BIST */
-#define    BIM_RAM_BIST_RD_HI_PASS     0x20    /* read high bank passes BIST */
-#define    BIM_RAM_BIST_WR_LOW_PASS    0x40    /* write low bank passes BIST.
-                                                 Cassini only. reserved in
-                                                 Cassini+. */
-#define    BIM_RAM_BIST_WR_HI_PASS     0x80    /* write high bank passes BIST.
-                                                 Cassini only. reserved in
-                                                 Cassini+. */
-
-/* ASUN: i'm not sure what this does as it's not in the spec.
- * DEFAULT: 0xFC
- */
-#define  REG_BIM_DIAG_MUX              0x1030  /* BIM diagnostic probe mux
-                                                 select register */
-
-/* enable probe monitoring mode and select data appearing on the P_A* bus. bit
- * values for _SEL_HI_MASK and _SEL_LOW_MASK:
- * 0x0: internal probe[7:0] (pci arb state, wtc empty w, wtc full w, wtc empty w,
- *                           wtc empty r, post pci)
- * 0x1: internal probe[15:8] (pci wbuf comp, pci wpkt comp, pci rbuf comp,
- *                            pci rpkt comp, txdma wr req, txdma wr ack,
- *                           txdma wr rdy, txdma wr xfr done)
- * 0x2: internal probe[23:16] (txdma rd req, txdma rd ack, txdma rd rdy, rxdma rd,
- *                             rd arb state, rd pci state)
- * 0x3: internal probe[31:24] (rxdma req, rxdma ack, rxdma rdy, wrarb state,
- *                             wrpci state)
- * 0x4: pci io probe[7:0]     0x5: pci io probe[15:8]
- * 0x6: pci io probe[23:16]   0x7: pci io probe[31:24]
- * 0x8: pci io probe[39:32]   0x9: pci io probe[47:40]
- * 0xa: pci io probe[55:48]   0xb: pci io probe[63:56]
- * the following are not available in Cassini:
- * 0xc: rx probe[7:0]         0xd: tx probe[7:0]
- * 0xe: hp probe[7:0]        0xf: mac probe[7:0]
- */
-#define  REG_PLUS_PROBE_MUX_SELECT     0x1034 /* Cassini+: PROBE MUX SELECT */
-#define    PROBE_MUX_EN                0x80000000 /* allow probe signals to be
-                                                    driven on local bus P_A[15:0]
-                                                    for debugging */
-#define    PROBE_MUX_SUB_MUX_MASK      0x0000FF00 /* select sub module probe signals:
-                                                    0x03 = mac[1:0]
-                                                    0x0C = rx[1:0]
-                                                    0x30 = tx[1:0]
-                                                    0xC0 = hp[1:0] */
-#define    PROBE_MUX_SEL_HI_MASK       0x000000F0 /* select which module to appear
-                                                    on P_A[15:8]. see above for
-                                                    values. */
-#define    PROBE_MUX_SEL_LOW_MASK      0x0000000F /* select which module to appear
-                                                    on P_A[7:0]. see above for
-                                                    values. */
-
-/* values mean the same thing as REG_INTR_MASK excep that it's for INTB.
- DEFAULT: 0x1F */
-#define  REG_PLUS_INTR_MASK_1          0x1038 /* Cassini+: interrupt mask
-                                                register 2 for INTB */
-#define  REG_PLUS_INTRN_MASK(x)       (REG_PLUS_INTR_MASK_1 + ((x) - 1)*16)
-/* bits correspond to both _MASK and _STATUS registers. _ALT corresponds to
- * all of the alternate (2-4) INTR registers while _1 corresponds to only
- * _MASK_1 and _STATUS_1 registers.
- * DEFAULT: 0x7 for MASK registers, 0x0 for ALIAS_CLEAR registers
- */
-#define    INTR_RX_DONE_ALT              0x01
-#define    INTR_RX_COMP_FULL_ALT         0x02
-#define    INTR_RX_COMP_AF_ALT           0x04
-#define    INTR_RX_BUF_UNAVAIL_1         0x08
-#define    INTR_RX_BUF_AE_1              0x10 /* almost empty */
-#define    INTRN_MASK_RX_EN              0x80
-#define    INTRN_MASK_CLEAR_ALL          (INTR_RX_DONE_ALT | \
-                                          INTR_RX_COMP_FULL_ALT | \
-                                          INTR_RX_COMP_AF_ALT | \
-                                          INTR_RX_BUF_UNAVAIL_1 | \
-                                          INTR_RX_BUF_AE_1)
-#define  REG_PLUS_INTR_STATUS_1        0x103C /* Cassini+: interrupt status
-                                                register 2 for INTB. default: 0x1F */
-#define  REG_PLUS_INTRN_STATUS(x)       (REG_PLUS_INTR_STATUS_1 + ((x) - 1)*16)
-#define    INTR_STATUS_ALT_INTX_EN     0x80   /* generate INTX when one of the
-                                                flags are set. enables desc ring. */
-
-#define  REG_PLUS_ALIAS_CLEAR_1        0x1040 /* Cassini+: alias clear mask
-                                                register 2 for INTB */
-#define  REG_PLUS_ALIASN_CLEAR(x)      (REG_PLUS_ALIAS_CLEAR_1 + ((x) - 1)*16)
-
-#define  REG_PLUS_INTR_STATUS_ALIAS_1  0x1044 /* Cassini+: interrupt status
-                                                register alias 2 for INTB */
-#define  REG_PLUS_INTRN_STATUS_ALIAS(x) (REG_PLUS_INTR_STATUS_ALIAS_1 + ((x) - 1)*16)
-
-#define REG_SATURN_PCFG               0x106c /* pin configuration register for
-                                               integrated macphy */
-
-#define   SATURN_PCFG_TLA             0x00000001 /* 1 = phy actled */
-#define   SATURN_PCFG_FLA             0x00000002 /* 1 = phy link10led */
-#define   SATURN_PCFG_CLA             0x00000004 /* 1 = phy link100led */
-#define   SATURN_PCFG_LLA             0x00000008 /* 1 = phy link1000led */
-#define   SATURN_PCFG_RLA             0x00000010 /* 1 = phy duplexled */
-#define   SATURN_PCFG_PDS             0x00000020 /* phy debug mode.
-                                                   0 = normal */
-#define   SATURN_PCFG_MTP             0x00000080 /* test point select */
-#define   SATURN_PCFG_GMO             0x00000100 /* GMII observe. 1 =
-                                                   GMII on SERDES pins for
-                                                   monitoring. */
-#define   SATURN_PCFG_FSI             0x00000200 /* 1 = freeze serdes/gmii. all
-                                                   pins configed as outputs.
-                                                   for power saving when using
-                                                   internal phy. */
-#define   SATURN_PCFG_LAD             0x00000800 /* 0 = mac core led ctrl
-                                                   polarity from strapping
-                                                   value.
-                                                   1 = mac core led ctrl
-                                                   polarity active low. */
-
-
-/** transmit dma registers **/
-#define MAX_TX_RINGS_SHIFT            2
-#define MAX_TX_RINGS                  (1 << MAX_TX_RINGS_SHIFT)
-#define MAX_TX_RINGS_MASK             (MAX_TX_RINGS - 1)
-
-/* TX configuration.
- * descr ring sizes size = 32 * (1 << n), n < 9. e.g., 0x8 = 8k. default: 0x8
- * DEFAULT: 0x3F000001
- */
-#define  REG_TX_CFG                    0x2004  /* TX config */
-#define    TX_CFG_DMA_EN               0x00000001  /* enable TX DMA. if cleared, DMA
-                                                     will stop after xfer of current
-                                                     buffer has been completed. */
-#define    TX_CFG_FIFO_PIO_SEL         0x00000002  /* TX DMA FIFO can be
-                                                     accessed w/ FIFO addr
-                                                     and data registers.
-                                                     TX DMA should be
-                                                     disabled. */
-#define    TX_CFG_DESC_RING0_MASK      0x0000003C  /* # desc entries in
-                                                     ring 1. */
-#define    TX_CFG_DESC_RING0_SHIFT     2
-#define    TX_CFG_DESC_RINGN_MASK(a)   (TX_CFG_DESC_RING0_MASK << (a)*4)
-#define    TX_CFG_DESC_RINGN_SHIFT(a)  (TX_CFG_DESC_RING0_SHIFT + (a)*4)
-#define    TX_CFG_PACED_MODE           0x00100000  /* TX_ALL only set after
-                                                     TX FIFO becomes empty.
-                                                     if 0, TX_ALL set
-                                                     if descr queue empty. */
-#define    TX_CFG_DMA_RDPIPE_DIS       0x01000000  /* always set to 1 */
-#define    TX_CFG_COMPWB_Q1            0x02000000  /* completion writeback happens at
-                                                     the end of every packet kicked
-                                                     through Q1. */
-#define    TX_CFG_COMPWB_Q2            0x04000000  /* completion writeback happens at
-                                                     the end of every packet kicked
-                                                     through Q2. */
-#define    TX_CFG_COMPWB_Q3            0x08000000  /* completion writeback happens at
-                                                     the end of every packet kicked
-                                                     through Q3 */
-#define    TX_CFG_COMPWB_Q4            0x10000000  /* completion writeback happens at
-                                                     the end of every packet kicked
-                                                     through Q4 */
-#define    TX_CFG_INTR_COMPWB_DIS      0x20000000  /* disable pre-interrupt completion
-                                                     writeback */
-#define    TX_CFG_CTX_SEL_MASK         0xC0000000  /* selects tx test port
-                                                     connection
-                                                     0b00: tx mac req,
-                                                           tx mac retry req,
-                                                           tx ack and tx tag.
-                                                     0b01: txdma rd req,
-                                                           txdma rd ack,
-                                                           txdma rd rdy,
-                                                           txdma rd type0
-                                                     0b11: txdma wr req,
-                                                           txdma wr ack,
-                                                           txdma wr rdy,
-                                                           txdma wr xfr done. */
-#define    TX_CFG_CTX_SEL_SHIFT        30
-
-/* 11-bit counters that point to next location in FIFO to be loaded/retrieved.
- * used for diagnostics only.
- */
-#define  REG_TX_FIFO_WRITE_PTR         0x2014  /* TX FIFO write pointer */
-#define  REG_TX_FIFO_SHADOW_WRITE_PTR  0x2018  /* TX FIFO shadow write
-                                                 pointer. temp hold reg.
-                                                 diagnostics only. */
-#define  REG_TX_FIFO_READ_PTR          0x201C  /* TX FIFO read pointer */
-#define  REG_TX_FIFO_SHADOW_READ_PTR   0x2020  /* TX FIFO shadow read
-                                                 pointer */
-
-/* (ro) 11-bit up/down counter w/ # of frames currently in TX FIFO */
-#define  REG_TX_FIFO_PKT_CNT           0x2024  /* TX FIFO packet counter */
-
-/* current state of all state machines in TX */
-#define  REG_TX_SM_1                   0x2028  /* TX state machine reg #1 */
-#define    TX_SM_1_CHAIN_MASK          0x000003FF   /* chaining state machine */
-#define    TX_SM_1_CSUM_MASK           0x00000C00   /* checksum state machine */
-#define    TX_SM_1_FIFO_LOAD_MASK      0x0003F000   /* FIFO load state machine.
-                                                      = 0x01 when TX disabled. */
-#define    TX_SM_1_FIFO_UNLOAD_MASK    0x003C0000   /* FIFO unload state machine */
-#define    TX_SM_1_CACHE_MASK          0x03C00000   /* desc. prefetch cache controller
-                                                      state machine */
-#define    TX_SM_1_CBQ_ARB_MASK        0xF8000000   /* CBQ arbiter state machine */
-
-#define  REG_TX_SM_2                   0x202C  /* TX state machine reg #2 */
-#define    TX_SM_2_COMP_WB_MASK        0x07    /* completion writeback sm */
-#define           TX_SM_2_SUB_LOAD_MASK       0x38    /* sub load state machine */
-#define           TX_SM_2_KICK_MASK           0xC0    /* kick state machine */
-
-/* 64-bit pointer to the transmit data buffer. only the 50 LSB are incremented
- * while the upper 23 bits are taken from the TX descriptor
- */
-#define  REG_TX_DATA_PTR_LOW           0x2030  /* TX data pointer low */
-#define  REG_TX_DATA_PTR_HI            0x2034  /* TX data pointer high */
-
-/* 13 bit registers written by driver w/ descriptor value that follows
- * last valid xmit descriptor. kick # and complete # values are used by
- * the xmit dma engine to control tx descr fetching. if > 1 valid
- * tx descr is available within the cache line being read, cassini will
- * internally cache up to 4 of them. 0 on reset. _KICK = rw, _COMP = ro.
- */
-#define  REG_TX_KICK0                  0x2038  /* TX kick reg #1 */
-#define  REG_TX_KICKN(x)               (REG_TX_KICK0 + (x)*4)
-#define  REG_TX_COMP0                  0x2048  /* TX completion reg #1 */
-#define  REG_TX_COMPN(x)               (REG_TX_COMP0 + (x)*4)
-
-/* values of TX_COMPLETE_1-4 are written. each completion register
- * is 2bytes in size and contiguous. 8B allocation w/ 8B alignment.
- * NOTE: completion reg values are only written back prior to TX_INTME and
- * TX_ALL interrupts. at all other times, the most up-to-date index values
- * should be obtained from the REG_TX_COMPLETE_# registers.
- * here's the layout:
- * offset from base addr      completion # byte
- *           0                TX_COMPLETE_1_MSB
- *          1                TX_COMPLETE_1_LSB
- *           2                TX_COMPLETE_2_MSB
- *          3                TX_COMPLETE_2_LSB
- *           4                TX_COMPLETE_3_MSB
- *          5                TX_COMPLETE_3_LSB
- *           6                TX_COMPLETE_4_MSB
- *          7                TX_COMPLETE_4_LSB
- */
-#define  TX_COMPWB_SIZE             8
-#define  REG_TX_COMPWB_DB_LOW       0x2058  /* TX completion write back
-                                              base low */
-#define  REG_TX_COMPWB_DB_HI        0x205C  /* TX completion write back
-                                              base high */
-#define    TX_COMPWB_MSB_MASK       0x00000000000000FFULL
-#define    TX_COMPWB_MSB_SHIFT      0
-#define    TX_COMPWB_LSB_MASK       0x000000000000FF00ULL
-#define    TX_COMPWB_LSB_SHIFT      8
-#define    TX_COMPWB_NEXT(x)        ((x) >> 16)
-
-/* 53 MSB used as base address. 11 LSB assumed to be 0. TX desc pointer must
- * be 2KB-aligned. */
-#define  REG_TX_DB0_LOW         0x2060  /* TX descriptor base low #1 */
-#define  REG_TX_DB0_HI          0x2064  /* TX descriptor base hi #1 */
-#define  REG_TX_DBN_LOW(x)      (REG_TX_DB0_LOW + (x)*8)
-#define  REG_TX_DBN_HI(x)       (REG_TX_DB0_HI + (x)*8)
-
-/* 16-bit registers hold weights for the weighted round-robin of the
- * four CBQ TX descr rings. weights correspond to # bytes xferred from
- * host to TXFIFO in a round of WRR arbitration. can be set
- * dynamically with new weights set upon completion of the current
- * packet transfer from host memory to TXFIFO. a dummy write to any of
- * these registers causes a queue1 pre-emption with all historical bw
- * deficit data reset to 0 (useful when congestion requires a
- * pre-emption/re-allocation of network bandwidth
- */
-#define  REG_TX_MAXBURST_0             0x2080  /* TX MaxBurst #1 */
-#define  REG_TX_MAXBURST_1             0x2084  /* TX MaxBurst #2 */
-#define  REG_TX_MAXBURST_2             0x2088  /* TX MaxBurst #3 */
-#define  REG_TX_MAXBURST_3             0x208C  /* TX MaxBurst #4 */
-
-/* diagnostics access to any TX FIFO location. every access is 65
- * bits.  _DATA_LOW = 32 LSB, _DATA_HI_T1/T0 = 32 MSB. _TAG = tag bit.
- * writing _DATA_HI_T0 sets tag bit low, writing _DATA_HI_T1 sets tag
- * bit high.  TX_FIFO_PIO_SEL must be set for TX FIFO PIO access. if
- * TX FIFO data integrity is desired, TX DMA should be
- * disabled. _DATA_HI_Tx should be the last access of the sequence.
- */
-#define  REG_TX_FIFO_ADDR              0x2104  /* TX FIFO address */
-#define  REG_TX_FIFO_TAG               0x2108  /* TX FIFO tag */
-#define  REG_TX_FIFO_DATA_LOW          0x210C  /* TX FIFO data low */
-#define  REG_TX_FIFO_DATA_HI_T1        0x2110  /* TX FIFO data high t1 */
-#define  REG_TX_FIFO_DATA_HI_T0        0x2114  /* TX FIFO data high t0 */
-#define  REG_TX_FIFO_SIZE              0x2118  /* (ro) TX FIFO size = 0x090 = 9KB */
-
-/* 9-bit register controls BIST of TX FIFO. bit set indicates that the BIST
- * passed for the specified memory
- */
-#define  REG_TX_RAMBIST                0x211C /* TX RAMBIST control/status */
-#define    TX_RAMBIST_STATE            0x01C0 /* progress state of RAMBIST
-                                                controller state machine */
-#define    TX_RAMBIST_RAM33A_PASS      0x0020 /* RAM33A passed */
-#define    TX_RAMBIST_RAM32A_PASS      0x0010 /* RAM32A passed */
-#define    TX_RAMBIST_RAM33B_PASS      0x0008 /* RAM33B passed */
-#define    TX_RAMBIST_RAM32B_PASS      0x0004 /* RAM32B passed */
-#define    TX_RAMBIST_SUMMARY          0x0002 /* all RAM passed */
-#define    TX_RAMBIST_START            0x0001 /* write 1 to start BIST. self
-                                                clears on completion. */
-
-/** receive dma registers **/
-#define MAX_RX_DESC_RINGS              2
-#define MAX_RX_COMP_RINGS              4
-
-/* receive DMA channel configuration. default: 0x80910
- * free ring size       = (1 << n)*32  -> [32 - 8k]
- * completion ring size = (1 << n)*128 -> [128 - 32k], n < 9
- * DEFAULT: 0x80910
- */
-#define  REG_RX_CFG                     0x4000  /* RX config */
-#define    RX_CFG_DMA_EN                0x00000001 /* enable RX DMA. 0 stops
-                                                        channel as soon as current
-                                                        frame xfer has completed.
-                                                        driver should disable MAC
-                                                        for 200ms before disabling
-                                                        RX */
-#define    RX_CFG_DESC_RING_MASK        0x0000001E /* # desc entries in RX
-                                                        free desc ring.
-                                                        def: 0x8 = 8k */
-#define    RX_CFG_DESC_RING_SHIFT       1
-#define    RX_CFG_COMP_RING_MASK        0x000001E0 /* # desc entries in RX complete
-                                                        ring. def: 0x8 = 32k */
-#define    RX_CFG_COMP_RING_SHIFT       5
-#define    RX_CFG_BATCH_DIS             0x00000200 /* disable receive desc
-                                                     batching. def: 0x0 =
-                                                     enabled */
-#define    RX_CFG_SWIVEL_MASK           0x00001C00 /* byte offset of the 1st
-                                                     data byte of the packet
-                                                     w/in 8 byte boundaries.
-                                                     this swivels the data
-                                                     DMA'ed to header
-                                                     buffers, jumbo buffers
-                                                     when header split is not
-                                                     requested and MTU sized
-                                                     buffers. def: 0x2 */
-#define    RX_CFG_SWIVEL_SHIFT          10
-
-/* cassini+ only */
-#define    RX_CFG_DESC_RING1_MASK       0x000F0000 /* # of desc entries in
-                                                        RX free desc ring 2.
-                                                        def: 0x8 = 8k */
-#define    RX_CFG_DESC_RING1_SHIFT      16
-
-
-/* the page size register allows cassini chips to do the following with
- * received data:
- * [--------------------------------------------------------------] page
- * [off][buf1][pad][off][buf2][pad][off][buf3][pad][off][buf4][pad]
- * |--------------| = PAGE_SIZE_BUFFER_STRIDE
- * page = PAGE_SIZE
- * offset = PAGE_SIZE_MTU_OFF
- * for the above example, MTU_BUFFER_COUNT = 4.
- * NOTE: as is apparent, you need to ensure that the following holds:
- * MTU_BUFFER_COUNT <= PAGE_SIZE/PAGE_SIZE_BUFFER_STRIDE
- * DEFAULT: 0x48002002 (8k pages)
- */
-#define  REG_RX_PAGE_SIZE               0x4004  /* RX page size */
-#define    RX_PAGE_SIZE_MASK            0x00000003 /* size of pages pointed to
-                                                     by receive descriptors.
-                                                     if jumbo buffers are
-                                                     supported the page size
-                                                     should not be < 8k.
-                                                     0b00 = 2k, 0b01 = 4k
-                                                     0b10 = 8k, 0b11 = 16k
-                                                     DEFAULT: 8k */
-#define    RX_PAGE_SIZE_SHIFT           0
-#define    RX_PAGE_SIZE_MTU_COUNT_MASK  0x00007800 /* # of MTU buffers the hw
-                                                     packs into a page.
-                                                     DEFAULT: 4 */
-#define    RX_PAGE_SIZE_MTU_COUNT_SHIFT 11
-#define    RX_PAGE_SIZE_MTU_STRIDE_MASK 0x18000000 /* # of bytes that separate
-                                                        each MTU buffer +
-                                                        offset from each
-                                                        other.
-                                                        0b00 = 1k, 0b01 = 2k
-                                                        0b10 = 4k, 0b11 = 8k
-                                                        DEFAULT: 0x1 */
-#define    RX_PAGE_SIZE_MTU_STRIDE_SHIFT 27
-#define    RX_PAGE_SIZE_MTU_OFF_MASK    0xC0000000 /* offset in each page that
-                                                     hw writes the MTU buffer
-                                                     into.
-                                                     0b00 = 0,
-                                                     0b01 = 64 bytes
-                                                     0b10 = 96, 0b11 = 128
-                                                     DEFAULT: 0x1 */
-#define    RX_PAGE_SIZE_MTU_OFF_SHIFT   30
-
-/* 11-bit counter points to next location in RX FIFO to be loaded/read.
- * shadow write pointers enable retries in case of early receive aborts.
- * DEFAULT: 0x0. generated on 64-bit boundaries.
- */
-#define  REG_RX_FIFO_WRITE_PTR             0x4008  /* RX FIFO write pointer */
-#define  REG_RX_FIFO_READ_PTR              0x400C  /* RX FIFO read pointer */
-#define  REG_RX_IPP_FIFO_SHADOW_WRITE_PTR  0x4010  /* RX IPP FIFO shadow write
-                                                     pointer */
-#define  REG_RX_IPP_FIFO_SHADOW_READ_PTR   0x4014  /* RX IPP FIFO shadow read
-                                                     pointer */
-#define  REG_RX_IPP_FIFO_READ_PTR          0x400C  /* RX IPP FIFO read
-                                                     pointer. (8-bit counter) */
-
-/* current state of RX DMA state engines + other info
- * DEFAULT: 0x0
- */
-#define  REG_RX_DEBUG                      0x401C  /* RX debug */
-#define    RX_DEBUG_LOAD_STATE_MASK        0x0000000F /* load state machine w/ MAC:
-                                                        0x0 = idle,   0x1 = load_bop
-                                                        0x2 = load 1, 0x3 = load 2
-                                                        0x4 = load 3, 0x5 = load 4
-                                                        0x6 = last detect
-                                                        0x7 = wait req
-                                                        0x8 = wait req status 1st
-                                                        0x9 = load st
-                                                        0xa = bubble mac
-                                                        0xb = error */
-#define    RX_DEBUG_LM_STATE_MASK          0x00000070 /* load state machine w/ HP and
-                                                        RX FIFO:
-                                                        0x0 = idle,   0x1 = hp xfr
-                                                        0x2 = wait hp ready
-                                                        0x3 = wait flow code
-                                                        0x4 = fifo xfer
-                                                        0x5 = make status
-                                                        0x6 = csum ready
-                                                        0x7 = error */
-#define    RX_DEBUG_FC_STATE_MASK          0x000000180 /* flow control state machine
-                                                        w/ MAC:
-                                                        0x0 = idle
-                                                        0x1 = wait xoff ack
-                                                        0x2 = wait xon
-                                                        0x3 = wait xon ack */
-#define    RX_DEBUG_DATA_STATE_MASK        0x000001E00 /* unload data state machine
-                                                        states:
-                                                        0x0 = idle data
-                                                        0x1 = header begin
-                                                        0x2 = xfer header
-                                                        0x3 = xfer header ld
-                                                        0x4 = mtu begin
-                                                        0x5 = xfer mtu
-                                                        0x6 = xfer mtu ld
-                                                        0x7 = jumbo begin
-                                                        0x8 = xfer jumbo
-                                                        0x9 = xfer jumbo ld
-                                                        0xa = reas begin
-                                                        0xb = xfer reas
-                                                        0xc = flush tag
-                                                        0xd = xfer reas ld
-                                                        0xe = error
-                                                        0xf = bubble idle */
-#define    RX_DEBUG_DESC_STATE_MASK        0x0001E000 /* unload desc state machine
-                                                        states:
-                                                        0x0 = idle desc
-                                                        0x1 = wait ack
-                                                        0x9 = wait ack 2
-                                                        0x2 = fetch desc 1
-                                                        0xa = fetch desc 2
-                                                        0x3 = load ptrs
-                                                        0x4 = wait dma
-                                                        0x5 = wait ack batch
-                                                        0x6 = post batch
-                                                        0x7 = xfr done */
-#define    RX_DEBUG_INTR_READ_PTR_MASK     0x30000000 /* interrupt read ptr of the
-                                                        interrupt queue */
-#define    RX_DEBUG_INTR_WRITE_PTR_MASK    0xC0000000 /* interrupt write pointer
-                                                        of the interrupt queue */
-
-/* flow control frames are emitted using two PAUSE thresholds:
- * XOFF PAUSE uses pause time value pre-programmed in the Send PAUSE MAC reg
- * XON PAUSE uses a pause time of 0. granularity of threshold is 64bytes.
- * PAUSE thresholds defined in terms of FIFO occupancy and may be translated
- * into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames
- * when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max
- * value is 0x6F.
- * DEFAULT: 0x00078
- */
-#define  REG_RX_PAUSE_THRESH               0x4020  /* RX pause thresholds */
-#define    RX_PAUSE_THRESH_QUANTUM         64
-#define    RX_PAUSE_THRESH_OFF_MASK        0x000001FF /* XOFF PAUSE emitted when
-                                                        RX FIFO occupancy >
-                                                        value*64B */
-#define    RX_PAUSE_THRESH_OFF_SHIFT       0
-#define    RX_PAUSE_THRESH_ON_MASK         0x001FF000 /* XON PAUSE emitted after
-                                                        emitting XOFF PAUSE when RX
-                                                        FIFO occupancy falls below
-                                                        this value*64B. must be
-                                                        < XOFF threshold. if =
-                                                        RX_FIFO_SIZE, XON frames are
-                                                        never emitted. */
-#define    RX_PAUSE_THRESH_ON_SHIFT        12
-
-/* 13-bit register used to control RX desc fetching and intr generation. if 4+
- * valid RX descriptors are available, Cassini will read 4 at a time.
- * writing N means that all desc up to *but* excluding N are available. N must
- * be a multiple of 4 (N % 4 = 0). first desc should be cache-line aligned.
- * DEFAULT: 0 on reset
- */
-#define  REG_RX_KICK                    0x4024  /* RX kick reg */
-
-/* 8KB aligned 64-bit pointer to the base of the RX free/completion rings.
- * lower 13 bits of the low register are hard-wired to 0.
- */
-#define  REG_RX_DB_LOW                     0x4028  /* RX descriptor ring
-                                                        base low */
-#define  REG_RX_DB_HI                      0x402C  /* RX descriptor ring
-                                                        base hi */
-#define  REG_RX_CB_LOW                     0x4030  /* RX completion ring
-                                                        base low */
-#define  REG_RX_CB_HI                      0x4034  /* RX completion ring
-                                                        base hi */
-/* 13-bit register indicate desc used by cassini for receive frames. used
- * for diagnostic purposes.
- * DEFAULT: 0 on reset
- */
-#define  REG_RX_COMP                       0x4038  /* (ro) RX completion */
-
-/* HEAD and TAIL are used to control RX desc posting and interrupt
- * generation.  hw moves the head register to pass ownership to sw. sw
- * moves the tail register to pass ownership back to hw. to give all
- * entries to hw, set TAIL = HEAD.  if HEAD and TAIL indicate that no
- * more entries are available, DMA will pause and an interrupt will be
- * generated to indicate no more entries are available.  sw can use
- * this interrupt to reduce the # of times it must update the
- * completion tail register.
- * DEFAULT: 0 on reset
- */
-#define  REG_RX_COMP_HEAD                  0x403C  /* RX completion head */
-#define  REG_RX_COMP_TAIL                  0x4040  /* RX completion tail */
-
-/* values used for receive interrupt blanking. loaded each time the ISR is read
- * DEFAULT: 0x00000000
- */
-#define  REG_RX_BLANK                      0x4044  /* RX blanking register
-                                                        for ISR read */
-#define    RX_BLANK_INTR_PKT_MASK          0x000001FF /* RX_DONE intr asserted if
-                                                        this many sets of completion
-                                                        writebacks (up to 2 packets)
-                                                        occur since the last time
-                                                        the ISR was read. 0 = no
-                                                        packet blanking */
-#define    RX_BLANK_INTR_PKT_SHIFT         0
-#define    RX_BLANK_INTR_TIME_MASK         0x3FFFF000 /* RX_DONE interrupt asserted
-                                                        if that many clocks were
-                                                        counted since last time the
-                                                        ISR was read.
-                                                        each count is 512 core
-                                                        clocks (125MHz). 0 = no
-                                                        time blanking */
-#define    RX_BLANK_INTR_TIME_SHIFT        12
-
-/* values used for interrupt generation based on threshold values of how
- * many free desc and completion entries are available for hw use.
- * DEFAULT: 0x00000000
- */
-#define  REG_RX_AE_THRESH                  0x4048  /* RX almost empty
-                                                        thresholds */
-#define    RX_AE_THRESH_FREE_MASK          0x00001FFF /* RX_BUF_AE will be
-                                                        generated if # desc
-                                                        avail for hw use <=
-                                                        # */
-#define    RX_AE_THRESH_FREE_SHIFT         0
-#define    RX_AE_THRESH_COMP_MASK          0x0FFFE000 /* RX_COMP_AE will be
-                                                        generated if # of
-                                                        completion entries
-                                                        avail for hw use <=
-                                                        # */
-#define    RX_AE_THRESH_COMP_SHIFT         13
-
-/* probabilities for random early drop (RED) thresholds on a FIFO threshold
- * basis. probability should increase when the FIFO level increases. control
- * packets are never dropped and not counted in stats. probability programmed
- * on a 12.5% granularity. e.g., 0x1 = 1/8 packets dropped.
- * DEFAULT: 0x00000000
- */
-#define  REG_RX_RED                      0x404C  /* RX random early detect enable */
-#define    RX_RED_4K_6K_FIFO_MASK        0x000000FF /*  4KB < FIFO thresh < 6KB */
-#define    RX_RED_6K_8K_FIFO_MASK        0x0000FF00 /*  6KB < FIFO thresh < 8KB */
-#define    RX_RED_8K_10K_FIFO_MASK       0x00FF0000 /*  8KB < FIFO thresh < 10KB */
-#define    RX_RED_10K_12K_FIFO_MASK      0xFF000000 /* 10KB < FIFO thresh < 12KB */
-
-/* FIFO fullness levels for RX FIFO, RX control FIFO, and RX IPP FIFO.
- * RX control FIFO = # of packets in RX FIFO.
- * DEFAULT: 0x0
- */
-#define  REG_RX_FIFO_FULLNESS              0x4050  /* (ro) RX FIFO fullness */
-#define    RX_FIFO_FULLNESS_RX_FIFO_MASK   0x3FF80000 /* level w/ 8B granularity */
-#define    RX_FIFO_FULLNESS_IPP_FIFO_MASK  0x0007FF00 /* level w/ 8B granularity */
-#define    RX_FIFO_FULLNESS_RX_PKT_MASK    0x000000FF /* # packets in RX FIFO */
-#define  REG_RX_IPP_PACKET_COUNT           0x4054  /* RX IPP packet counter */
-#define  REG_RX_WORK_DMA_PTR_LOW           0x4058  /* RX working DMA ptr low */
-#define  REG_RX_WORK_DMA_PTR_HI            0x405C  /* RX working DMA ptr
-                                                     high */
-
-/* BIST testing ro RX FIFO, RX control FIFO, and RX IPP FIFO. only RX BIST
- * START/COMPLETE is writeable. START will clear when the BIST has completed
- * checking all 17 RAMS.
- * DEFAULT: 0bxxxx xxxxx xxxx xxxx xxxx x000 0000 0000 00x0
- */
-#define  REG_RX_BIST                       0x4060  /* (ro) RX BIST */
-#define    RX_BIST_32A_PASS                0x80000000 /* RX FIFO 32A passed */
-#define    RX_BIST_33A_PASS                0x40000000 /* RX FIFO 33A passed */
-#define    RX_BIST_32B_PASS                0x20000000 /* RX FIFO 32B passed */
-#define    RX_BIST_33B_PASS                0x10000000 /* RX FIFO 33B passed */
-#define    RX_BIST_32C_PASS                0x08000000 /* RX FIFO 32C passed */
-#define    RX_BIST_33C_PASS                0x04000000 /* RX FIFO 33C passed */
-#define    RX_BIST_IPP_32A_PASS            0x02000000 /* RX IPP FIFO 32A passed */
-#define    RX_BIST_IPP_33A_PASS            0x01000000 /* RX IPP FIFO 33A passed */
-#define    RX_BIST_IPP_32B_PASS            0x00800000 /* RX IPP FIFO 32B passed */
-#define    RX_BIST_IPP_33B_PASS            0x00400000 /* RX IPP FIFO 33B passed */
-#define    RX_BIST_IPP_32C_PASS            0x00200000 /* RX IPP FIFO 32C passed */
-#define    RX_BIST_IPP_33C_PASS            0x00100000 /* RX IPP FIFO 33C passed */
-#define    RX_BIST_CTRL_32_PASS            0x00800000 /* RX CTRL FIFO 32 passed */
-#define    RX_BIST_CTRL_33_PASS            0x00400000 /* RX CTRL FIFO 33 passed */
-#define    RX_BIST_REAS_26A_PASS           0x00200000 /* RX Reas 26A passed */
-#define    RX_BIST_REAS_26B_PASS           0x00100000 /* RX Reas 26B passed */
-#define    RX_BIST_REAS_27_PASS            0x00080000 /* RX Reas 27 passed */
-#define    RX_BIST_STATE_MASK              0x00078000 /* BIST state machine */
-#define    RX_BIST_SUMMARY                 0x00000002 /* when BIST complete,
-                                                        summary pass bit
-                                                        contains AND of BIST
-                                                        results of all 16
-                                                        RAMS */
-#define    RX_BIST_START                   0x00000001 /* write 1 to start
-                                                        BIST. self clears
-                                                        on completion. */
-
-/* next location in RX CTRL FIFO that will be loaded w/ data from RX IPP/read
- * from to retrieve packet control info.
- * DEFAULT: 0
- */
-#define  REG_RX_CTRL_FIFO_WRITE_PTR        0x4064  /* (ro) RX control FIFO
-                                                     write ptr */
-#define  REG_RX_CTRL_FIFO_READ_PTR         0x4068  /* (ro) RX control FIFO read
-                                                     ptr */
-
-/* receive interrupt blanking. loaded each time interrupt alias register is
- * read.
- * DEFAULT: 0x0
- */
-#define  REG_RX_BLANK_ALIAS_READ           0x406C  /* RX blanking register for
-                                                     alias read */
-#define    RX_BAR_INTR_PACKET_MASK         0x000001FF /* assert RX_DONE if #
-                                                        completion writebacks
-                                                        > # since last ISR
-                                                        read. 0 = no
-                                                        blanking. up to 2
-                                                        packets per
-                                                        completion wb. */
-#define    RX_BAR_INTR_TIME_MASK           0x3FFFF000 /* assert RX_DONE if #
-                                                        clocks > # since last
-                                                        ISR read. each count
-                                                        is 512 core clocks
-                                                        (125MHz). 0 = no
-                                                        blanking. */
-
-/* diagnostic access to RX FIFO. 32 LSB accessed via DATA_LOW. 32 MSB accessed
- * via DATA_HI_T0 or DATA_HI_T1. TAG reads the tag bit. writing HI_T0
- * will unset the tag bit while writing HI_T1 will set the tag bit. to reset
- * to normal operation after diagnostics, write to address location 0x0.
- * RX_DMA_EN bit must be set to 0x0 for RX FIFO PIO access. DATA_HI should
- * be the last write access of a write sequence.
- * DEFAULT: undefined
- */
-#define  REG_RX_FIFO_ADDR                  0x4080  /* RX FIFO address */
-#define  REG_RX_FIFO_TAG                   0x4084  /* RX FIFO tag */
-#define  REG_RX_FIFO_DATA_LOW              0x4088  /* RX FIFO data low */
-#define  REG_RX_FIFO_DATA_HI_T0            0x408C  /* RX FIFO data high T0 */
-#define  REG_RX_FIFO_DATA_HI_T1            0x4090  /* RX FIFO data high T1 */
-
-/* diagnostic assess to RX CTRL FIFO. 8-bit FIFO_ADDR holds address of
- * 81 bit control entry and 6 bit flow id. LOW and MID are both 32-bit
- * accesses. HI is 7-bits with 6-bit flow id and 1 bit control
- * word. RX_DMA_EN must be 0 for RX CTRL FIFO PIO access. DATA_HI
- * should be last write access of the write sequence.
- * DEFAULT: undefined
- */
-#define  REG_RX_CTRL_FIFO_ADDR             0x4094  /* RX Control FIFO and
-                                                     Batching FIFO addr */
-#define  REG_RX_CTRL_FIFO_DATA_LOW         0x4098  /* RX Control FIFO data
-                                                     low */
-#define  REG_RX_CTRL_FIFO_DATA_MID         0x409C  /* RX Control FIFO data
-                                                     mid */
-#define  REG_RX_CTRL_FIFO_DATA_HI          0x4100  /* RX Control FIFO data
-                                                     hi and flow id */
-#define    RX_CTRL_FIFO_DATA_HI_CTRL       0x0001  /* upper bit of ctrl word */
-#define    RX_CTRL_FIFO_DATA_HI_FLOW_MASK  0x007E  /* flow id */
-
-/* diagnostic access to RX IPP FIFO. same semantics as RX_FIFO.
- * DEFAULT: undefined
- */
-#define  REG_RX_IPP_FIFO_ADDR              0x4104  /* RX IPP FIFO address */
-#define  REG_RX_IPP_FIFO_TAG               0x4108  /* RX IPP FIFO tag */
-#define  REG_RX_IPP_FIFO_DATA_LOW          0x410C  /* RX IPP FIFO data low */
-#define  REG_RX_IPP_FIFO_DATA_HI_T0        0x4110  /* RX IPP FIFO data high
-                                                     T0 */
-#define  REG_RX_IPP_FIFO_DATA_HI_T1        0x4114  /* RX IPP FIFO data high
-                                                     T1 */
-
-/* 64-bit pointer to receive data buffer in host memory used for headers and
- * small packets. MSB in high register. loaded by DMA state machine and
- * increments as DMA writes receive data. only 50 LSB are incremented. top
- * 13 bits taken from RX descriptor.
- * DEFAULT: undefined
- */
-#define  REG_RX_HEADER_PAGE_PTR_LOW        0x4118  /* (ro) RX header page ptr
-                                                     low */
-#define  REG_RX_HEADER_PAGE_PTR_HI         0x411C  /* (ro) RX header page ptr
-                                                     high */
-#define  REG_RX_MTU_PAGE_PTR_LOW           0x4120  /* (ro) RX MTU page pointer
-                                                     low */
-#define  REG_RX_MTU_PAGE_PTR_HI            0x4124  /* (ro) RX MTU page pointer
-                                                     high */
-
-/* PIO diagnostic access to RX reassembly DMA Table RAM. 6-bit register holds
- * one of 64 79-bit locations in the RX Reassembly DMA table and the addr of
- * one of the 64 byte locations in the Batching table. LOW holds 32 LSB.
- * MID holds the next 32 LSB. HIGH holds the 15 MSB. RX_DMA_EN must be set
- * to 0 for PIO access. DATA_HIGH should be last write of write sequence.
- * layout:
- * reassmbl ptr [78:15] | reassmbl index [14:1] | reassmbl entry valid [0]
- * DEFAULT: undefined
- */
-#define  REG_RX_TABLE_ADDR             0x4128  /* RX reassembly DMA table
-                                                 address */
-#define    RX_TABLE_ADDR_MASK          0x0000003F /* address mask */
-
-#define  REG_RX_TABLE_DATA_LOW         0x412C  /* RX reassembly DMA table
-                                                 data low */
-#define  REG_RX_TABLE_DATA_MID         0x4130  /* RX reassembly DMA table
-                                                 data mid */
-#define  REG_RX_TABLE_DATA_HI          0x4134  /* RX reassembly DMA table
-                                                 data high */
-
-/* cassini+ only */
-/* 8KB aligned 64-bit pointer to base of RX rings. lower 13 bits hardwired to
- * 0. same semantics as primary desc/complete rings.
- */
-#define  REG_PLUS_RX_DB1_LOW            0x4200  /* RX descriptor ring
-                                                  2 base low */
-#define  REG_PLUS_RX_DB1_HI             0x4204  /* RX descriptor ring
-                                                  2 base high */
-#define  REG_PLUS_RX_CB1_LOW            0x4208  /* RX completion ring
-                                                  2 base low. 4 total */
-#define  REG_PLUS_RX_CB1_HI             0x420C  /* RX completion ring
-                                                  2 base high. 4 total */
-#define  REG_PLUS_RX_CBN_LOW(x)        (REG_PLUS_RX_CB1_LOW + 8*((x) - 1))
-#define  REG_PLUS_RX_CBN_HI(x)         (REG_PLUS_RX_CB1_HI + 8*((x) - 1))
-#define  REG_PLUS_RX_KICK1             0x4220  /* RX Kick 2 register */
-#define  REG_PLUS_RX_COMP1             0x4224  /* (ro) RX completion 2
-                                                 reg */
-#define  REG_PLUS_RX_COMP1_HEAD        0x4228  /* (ro) RX completion 2
-                                                 head reg. 4 total. */
-#define  REG_PLUS_RX_COMP1_TAIL        0x422C  /* RX completion 2
-                                                 tail reg. 4 total. */
-#define  REG_PLUS_RX_COMPN_HEAD(x)    (REG_PLUS_RX_COMP1_HEAD + 8*((x) - 1))
-#define  REG_PLUS_RX_COMPN_TAIL(x)    (REG_PLUS_RX_COMP1_TAIL + 8*((x) - 1))
-#define  REG_PLUS_RX_AE1_THRESH        0x4240  /* RX almost empty 2
-                                                 thresholds */
-#define    RX_AE1_THRESH_FREE_MASK     RX_AE_THRESH_FREE_MASK
-#define    RX_AE1_THRESH_FREE_SHIFT    RX_AE_THRESH_FREE_SHIFT
-
-/** header parser registers **/
-
-/* RX parser configuration register.
- * DEFAULT: 0x1651004
- */
-#define  REG_HP_CFG                       0x4140  /* header parser
-                                                    configuration reg */
-#define    HP_CFG_PARSE_EN                0x00000001 /* enab header parsing */
-#define    HP_CFG_NUM_CPU_MASK            0x000000FC /* # processors
-                                                     0 = 64. 0x3f = 63 */
-#define    HP_CFG_NUM_CPU_SHIFT           2
-#define    HP_CFG_SYN_INC_MASK            0x00000100 /* SYN bit won't increment
-                                                       TCP seq # by one when
-                                                       stored in FDBM */
-#define    HP_CFG_TCP_THRESH_MASK         0x000FFE00 /* # bytes of TCP data
-                                                       needed to be considered
-                                                       for reassembly */
-#define    HP_CFG_TCP_THRESH_SHIFT        9
-
-/* access to RX Instruction RAM. 5-bit register/counter holds addr
- * of 39 bit entry to be read/written. 32 LSB in _DATA_LOW. 7 MSB in _DATA_HI.
- * RX_DMA_EN must be 0 for RX instr PIO access. DATA_HI should be last access
- * of sequence.
- * DEFAULT: undefined
- */
-#define  REG_HP_INSTR_RAM_ADDR             0x4144  /* HP instruction RAM
-                                                     address */
-#define    HP_INSTR_RAM_ADDR_MASK          0x01F   /* 5-bit mask */
-#define  REG_HP_INSTR_RAM_DATA_LOW         0x4148  /* HP instruction RAM
-                                                     data low */
-#define    HP_INSTR_RAM_LOW_OUTMASK_MASK   0x0000FFFF
-#define    HP_INSTR_RAM_LOW_OUTMASK_SHIFT  0
-#define    HP_INSTR_RAM_LOW_OUTSHIFT_MASK  0x000F0000
-#define    HP_INSTR_RAM_LOW_OUTSHIFT_SHIFT 16
-#define    HP_INSTR_RAM_LOW_OUTEN_MASK     0x00300000
-#define    HP_INSTR_RAM_LOW_OUTEN_SHIFT    20
-#define    HP_INSTR_RAM_LOW_OUTARG_MASK    0xFFC00000
-#define    HP_INSTR_RAM_LOW_OUTARG_SHIFT   22
-#define  REG_HP_INSTR_RAM_DATA_MID         0x414C  /* HP instruction RAM
-                                                     data mid */
-#define    HP_INSTR_RAM_MID_OUTARG_MASK    0x00000003
-#define    HP_INSTR_RAM_MID_OUTARG_SHIFT   0
-#define    HP_INSTR_RAM_MID_OUTOP_MASK     0x0000003C
-#define    HP_INSTR_RAM_MID_OUTOP_SHIFT    2
-#define    HP_INSTR_RAM_MID_FNEXT_MASK     0x000007C0
-#define    HP_INSTR_RAM_MID_FNEXT_SHIFT    6
-#define    HP_INSTR_RAM_MID_FOFF_MASK      0x0003F800
-#define    HP_INSTR_RAM_MID_FOFF_SHIFT     11
-#define    HP_INSTR_RAM_MID_SNEXT_MASK     0x007C0000
-#define    HP_INSTR_RAM_MID_SNEXT_SHIFT    18
-#define    HP_INSTR_RAM_MID_SOFF_MASK      0x3F800000
-#define    HP_INSTR_RAM_MID_SOFF_SHIFT     23
-#define    HP_INSTR_RAM_MID_OP_MASK        0xC0000000
-#define    HP_INSTR_RAM_MID_OP_SHIFT       30
-#define  REG_HP_INSTR_RAM_DATA_HI          0x4150  /* HP instruction RAM
-                                                     data high */
-#define    HP_INSTR_RAM_HI_VAL_MASK        0x0000FFFF
-#define    HP_INSTR_RAM_HI_VAL_SHIFT       0
-#define    HP_INSTR_RAM_HI_MASK_MASK       0xFFFF0000
-#define    HP_INSTR_RAM_HI_MASK_SHIFT      16
-
-/* PIO access into RX Header parser data RAM and flow database.
- * 11-bit register. Data fills the LSB portion of bus if less than 32 bits.
- * DATA_RAM: write RAM_FDB_DATA with index to access DATA_RAM.
- * RAM bytes = 4*(x - 1) + [3:0]. e.g., 0 -> [3:0], 31 -> [123:120]
- * FLOWDB: write DATA_RAM_FDB register and then read/write FDB1-12 to access
- * flow database.
- * RX_DMA_EN must be 0 for RX parser RAM PIO access. RX Parser RAM data reg
- * should be the last write access of the write sequence.
- * DEFAULT: undefined
- */
-#define  REG_HP_DATA_RAM_FDB_ADDR          0x4154  /* HP data and FDB
-                                                     RAM address */
-#define    HP_DATA_RAM_FDB_DATA_MASK       0x001F  /* select 1 of 86 byte
-                                                     locations in header
-                                                     parser data ram to
-                                                     read/write */
-#define    HP_DATA_RAM_FDB_FDB_MASK        0x3F00  /* 1 of 64 353-bit locations
-                                                     in the flow database */
-#define  REG_HP_DATA_RAM_DATA              0x4158  /* HP data RAM data */
-
-/* HP flow database registers: 1 - 12, 0x415C - 0x4188, 4 8-bit bytes
- * FLOW_DB(1) = IP_SA[127:96], FLOW_DB(2) = IP_SA[95:64]
- * FLOW_DB(3) = IP_SA[63:32],  FLOW_DB(4) = IP_SA[31:0]
- * FLOW_DB(5) = IP_DA[127:96], FLOW_DB(6) = IP_DA[95:64]
- * FLOW_DB(7) = IP_DA[63:32],  FLOW_DB(8) = IP_DA[31:0]
- * FLOW_DB(9) = {TCP_SP[15:0],TCP_DP[15:0]}
- * FLOW_DB(10) = bit 0 has value for flow valid
- * FLOW_DB(11) = TCP_SEQ[63:32], FLOW_DB(12) = TCP_SEQ[31:0]
- */
-#define  REG_HP_FLOW_DB0                   0x415C  /* HP flow database 1 reg */
-#define  REG_HP_FLOW_DBN(x)                (REG_HP_FLOW_DB0 + (x)*4)
-
-/* diagnostics for RX Header Parser block.
- * ASUN: the header parser state machine register is used for diagnostics
- * purposes. however, the spec doesn't have any details on it.
- */
-#define  REG_HP_STATE_MACHINE              0x418C  /* (ro) HP state machine */
-#define  REG_HP_STATUS0                    0x4190  /* (ro) HP status 1 */
-#define    HP_STATUS0_SAP_MASK             0xFFFF0000 /* SAP */
-#define    HP_STATUS0_L3_OFF_MASK          0x0000FE00 /* L3 offset */
-#define    HP_STATUS0_LB_CPUNUM_MASK       0x000001F8 /* load balancing CPU
-                                                        number */
-#define    HP_STATUS0_HRP_OPCODE_MASK      0x00000007 /* HRP opcode */
-
-#define  REG_HP_STATUS1                    0x4194  /* (ro) HP status 2 */
-#define    HP_STATUS1_ACCUR2_MASK          0xE0000000 /* accu R2[6:4] */
-#define    HP_STATUS1_FLOWID_MASK          0x1F800000 /* flow id */
-#define    HP_STATUS1_TCP_OFF_MASK         0x007F0000 /* tcp payload offset */
-#define    HP_STATUS1_TCP_SIZE_MASK        0x0000FFFF /* tcp payload size */
-
-#define  REG_HP_STATUS2                    0x4198  /* (ro) HP status 3 */
-#define    HP_STATUS2_ACCUR2_MASK          0xF0000000 /* accu R2[3:0] */
-#define    HP_STATUS2_CSUM_OFF_MASK        0x07F00000 /* checksum start
-                                                        start offset */
-#define    HP_STATUS2_ACCUR1_MASK          0x000FE000 /* accu R1 */
-#define    HP_STATUS2_FORCE_DROP           0x00001000 /* force drop */
-#define    HP_STATUS2_BWO_REASSM           0x00000800 /* batching w/o
-                                                        reassembly */
-#define    HP_STATUS2_JH_SPLIT_EN          0x00000400 /* jumbo header split
-                                                        enable */
-#define    HP_STATUS2_FORCE_TCP_NOCHECK    0x00000200 /* force tcp no payload
-                                                        check */
-#define    HP_STATUS2_DATA_MASK_ZERO       0x00000100 /* mask of data length
-                                                        equal to zero */
-#define    HP_STATUS2_FORCE_TCP_CHECK      0x00000080 /* force tcp payload
-                                                        chk */
-#define    HP_STATUS2_MASK_TCP_THRESH      0x00000040 /* mask of payload
-                                                        threshold */
-#define    HP_STATUS2_NO_ASSIST            0x00000020 /* no assist */
-#define    HP_STATUS2_CTRL_PACKET_FLAG     0x00000010 /* control packet flag */
-#define    HP_STATUS2_TCP_FLAG_CHECK       0x00000008 /* tcp flag check */
-#define    HP_STATUS2_SYN_FLAG             0x00000004 /* syn flag */
-#define    HP_STATUS2_TCP_CHECK            0x00000002 /* tcp payload chk */
-#define    HP_STATUS2_TCP_NOCHECK          0x00000001 /* tcp no payload chk */
-
-/* BIST for header parser(HP) and flow database memories (FDBM). set _START
- * to start BIST. controller clears _START on completion. _START can also
- * be cleared to force termination of BIST. a bit set indicates that that
- * memory passed its BIST.
- */
-#define  REG_HP_RAM_BIST                   0x419C  /* HP RAM BIST reg */
-#define    HP_RAM_BIST_HP_DATA_PASS        0x80000000 /* HP data ram */
-#define    HP_RAM_BIST_HP_INSTR0_PASS      0x40000000 /* HP instr ram 0 */
-#define    HP_RAM_BIST_HP_INSTR1_PASS      0x20000000 /* HP instr ram 1 */
-#define    HP_RAM_BIST_HP_INSTR2_PASS      0x10000000 /* HP instr ram 2 */
-#define    HP_RAM_BIST_FDBM_AGE0_PASS      0x08000000 /* FDBM aging RAM0 */
-#define    HP_RAM_BIST_FDBM_AGE1_PASS      0x04000000 /* FDBM aging RAM1 */
-#define    HP_RAM_BIST_FDBM_FLOWID00_PASS  0x02000000 /* FDBM flowid RAM0
-                                                        bank 0 */
-#define    HP_RAM_BIST_FDBM_FLOWID10_PASS  0x01000000 /* FDBM flowid RAM1
-                                                        bank 0 */
-#define    HP_RAM_BIST_FDBM_FLOWID20_PASS  0x00800000 /* FDBM flowid RAM2
-                                                        bank 0 */
-#define    HP_RAM_BIST_FDBM_FLOWID30_PASS  0x00400000 /* FDBM flowid RAM3
-                                                        bank 0 */
-#define    HP_RAM_BIST_FDBM_FLOWID01_PASS  0x00200000 /* FDBM flowid RAM0
-                                                        bank 1 */
-#define    HP_RAM_BIST_FDBM_FLOWID11_PASS  0x00100000 /* FDBM flowid RAM1
-                                                        bank 2 */
-#define    HP_RAM_BIST_FDBM_FLOWID21_PASS  0x00080000 /* FDBM flowid RAM2
-                                                        bank 1 */
-#define    HP_RAM_BIST_FDBM_FLOWID31_PASS  0x00040000 /* FDBM flowid RAM3
-                                                        bank 1 */
-#define    HP_RAM_BIST_FDBM_TCPSEQ_PASS    0x00020000 /* FDBM tcp sequence
-                                                        RAM */
-#define    HP_RAM_BIST_SUMMARY             0x00000002 /* all BIST tests */
-#define    HP_RAM_BIST_START               0x00000001 /* start/stop BIST */
-
-
-/** MAC registers.  **/
-/* reset bits are set using a PIO write and self-cleared after the command
- * execution has completed.
- */
-#define  REG_MAC_TX_RESET                  0x6000  /* TX MAC software reset
-                                                     command (default: 0x0) */
-#define  REG_MAC_RX_RESET                  0x6004  /* RX MAC software reset
-                                                     command (default: 0x0) */
-/* execute a pause flow control frame transmission
- DEFAULT: 0x0XXXX */
-#define  REG_MAC_SEND_PAUSE                0x6008  /* send pause command reg */
-#define    MAC_SEND_PAUSE_TIME_MASK        0x0000FFFF /* value of pause time
-                                                        to be sent on network
-                                                        in units of slot
-                                                        times */
-#define    MAC_SEND_PAUSE_SEND             0x00010000 /* send pause flow ctrl
-                                                        frame on network */
-
-/* bit set indicates that event occurred. auto-cleared when status register
- * is read and have corresponding mask bits in mask register. events will
- * trigger an interrupt if the corresponding mask bit is 0.
- * status register default: 0x00000000
- * mask register default = 0xFFFFFFFF on reset
- */
-#define  REG_MAC_TX_STATUS                 0x6010  /* TX MAC status reg */
-#define    MAC_TX_FRAME_XMIT               0x0001  /* successful frame
-                                                     transmision */
-#define    MAC_TX_UNDERRUN                 0x0002  /* terminated frame
-                                                     transmission due to
-                                                     data starvation in the
-                                                     xmit data path */
-#define    MAC_TX_MAX_PACKET_ERR           0x0004  /* frame exceeds max allowed
-                                                     length passed to TX MAC
-                                                     by the DMA engine */
-#define    MAC_TX_COLL_NORMAL              0x0008  /* rollover of the normal
-                                                     collision counter */
-#define    MAC_TX_COLL_EXCESS              0x0010  /* rollover of the excessive
-                                                     collision counter */
-#define    MAC_TX_COLL_LATE                0x0020  /* rollover of the late
-                                                     collision counter */
-#define    MAC_TX_COLL_FIRST               0x0040  /* rollover of the first
-                                                     collision counter */
-#define    MAC_TX_DEFER_TIMER              0x0080  /* rollover of the defer
-                                                     timer */
-#define    MAC_TX_PEAK_ATTEMPTS            0x0100  /* rollover of the peak
-                                                     attempts counter */
-
-#define  REG_MAC_RX_STATUS                 0x6014  /* RX MAC status reg */
-#define    MAC_RX_FRAME_RECV               0x0001  /* successful receipt of
-                                                     a frame */
-#define    MAC_RX_OVERFLOW                 0x0002  /* dropped frame due to
-                                                     RX FIFO overflow */
-#define    MAC_RX_FRAME_COUNT              0x0004  /* rollover of receive frame
-                                                     counter */
-#define    MAC_RX_ALIGN_ERR                0x0008  /* rollover of alignment
-                                                     error counter */
-#define    MAC_RX_CRC_ERR                  0x0010  /* rollover of crc error
-                                                     counter */
-#define    MAC_RX_LEN_ERR                  0x0020  /* rollover of length
-                                                     error counter */
-#define    MAC_RX_VIOL_ERR                 0x0040  /* rollover of code
-                                                     violation error */
-
-/* DEFAULT: 0xXXXX0000 on reset */
-#define  REG_MAC_CTRL_STATUS               0x6018  /* MAC control status reg */
-#define    MAC_CTRL_PAUSE_RECEIVED         0x00000001  /* successful
-                                                         reception of a
-                                                         pause control
-                                                         frame */
-#define    MAC_CTRL_PAUSE_STATE            0x00000002  /* MAC has made a
-                                                         transition from
-                                                         "not paused" to
-                                                         "paused" */
-#define    MAC_CTRL_NOPAUSE_STATE          0x00000004  /* MAC has made a
-                                                         transition from
-                                                         "paused" to "not
-                                                         paused" */
-#define    MAC_CTRL_PAUSE_TIME_MASK        0xFFFF0000  /* value of pause time
-                                                         operand that was
-                                                         received in the last
-                                                         pause flow control
-                                                         frame */
-
-/* layout identical to TX MAC[8:0] */
-#define  REG_MAC_TX_MASK                   0x6020  /* TX MAC mask reg */
-/* layout identical to RX MAC[6:0] */
-#define  REG_MAC_RX_MASK                   0x6024  /* RX MAC mask reg */
-/* layout identical to CTRL MAC[2:0] */
-#define  REG_MAC_CTRL_MASK                 0x6028  /* MAC control mask reg */
-
-/* to ensure proper operation, CFG_EN must be cleared to 0 and a delay
- * imposed before writes to other bits in the TX_MAC_CFG register or any of
- * the MAC parameters is performed. delay dependent upon time required to
- * transmit a maximum size frame (= MAC_FRAMESIZE_MAX*8/Mbps). e.g.,
- * the delay for a 1518-byte frame on a 100Mbps network is 125us.
- * alternatively, just poll TX_CFG_EN until it reads back as 0.
- * NOTE: on half-duplex 1Gbps, TX_CFG_CARRIER_EXTEND and
- * RX_CFG_CARRIER_EXTEND should be set and the SLOT_TIME register should
- * be 0x200 (slot time of 512 bytes)
- */
-#define  REG_MAC_TX_CFG                 0x6030  /* TX MAC config reg */
-#define    MAC_TX_CFG_EN                0x0001  /* enable TX MAC. 0 will
-                                                     force TXMAC state
-                                                     machine to remain in
-                                                     idle state or to
-                                                     transition to idle state
-                                                     on completion of an
-                                                     ongoing packet. */
-#define    MAC_TX_CFG_IGNORE_CARRIER    0x0002  /* disable CSMA/CD deferral
-                                                  process. set to 1 when
-                                                  full duplex and 0 when
-                                                  half duplex */
-#define    MAC_TX_CFG_IGNORE_COLL       0x0004  /* disable CSMA/CD backoff
-                                                  algorithm. set to 1 when
-                                                  full duplex and 0 when
-                                                  half duplex */
-#define    MAC_TX_CFG_IPG_EN            0x0008  /* enable extension of the
-                                                  Rx-to-TX IPG. after
-                                                  receiving a frame, TX
-                                                  MAC will reset its
-                                                  deferral process to
-                                                  carrier sense for the
-                                                  amount of time = IPG0 +
-                                                  IPG1 and commit to
-                                                  transmission for time
-                                                  specified in IPG2. when
-                                                  0 or when xmitting frames
-                                                  back-to-pack (Tx-to-Tx
-                                                  IPG), TX MAC ignores
-                                                  IPG0 and will only use
-                                                  IPG1 for deferral time.
-                                                  IPG2 still used. */
-#define    MAC_TX_CFG_NEVER_GIVE_UP_EN  0x0010  /* TX MAC will not easily
-                                                  give up on frame
-                                                  xmission. if backoff
-                                                  algorithm reaches the
-                                                  ATTEMPT_LIMIT, it will
-                                                  clear attempts counter
-                                                  and continue trying to
-                                                  send the frame as
-                                                  specified by
-                                                  GIVE_UP_LIM. when 0,
-                                                  TX MAC will execute
-                                                  standard CSMA/CD prot. */
-#define    MAC_TX_CFG_NEVER_GIVE_UP_LIM 0x0020  /* when set, TX MAC will
-                                                  continue to try to xmit
-                                                  until successful. when
-                                                  0, TX MAC will continue
-                                                  to try xmitting until
-                                                  successful or backoff
-                                                  algorithm reaches
-                                                  ATTEMPT_LIMIT*16 */
-#define    MAC_TX_CFG_NO_BACKOFF        0x0040  /* modify CSMA/CD to disable
-                                                  backoff algorithm. TX
-                                                  MAC will not back off
-                                                  after a xmission attempt
-                                                  that resulted in a
-                                                  collision. */
-#define    MAC_TX_CFG_SLOW_DOWN         0x0080  /* modify CSMA/CD so that
-                                                  deferral process is reset
-                                                  in response to carrier
-                                                  sense during the entire
-                                                  duration of IPG. TX MAC
-                                                  will only commit to frame
-                                                  xmission after frame
-                                                  xmission has actually
-                                                  begun. */
-#define    MAC_TX_CFG_NO_FCS            0x0100  /* TX MAC will not generate
-                                                  CRC for all xmitted
-                                                  packets. when clear, CRC
-                                                  generation is dependent
-                                                  upon NO_CRC bit in the
-                                                  xmit control word from
-                                                  TX DMA */
-#define    MAC_TX_CFG_CARRIER_EXTEND    0x0200  /* enables xmit part of the
-                                                  carrier extension
-                                                  feature. this allows for
-                                                  longer collision domains
-                                                  by extending the carrier
-                                                  and collision window
-                                                  from the end of FCS until
-                                                  the end of the slot time
-                                                  if necessary. Required
-                                                  for half-duplex at 1Gbps,
-                                                  clear otherwise. */
-
-/* when CRC is not stripped, reassembly packets will not contain the CRC.
- * these will be stripped by HRP because it reassembles layer 4 data, and the
- * CRC is layer 2. however, non-reassembly packets will still contain the CRC
- * when passed to the host. to ensure proper operation, need to wait 3.2ms
- * after clearing RX_CFG_EN before writing to any other RX MAC registers
- * or other MAC parameters. alternatively, poll RX_CFG_EN until it clears
- * to 0. similary, HASH_FILTER_EN and ADDR_FILTER_EN have the same
- * restrictions as CFG_EN.
- */
-#define  REG_MAC_RX_CFG                 0x6034  /* RX MAC config reg */
-#define    MAC_RX_CFG_EN                0x0001  /* enable RX MAC */
-#define    MAC_RX_CFG_STRIP_PAD         0x0002  /* always program to 0.
-                                                  feature not supported */
-#define    MAC_RX_CFG_STRIP_FCS         0x0004  /* RX MAC will strip the
-                                                  last 4 bytes of a
-                                                  received frame. */
-#define    MAC_RX_CFG_PROMISC_EN        0x0008  /* promiscuous mode */
-#define    MAC_RX_CFG_PROMISC_GROUP_EN  0x0010  /* accept all valid
-                                                  multicast frames (group
-                                                  bit in DA field set) */
-#define    MAC_RX_CFG_HASH_FILTER_EN    0x0020  /* use hash table to filter
-                                                  multicast addresses */
-#define    MAC_RX_CFG_ADDR_FILTER_EN    0x0040  /* cause RX MAC to use
-                                                  address filtering regs
-                                                  to filter both unicast
-                                                  and multicast
-                                                  addresses */
-#define    MAC_RX_CFG_DISABLE_DISCARD   0x0080  /* pass errored frames to
-                                                  RX DMA by setting BAD
-                                                  bit but not Abort bit
-                                                  in the status. CRC,
-                                                  framing, and length errs
-                                                  will not increment
-                                                  error counters. frames
-                                                  which don't match dest
-                                                  addr will be passed up
-                                                  w/ BAD bit set. */
-#define    MAC_RX_CFG_CARRIER_EXTEND    0x0100  /* enable reception of
-                                                  packet bursts generated
-                                                  by carrier extension
-                                                  with packet bursting
-                                                  senders. only applies
-                                                  to half-duplex 1Gbps */
-
-/* DEFAULT: 0x0 */
-#define  REG_MAC_CTRL_CFG               0x6038  /* MAC control config reg */
-#define    MAC_CTRL_CFG_SEND_PAUSE_EN   0x0001  /* respond to requests for
-                                                  sending pause flow ctrl
-                                                  frames */
-#define    MAC_CTRL_CFG_RECV_PAUSE_EN   0x0002  /* respond to received
-                                                  pause flow ctrl frames */
-#define    MAC_CTRL_CFG_PASS_CTRL       0x0004  /* pass valid MAC ctrl
-                                                  packets to RX DMA */
-
-/* to ensure proper operation, a global initialization sequence should be
- * performed when a loopback config is entered or exited. if programmed after
- * a hw or global sw reset, RX/TX MAC software reset and initialization
- * should be done to ensure stable clocking.
- * DEFAULT: 0x0
- */
-#define  REG_MAC_XIF_CFG                0x603C  /* XIF config reg */
-#define    MAC_XIF_TX_MII_OUTPUT_EN        0x0001  /* enable output drivers
-                                                     on MII xmit bus */
-#define    MAC_XIF_MII_INT_LOOPBACK        0x0002  /* loopback GMII xmit data
-                                                     path to GMII recv data
-                                                     path. phy mode register
-                                                     clock selection must be
-                                                     set to GMII mode and
-                                                     GMII_MODE should be set
-                                                     to 1. in loopback mode,
-                                                     REFCLK will drive the
-                                                     entire mac core. 0 for
-                                                     normal operation. */
-#define    MAC_XIF_DISABLE_ECHO            0x0004  /* disables receive data
-                                                     path during packet
-                                                     xmission. clear to 0
-                                                     in any full duplex mode,
-                                                     in any loopback mode,
-                                                     or in half-duplex SERDES
-                                                     or SLINK modes. set when
-                                                     in half-duplex when
-                                                     using external phy. */
-#define    MAC_XIF_GMII_MODE               0x0008  /* MAC operates with GMII
-                                                     clocks and datapath */
-#define    MAC_XIF_MII_BUFFER_OUTPUT_EN    0x0010  /* MII_BUF_EN pin. enable
-                                                     external tristate buffer
-                                                     on the MII receive
-                                                     bus. */
-#define    MAC_XIF_LINK_LED                0x0020  /* LINKLED# active (low) */
-#define    MAC_XIF_FDPLX_LED               0x0040  /* FDPLXLED# active (low) */
-
-#define  REG_MAC_IPG0                      0x6040  /* inter-packet gap0 reg.
-                                                     recommended: 0x00 */
-#define  REG_MAC_IPG1                      0x6044  /* inter-packet gap1 reg
-                                                     recommended: 0x08 */
-#define  REG_MAC_IPG2                      0x6048  /* inter-packet gap2 reg
-                                                     recommended: 0x04 */
-#define  REG_MAC_SLOT_TIME                 0x604C  /* slot time reg
-                                                     recommended: 0x40 */
-#define  REG_MAC_FRAMESIZE_MIN             0x6050  /* min frame size reg
-                                                     recommended: 0x40 */
-
-/* FRAMESIZE_MAX holds both the max frame size as well as the max burst size.
- * recommended value:  0x2000.05EE
- */
-#define  REG_MAC_FRAMESIZE_MAX             0x6054  /* max frame size reg */
-#define    MAC_FRAMESIZE_MAX_BURST_MASK    0x3FFF0000 /* max burst size */
-#define    MAC_FRAMESIZE_MAX_BURST_SHIFT   16
-#define    MAC_FRAMESIZE_MAX_FRAME_MASK    0x00007FFF /* max frame size */
-#define    MAC_FRAMESIZE_MAX_FRAME_SHIFT   0
-#define  REG_MAC_PA_SIZE                   0x6058  /* PA size reg. number of
-                                                     preamble bytes that the
-                                                     TX MAC will xmit at the
-                                                     beginning of each frame
-                                                     value should be 2 or
-                                                     greater. recommended
-                                                     value: 0x07 */
-#define  REG_MAC_JAM_SIZE                  0x605C  /* jam size reg. duration
-                                                     of jam in units of media
-                                                     byte time. recommended
-                                                     value: 0x04 */
-#define  REG_MAC_ATTEMPT_LIMIT             0x6060  /* attempt limit reg. #
-                                                     of attempts TX MAC will
-                                                     make to xmit a frame
-                                                     before it resets its
-                                                     attempts counter. after
-                                                     the limit has been
-                                                     reached, TX MAC may or
-                                                     may not drop the frame
-                                                     dependent upon value
-                                                     in TX_MAC_CFG.
-                                                     recommended
-                                                     value: 0x10 */
-#define  REG_MAC_CTRL_TYPE                 0x6064  /* MAC control type reg.
-                                                     type field of a MAC
-                                                     ctrl frame. recommended
-                                                     value: 0x8808 */
-
-/* mac address registers: 0 - 44, 0x6080 - 0x6130, 4 8-bit bytes.
- * register           contains                   comparison
- *    0        16 MSB of primary MAC addr        [47:32] of DA field
- *    1        16 middle bits ""                 [31:16] of DA field
- *    2        16 LSB ""                         [15:0] of DA field
- *    3*x      16MSB of alt MAC addr 1-15        [47:32] of DA field
- *    4*x      16 middle bits ""                 [31:16]
- *    5*x      16 LSB ""                         [15:0]
- *    42       16 MSB of MAC CTRL addr           [47:32] of DA.
- *    43       16 middle bits ""                 [31:16]
- *    44       16 LSB ""                         [15:0]
- *    MAC CTRL addr must be the reserved multicast addr for MAC CTRL frames.
- *    if there is a match, MAC will set the bit for alternative address
- *    filter pass [15]
-
- *    here is the map of registers given MAC address notation: a:b:c:d:e:f
- *                     ab             cd             ef
- *    primary addr     reg 2          reg 1          reg 0
- *    alt addr 1       reg 5          reg 4          reg 3
- *    alt addr x       reg 5*x        reg 4*x        reg 3*x
- *    ctrl addr        reg 44         reg 43         reg 42
- */
-#define  REG_MAC_ADDR0                     0x6080  /* MAC address 0 reg */
-#define  REG_MAC_ADDRN(x)                  (REG_MAC_ADDR0 + (x)*4)
-#define  REG_MAC_ADDR_FILTER0              0x614C  /* address filter 0 reg
-                                                     [47:32] */
-#define  REG_MAC_ADDR_FILTER1              0x6150  /* address filter 1 reg
-                                                     [31:16] */
-#define  REG_MAC_ADDR_FILTER2              0x6154  /* address filter 2 reg
-                                                     [15:0] */
-#define  REG_MAC_ADDR_FILTER2_1_MASK       0x6158  /* address filter 2 and 1
-                                                     mask reg. 8-bit reg
-                                                     contains nibble mask for
-                                                     reg 2 and 1. */
-#define  REG_MAC_ADDR_FILTER0_MASK         0x615C  /* address filter 0 mask
-                                                     reg */
-
-/* hash table registers: 0 - 15, 0x6160 - 0x619C, 4 8-bit bytes
- * 16-bit registers contain bits of the hash table.
- * reg x  -> [16*(15 - x) + 15 : 16*(15 - x)].
- * e.g., 15 -> [15:0], 0 -> [255:240]
- */
-#define  REG_MAC_HASH_TABLE0               0x6160  /* hash table 0 reg */
-#define  REG_MAC_HASH_TABLEN(x)            (REG_MAC_HASH_TABLE0 + (x)*4)
-
-/* statistics registers. these registers generate an interrupt on
- * overflow. recommended initialization: 0x0000. most are 16-bits except
- * for PEAK_ATTEMPTS register which is 8 bits.
- */
-#define  REG_MAC_COLL_NORMAL               0x61A0 /* normal collision
-                                                    counter. */
-#define  REG_MAC_COLL_FIRST                0x61A4 /* first attempt
-                                                    successful collision
-                                                    counter */
-#define  REG_MAC_COLL_EXCESS               0x61A8 /* excessive collision
-                                                    counter */
-#define  REG_MAC_COLL_LATE                 0x61AC /* late collision counter */
-#define  REG_MAC_TIMER_DEFER               0x61B0 /* defer timer. time base
-                                                    is the media byte
-                                                    clock/256 */
-#define  REG_MAC_ATTEMPTS_PEAK             0x61B4 /* peak attempts reg */
-#define  REG_MAC_RECV_FRAME                0x61B8 /* receive frame counter */
-#define  REG_MAC_LEN_ERR                   0x61BC /* length error counter */
-#define  REG_MAC_ALIGN_ERR                 0x61C0 /* alignment error counter */
-#define  REG_MAC_FCS_ERR                   0x61C4 /* FCS error counter */
-#define  REG_MAC_RX_CODE_ERR               0x61C8 /* RX code violation
-                                                    error counter */
-
-/* misc registers */
-#define  REG_MAC_RANDOM_SEED               0x61CC /* random number seed reg.
-                                                  10-bit register used as a
-                                                  seed  for the random number
-                                                  generator for the CSMA/CD
-                                                  backoff algorithm. only
-                                                  programmed after power-on
-                                                  reset and should be a
-                                                  random value which has a
-                                                  high likelihood of being
-                                                  unique for each MAC
-                                                  attached to a network
-                                                  segment (e.g., 10 LSB of
-                                                  MAC address) */
-
-/* ASUN: there's a PAUSE_TIMER (ro) described, but it's not in the address
- *       map
- */
-
-/* 27-bit register has the current state for key state machines in the MAC */
-#define  REG_MAC_STATE_MACHINE             0x61D0 /* (ro) state machine reg */
-#define    MAC_SM_RLM_MASK                 0x07800000
-#define    MAC_SM_RLM_SHIFT                23
-#define    MAC_SM_RX_FC_MASK               0x00700000
-#define    MAC_SM_RX_FC_SHIFT              20
-#define    MAC_SM_TLM_MASK                 0x000F0000
-#define    MAC_SM_TLM_SHIFT                16
-#define    MAC_SM_ENCAP_SM_MASK            0x0000F000
-#define    MAC_SM_ENCAP_SM_SHIFT           12
-#define    MAC_SM_TX_REQ_MASK              0x00000C00
-#define    MAC_SM_TX_REQ_SHIFT             10
-#define    MAC_SM_TX_FC_MASK               0x000003C0
-#define    MAC_SM_TX_FC_SHIFT              6
-#define    MAC_SM_FIFO_WRITE_SEL_MASK      0x00000038
-#define    MAC_SM_FIFO_WRITE_SEL_SHIFT     3
-#define    MAC_SM_TX_FIFO_EMPTY_MASK       0x00000007
-#define    MAC_SM_TX_FIFO_EMPTY_SHIFT      0
-
-/** MIF registers. the MIF can be programmed in either bit-bang or
- *  frame mode.
- **/
-#define  REG_MIF_BIT_BANG_CLOCK            0x6200 /* MIF bit-bang clock.
-                                                  1 -> 0 will generate a
-                                                  rising edge. 0 -> 1 will
-                                                  generate a falling edge. */
-#define  REG_MIF_BIT_BANG_DATA             0x6204 /* MIF bit-bang data. 1-bit
-                                                    register generates data */
-#define  REG_MIF_BIT_BANG_OUTPUT_EN        0x6208 /* MIF bit-bang output
-                                                    enable. enable when
-                                                    xmitting data from MIF to
-                                                    transceiver. */
-
-/* 32-bit register serves as an instruction register when the MIF is
- * programmed in frame mode. load this register w/ a valid instruction
- * (as per IEEE 802.3u MII spec). poll this register to check for instruction
- * execution completion. during a read operation, this register will also
- * contain the 16-bit data returned by the transceiver. unless specified
- * otherwise, fields are considered "don't care" when polling for
- * completion.
- */
-#define  REG_MIF_FRAME                     0x620C /* MIF frame/output reg */
-#define    MIF_FRAME_START_MASK            0xC0000000 /* start of frame.
-                                                        load w/ 01 when
-                                                        issuing an instr */
-#define    MIF_FRAME_ST                    0x40000000 /* STart of frame */
-#define    MIF_FRAME_OPCODE_MASK           0x30000000 /* opcode. 01 for a
-                                                        write. 10 for a
-                                                        read */
-#define    MIF_FRAME_OP_READ               0x20000000 /* read OPcode */
-#define    MIF_FRAME_OP_WRITE              0x10000000 /* write OPcode */
-#define    MIF_FRAME_PHY_ADDR_MASK         0x0F800000 /* phy address. when
-                                                        issuing an instr,
-                                                        this field should be
-                                                        loaded w/ the XCVR
-                                                        addr */
-#define    MIF_FRAME_PHY_ADDR_SHIFT        23
-#define    MIF_FRAME_REG_ADDR_MASK         0x007C0000 /* register address.
-                                                        when issuing an instr,
-                                                        addr of register
-                                                        to be read/written */
-#define    MIF_FRAME_REG_ADDR_SHIFT        18
-#define    MIF_FRAME_TURN_AROUND_MSB       0x00020000 /* turn around, MSB.
-                                                        when issuing an instr,
-                                                        set this bit to 1 */
-#define    MIF_FRAME_TURN_AROUND_LSB       0x00010000 /* turn around, LSB.
-                                                        when issuing an instr,
-                                                        set this bit to 0.
-                                                        when polling for
-                                                        completion, 1 means
-                                                        that instr execution
-                                                        has been completed */
-#define    MIF_FRAME_DATA_MASK             0x0000FFFF /* instruction payload
-                                                        load with 16-bit data
-                                                        to be written in
-                                                        transceiver reg for a
-                                                        write. doesn't matter
-                                                        in a read. when
-                                                        polling for
-                                                        completion, field is
-                                                        "don't care" for write
-                                                        and 16-bit data
-                                                        returned by the
-                                                        transceiver for a
-                                                        read (if valid bit
-                                                        is set) */
-#define  REG_MIF_CFG                    0x6210 /* MIF config reg */
-#define    MIF_CFG_PHY_SELECT           0x0001 /* 1 -> select MDIO_1
-                                                 0 -> select MDIO_0 */
-#define    MIF_CFG_POLL_EN              0x0002 /* enable polling
-                                                 mechanism. if set,
-                                                 BB_MODE should be 0 */
-#define    MIF_CFG_BB_MODE              0x0004 /* 1 -> bit-bang mode
-                                                 0 -> frame mode */
-#define    MIF_CFG_POLL_REG_MASK        0x00F8 /* register address to be
-                                                 used by polling mode.
-                                                 only meaningful if POLL_EN
-                                                 is set to 1 */
-#define    MIF_CFG_POLL_REG_SHIFT       3
-#define    MIF_CFG_MDIO_0               0x0100 /* (ro) dual purpose.
-                                                 when MDIO_0 is idle,
-                                                 1 -> transceiver is
-                                                 connected to MDIO_0.
-                                                 when MIF is communicating
-                                                 w/ MDIO_0 in bit-bang
-                                                 mode, this bit indicates
-                                                 the incoming bit stream
-                                                 during a read op */
-#define    MIF_CFG_MDIO_1               0x0200 /* (ro) dual purpose.
-                                                 when MDIO_1 is idle,
-                                                 1 -> transceiver is
-                                                 connected to MDIO_1.
-                                                 when MIF is communicating
-                                                 w/ MDIO_1 in bit-bang
-                                                 mode, this bit indicates
-                                                 the incoming bit stream
-                                                 during a read op */
-#define    MIF_CFG_POLL_PHY_MASK        0x7C00 /* transceiver address to
-                                                 be polled */
-#define    MIF_CFG_POLL_PHY_SHIFT       10
-
-/* 16-bit register used to determine which bits in the POLL_STATUS portion of
- * the MIF_STATUS register will cause an interrupt. if a mask bit is 0,
- * corresponding bit of the POLL_STATUS will generate a MIF interrupt when
- * set. DEFAULT: 0xFFFF
- */
-#define  REG_MIF_MASK                      0x6214 /* MIF mask reg */
-
-/* 32-bit register used when in poll mode. auto-cleared after being read */
-#define  REG_MIF_STATUS                    0x6218 /* MIF status reg */
-#define    MIF_STATUS_POLL_DATA_MASK       0xFFFF0000 /* poll data contains
-                                                        the "latest image"
-                                                        update of the XCVR
-                                                        reg being read */
-#define    MIF_STATUS_POLL_DATA_SHIFT      16
-#define    MIF_STATUS_POLL_STATUS_MASK     0x0000FFFF /* poll status indicates
-                                                        which bits in the
-                                                        POLL_DATA field have
-                                                        changed since the
-                                                        MIF_STATUS reg was
-                                                        last read */
-#define    MIF_STATUS_POLL_STATUS_SHIFT    0
-
-/* 7-bit register has current state for all state machines in the MIF */
-#define  REG_MIF_STATE_MACHINE             0x621C /* MIF state machine reg */
-#define    MIF_SM_CONTROL_MASK             0x07   /* control state machine
-                                                    state */
-#define    MIF_SM_EXECUTION_MASK           0x60   /* execution state machine
-                                                    state */
-
-/** PCS/Serialink. the following registers are equivalent to the standard
- *  MII management registers except that they're directly mapped in
- *  Cassini's register space.
- **/
-
-/* the auto-negotiation enable bit should be programmed the same at
- * the link partner as in the local device to enable auto-negotiation to
- * complete. when that bit is reprogrammed, auto-neg/manual config is
- * restarted automatically.
- * DEFAULT: 0x1040
- */
-#define  REG_PCS_MII_CTRL                  0x9000 /* PCS MII control reg */
-#define    PCS_MII_CTRL_1000_SEL           0x0040 /* reads 1. ignored on
-                                                    writes */
-#define    PCS_MII_CTRL_COLLISION_TEST     0x0080 /* COL signal at the PCS
-                                                    to MAC interface is
-                                                    activated regardless
-                                                    of activity */
-#define    PCS_MII_CTRL_DUPLEX             0x0100 /* forced 0x0. PCS
-                                                    behaviour same for
-                                                    half and full dplx */
-#define    PCS_MII_RESTART_AUTONEG         0x0200 /* self clearing.
-                                                    restart auto-
-                                                    negotiation */
-#define    PCS_MII_ISOLATE                 0x0400 /* read as 0. ignored
-                                                    on writes */
-#define    PCS_MII_POWER_DOWN              0x0800 /* read as 0. ignored
-                                                    on writes */
-#define    PCS_MII_AUTONEG_EN              0x1000 /* default 1. PCS goes
-                                                    through automatic
-                                                    link config before it
-                                                    can be used. when 0,
-                                                    link can be used
-                                                    w/out any link config
-                                                    phase */
-#define    PCS_MII_10_100_SEL              0x2000 /* read as 0. ignored on
-                                                    writes */
-#define    PCS_MII_RESET                   0x8000 /* reset PCS. self-clears
-                                                    when done */
-
-/* DEFAULT: 0x0108 */
-#define  REG_PCS_MII_STATUS                0x9004 /* PCS MII status reg */
-#define    PCS_MII_STATUS_EXTEND_CAP       0x0001 /* reads 0 */
-#define    PCS_MII_STATUS_JABBER_DETECT    0x0002 /* reads 0 */
-#define    PCS_MII_STATUS_LINK_STATUS      0x0004 /* 1 -> link up.
-                                                    0 -> link down. 0 is
-                                                    latched so that 0 is
-                                                    kept until read. read
-                                                    2x to determine if the
-                                                    link has gone up again */
-#define    PCS_MII_STATUS_AUTONEG_ABLE     0x0008 /* reads 1 (able to perform
-                                                    auto-neg) */
-#define    PCS_MII_STATUS_REMOTE_FAULT     0x0010 /* 1 -> remote fault detected
-                                                    from received link code
-                                                    word. only valid after
-                                                    auto-neg completed */
-#define    PCS_MII_STATUS_AUTONEG_COMP     0x0020 /* 1 -> auto-negotiation
-                                                         completed
-                                                    0 -> auto-negotiation not
-                                                    completed */
-#define    PCS_MII_STATUS_EXTEND_STATUS    0x0100 /* reads as 1. used as an
-                                                    indication that this is
-                                                    a 1000 Base-X PHY. writes
-                                                    to it are ignored */
-
-/* used during auto-negotiation.
- * DEFAULT: 0x00E0
- */
-#define  REG_PCS_MII_ADVERT                0x9008 /* PCS MII advertisement
-                                                    reg */
-#define    PCS_MII_ADVERT_FD               0x0020  /* advertise full duplex
-                                                     1000 Base-X */
-#define    PCS_MII_ADVERT_HD               0x0040  /* advertise half-duplex
-                                                     1000 Base-X */
-#define    PCS_MII_ADVERT_SYM_PAUSE        0x0080  /* advertise PAUSE
-                                                     symmetric capability */
-#define    PCS_MII_ADVERT_ASYM_PAUSE       0x0100  /* advertises PAUSE
-                                                     asymmetric capability */
-#define    PCS_MII_ADVERT_RF_MASK          0x3000 /* remote fault. write bit13
-                                                    to optionally indicate to
-                                                    link partner that chip is
-                                                    going off-line. bit12 will
-                                                    get set when signal
-                                                    detect == FAIL and will
-                                                    remain set until
-                                                    successful negotiation */
-#define    PCS_MII_ADVERT_ACK              0x4000 /* (ro) */
-#define    PCS_MII_ADVERT_NEXT_PAGE        0x8000 /* (ro) forced 0x0 */
-
-/* contents updated as a result of autonegotiation. layout and definitions
- * identical to PCS_MII_ADVERT
- */
-#define  REG_PCS_MII_LPA                   0x900C /* PCS MII link partner
-                                                    ability reg */
-#define    PCS_MII_LPA_FD             PCS_MII_ADVERT_FD
-#define    PCS_MII_LPA_HD             PCS_MII_ADVERT_HD
-#define    PCS_MII_LPA_SYM_PAUSE      PCS_MII_ADVERT_SYM_PAUSE
-#define    PCS_MII_LPA_ASYM_PAUSE     PCS_MII_ADVERT_ASYM_PAUSE
-#define    PCS_MII_LPA_RF_MASK        PCS_MII_ADVERT_RF_MASK
-#define    PCS_MII_LPA_ACK            PCS_MII_ADVERT_ACK
-#define    PCS_MII_LPA_NEXT_PAGE      PCS_MII_ADVERT_NEXT_PAGE
-
-/* DEFAULT: 0x0 */
-#define  REG_PCS_CFG                       0x9010 /* PCS config reg */
-#define    PCS_CFG_EN                      0x01   /* enable PCS. must be
-                                                    0 when modifying
-                                                    PCS_MII_ADVERT */
-#define    PCS_CFG_SD_OVERRIDE             0x02   /* sets signal detect to
-                                                    OK. bit is
-                                                    non-resettable */
-#define    PCS_CFG_SD_ACTIVE_LOW           0x04   /* changes interpretation
-                                                    of optical signal to make
-                                                    signal detect okay when
-                                                    signal is low */
-#define    PCS_CFG_JITTER_STUDY_MASK       0x18   /* used to make jitter
-                                                    measurements. a single
-                                                    code group is xmitted
-                                                    regularly.
-                                                    0x0 = normal operation
-                                                    0x1 = high freq test
-                                                          pattern, D21.5
-                                                    0x2 = low freq test
-                                                          pattern, K28.7
-                                                    0x3 = reserved */
-#define    PCS_CFG_10MS_TIMER_OVERRIDE     0x20   /* shortens 10-20ms auto-
-                                                    negotiation timer to
-                                                    a few cycles for test
-                                                    purposes */
-
-/* used for diagnostic purposes. bits 20-22 autoclear on read */
-#define  REG_PCS_STATE_MACHINE             0x9014 /* (ro) PCS state machine
-                                                    and diagnostic reg */
-#define    PCS_SM_TX_STATE_MASK            0x0000000F /* 0 and 1 indicate
-                                                        xmission of idle.
-                                                        otherwise, xmission of
-                                                        a packet */
-#define    PCS_SM_RX_STATE_MASK            0x000000F0 /* 0 indicates reception
-                                                        of idle. otherwise,
-                                                        reception of packet */
-#define    PCS_SM_WORD_SYNC_STATE_MASK     0x00000700 /* 0 indicates loss of
-                                                        sync */
-#define    PCS_SM_SEQ_DETECT_STATE_MASK    0x00001800 /* cycling through 0-3
-                                                        indicates reception of
-                                                        Config codes. cycling
-                                                        through 0-1 indicates
-                                                        reception of idles */
-#define    PCS_SM_LINK_STATE_MASK          0x0001E000
-#define        SM_LINK_STATE_UP            0x00016000 /* link state is up */
-
-#define    PCS_SM_LOSS_LINK_C              0x00100000 /* loss of link due to
-                                                        recept of Config
-                                                        codes */
-#define    PCS_SM_LOSS_LINK_SYNC           0x00200000 /* loss of link due to
-                                                        loss of sync */
-#define    PCS_SM_LOSS_SIGNAL_DETECT       0x00400000 /* signal detect goes
-                                                        from OK to FAIL. bit29
-                                                        will also be set if
-                                                        this is set */
-#define    PCS_SM_NO_LINK_BREAKLINK        0x01000000 /* link not up due to
-                                                       receipt of breaklink
-                                                       C codes from partner.
-                                                       C codes w/ 0 content
-                                                       received triggering
-                                                       start/restart of
-                                                       autonegotiation.
-                                                       should be sent for
-                                                       no longer than 20ms */
-#define    PCS_SM_NO_LINK_SERDES           0x02000000 /* serdes being
-                                                       initialized. see serdes
-                                                       state reg */
-#define    PCS_SM_NO_LINK_C                0x04000000 /* C codes not stable or
-                                                        not received */
-#define    PCS_SM_NO_LINK_SYNC             0x08000000 /* word sync not
-                                                        achieved */
-#define    PCS_SM_NO_LINK_WAIT_C           0x10000000 /* waiting for C codes
-                                                        w/ ack bit set */
-#define    PCS_SM_NO_LINK_NO_IDLE          0x20000000 /* link partner continues
-                                                        to send C codes
-                                                        instead of idle
-                                                        symbols or pkt data */
-
-/* this register indicates interrupt changes in specific PCS MII status bits.
- * PCS_INT may be masked at the ISR level. only a single bit is implemented
- * for link status change.
- */
-#define  REG_PCS_INTR_STATUS               0x9018 /* PCS interrupt status */
-#define    PCS_INTR_STATUS_LINK_CHANGE     0x04   /* link status has changed
-                                                    since last read */
-
-/* control which network interface is used. no more than one bit should
- * be set.
- * DEFAULT: none
- */
-#define  REG_PCS_DATAPATH_MODE             0x9050 /* datapath mode reg */
-#define    PCS_DATAPATH_MODE_MII           0x00 /* PCS is not used and
-                                                  MII/GMII is selected.
-                                                  selection between MII and
-                                                  GMII is controlled by
-                                                  XIF_CFG */
-#define    PCS_DATAPATH_MODE_SERDES        0x02 /* PCS is used via the
-                                                  10-bit interface */
-
-/* input to serdes chip or serialink block */
-#define  REG_PCS_SERDES_CTRL              0x9054 /* serdes control reg */
-#define    PCS_SERDES_CTRL_LOOPBACK       0x01   /* enable loopback on
-                                                   serdes interface */
-#define    PCS_SERDES_CTRL_SYNCD_EN       0x02   /* enable sync carrier
-                                                   detection. should be
-                                                   0x0 for normal
-                                                   operation */
-#define    PCS_SERDES_CTRL_LOCKREF       0x04   /* frequency-lock RBC[0:1]
-                                                  to REFCLK when set.
-                                                  when clear, receiver
-                                                  clock locks to incoming
-                                                  serial data */
-
-/* multiplex test outputs into the PROM address (PA_3 through PA_0) pins.
- * should be 0x0 for normal operations.
- * 0b000          normal operation, PROM address[3:0] selected
- * 0b001          rxdma req, rxdma ack, rxdma ready, rxdma read
- * 0b010          rxmac req, rx ack, rx tag, rx clk shared
- * 0b011          txmac req, tx ack, tx tag, tx retry req
- * 0b100          tx tp3, tx tp2, tx tp1, tx tp0
- * 0b101          R period RX, R period TX, R period HP, R period BIM
- * DEFAULT: 0x0
- */
-#define  REG_PCS_SHARED_OUTPUT_SEL         0x9058 /* shared output select */
-#define    PCS_SOS_PROM_ADDR_MASK          0x0007
-
-/* used for diagnostics. this register indicates progress of the SERDES
- * boot up.
- * 0b00       undergoing reset
- * 0b01       waiting 500us while lockrefn is asserted
- * 0b10       waiting for comma detect
- * 0b11       receive data is synchronized
- * DEFAULT: 0x0
- */
-#define  REG_PCS_SERDES_STATE              0x905C /* (ro) serdes state */
-#define    PCS_SERDES_STATE_MASK           0x03
-
-/* used for diagnostics. indicates number of packets transmitted or received.
- * counters rollover w/out generating an interrupt.
- * DEFAULT: 0x0
- */
-#define  REG_PCS_PACKET_COUNT              0x9060 /* (ro) PCS packet counter */
-#define    PCS_PACKET_COUNT_TX             0x000007FF /* pkts xmitted by PCS */
-#define    PCS_PACKET_COUNT_RX             0x07FF0000 /* pkts recvd by PCS
-                                                        whether they
-                                                        encountered an error
-                                                        or not */
-
-/** LocalBus Devices. the following provides run-time access to the
- *  Cassini's PROM
- ***/
-#define  REG_EXPANSION_ROM_RUN_START       0x100000 /* expansion rom run time
-                                                      access */
-#define  REG_EXPANSION_ROM_RUN_END         0x17FFFF
-
-#define  REG_SECOND_LOCALBUS_START         0x180000 /* secondary local bus
-                                                      device */
-#define  REG_SECOND_LOCALBUS_END           0x1FFFFF
-
-/* entropy device */
-#define  REG_ENTROPY_START                 REG_SECOND_LOCALBUS_START
-#define  REG_ENTROPY_DATA                  (REG_ENTROPY_START + 0x00)
-#define  REG_ENTROPY_STATUS                (REG_ENTROPY_START + 0x04)
-#define      ENTROPY_STATUS_DRDY           0x01
-#define      ENTROPY_STATUS_BUSY           0x02
-#define      ENTROPY_STATUS_CIPHER         0x04
-#define      ENTROPY_STATUS_BYPASS_MASK    0x18
-#define  REG_ENTROPY_MODE                  (REG_ENTROPY_START + 0x05)
-#define      ENTROPY_MODE_KEY_MASK         0x07
-#define      ENTROPY_MODE_ENCRYPT          0x40
-#define  REG_ENTROPY_RAND_REG              (REG_ENTROPY_START + 0x06)
-#define  REG_ENTROPY_RESET                 (REG_ENTROPY_START + 0x07)
-#define      ENTROPY_RESET_DES_IO          0x01
-#define      ENTROPY_RESET_STC_MODE        0x02
-#define      ENTROPY_RESET_KEY_CACHE       0x04
-#define      ENTROPY_RESET_IV              0x08
-#define  REG_ENTROPY_IV                    (REG_ENTROPY_START + 0x08)
-#define  REG_ENTROPY_KEY0                  (REG_ENTROPY_START + 0x10)
-#define  REG_ENTROPY_KEYN(x)               (REG_ENTROPY_KEY0 + 4*(x))
-
-/* phys of interest w/ their special mii registers */
-#define PHY_LUCENT_B0     0x00437421
-#define   LUCENT_MII_REG      0x1F
-
-#define PHY_NS_DP83065    0x20005c78
-#define   DP83065_MII_MEM     0x16
-#define   DP83065_MII_REGD    0x1D
-#define   DP83065_MII_REGE    0x1E
-
-#define PHY_BROADCOM_5411 0x00206071
-#define PHY_BROADCOM_B0   0x00206050
-#define   BROADCOM_MII_REG4   0x14
-#define   BROADCOM_MII_REG5   0x15
-#define   BROADCOM_MII_REG7   0x17
-#define   BROADCOM_MII_REG8   0x18
-
-#define   CAS_MII_ANNPTR          0x07
-#define   CAS_MII_ANNPRR          0x08
-#define   CAS_MII_1000_CTRL       0x09
-#define   CAS_MII_1000_STATUS     0x0A
-#define   CAS_MII_1000_EXTEND     0x0F
-
-#define   CAS_BMSR_1000_EXTEND    0x0100 /* supports 1000Base-T extended status */
-/*
- * if autoneg is disabled, here's the table:
- * BMCR_SPEED100 = 100Mbps
- * BMCR_SPEED1000 = 1000Mbps
- * ~(BMCR_SPEED100 | BMCR_SPEED1000) = 10Mbps
- */
-#define   CAS_BMCR_SPEED1000      0x0040  /* Select 1000Mbps */
-
-#define   CAS_ADVERTISE_1000HALF   0x0100
-#define   CAS_ADVERTISE_1000FULL   0x0200
-#define   CAS_ADVERTISE_PAUSE      0x0400
-#define   CAS_ADVERTISE_ASYM_PAUSE 0x0800
-
-/* regular lpa register */
-#define   CAS_LPA_PAUSE                   CAS_ADVERTISE_PAUSE
-#define   CAS_LPA_ASYM_PAUSE       CAS_ADVERTISE_ASYM_PAUSE
-
-/* 1000_STATUS register */
-#define   CAS_LPA_1000HALF        0x0400
-#define   CAS_LPA_1000FULL        0x0800
-
-#define   CAS_EXTEND_1000XFULL    0x8000
-#define   CAS_EXTEND_1000XHALF    0x4000
-#define   CAS_EXTEND_1000TFULL    0x2000
-#define   CAS_EXTEND_1000THALF    0x1000
-
-/* cassini header parser firmware */
-typedef struct cas_hp_inst {
-       const char *note;
-
-       u16 mask, val;
-
-       u8 op;
-       u8 soff, snext; /* if match succeeds, new offset and match */
-       u8 foff, fnext; /* if match fails, new offset and match */
-       /* output info */
-       u8 outop;    /* output opcode */
-
-       u16 outarg;  /* output argument */
-       u8 outenab;  /* output enable: 0 = not, 1 = if match
-                        2 = if !match, 3 = always */
-       u8 outshift; /* barrel shift right, 4 bits */
-       u16 outmask;
-} cas_hp_inst_t;
-
-/* comparison */
-#define OP_EQ     0 /* packet == value */
-#define OP_LT     1 /* packet < value */
-#define OP_GT     2 /* packet > value */
-#define OP_NP     3 /* new packet */
-
-/* output opcodes */
-#define        CL_REG  0
-#define        LD_FID  1
-#define        LD_SEQ  2
-#define        LD_CTL  3
-#define        LD_SAP  4
-#define        LD_R1   5
-#define        LD_L3   6
-#define        LD_SUM  7
-#define        LD_HDR  8
-#define        IM_FID  9
-#define        IM_SEQ  10
-#define        IM_SAP  11
-#define        IM_R1   12
-#define        IM_CTL  13
-#define        LD_LEN  14
-#define        ST_FLG  15
-
-/* match setp #s for IP4TCP4 */
-#define S1_PCKT         0
-#define S1_VLAN         1
-#define S1_CFI          2
-#define S1_8023         3
-#define S1_LLC          4
-#define S1_LLCc         5
-#define S1_IPV4         6
-#define S1_IPV4c        7
-#define S1_IPV4F        8
-#define S1_TCP44        9
-#define S1_IPV6         10
-#define S1_IPV6L        11
-#define S1_IPV6c        12
-#define S1_TCP64        13
-#define S1_TCPSQ        14
-#define S1_TCPFG        15
-#define        S1_TCPHL        16
-#define        S1_TCPHc        17
-#define        S1_CLNP         18
-#define        S1_CLNP2        19
-#define        S1_DROP         20
-#define        S2_HTTP         21
-#define        S1_ESP4         22
-#define        S1_AH4          23
-#define        S1_ESP6         24
-#define        S1_AH6          25
-
-#define CAS_PROG_IP46TCP4_PREAMBLE \
-{ "packet arrival?", 0xffff, 0x0000, OP_NP,  6, S1_VLAN,  0, S1_PCKT,  \
-  CL_REG, 0x3ff, 1, 0x0, 0x0000}, \
-{ "VLAN?", 0xffff, 0x8100, OP_EQ,  1, S1_CFI,   0, S1_8023,  \
-  IM_CTL, 0x00a,  3, 0x0, 0xffff}, \
-{ "CFI?", 0x1000, 0x1000, OP_EQ,  0, S1_DROP,  1, S1_8023, \
-  CL_REG, 0x000,  0, 0x0, 0x0000}, \
-{ "8023?", 0xffff, 0x0600, OP_LT,  1, S1_LLC,   0, S1_IPV4, \
-  CL_REG, 0x000,  0, 0x0, 0x0000}, \
-{ "LLC?", 0xffff, 0xaaaa, OP_EQ,  1, S1_LLCc,  0, S1_CLNP, \
-  CL_REG, 0x000,  0, 0x0, 0x0000}, \
-{ "LLCc?", 0xff00, 0x0300, OP_EQ,  2, S1_IPV4,  0, S1_CLNP, \
-  CL_REG, 0x000,  0, 0x0, 0x0000}, \
-{ "IPV4?", 0xffff, 0x0800, OP_EQ,  1, S1_IPV4c, 0, S1_IPV6, \
-  LD_SAP, 0x100,  3, 0x0, 0xffff}, \
-{ "IPV4 cont?", 0xff00, 0x4500, OP_EQ,  3, S1_IPV4F, 0, S1_CLNP, \
-  LD_SUM, 0x00a,  1, 0x0, 0x0000}, \
-{ "IPV4 frag?", 0x3fff, 0x0000, OP_EQ,  1, S1_TCP44, 0, S1_CLNP, \
-  LD_LEN, 0x03e,  1, 0x0, 0xffff}, \
-{ "TCP44?", 0x00ff, 0x0006, OP_EQ,  7, S1_TCPSQ, 0, S1_CLNP, \
-  LD_FID, 0x182,  1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ \
-{ "IPV6?", 0xffff, 0x86dd, OP_EQ,  1, S1_IPV6L, 0, S1_CLNP,  \
-  LD_SUM, 0x015,  1, 0x0, 0x0000}, \
-{ "IPV6 len", 0xf000, 0x6000, OP_EQ,  0, S1_IPV6c, 0, S1_CLNP, \
-  IM_R1,  0x128,  1, 0x0, 0xffff}, \
-{ "IPV6 cont?", 0x0000, 0x0000, OP_EQ,  3, S1_TCP64, 0, S1_CLNP, \
-  LD_FID, 0x484,  1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ \
-{ "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, \
-  LD_LEN, 0x03f,  1, 0x0, 0xffff}
-
-#ifdef USE_HP_IP46TCP4
-static cas_hp_inst_t cas_prog_ip46tcp4tab[] = {
-       CAS_PROG_IP46TCP4_PREAMBLE,
-       { "TCP seq", /* DADDR should point to dest port */
-         0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ,
-         0x081,  3, 0x0, 0xffff}, /* Load TCP seq # */
-       { "TCP control flags", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHL, 0,
-         S1_TCPHL, ST_FLG, 0x045,  3, 0x0, 0x002f}, /* Load TCP flags */
-       { "TCP length", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHc, 0,
-         S1_TCPHc, LD_R1,  0x205,  3, 0xB, 0xf000},
-       { "TCP length cont", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0,
-         S1_PCKT,  LD_HDR, 0x0ff,  3, 0x0, 0xffff},
-       { "Cleanup", 0x0000, 0x0000, OP_EQ,  0, S1_CLNP2,  0, S1_CLNP2,
-         IM_CTL, 0x001,  3, 0x0, 0x0001},
-       { "Cleanup 2", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         IM_CTL, 0x000,  0, 0x0, 0x0000},
-       { "Drop packet", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         IM_CTL, 0x080,  3, 0x0, 0xffff},
-       { NULL },
-};
-#ifdef HP_IP46TCP4_DEFAULT
-#define CAS_HP_FIRMWARE               cas_prog_ip46tcp4tab
-#endif
-#endif
-
-/*
- * Alternate table load which excludes HTTP server traffic from reassembly.
- * It is substantially similar to the basic table, with one extra state
- * and a few extra compares. */
-#ifdef USE_HP_IP46TCP4NOHTTP
-static cas_hp_inst_t cas_prog_ip46tcp4nohttptab[] = {
-       CAS_PROG_IP46TCP4_PREAMBLE,
-       { "TCP seq", /* DADDR should point to dest port */
-         0xFFFF, 0x0080, OP_EQ,  0, S2_HTTP,  0, S1_TCPFG, LD_SEQ,
-         0x081,  3, 0x0, 0xffff} , /* Load TCP seq # */
-       { "TCP control flags", 0xFFFF, 0x8080, OP_EQ,  0, S2_HTTP,  0,
-         S1_TCPHL, ST_FLG, 0x145,  2, 0x0, 0x002f, }, /* Load TCP flags */
-       { "TCP length", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHc, 0, S1_TCPHc,
-         LD_R1,  0x205,  3, 0xB, 0xf000},
-       { "TCP length cont", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         LD_HDR, 0x0ff,  3, 0x0, 0xffff},
-       { "Cleanup", 0x0000, 0x0000, OP_EQ,  0, S1_CLNP2,  0, S1_CLNP2,
-         IM_CTL, 0x001,  3, 0x0, 0x0001},
-       { "Cleanup 2", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         CL_REG, 0x002,  3, 0x0, 0x0000},
-       { "Drop packet", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         IM_CTL, 0x080,  3, 0x0, 0xffff},
-       { "No HTTP", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         IM_CTL, 0x044,  3, 0x0, 0xffff},
-       { NULL },
-};
-#ifdef HP_IP46TCP4NOHTTP_DEFAULT
-#define CAS_HP_FIRMWARE               cas_prog_ip46tcp4nohttptab
-#endif
-#endif
-
-/* match step #s for IP4FRAG */
-#define        S3_IPV6c        11
-#define        S3_TCP64        12
-#define        S3_TCPSQ        13
-#define        S3_TCPFG        14
-#define        S3_TCPHL        15
-#define        S3_TCPHc        16
-#define        S3_FRAG         17
-#define        S3_FOFF         18
-#define        S3_CLNP         19
-
-#ifdef USE_HP_IP4FRAG
-static cas_hp_inst_t cas_prog_ip4fragtab[] = {
-       { "packet arrival?", 0xffff, 0x0000, OP_NP,  6, S1_VLAN,  0, S1_PCKT,
-         CL_REG, 0x3ff, 1, 0x0, 0x0000},
-       { "VLAN?", 0xffff, 0x8100, OP_EQ,  1, S1_CFI,   0, S1_8023,
-         IM_CTL, 0x00a,  3, 0x0, 0xffff},
-       { "CFI?", 0x1000, 0x1000, OP_EQ,  0, S3_CLNP,  1, S1_8023,
-         CL_REG, 0x000,  0, 0x0, 0x0000},
-       { "8023?", 0xffff, 0x0600, OP_LT,  1, S1_LLC,   0, S1_IPV4,
-         CL_REG, 0x000,  0, 0x0, 0x0000},
-       { "LLC?", 0xffff, 0xaaaa, OP_EQ,  1, S1_LLCc,  0, S3_CLNP,
-         CL_REG, 0x000,  0, 0x0, 0x0000},
-       { "LLCc?",0xff00, 0x0300, OP_EQ,  2, S1_IPV4,  0, S3_CLNP,
-         CL_REG, 0x000,  0, 0x0, 0x0000},
-       { "IPV4?", 0xffff, 0x0800, OP_EQ,  1, S1_IPV4c, 0, S1_IPV6,
-         LD_SAP, 0x100,  3, 0x0, 0xffff},
-       { "IPV4 cont?", 0xff00, 0x4500, OP_EQ,  3, S1_IPV4F, 0, S3_CLNP,
-         LD_SUM, 0x00a,  1, 0x0, 0x0000},
-       { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ,  1, S1_TCP44, 0, S3_FRAG,
-         LD_LEN, 0x03e,  3, 0x0, 0xffff},
-       { "TCP44?", 0x00ff, 0x0006, OP_EQ,  7, S3_TCPSQ, 0, S3_CLNP,
-         LD_FID, 0x182,  3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
-       { "IPV6?", 0xffff, 0x86dd, OP_EQ,  1, S3_IPV6c, 0, S3_CLNP,
-         LD_SUM, 0x015,  1, 0x0, 0x0000},
-       { "IPV6 cont?", 0xf000, 0x6000, OP_EQ,  3, S3_TCP64, 0, S3_CLNP,
-         LD_FID, 0x484,  1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
-       { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S3_TCPSQ, 0, S3_CLNP,
-         LD_LEN, 0x03f,  1, 0x0, 0xffff},
-       { "TCP seq",    /* DADDR should point to dest port */
-         0x0000, 0x0000, OP_EQ,  0, S3_TCPFG, 4, S3_TCPFG, LD_SEQ,
-         0x081,  3, 0x0, 0xffff}, /* Load TCP seq # */
-       { "TCP control flags", 0x0000, 0x0000, OP_EQ,  0, S3_TCPHL, 0,
-         S3_TCPHL, ST_FLG, 0x045,  3, 0x0, 0x002f}, /* Load TCP flags */
-       { "TCP length", 0x0000, 0x0000, OP_EQ,  0, S3_TCPHc, 0, S3_TCPHc,
-         LD_R1,  0x205,  3, 0xB, 0xf000},
-       { "TCP length cont", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         LD_HDR, 0x0ff,  3, 0x0, 0xffff},
-       { "IP4 Fragment", 0x0000, 0x0000, OP_EQ,  0, S3_FOFF,  0, S3_FOFF,
-         LD_FID, 0x103,  3, 0x0, 0xffff}, /* FID IP4 src+dst */
-       { "IP4 frag offset", 0x0000, 0x0000, OP_EQ,  0, S3_FOFF,  0, S3_FOFF,
-         LD_SEQ, 0x040,  1, 0xD, 0xfff8},
-       { "Cleanup", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         IM_CTL, 0x001,  3, 0x0, 0x0001},
-       { NULL },
-};
-#ifdef HP_IP4FRAG_DEFAULT
-#define CAS_HP_FIRMWARE               cas_prog_ip4fragtab
-#endif
-#endif
-
-/*
- * Alternate table which does batching without reassembly
- */
-#ifdef USE_HP_IP46TCP4BATCH
-static cas_hp_inst_t cas_prog_ip46tcp4batchtab[] = {
-       CAS_PROG_IP46TCP4_PREAMBLE,
-       { "TCP seq",    /* DADDR should point to dest port */
-         0x0000, 0x0000, OP_EQ,  0, S1_TCPFG, 0, S1_TCPFG, LD_SEQ,
-         0x081,  3, 0x0, 0xffff}, /* Load TCP seq # */
-       { "TCP control flags", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHL, 0,
-         S1_TCPHL, ST_FLG, 0x000,  3, 0x0, 0x0000}, /* Load TCP flags */
-       { "TCP length", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHc, 0,
-         S1_TCPHc, LD_R1,  0x205,  3, 0xB, 0xf000},
-       { "TCP length cont", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0,
-         S1_PCKT,  IM_CTL, 0x040,  3, 0x0, 0xffff}, /* set batch bit */
-       { "Cleanup", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         IM_CTL, 0x001,  3, 0x0, 0x0001},
-       { "Drop packet", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0,
-         S1_PCKT,  IM_CTL, 0x080,  3, 0x0, 0xffff},
-       { NULL },
-};
-#ifdef HP_IP46TCP4BATCH_DEFAULT
-#define CAS_HP_FIRMWARE               cas_prog_ip46tcp4batchtab
-#endif
-#endif
-
-/* Workaround for Cassini rev2 descriptor corruption problem.
- * Does batching without reassembly, and sets the SAP to a known
- * data pattern for all packets.
- */
-#ifdef USE_HP_WORKAROUND
-static cas_hp_inst_t  cas_prog_workaroundtab[] = {
-       { "packet arrival?", 0xffff, 0x0000, OP_NP,  6, S1_VLAN,  0,
-         S1_PCKT,  CL_REG, 0x3ff,  1, 0x0, 0x0000} ,
-       { "VLAN?", 0xffff, 0x8100, OP_EQ,  1, S1_CFI, 0, S1_8023,
-         IM_CTL, 0x04a,  3, 0x0, 0xffff},
-       { "CFI?", 0x1000, 0x1000, OP_EQ,  0, S1_CLNP,  1, S1_8023,
-         CL_REG, 0x000,  0, 0x0, 0x0000},
-       { "8023?", 0xffff, 0x0600, OP_LT,  1, S1_LLC,   0, S1_IPV4,
-         CL_REG, 0x000,  0, 0x0, 0x0000},
-       { "LLC?", 0xffff, 0xaaaa, OP_EQ,  1, S1_LLCc,  0, S1_CLNP,
-         CL_REG, 0x000,  0, 0x0, 0x0000},
-       { "LLCc?", 0xff00, 0x0300, OP_EQ,  2, S1_IPV4,  0, S1_CLNP,
-         CL_REG, 0x000,  0, 0x0, 0x0000},
-       { "IPV4?", 0xffff, 0x0800, OP_EQ,  1, S1_IPV4c, 0, S1_IPV6,
-         IM_SAP, 0x6AE,  3, 0x0, 0xffff},
-       { "IPV4 cont?", 0xff00, 0x4500, OP_EQ,  3, S1_IPV4F, 0, S1_CLNP,
-         LD_SUM, 0x00a,  1, 0x0, 0x0000},
-       { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ,  1, S1_TCP44, 0, S1_CLNP,
-         LD_LEN, 0x03e,  1, 0x0, 0xffff},
-       { "TCP44?", 0x00ff, 0x0006, OP_EQ,  7, S1_TCPSQ, 0, S1_CLNP,
-         LD_FID, 0x182,  3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
-       { "IPV6?", 0xffff, 0x86dd, OP_EQ,  1, S1_IPV6L, 0, S1_CLNP,
-         LD_SUM, 0x015,  1, 0x0, 0x0000},
-       { "IPV6 len", 0xf000, 0x6000, OP_EQ,  0, S1_IPV6c, 0, S1_CLNP,
-         IM_R1,  0x128,  1, 0x0, 0xffff},
-       { "IPV6 cont?", 0x0000, 0x0000, OP_EQ,  3, S1_TCP64, 0, S1_CLNP,
-         LD_FID, 0x484,  1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
-       { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP,
-         LD_LEN, 0x03f,  1, 0x0, 0xffff},
-       { "TCP seq",      /* DADDR should point to dest port */
-         0x0000, 0x0000, OP_EQ,  0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ,
-         0x081,  3, 0x0, 0xffff}, /* Load TCP seq # */
-       { "TCP control flags", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHL, 0,
-         S1_TCPHL, ST_FLG, 0x045,  3, 0x0, 0x002f}, /* Load TCP flags */
-       { "TCP length", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHc, 0, S1_TCPHc,
-         LD_R1,  0x205,  3, 0xB, 0xf000},
-       { "TCP length cont", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0,
-         S1_PCKT,  LD_HDR, 0x0ff,  3, 0x0, 0xffff},
-       { "Cleanup", 0x0000, 0x0000, OP_EQ,  0, S1_CLNP2, 0, S1_CLNP2,
-         IM_SAP, 0x6AE,  3, 0x0, 0xffff} ,
-       { "Cleanup 2", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         IM_CTL, 0x001,  3, 0x0, 0x0001},
-       { NULL },
-};
-#ifdef HP_WORKAROUND_DEFAULT
-#define CAS_HP_FIRMWARE               cas_prog_workaroundtab
-#endif
-#endif
-
-#ifdef USE_HP_ENCRYPT
-static cas_hp_inst_t  cas_prog_encryptiontab[] = {
-       { "packet arrival?", 0xffff, 0x0000, OP_NP,  6, S1_VLAN,  0,
-         S1_PCKT,  CL_REG, 0x3ff,  1, 0x0, 0x0000},
-       { "VLAN?", 0xffff, 0x8100, OP_EQ,  1, S1_CFI,   0, S1_8023,
-         IM_CTL, 0x00a,  3, 0x0, 0xffff},
-#if 0
-//"CFI?", /* 02 FIND CFI and If FIND go to S1_DROP */
-//0x1000, 0x1000, OP_EQ,  0, S1_DROP,  1, S1_8023,  CL_REG, 0x000,  0, 0x0, 0x00
-       00,
-#endif
-       { "CFI?", /* FIND CFI and If FIND go to CleanUP1 (ignore and send to host) */
-         0x1000, 0x1000, OP_EQ,  0, S1_CLNP,  1, S1_8023,
-         CL_REG, 0x000,  0, 0x0, 0x0000},
-       { "8023?", 0xffff, 0x0600, OP_LT,  1, S1_LLC,   0, S1_IPV4,
-         CL_REG, 0x000,  0, 0x0, 0x0000},
-       { "LLC?", 0xffff, 0xaaaa, OP_EQ,  1, S1_LLCc,  0, S1_CLNP,
-         CL_REG, 0x000,  0, 0x0, 0x0000},
-       { "LLCc?", 0xff00, 0x0300, OP_EQ,  2, S1_IPV4,  0, S1_CLNP,
-         CL_REG, 0x000,  0, 0x0, 0x0000},
-       { "IPV4?", 0xffff, 0x0800, OP_EQ,  1, S1_IPV4c, 0, S1_IPV6,
-         LD_SAP, 0x100,  3, 0x0, 0xffff},
-       { "IPV4 cont?", 0xff00, 0x4500, OP_EQ,  3, S1_IPV4F, 0, S1_CLNP,
-         LD_SUM, 0x00a,  1, 0x0, 0x0000},
-       { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ,  1, S1_TCP44, 0, S1_CLNP,
-         LD_LEN, 0x03e,  1, 0x0, 0xffff},
-       { "TCP44?", 0x00ff, 0x0006, OP_EQ,  7, S1_TCPSQ, 0, S1_ESP4,
-         LD_FID, 0x182,  1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
-       { "IPV6?", 0xffff, 0x86dd, OP_EQ,  1, S1_IPV6L, 0, S1_CLNP,
-         LD_SUM, 0x015,  1, 0x0, 0x0000},
-       { "IPV6 len", 0xf000, 0x6000, OP_EQ,  0, S1_IPV6c, 0, S1_CLNP,
-         IM_R1,  0x128,  1, 0x0, 0xffff},
-       { "IPV6 cont?", 0x0000, 0x0000, OP_EQ,  3, S1_TCP64, 0, S1_CLNP,
-         LD_FID, 0x484,  1, 0x0, 0xffff}, /*  FID IP6&TCP src+dst */
-       { "TCP64?",
-#if 0
-//@@@0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_ESP6,  LD_LEN, 0x03f,  1, 0x0, 0xffff,
-#endif
-         0xff00, 0x0600, OP_EQ, 12, S1_TCPSQ, 0, S1_ESP6,  LD_LEN,
-         0x03f,  1, 0x0, 0xffff},
-       { "TCP seq", /* 14:DADDR should point to dest port */
-         0xFFFF, 0x0080, OP_EQ,  0, S2_HTTP,  0, S1_TCPFG, LD_SEQ,
-         0x081,  3, 0x0, 0xffff}, /* Load TCP seq # */
-       { "TCP control flags", 0xFFFF, 0x8080, OP_EQ,  0, S2_HTTP,  0,
-         S1_TCPHL, ST_FLG, 0x145,  2, 0x0, 0x002f}, /* Load TCP flags */
-       { "TCP length", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHc, 0, S1_TCPHc,
-         LD_R1,  0x205,  3, 0xB, 0xf000} ,
-       { "TCP length cont", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0,
-         S1_PCKT,  LD_HDR, 0x0ff,  3, 0x0, 0xffff},
-       { "Cleanup", 0x0000, 0x0000, OP_EQ,  0, S1_CLNP2,  0, S1_CLNP2,
-         IM_CTL, 0x001,  3, 0x0, 0x0001},
-       { "Cleanup 2", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         CL_REG, 0x002,  3, 0x0, 0x0000},
-       { "Drop packet", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         IM_CTL, 0x080,  3, 0x0, 0xffff},
-       { "No HTTP", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
-         IM_CTL, 0x044,  3, 0x0, 0xffff},
-       { "IPV4 ESP encrypted?",  /* S1_ESP4 */
-         0x00ff, 0x0032, OP_EQ,  0, S1_CLNP2, 0, S1_AH4, IM_CTL,
-         0x021, 1,  0x0, 0xffff},
-       { "IPV4 AH encrypted?",   /* S1_AH4 */
-         0x00ff, 0x0033, OP_EQ,  0, S1_CLNP2, 0, S1_CLNP, IM_CTL,
-         0x021, 1,  0x0, 0xffff},
-       { "IPV6 ESP encrypted?",  /* S1_ESP6 */
-#if 0
-//@@@0x00ff, 0x0032, OP_EQ,  0, S1_CLNP2, 0, S1_AH6, IM_CTL, 0x021, 1,  0x0, 0xffff,
-#endif
-         0xff00, 0x3200, OP_EQ,  0, S1_CLNP2, 0, S1_AH6, IM_CTL,
-         0x021, 1,  0x0, 0xffff},
-       { "IPV6 AH encrypted?",   /* S1_AH6 */
-#if 0
-//@@@0x00ff, 0x0033, OP_EQ,  0, S1_CLNP2, 0, S1_CLNP, IM_CTL, 0x021, 1,  0x0, 0xffff,
-#endif
-         0xff00, 0x3300, OP_EQ,  0, S1_CLNP2, 0, S1_CLNP, IM_CTL,
-         0x021, 1,  0x0, 0xffff},
-       { NULL },
-};
-#ifdef HP_ENCRYPT_DEFAULT
-#define CAS_HP_FIRMWARE               cas_prog_encryptiontab
-#endif
-#endif
-
-static cas_hp_inst_t cas_prog_null[] = { {NULL} };
-#ifdef HP_NULL_DEFAULT
-#define CAS_HP_FIRMWARE               cas_prog_null
-#endif
-
-/* phy types */
-#define   CAS_PHY_UNKNOWN       0x00
-#define   CAS_PHY_SERDES        0x01
-#define   CAS_PHY_MII_MDIO0     0x02
-#define   CAS_PHY_MII_MDIO1     0x04
-#define   CAS_PHY_MII(x)        ((x) & (CAS_PHY_MII_MDIO0 | CAS_PHY_MII_MDIO1))
-
-/* _RING_INDEX is the index for the ring sizes to be used.  _RING_SIZE
- * is the actual size. the default index for the various rings is
- * 8. NOTE: there a bunch of alignment constraints for the rings. to
- * deal with that, i just allocate rings to create the desired
- * alignment. here are the constraints:
- *   RX DESC and COMP rings must be 8KB aligned
- *   TX DESC must be 2KB aligned.
- * if you change the numbers, be cognizant of how the alignment will change
- * in INIT_BLOCK as well.
- */
-
-#define DESC_RING_I_TO_S(x)  (32*(1 << (x)))
-#define COMP_RING_I_TO_S(x)  (128*(1 << (x)))
-#define TX_DESC_RING_INDEX 4  /* 512 = 8k */
-#define RX_DESC_RING_INDEX 4  /* 512 = 8k */
-#define RX_COMP_RING_INDEX 4  /* 2048 = 64k: should be 4x rx ring size */
-
-#if (TX_DESC_RING_INDEX > 8) || (TX_DESC_RING_INDEX < 0)
-#error TX_DESC_RING_INDEX must be between 0 and 8
-#endif
-
-#if (RX_DESC_RING_INDEX > 8) || (RX_DESC_RING_INDEX < 0)
-#error RX_DESC_RING_INDEX must be between 0 and 8
-#endif
-
-#if (RX_COMP_RING_INDEX > 8) || (RX_COMP_RING_INDEX < 0)
-#error RX_COMP_RING_INDEX must be between 0 and 8
-#endif
-
-#define N_TX_RINGS                    MAX_TX_RINGS      /* for QoS */
-#define N_TX_RINGS_MASK               MAX_TX_RINGS_MASK
-#define N_RX_DESC_RINGS               MAX_RX_DESC_RINGS /* 1 for ipsec */
-#define N_RX_COMP_RINGS               0x1 /* for mult. PCI interrupts */
-
-/* number of flows that can go through re-assembly */
-#define N_RX_FLOWS                    64
-
-#define TX_DESC_RING_SIZE  DESC_RING_I_TO_S(TX_DESC_RING_INDEX)
-#define RX_DESC_RING_SIZE  DESC_RING_I_TO_S(RX_DESC_RING_INDEX)
-#define RX_COMP_RING_SIZE  COMP_RING_I_TO_S(RX_COMP_RING_INDEX)
-#define TX_DESC_RINGN_INDEX(x) TX_DESC_RING_INDEX
-#define RX_DESC_RINGN_INDEX(x) RX_DESC_RING_INDEX
-#define RX_COMP_RINGN_INDEX(x) RX_COMP_RING_INDEX
-#define TX_DESC_RINGN_SIZE(x)  TX_DESC_RING_SIZE
-#define RX_DESC_RINGN_SIZE(x)  RX_DESC_RING_SIZE
-#define RX_COMP_RINGN_SIZE(x)  RX_COMP_RING_SIZE
-
-/* convert values */
-#define CAS_BASE(x, y)                (((y) << (x ## _SHIFT)) & (x ## _MASK))
-#define CAS_VAL(x, y)                 (((y) & (x ## _MASK)) >> (x ## _SHIFT))
-#define CAS_TX_RINGN_BASE(y)          ((TX_DESC_RINGN_INDEX(y) << \
-                                        TX_CFG_DESC_RINGN_SHIFT(y)) & \
-                                        TX_CFG_DESC_RINGN_MASK(y))
-
-/* min is 2k, but we can't do jumbo frames unless it's at least 8k */
-#define CAS_MIN_PAGE_SHIFT            11 /* 2048 */
-#define CAS_JUMBO_PAGE_SHIFT          13 /* 8192 */
-#define CAS_MAX_PAGE_SHIFT            14 /* 16384 */
-
-#define TX_DESC_BUFLEN_MASK         0x0000000000003FFFULL /* buffer length in
-                                                            bytes. 0 - 9256 */
-#define TX_DESC_BUFLEN_SHIFT        0
-#define TX_DESC_CSUM_START_MASK     0x00000000001F8000ULL /* checksum start. #
-                                                            of bytes to be
-                                                            skipped before
-                                                            csum calc begins.
-                                                            value must be
-                                                            even */
-#define TX_DESC_CSUM_START_SHIFT    15
-#define TX_DESC_CSUM_STUFF_MASK     0x000000001FE00000ULL /* checksum stuff.
-                                                            byte offset w/in
-                                                            the pkt for the
-                                                            1st csum byte.
-                                                            must be > 8 */
-#define TX_DESC_CSUM_STUFF_SHIFT    21
-#define TX_DESC_CSUM_EN             0x0000000020000000ULL /* enable checksum */
-#define TX_DESC_EOF                 0x0000000040000000ULL /* end of frame */
-#define TX_DESC_SOF                 0x0000000080000000ULL /* start of frame */
-#define TX_DESC_INTME               0x0000000100000000ULL /* interrupt me */
-#define TX_DESC_NO_CRC              0x0000000200000000ULL /* debugging only.
-                                                            CRC will not be
-                                                            inserted into
-                                                            outgoing frame. */
-struct cas_tx_desc {
-       __le64     control;
-       __le64     buffer;
-};
-
-/* descriptor ring for free buffers contains page-sized buffers. the index
- * value is not used by the hw in any way. it's just stored and returned in
- * the completion ring.
- */
-struct cas_rx_desc {
-       __le64     index;
-       __le64     buffer;
-};
-
-/* received packets are put on the completion ring. */
-/* word 1 */
-#define RX_COMP1_DATA_SIZE_MASK           0x0000000007FFE000ULL
-#define RX_COMP1_DATA_SIZE_SHIFT          13
-#define RX_COMP1_DATA_OFF_MASK            0x000001FFF8000000ULL
-#define RX_COMP1_DATA_OFF_SHIFT           27
-#define RX_COMP1_DATA_INDEX_MASK          0x007FFE0000000000ULL
-#define RX_COMP1_DATA_INDEX_SHIFT         41
-#define RX_COMP1_SKIP_MASK                0x0180000000000000ULL
-#define RX_COMP1_SKIP_SHIFT               55
-#define RX_COMP1_RELEASE_NEXT             0x0200000000000000ULL
-#define RX_COMP1_SPLIT_PKT                0x0400000000000000ULL
-#define RX_COMP1_RELEASE_FLOW             0x0800000000000000ULL
-#define RX_COMP1_RELEASE_DATA             0x1000000000000000ULL
-#define RX_COMP1_RELEASE_HDR              0x2000000000000000ULL
-#define RX_COMP1_TYPE_MASK                0xC000000000000000ULL
-#define RX_COMP1_TYPE_SHIFT               62
-
-/* word 2 */
-#define RX_COMP2_NEXT_INDEX_MASK          0x00000007FFE00000ULL
-#define RX_COMP2_NEXT_INDEX_SHIFT         21
-#define RX_COMP2_HDR_SIZE_MASK            0x00000FF800000000ULL
-#define RX_COMP2_HDR_SIZE_SHIFT           35
-#define RX_COMP2_HDR_OFF_MASK             0x0003F00000000000ULL
-#define RX_COMP2_HDR_OFF_SHIFT            44
-#define RX_COMP2_HDR_INDEX_MASK           0xFFFC000000000000ULL
-#define RX_COMP2_HDR_INDEX_SHIFT          50
-
-/* word 3 */
-#define RX_COMP3_SMALL_PKT                0x0000000000000001ULL
-#define RX_COMP3_JUMBO_PKT                0x0000000000000002ULL
-#define RX_COMP3_JUMBO_HDR_SPLIT_EN       0x0000000000000004ULL
-#define RX_COMP3_CSUM_START_MASK          0x000000000007F000ULL
-#define RX_COMP3_CSUM_START_SHIFT         12
-#define RX_COMP3_FLOWID_MASK              0x0000000001F80000ULL
-#define RX_COMP3_FLOWID_SHIFT             19
-#define RX_COMP3_OPCODE_MASK              0x000000000E000000ULL
-#define RX_COMP3_OPCODE_SHIFT             25
-#define RX_COMP3_FORCE_FLAG               0x0000000010000000ULL
-#define RX_COMP3_NO_ASSIST                0x0000000020000000ULL
-#define RX_COMP3_LOAD_BAL_MASK            0x000001F800000000ULL
-#define RX_COMP3_LOAD_BAL_SHIFT           35
-#define RX_PLUS_COMP3_ENC_PKT             0x0000020000000000ULL /* cas+ */
-#define RX_COMP3_L3_HEAD_OFF_MASK         0x0000FE0000000000ULL /* cas */
-#define RX_COMP3_L3_HEAD_OFF_SHIFT        41
-#define RX_PLUS_COMP_L3_HEAD_OFF_MASK     0x0000FC0000000000ULL /* cas+ */
-#define RX_PLUS_COMP_L3_HEAD_OFF_SHIFT    42
-#define RX_COMP3_SAP_MASK                 0xFFFF000000000000ULL
-#define RX_COMP3_SAP_SHIFT                48
-
-/* word 4 */
-#define RX_COMP4_TCP_CSUM_MASK            0x000000000000FFFFULL
-#define RX_COMP4_TCP_CSUM_SHIFT           0
-#define RX_COMP4_PKT_LEN_MASK             0x000000003FFF0000ULL
-#define RX_COMP4_PKT_LEN_SHIFT            16
-#define RX_COMP4_PERFECT_MATCH_MASK       0x00000003C0000000ULL
-#define RX_COMP4_PERFECT_MATCH_SHIFT      30
-#define RX_COMP4_ZERO                     0x0000080000000000ULL
-#define RX_COMP4_HASH_VAL_MASK            0x0FFFF00000000000ULL
-#define RX_COMP4_HASH_VAL_SHIFT           44
-#define RX_COMP4_HASH_PASS                0x1000000000000000ULL
-#define RX_COMP4_BAD                      0x4000000000000000ULL
-#define RX_COMP4_LEN_MISMATCH             0x8000000000000000ULL
-
-/* we encode the following: ring/index/release. only 14 bits
- * are usable.
- * NOTE: the encoding is dependent upon RX_DESC_RING_SIZE and
- *       MAX_RX_DESC_RINGS. */
-#define RX_INDEX_NUM_MASK                 0x0000000000000FFFULL
-#define RX_INDEX_NUM_SHIFT                0
-#define RX_INDEX_RING_MASK                0x0000000000001000ULL
-#define RX_INDEX_RING_SHIFT               12
-#define RX_INDEX_RELEASE                  0x0000000000002000ULL
-
-struct cas_rx_comp {
-       __le64     word1;
-       __le64     word2;
-       __le64     word3;
-       __le64     word4;
-};
-
-enum link_state {
-       link_down = 0,  /* No link, will retry */
-       link_aneg,      /* Autoneg in progress */
-       link_force_try, /* Try Forced link speed */
-       link_force_ret, /* Forced mode worked, retrying autoneg */
-       link_force_ok,  /* Stay in forced mode */
-       link_up         /* Link is up */
-};
-
-typedef struct cas_page {
-       struct list_head list;
-       struct page *buffer;
-       dma_addr_t dma_addr;
-       int used;
-} cas_page_t;
-
-
-/* some alignment constraints:
- * TX DESC, RX DESC, and RX COMP must each be 8K aligned.
- * TX COMPWB must be 8-byte aligned.
- * to accomplish this, here's what we do:
- *
- * INIT_BLOCK_RX_COMP  = 64k (already aligned)
- * INIT_BLOCK_RX_DESC  = 8k
- * INIT_BLOCK_TX       = 8k
- * INIT_BLOCK_RX1_DESC = 8k
- * TX COMPWB
- */
-#define INIT_BLOCK_TX           (TX_DESC_RING_SIZE)
-#define INIT_BLOCK_RX_DESC      (RX_DESC_RING_SIZE)
-#define INIT_BLOCK_RX_COMP      (RX_COMP_RING_SIZE)
-
-struct cas_init_block {
-       struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP];
-       struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC];
-       struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX];
-       __le64 tx_compwb;
-};
-
-/* tiny buffers to deal with target abort issue. we allocate a bit
- * over so that we don't have target abort issues with these buffers
- * as well.
- */
-#define TX_TINY_BUF_LEN    0x100
-#define TX_TINY_BUF_BLOCK  ((INIT_BLOCK_TX + 1)*TX_TINY_BUF_LEN)
-
-struct cas_tiny_count {
-       int nbufs;
-       int used;
-};
-
-struct cas {
-       spinlock_t lock; /* for most bits */
-       spinlock_t tx_lock[N_TX_RINGS]; /* tx bits */
-       spinlock_t stat_lock[N_TX_RINGS + 1]; /* for stat gathering */
-       spinlock_t rx_inuse_lock; /* rx inuse list */
-       spinlock_t rx_spare_lock; /* rx spare list */
-
-       void __iomem *regs;
-       int tx_new[N_TX_RINGS], tx_old[N_TX_RINGS];
-       int rx_old[N_RX_DESC_RINGS];
-       int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS];
-       int rx_last[N_RX_DESC_RINGS];
-
-       struct napi_struct napi;
-
-       /* Set when chip is actually in operational state
-        * (ie. not power managed) */
-       int hw_running;
-       int opened;
-       struct mutex pm_mutex; /* open/close/suspend/resume */
-
-       struct cas_init_block *init_block;
-       struct cas_tx_desc *init_txds[MAX_TX_RINGS];
-       struct cas_rx_desc *init_rxds[MAX_RX_DESC_RINGS];
-       struct cas_rx_comp *init_rxcs[MAX_RX_COMP_RINGS];
-
-       /* we use sk_buffs for tx and pages for rx. the rx skbuffs
-        * are there for flow re-assembly. */
-       struct sk_buff      *tx_skbs[N_TX_RINGS][TX_DESC_RING_SIZE];
-       struct sk_buff_head  rx_flows[N_RX_FLOWS];
-       cas_page_t          *rx_pages[N_RX_DESC_RINGS][RX_DESC_RING_SIZE];
-       struct list_head     rx_spare_list, rx_inuse_list;
-       int                  rx_spares_needed;
-
-       /* for small packets when copying would be quicker than
-          mapping */
-       struct cas_tiny_count tx_tiny_use[N_TX_RINGS][TX_DESC_RING_SIZE];
-       u8 *tx_tiny_bufs[N_TX_RINGS];
-
-       u32                     msg_enable;
-
-       /* N_TX_RINGS must be >= N_RX_DESC_RINGS */
-       struct net_device_stats net_stats[N_TX_RINGS + 1];
-
-       u32                     pci_cfg[64 >> 2];
-       u8                      pci_revision;
-
-       int                     phy_type;
-       int                     phy_addr;
-       u32                     phy_id;
-#define CAS_FLAG_1000MB_CAP     0x00000001
-#define CAS_FLAG_REG_PLUS       0x00000002
-#define CAS_FLAG_TARGET_ABORT   0x00000004
-#define CAS_FLAG_SATURN         0x00000008
-#define CAS_FLAG_RXD_POST_MASK  0x000000F0
-#define CAS_FLAG_RXD_POST_SHIFT 4
-#define CAS_FLAG_RXD_POST(x)    ((1 << (CAS_FLAG_RXD_POST_SHIFT + (x))) & \
-                                 CAS_FLAG_RXD_POST_MASK)
-#define CAS_FLAG_ENTROPY_DEV    0x00000100
-#define CAS_FLAG_NO_HW_CSUM     0x00000200
-       u32                     cas_flags;
-       int                     packet_min; /* minimum packet size */
-       int                     tx_fifo_size;
-       int                     rx_fifo_size;
-       int                     rx_pause_off;
-       int                     rx_pause_on;
-       int                     crc_size;      /* 4 if half-duplex */
-
-       int                     pci_irq_INTC;
-       int                     min_frame_size; /* for tx fifo workaround */
-
-       /* page size allocation */
-       int                     page_size;
-       int                     page_order;
-       int                     mtu_stride;
-
-       u32                     mac_rx_cfg;
-
-       /* Autoneg & PHY control */
-       int                     link_cntl;
-       int                     link_fcntl;
-       enum link_state         lstate;
-       struct timer_list       link_timer;
-       int                     timer_ticks;
-       struct work_struct      reset_task;
-#if 0
-       atomic_t                reset_task_pending;
-#else
-       atomic_t                reset_task_pending;
-       atomic_t                reset_task_pending_mtu;
-       atomic_t                reset_task_pending_spare;
-       atomic_t                reset_task_pending_all;
-#endif
-
-       /* Link-down problem workaround */
-#define LINK_TRANSITION_UNKNOWN        0
-#define LINK_TRANSITION_ON_FAILURE     1
-#define LINK_TRANSITION_STILL_FAILED   2
-#define LINK_TRANSITION_LINK_UP        3
-#define LINK_TRANSITION_LINK_CONFIG    4
-#define LINK_TRANSITION_LINK_DOWN      5
-#define LINK_TRANSITION_REQUESTED_RESET        6
-       int                     link_transition;
-       int                     link_transition_jiffies_valid;
-       unsigned long           link_transition_jiffies;
-
-       /* Tuning */
-       u8 orig_cacheline_size; /* value when loaded */
-#define CAS_PREF_CACHELINE_SIZE         0x20   /* Minimum desired */
-
-       /* Diagnostic counters and state. */
-       int                     casreg_len; /* reg-space size for dumping */
-       u64                     pause_entered;
-       u16                     pause_last_time_recvd;
-
-       dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS];
-       struct pci_dev *pdev;
-       struct net_device *dev;
-#if defined(CONFIG_OF)
-       struct device_node      *of_node;
-#endif
-
-       /* Firmware Info */
-       u16                     fw_load_addr;
-       u32                     fw_size;
-       u8                      *fw_data;
-};
-
-#define TX_DESC_NEXT(r, x)  (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1))
-#define RX_DESC_ENTRY(r, x) ((x) & (RX_DESC_RINGN_SIZE(r) - 1))
-#define RX_COMP_ENTRY(r, x) ((x) & (RX_COMP_RINGN_SIZE(r) - 1))
-
-#define TX_BUFF_COUNT(r, x, y)    ((x) <= (y) ? ((y) - (x)) : \
-        (TX_DESC_RINGN_SIZE(r) - (x) + (y)))
-
-#define TX_BUFFS_AVAIL(cp, i)  ((cp)->tx_old[(i)] <= (cp)->tx_new[(i)] ? \
-        (cp)->tx_old[(i)] + (TX_DESC_RINGN_SIZE(i) - 1) - (cp)->tx_new[(i)] : \
-        (cp)->tx_old[(i)] - (cp)->tx_new[(i)] - 1)
-
-#define CAS_ALIGN(addr, align) \
-     (((unsigned long) (addr) + ((align) - 1UL)) & ~((align) - 1))
-
-#define RX_FIFO_SIZE                  16384
-#define EXPANSION_ROM_SIZE            65536
-
-#define CAS_MC_EXACT_MATCH_SIZE       15
-#define CAS_MC_HASH_SIZE              256
-#define CAS_MC_HASH_MAX              (CAS_MC_EXACT_MATCH_SIZE + \
-                                      CAS_MC_HASH_SIZE)
-
-#define TX_TARGET_ABORT_LEN           0x20
-#define RX_SWIVEL_OFF_VAL             0x2
-#define RX_AE_FREEN_VAL(x)            (RX_DESC_RINGN_SIZE(x) >> 1)
-#define RX_AE_COMP_VAL                (RX_COMP_RING_SIZE >> 1)
-#define RX_BLANK_INTR_PKT_VAL         0x05
-#define RX_BLANK_INTR_TIME_VAL        0x0F
-#define HP_TCP_THRESH_VAL             1530 /* reduce to enable reassembly */
-
-#define RX_SPARE_COUNT                (RX_DESC_RING_SIZE >> 1)
-#define RX_SPARE_RECOVER_VAL          (RX_SPARE_COUNT >> 2)
-
-#endif /* _CASSINI_H */
index 7efbb7cf91a976d9ac6d803bde294d02e7bb1595..5edd2371c53f7d1a223364d545be925fd8f62928 100644 (file)
@@ -21,5 +21,6 @@ source "drivers/net/ethernet/i825xx/Kconfig"
 source "drivers/net/ethernet/qlogic/Kconfig"
 source "drivers/net/ethernet/racal/Kconfig"
 source "drivers/net/ethernet/smsc/Kconfig"
+source "drivers/net/ethernet/sun/Kconfig"
 
 endif # ETHERNET
index 86da8b8339e133a28719e0d2c37265eb8402548d..18d8a893d78b225d7f35cf00a7d3cd1d136272e2 100644 (file)
@@ -12,3 +12,4 @@ obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
 obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
 obj-$(CONFIG_NET_VENDOR_RACAL) += racal/
 obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
+obj-$(CONFIG_NET_VENDOR_SUN) += sun/
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
new file mode 100644 (file)
index 0000000..87b17a7
--- /dev/null
@@ -0,0 +1,86 @@
+#
+# Sun network device configuration
+#
+
+config NET_VENDOR_SUN
+       bool "Sun devices"
+       depends on SUN3 || SBUS || PCI || SUN_LDOMS
+       ---help---
+         If you have a network (Ethernet) card belonging to this class, say
+         Y and read the Ethernet-HOWTO, available from
+         <http://www.tldp.org/docs.html#howto>.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about Sun network interfaces. If you say Y, you will be
+         asked for your specific card in the following questions.
+
+if NET_VENDOR_SUN
+
+config HAPPYMEAL
+       tristate "Sun Happy Meal 10/100baseT support"
+       depends on (SBUS || PCI)
+       select CRC32
+       ---help---
+         This driver supports the "hme" interface present on most Ultra
+         systems and as an option on older Sbus systems. This driver supports
+         both PCI and Sbus devices. This driver also supports the "qfe" quad
+         100baseT device available in both PCI and Sbus configurations.
+
+         To compile this driver as a module, choose M here: the module
+         will be called sunhme.
+
+config SUNBMAC
+       tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)"
+       depends on SBUS && EXPERIMENTAL
+       select CRC32
+       ---help---
+         This driver supports the "be" interface available as an Sbus option.
+         This is Sun's older 100baseT Ethernet device.
+
+         To compile this driver as a module, choose M here: the module
+         will be called sunbmac.
+
+config SUNQE
+       tristate "Sun QuadEthernet support"
+       depends on SBUS
+       select CRC32
+       ---help---
+         This driver supports the "qe" 10baseT Ethernet device, available as
+         an Sbus option. Note that this is not the same as Quad FastEthernet
+         "qfe" which is supported by the Happy Meal driver instead.
+
+         To compile this driver as a module, choose M here: the module
+         will be called sunqe.
+
+config SUNGEM
+       tristate "Sun GEM support"
+       depends on PCI
+       select CRC32
+       ---help---
+         Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0.  See also
+         <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.
+
+config CASSINI
+       tristate "Sun Cassini support"
+       depends on PCI
+       select CRC32
+       ---help---
+         Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
+         <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
+
+config SUNVNET
+       tristate "Sun Virtual Network support"
+       depends on SUN_LDOMS
+       ---help---
+         Support for virtual network devices under Sun Logical Domains.
+
+config NIU
+       tristate "Sun Neptune 10Gbit Ethernet support"
+       depends on PCI
+       select CRC32
+       ---help---
+         This enables support for cards based upon Sun's
+         Neptune chipset.
+
+endif # NET_VENDOR_SUN
diff --git a/drivers/net/ethernet/sun/Makefile b/drivers/net/ethernet/sun/Makefile
new file mode 100644 (file)
index 0000000..4f25217
--- /dev/null
@@ -0,0 +1,11 @@
+#
+# Makefile for the Sun network device drivers.
+#
+
+obj-$(CONFIG_HAPPYMEAL) += sunhme.o
+obj-$(CONFIG_SUNQE) += sunqe.o
+obj-$(CONFIG_SUNBMAC) += sunbmac.o
+obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o
+obj-$(CONFIG_CASSINI) += cassini.o
+obj-$(CONFIG_SUNVNET) += sunvnet.o
+obj-$(CONFIG_NIU) += niu.o
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
new file mode 100644 (file)
index 0000000..646c86b
--- /dev/null
@@ -0,0 +1,5305 @@
+/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
+ *
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ * This driver uses the sungem driver (c) David Miller
+ * (davem@redhat.com) as its basis.
+ *
+ * The cassini chip has a number of features that distinguish it from
+ * the gem chip:
+ *  4 transmit descriptor rings that are used for either QoS (VLAN) or
+ *      load balancing (non-VLAN mode)
+ *  batching of multiple packets
+ *  multiple CPU dispatching
+ *  page-based RX descriptor engine with separate completion rings
+ *  Gigabit support (GMII and PCS interface)
+ *  MIF link up/down detection works
+ *
+ * RX is handled by page sized buffers that are attached as fragments to
+ * the skb. here's what's done:
+ *  -- driver allocates pages at a time and keeps reference counts
+ *     on them.
+ *  -- the upper protocol layers assume that the header is in the skb
+ *     itself. as a result, cassini will copy a small amount (64 bytes)
+ *     to make them happy.
+ *  -- driver appends the rest of the data pages as frags to skbuffs
+ *     and increments the reference count
+ *  -- on page reclamation, the driver swaps the page with a spare page.
+ *     if that page is still in use, it frees its reference to that page,
+ *     and allocates a new page for use. otherwise, it just recycles
+ *     the page.
+ *
+ * NOTE: cassini can parse the header. however, it's not worth it
+ *       as long as the network stack requires a header copy.
+ *
+ * TX has 4 queues. currently these queues are used in a round-robin
+ * fashion for load balancing. They can also be used for QoS. for that
+ * to work, however, QoS information needs to be exposed down to the driver
+ * level so that subqueues get targeted to particular transmit rings.
+ * alternatively, the queues can be configured via use of the all-purpose
+ * ioctl.
+ *
+ * RX DATA: the rx completion ring has all the info, but the rx desc
+ * ring has all of the data. RX can conceivably come in under multiple
+ * interrupts, but the INT# assignment needs to be set up properly by
+ * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
+ * that. also, the two descriptor rings are designed to distinguish between
+ * encrypted and non-encrypted packets, but we use them for buffering
+ * instead.
+ *
+ * by default, the selective clear mask is set up to process rx packets.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <linux/mii.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+
+#include <net/checksum.h>
+
+#include <linux/atomic.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
+#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
+#define CAS_NCPUS            num_online_cpus()
+
+#define cas_skb_release(x)  netif_rx(x)
+
+/* select which firmware to use */
+#define USE_HP_WORKAROUND
+#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
+#define CAS_HP_ALT_FIRMWARE   cas_prog_null /* alternate firmware */
+
+#include "cassini.h"
+
+#define USE_TX_COMPWB      /* use completion writeback registers */
+#define USE_CSMA_CD_PROTO  /* standard CSMA/CD */
+#define USE_RX_BLANK       /* hw interrupt mitigation */
+#undef USE_ENTROPY_DEV     /* don't test for entropy device */
+
+/* NOTE: these aren't usable unless PCI interrupts can be assigned.
+ * also, we need to make cp->lock finer-grained.
+ */
+#undef  USE_PCI_INTB
+#undef  USE_PCI_INTC
+#undef  USE_PCI_INTD
+#undef  USE_QOS
+
+#undef  USE_VPD_DEBUG       /* debug vpd information if defined */
+
+/* rx processing options */
+#define USE_PAGE_ORDER      /* specify to allocate large rx pages */
+#define RX_DONT_BATCH  0    /* if 1, don't batch flows */
+#define RX_COPY_ALWAYS 0    /* if 0, use frags */
+#define RX_COPY_MIN    64   /* copy a little to make upper layers happy */
+#undef  RX_COUNT_BUFFERS    /* define to calculate RX buffer stats */
+
+#define DRV_MODULE_NAME                "cassini"
+#define DRV_MODULE_VERSION     "1.6"
+#define DRV_MODULE_RELDATE     "21 May 2008"
+
+#define CAS_DEF_MSG_ENABLE       \
+       (NETIF_MSG_DRV          | \
+        NETIF_MSG_PROBE        | \
+        NETIF_MSG_LINK         | \
+        NETIF_MSG_TIMER        | \
+        NETIF_MSG_IFDOWN       | \
+        NETIF_MSG_IFUP         | \
+        NETIF_MSG_RX_ERR       | \
+        NETIF_MSG_TX_ERR)
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+#define CAS_TX_TIMEOUT                 (HZ)
+#define CAS_LINK_TIMEOUT                (22*HZ/10)
+#define CAS_LINK_FAST_TIMEOUT           (1)
+
+/* timeout values for state changing. these specify the number
+ * of 10us delays to be used before giving up.
+ */
+#define STOP_TRIES_PHY 1000
+#define STOP_TRIES     5000
+
+/* specify a minimum frame size to deal with some fifo issues
+ * max mtu == 2 * page size - ethernet header - 64 - swivel =
+ *            2 * page_size - 0x50
+ */
+#define CAS_MIN_FRAME                  97
+#define CAS_1000MB_MIN_FRAME            255
+#define CAS_MIN_MTU                     60
+#define CAS_MAX_MTU                     min(((cp->page_size << 1) - 0x50), 9000)
+
+#if 1
+/*
+ * Eliminate these and use separate atomic counters for each, to
+ * avoid a race condition.
+ */
+#else
+#define CAS_RESET_MTU                   1
+#define CAS_RESET_ALL                   2
+#define CAS_RESET_SPARE                 3
+#endif
+
+static char version[] __devinitdata =
+       DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
+static int link_mode;
+
+MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
+MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("sun/cassini.bin");
+module_param(cassini_debug, int, 0);
+MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
+module_param(link_mode, int, 0);
+MODULE_PARM_DESC(link_mode, "default link mode");
+
+/*
+ * Work around for a PCS bug in which the link goes down due to the chip
+ * being confused and never showing a link status of "up."
+ */
+#define DEFAULT_LINKDOWN_TIMEOUT 5
+/*
+ * Value in seconds, for user input.
+ */
+static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
+module_param(linkdown_timeout, int, 0);
+MODULE_PARM_DESC(linkdown_timeout,
+"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
+
+/*
+ * value in 'ticks' (units used by jiffies). Set when we init the
+ * module because 'HZ' is actually a function call on some flavors of
+ * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
+ */
+static int link_transition_timeout;
+
+
+
+static u16 link_modes[] __devinitdata = {
+       BMCR_ANENABLE,                   /* 0 : autoneg */
+       0,                               /* 1 : 10bt half duplex */
+       BMCR_SPEED100,                   /* 2 : 100bt half duplex */
+       BMCR_FULLDPLX,                   /* 3 : 10bt full duplex */
+       BMCR_SPEED100|BMCR_FULLDPLX,     /* 4 : 100bt full duplex */
+       CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
+};
+
+static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
+       { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+       { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+       { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
+
+static void cas_set_link_modes(struct cas *cp);
+
+static inline void cas_lock_tx(struct cas *cp)
+{
+       int i;
+
+       for (i = 0; i < N_TX_RINGS; i++)
+               spin_lock(&cp->tx_lock[i]);
+}
+
+static inline void cas_lock_all(struct cas *cp)
+{
+       spin_lock_irq(&cp->lock);
+       cas_lock_tx(cp);
+}
+
+/* WTZ: QA was finding deadlock problems with the previous
+ * versions after long test runs with multiple cards per machine.
+ * See if replacing cas_lock_all with safer versions helps. The
+ * symptoms QA is reporting match those we'd expect if interrupts
+ * aren't being properly restored, and we fixed a previous deadlock
+ * with similar symptoms by using save/restore versions in other
+ * places.
+ */
+#define cas_lock_all_save(cp, flags) \
+do { \
+       struct cas *xxxcp = (cp); \
+       spin_lock_irqsave(&xxxcp->lock, flags); \
+       cas_lock_tx(xxxcp); \
+} while (0)
+
+static inline void cas_unlock_tx(struct cas *cp)
+{
+       int i;
+
+       for (i = N_TX_RINGS; i > 0; i--)
+               spin_unlock(&cp->tx_lock[i - 1]);
+}
+
+static inline void cas_unlock_all(struct cas *cp)
+{
+       cas_unlock_tx(cp);
+       spin_unlock_irq(&cp->lock);
+}
+
+#define cas_unlock_all_restore(cp, flags) \
+do { \
+       struct cas *xxxcp = (cp); \
+       cas_unlock_tx(xxxcp); \
+       spin_unlock_irqrestore(&xxxcp->lock, flags); \
+} while (0)
+
+static void cas_disable_irq(struct cas *cp, const int ring)
+{
+       /* Make sure we won't get any more interrupts */
+       if (ring == 0) {
+               writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
+               return;
+       }
+
+       /* disable completion interrupts and selectively mask */
+       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+               switch (ring) {
+#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
+#ifdef USE_PCI_INTB
+               case 1:
+#endif
+#ifdef USE_PCI_INTC
+               case 2:
+#endif
+#ifdef USE_PCI_INTD
+               case 3:
+#endif
+                       writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
+                              cp->regs + REG_PLUS_INTRN_MASK(ring));
+                       break;
+#endif
+               default:
+                       writel(INTRN_MASK_CLEAR_ALL, cp->regs +
+                              REG_PLUS_INTRN_MASK(ring));
+                       break;
+               }
+       }
+}
+
+static inline void cas_mask_intr(struct cas *cp)
+{
+       int i;
+
+       for (i = 0; i < N_RX_COMP_RINGS; i++)
+               cas_disable_irq(cp, i);
+}
+
+static void cas_enable_irq(struct cas *cp, const int ring)
+{
+       if (ring == 0) { /* all but TX_DONE */
+               writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
+               return;
+       }
+
+       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+               switch (ring) {
+#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
+#ifdef USE_PCI_INTB
+               case 1:
+#endif
+#ifdef USE_PCI_INTC
+               case 2:
+#endif
+#ifdef USE_PCI_INTD
+               case 3:
+#endif
+                       writel(INTRN_MASK_RX_EN, cp->regs +
+                              REG_PLUS_INTRN_MASK(ring));
+                       break;
+#endif
+               default:
+                       break;
+               }
+       }
+}
+
+static inline void cas_unmask_intr(struct cas *cp)
+{
+       int i;
+
+       for (i = 0; i < N_RX_COMP_RINGS; i++)
+               cas_enable_irq(cp, i);
+}
+
+static inline void cas_entropy_gather(struct cas *cp)
+{
+#ifdef USE_ENTROPY_DEV
+       if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
+               return;
+
+       batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
+                           readl(cp->regs + REG_ENTROPY_IV),
+                           sizeof(uint64_t)*8);
+#endif
+}
+
+static inline void cas_entropy_reset(struct cas *cp)
+{
+#ifdef USE_ENTROPY_DEV
+       if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
+               return;
+
+       writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
+              cp->regs + REG_BIM_LOCAL_DEV_EN);
+       writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
+       writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
+
+       /* if we read back 0x0, we don't have an entropy device */
+       if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
+               cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
+#endif
+}
+
+/* access to the phy. the following assumes that we've initialized the MIF to
+ * be in frame rather than bit-bang mode
+ */
+static u16 cas_phy_read(struct cas *cp, int reg)
+{
+       u32 cmd;
+       int limit = STOP_TRIES_PHY;
+
+       cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
+       cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
+       cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
+       cmd |= MIF_FRAME_TURN_AROUND_MSB;
+       writel(cmd, cp->regs + REG_MIF_FRAME);
+
+       /* poll for completion */
+       while (limit-- > 0) {
+               udelay(10);
+               cmd = readl(cp->regs + REG_MIF_FRAME);
+               if (cmd & MIF_FRAME_TURN_AROUND_LSB)
+                       return cmd & MIF_FRAME_DATA_MASK;
+       }
+       return 0xFFFF; /* -1 */
+}
+
+static int cas_phy_write(struct cas *cp, int reg, u16 val)
+{
+       int limit = STOP_TRIES_PHY;
+       u32 cmd;
+
+       cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
+       cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
+       cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
+       cmd |= MIF_FRAME_TURN_AROUND_MSB;
+       cmd |= val & MIF_FRAME_DATA_MASK;
+       writel(cmd, cp->regs + REG_MIF_FRAME);
+
+       /* poll for completion */
+       while (limit-- > 0) {
+               udelay(10);
+               cmd = readl(cp->regs + REG_MIF_FRAME);
+               if (cmd & MIF_FRAME_TURN_AROUND_LSB)
+                       return 0;
+       }
+       return -1;
+}
+
+static void cas_phy_powerup(struct cas *cp)
+{
+       u16 ctl = cas_phy_read(cp, MII_BMCR);
+
+       if ((ctl & BMCR_PDOWN) == 0)
+               return;
+       ctl &= ~BMCR_PDOWN;
+       cas_phy_write(cp, MII_BMCR, ctl);
+}
+
+static void cas_phy_powerdown(struct cas *cp)
+{
+       u16 ctl = cas_phy_read(cp, MII_BMCR);
+
+       if (ctl & BMCR_PDOWN)
+               return;
+       ctl |= BMCR_PDOWN;
+       cas_phy_write(cp, MII_BMCR, ctl);
+}
+
+/* cp->lock held. note: the last put_page will free the buffer */
+static int cas_page_free(struct cas *cp, cas_page_t *page)
+{
+       pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
+                      PCI_DMA_FROMDEVICE);
+       __free_pages(page->buffer, cp->page_order);
+       kfree(page);
+       return 0;
+}
+
+/* Optional per-page accounting of how much of an RX buffer page is in
+ * use; compiles to nothing unless RX_COUNT_BUFFERS is defined.
+ */
+#ifdef RX_COUNT_BUFFERS
+#define RX_USED_ADD(x, y)       ((x)->used += (y))
+#define RX_USED_SET(x, y)       ((x)->used  = (y))
+#else
+#define RX_USED_ADD(x, y)
+#define RX_USED_SET(x, y)
+#endif
+
+/* local page allocation routines for the receive buffers. jumbo pages
+ * require at least 8K contiguous and 8K aligned buffers.
+ *
+ * Allocates the cas_page_t bookkeeping struct, the page(s) themselves
+ * (order cp->page_order), and maps them for FROMDEVICE DMA.  Returns
+ * NULL on any failure, with all partial allocations released.
+ */
+static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
+{
+       cas_page_t *page;
+
+       page = kmalloc(sizeof(cas_page_t), flags);
+       if (!page)
+               return NULL;
+
+       INIT_LIST_HEAD(&page->list);
+       RX_USED_SET(page, 0);
+       page->buffer = alloc_pages(flags, cp->page_order);
+       if (!page->buffer)
+               goto page_err;
+       page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
+                                     cp->page_size, PCI_DMA_FROMDEVICE);
+       /* A failed mapping must never reach the RX ring as a bogus bus
+        * address; check it here and unwind.
+        */
+       if (pci_dma_mapping_error(cp->pdev, page->dma_addr)) {
+               __free_pages(page->buffer, cp->page_order);
+               goto page_err;
+       }
+       return page;
+
+page_err:
+       kfree(page);
+       return NULL;
+}
+
+/* initialize spare pool of rx buffers, but allocate during the open.
+ * Both the in-use and spare lists start empty; rx_spares_needed is
+ * primed to RX_SPARE_COUNT so the first recovery pass fills the pool.
+ */
+static void cas_spare_init(struct cas *cp)
+{
+       spin_lock(&cp->rx_inuse_lock);
+       INIT_LIST_HEAD(&cp->rx_inuse_list);
+       spin_unlock(&cp->rx_inuse_lock);
+
+       spin_lock(&cp->rx_spare_lock);
+       INIT_LIST_HEAD(&cp->rx_spare_list);
+       cp->rx_spares_needed = RX_SPARE_COUNT;
+       spin_unlock(&cp->rx_spare_lock);
+}
+
+/* used on close. free all the spare buffers.
+ * Each list is spliced onto a private local list under its lock so the
+ * pages can then be freed without holding any spinlock.
+ */
+static void cas_spare_free(struct cas *cp)
+{
+       struct list_head list, *elem, *tmp;
+
+       /* free spare buffers */
+       INIT_LIST_HEAD(&list);
+       spin_lock(&cp->rx_spare_lock);
+       list_splice_init(&cp->rx_spare_list, &list);
+       spin_unlock(&cp->rx_spare_lock);
+       list_for_each_safe(elem, tmp, &list) {
+               cas_page_free(cp, list_entry(elem, cas_page_t, list));
+       }
+
+       INIT_LIST_HEAD(&list);
+#if 1
+       /*
+        * Looks like Adrian had protected this with a different
+        * lock than used everywhere else to manipulate this list.
+        */
+       spin_lock(&cp->rx_inuse_lock);
+       list_splice_init(&cp->rx_inuse_list, &list);
+       spin_unlock(&cp->rx_inuse_lock);
+#else
+       spin_lock(&cp->rx_spare_lock);
+       list_splice_init(&cp->rx_inuse_list, &list);
+       spin_unlock(&cp->rx_spare_lock);
+#endif
+       list_for_each_safe(elem, tmp, &list) {
+               cas_page_free(cp, list_entry(elem, cas_page_t, list));
+       }
+}
+
+/* replenish spares if needed.
+ * Two phases: (1) sweep the in-use list, moving any page whose only
+ * remaining reference is ours back to the spare pool (or freeing it if
+ * the pool is already full); (2) if spares are still needed, allocate
+ * fresh pages with @flags until the shortfall is covered or allocation
+ * fails.
+ */
+static void cas_spare_recover(struct cas *cp, const gfp_t flags)
+{
+       struct list_head list, *elem, *tmp;
+       int needed, i;
+
+       /* check inuse list. if we don't need any more free buffers,
+        * just free it
+        */
+
+       /* make a local copy of the list */
+       INIT_LIST_HEAD(&list);
+       spin_lock(&cp->rx_inuse_lock);
+       list_splice_init(&cp->rx_inuse_list, &list);
+       spin_unlock(&cp->rx_inuse_lock);
+
+       list_for_each_safe(elem, tmp, &list) {
+               cas_page_t *page = list_entry(elem, cas_page_t, list);
+
+               /*
+                * With the lockless pagecache, cassini buffering scheme gets
+                * slightly less accurate: we might find that a page has an
+                * elevated reference count here, due to a speculative ref,
+                * and skip it as in-use. Ideally we would be able to reclaim
+                * it. However this would be such a rare case, it doesn't
+                * matter too much as we should pick it up the next time round.
+                *
+                * Importantly, if we find that the page has a refcount of 1
+                * here (our refcount), then we know it is definitely not inuse
+                * so we can reuse it.
+                */
+               if (page_count(page->buffer) > 1)
+                       continue;
+
+               list_del(elem);
+               spin_lock(&cp->rx_spare_lock);
+               if (cp->rx_spares_needed > 0) {
+                       list_add(elem, &cp->rx_spare_list);
+                       cp->rx_spares_needed--;
+                       spin_unlock(&cp->rx_spare_lock);
+               } else {
+                       spin_unlock(&cp->rx_spare_lock);
+                       cas_page_free(cp, page);
+               }
+       }
+
+       /* put any inuse buffers back on the list */
+       if (!list_empty(&list)) {
+               spin_lock(&cp->rx_inuse_lock);
+               list_splice(&list, &cp->rx_inuse_list);
+               spin_unlock(&cp->rx_inuse_lock);
+       }
+
+       spin_lock(&cp->rx_spare_lock);
+       needed = cp->rx_spares_needed;
+       spin_unlock(&cp->rx_spare_lock);
+       if (!needed)
+               return;
+
+       /* we still need spares, so try to allocate some */
+       INIT_LIST_HEAD(&list);
+       i = 0;
+       while (i < needed) {
+               cas_page_t *spare = cas_page_alloc(cp, flags);
+               if (!spare)
+                       break;
+               list_add(&spare->list, &list);
+               i++;
+       }
+
+       /* splice in whatever we managed to allocate, even if short */
+       spin_lock(&cp->rx_spare_lock);
+       list_splice(&list, &cp->rx_spare_list);
+       cp->rx_spares_needed -= i;
+       spin_unlock(&cp->rx_spare_lock);
+}
+
+/* pull a page from the list.
+ * Takes one spare buffer page off the spare pool, attempting an
+ * inline GFP_ATOMIC recovery if the pool is empty.  Every
+ * RX_SPARE_RECOVER_VAL dequeues, the reset task is scheduled to do a
+ * full (sleeping) replenish.  Returns NULL if no page is available.
+ */
+static cas_page_t *cas_page_dequeue(struct cas *cp)
+{
+       struct list_head *entry;
+       int recover;
+
+       spin_lock(&cp->rx_spare_lock);
+       if (list_empty(&cp->rx_spare_list)) {
+               /* try to do a quick recovery */
+               spin_unlock(&cp->rx_spare_lock);
+               cas_spare_recover(cp, GFP_ATOMIC);
+               spin_lock(&cp->rx_spare_lock);
+               if (list_empty(&cp->rx_spare_list)) {
+                       netif_err(cp, rx_err, cp->dev,
+                                 "no spare buffers available\n");
+                       spin_unlock(&cp->rx_spare_lock);
+                       return NULL;
+               }
+       }
+
+       entry = cp->rx_spare_list.next;
+       list_del(entry);
+       recover = ++cp->rx_spares_needed;
+       spin_unlock(&cp->rx_spare_lock);
+
+       /* trigger the timer to do the recovery */
+       if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
+#if 1
+               atomic_inc(&cp->reset_task_pending);
+               atomic_inc(&cp->reset_task_pending_spare);
+               schedule_work(&cp->reset_task);
+#else
+               atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
+               schedule_work(&cp->reset_task);
+#endif
+       }
+       return list_entry(entry, cas_page_t, list);
+}
+
+
+/* Enable or disable MIF hardware polling of the PHY's BMSR.
+ * When enabled, the MIF polls cp->phy_addr and interrupts on link
+ * status / autoneg-complete changes (all other BMSR bits are masked);
+ * when disabled, all MIF interrupts are masked (0xFFFF).  The MDIO
+ * pin-select bits are preserved/rebuilt from cp->phy_type.
+ */
+static void cas_mif_poll(struct cas *cp, const int enable)
+{
+       u32 cfg;
+
+       cfg  = readl(cp->regs + REG_MIF_CFG);
+       cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
+
+       if (cp->phy_type & CAS_PHY_MII_MDIO1)
+               cfg |= MIF_CFG_PHY_SELECT;
+
+       /* poll and interrupt on link status change. */
+       if (enable) {
+               cfg |= MIF_CFG_POLL_EN;
+               cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
+               cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
+       }
+       /* mask must be written before the new cfg takes effect */
+       writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
+              cp->regs + REG_MIF_MASK);
+       writel(cfg, cp->regs + REG_MIF_CFG);
+}
+
+/* Must be invoked under cp->lock.
+ * (Re)start link negotiation.  If @ep is non-NULL, cp->link_cntl is
+ * rebuilt from the requested autoneg/speed/duplex first; with a NULL
+ * @ep the current settings are simply re-applied (changed stays 0).
+ * The link state is forced down, and depending on phy_type either the
+ * PCS (serdes) or the MII PHY is programmed to start autoneg or a
+ * forced mode.  If the settings changed while the link was not down, a
+ * full chip reset is scheduled instead of reprogramming in place.
+ */
+static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
+{
+       u16 ctl;
+#if 1
+       int lcntl;
+       int changed = 0;
+       int oldstate = cp->lstate;
+       int link_was_not_down = !(oldstate == link_down);
+#endif
+       /* Setup link parameters */
+       if (!ep)
+               goto start_aneg;
+       lcntl = cp->link_cntl;
+       if (ep->autoneg == AUTONEG_ENABLE)
+               cp->link_cntl = BMCR_ANENABLE;
+       else {
+               u32 speed = ethtool_cmd_speed(ep);
+               cp->link_cntl = 0;
+               if (speed == SPEED_100)
+                       cp->link_cntl |= BMCR_SPEED100;
+               else if (speed == SPEED_1000)
+                       cp->link_cntl |= CAS_BMCR_SPEED1000;
+               if (ep->duplex == DUPLEX_FULL)
+                       cp->link_cntl |= BMCR_FULLDPLX;
+       }
+#if 1
+       changed = (lcntl != cp->link_cntl);
+#endif
+start_aneg:
+       if (cp->lstate == link_up) {
+               netdev_info(cp->dev, "PCS link down\n");
+       } else {
+               if (changed) {
+                       netdev_info(cp->dev, "link configuration changed\n");
+               }
+       }
+       cp->lstate = link_down;
+       cp->link_transition = LINK_TRANSITION_LINK_DOWN;
+       if (!cp->hw_running)
+               return;
+#if 1
+       /*
+        * WTZ: If the old state was link_up, we turn off the carrier
+        * to replicate everything we do elsewhere on a link-down
+        * event when we were already in a link-up state..
+        */
+       if (oldstate == link_up)
+               netif_carrier_off(cp->dev);
+       if (changed  && link_was_not_down) {
+               /*
+                * WTZ: This branch will simply schedule a full reset after
+                * we explicitly changed link modes in an ioctl. See if this
+                * fixes the link-problems we were having for forced mode.
+                */
+               atomic_inc(&cp->reset_task_pending);
+               atomic_inc(&cp->reset_task_pending_all);
+               schedule_work(&cp->reset_task);
+               cp->timer_ticks = 0;
+               mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
+               return;
+       }
+#endif
+       if (cp->phy_type & CAS_PHY_SERDES) {
+               u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
+
+               if (cp->link_cntl & BMCR_ANENABLE) {
+                       val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
+                       cp->lstate = link_aneg;
+               } else {
+                       if (cp->link_cntl & BMCR_FULLDPLX)
+                               val |= PCS_MII_CTRL_DUPLEX;
+                       val &= ~PCS_MII_AUTONEG_EN;
+                       cp->lstate = link_force_ok;
+               }
+               cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+               writel(val, cp->regs + REG_PCS_MII_CTRL);
+
+       } else {
+               /* MII path: quiesce MIF polling while touching BMCR */
+               cas_mif_poll(cp, 0);
+               ctl = cas_phy_read(cp, MII_BMCR);
+               ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
+                        CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
+               ctl |= cp->link_cntl;
+               if (ctl & BMCR_ANENABLE) {
+                       ctl |= BMCR_ANRESTART;
+                       cp->lstate = link_aneg;
+               } else {
+                       cp->lstate = link_force_ok;
+               }
+               cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+               cas_phy_write(cp, MII_BMCR, ctl);
+               cas_mif_poll(cp, 1);
+       }
+
+       cp->timer_ticks = 0;
+       mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
+}
+
+/* Must be invoked under cp->lock.
+ * Issue a BMCR soft reset to the MII PHY and poll until the
+ * self-clearing BMCR_RESET bit drops.  Returns non-zero if the bit is
+ * still set after STOP_TRIES_PHY polls (reset failed / timed out).
+ */
+static int cas_reset_mii_phy(struct cas *cp)
+{
+       int limit = STOP_TRIES_PHY;
+       u16 val;
+
+       cas_phy_write(cp, MII_BMCR, BMCR_RESET);
+       udelay(100);
+       while (--limit) {
+               val = cas_phy_read(cp, MII_BMCR);
+               if ((val & BMCR_RESET) == 0)
+                       break;
+               udelay(10);
+       }
+       return limit <= 0;
+}
+
+/* Fetch the Saturn PHY patch firmware ("sun/cassini.bin") for the
+ * NS DP83065; a no-op (returns 0) for any other PHY.  The first two
+ * bytes of the image are the little-endian load address; the rest is
+ * copied into a vmalloc'd buffer (cp->fw_data/cp->fw_size) for later
+ * download by cas_saturn_firmware_load().
+ * Returns 0 on success or a negative errno.
+ */
+static int cas_saturn_firmware_init(struct cas *cp)
+{
+       const struct firmware *fw;
+       const char fw_name[] = "sun/cassini.bin";
+       int err;
+
+       if (PHY_NS_DP83065 != cp->phy_id)
+               return 0;
+
+       err = request_firmware(&fw, fw_name, &cp->pdev->dev);
+       if (err) {
+               pr_err("Failed to load firmware \"%s\"\n",
+                      fw_name);
+               return err;
+       }
+       /* need at least the 2-byte load address */
+       if (fw->size < 2) {
+               pr_err("bogus length %zu in \"%s\"\n",
+                      fw->size, fw_name);
+               err = -EINVAL;
+               goto out;
+       }
+       cp->fw_load_addr= fw->data[1] << 8 | fw->data[0];
+       cp->fw_size = fw->size - 2;
+       cp->fw_data = vmalloc(cp->fw_size);
+       if (!cp->fw_data) {
+               err = -ENOMEM;
+               pr_err("\"%s\" Failed %d\n", fw_name, err);
+               goto out;
+       }
+       memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
+out:
+       release_firmware(fw);
+       return err;
+}
+
+/* Download the firmware image cached by cas_saturn_firmware_init()
+ * into the DP83065 PHY over MII, one byte per REGD write, then enable
+ * it.  The REGE/REGD pairs are the PHY's expanded-memory address/data
+ * registers; the magic addresses/values below are the vendor-defined
+ * pointer setup for the patch (no public reference — do not alter).
+ */
+static void cas_saturn_firmware_load(struct cas *cp)
+{
+       int i;
+
+       cas_phy_powerdown(cp);
+
+       /* expanded memory access mode */
+       cas_phy_write(cp, DP83065_MII_MEM, 0x0);
+
+       /* pointer configuration for new firmware */
+       cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
+       cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
+       cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
+       cas_phy_write(cp, DP83065_MII_REGD, 0x82);
+       cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
+       cas_phy_write(cp, DP83065_MII_REGD, 0x0);
+       cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
+       cas_phy_write(cp, DP83065_MII_REGD, 0x39);
+
+       /* download new firmware */
+       cas_phy_write(cp, DP83065_MII_MEM, 0x1);
+       cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
+       for (i = 0; i < cp->fw_size; i++)
+               cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
+
+       /* enable firmware */
+       cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
+       cas_phy_write(cp, DP83065_MII_REGD, 0x1);
+}
+
+
+/* phy initialization.
+ * Two major paths: MII/GMII PHYs get a soft reset, vendor-specific
+ * workarounds (Lucent, Broadcom, Saturn/DP83065), and advertisement
+ * setup; SERDES links instead get a PCS reset and PCS advertisement
+ * setup.  The per-vendor magic register writes below are taken from
+ * vendor errata — do not alter.
+ */
+static void cas_phy_init(struct cas *cp)
+{
+       u16 val;
+
+       /* if we're in MII/GMII mode, set up phy */
+       if (CAS_PHY_MII(cp->phy_type)) {
+               writel(PCS_DATAPATH_MODE_MII,
+                      cp->regs + REG_PCS_DATAPATH_MODE);
+
+               cas_mif_poll(cp, 0);
+               cas_reset_mii_phy(cp); /* take out of isolate mode */
+
+               if (PHY_LUCENT_B0 == cp->phy_id) {
+                       /* workaround link up/down issue with lucent */
+                       cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
+                       cas_phy_write(cp, MII_BMCR, 0x00f1);
+                       cas_phy_write(cp, LUCENT_MII_REG, 0x0);
+
+               } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
+                       /* workarounds for broadcom phy */
+                       cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
+                       cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
+                       cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
+                       cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
+                       cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
+                       cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
+                       cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
+                       cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
+                       cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
+                       cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
+                       cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
+
+               } else if (PHY_BROADCOM_5411 == cp->phy_id) {
+                       /* double read is intentional (latched register?)
+                        * — preserved from the original driver */
+                       val = cas_phy_read(cp, BROADCOM_MII_REG4);
+                       val = cas_phy_read(cp, BROADCOM_MII_REG4);
+                       if (val & 0x0080) {
+                               /* link workaround */
+                               cas_phy_write(cp, BROADCOM_MII_REG4,
+                                             val & ~0x0080);
+                       }
+
+               } else if (cp->cas_flags & CAS_FLAG_SATURN) {
+                       writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
+                              SATURN_PCFG_FSI : 0x0,
+                              cp->regs + REG_SATURN_PCFG);
+
+                       /* load firmware to address 10Mbps auto-negotiation
+                        * issue. NOTE: this will need to be changed if the
+                        * default firmware gets fixed.
+                        */
+                       if (PHY_NS_DP83065 == cp->phy_id) {
+                               cas_saturn_firmware_load(cp);
+                       }
+                       cas_phy_powerup(cp);
+               }
+
+               /* advertise capabilities */
+               val = cas_phy_read(cp, MII_BMCR);
+               val &= ~BMCR_ANENABLE;
+               cas_phy_write(cp, MII_BMCR, val);
+               udelay(10);
+
+               cas_phy_write(cp, MII_ADVERTISE,
+                             cas_phy_read(cp, MII_ADVERTISE) |
+                             (ADVERTISE_10HALF | ADVERTISE_10FULL |
+                              ADVERTISE_100HALF | ADVERTISE_100FULL |
+                              CAS_ADVERTISE_PAUSE |
+                              CAS_ADVERTISE_ASYM_PAUSE));
+
+               if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+                       /* make sure that we don't advertise half
+                        * duplex to avoid a chip issue
+                        */
+                       val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
+                       val &= ~CAS_ADVERTISE_1000HALF;
+                       val |= CAS_ADVERTISE_1000FULL;
+                       cas_phy_write(cp, CAS_MII_1000_CTRL, val);
+               }
+
+       } else {
+               /* reset pcs for serdes */
+               u32 val;
+               int limit;
+
+               writel(PCS_DATAPATH_MODE_SERDES,
+                      cp->regs + REG_PCS_DATAPATH_MODE);
+
+               /* enable serdes pins on saturn */
+               if (cp->cas_flags & CAS_FLAG_SATURN)
+                       writel(0, cp->regs + REG_SATURN_PCFG);
+
+               /* Reset PCS unit. */
+               val = readl(cp->regs + REG_PCS_MII_CTRL);
+               val |= PCS_MII_RESET;
+               writel(val, cp->regs + REG_PCS_MII_CTRL);
+
+               /* poll for the self-clearing reset bit */
+               limit = STOP_TRIES;
+               while (--limit > 0) {
+                       udelay(10);
+                       if ((readl(cp->regs + REG_PCS_MII_CTRL) &
+                            PCS_MII_RESET) == 0)
+                               break;
+               }
+               if (limit <= 0)
+                       netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
+                                   readl(cp->regs + REG_PCS_STATE_MACHINE));
+
+               /* Make sure PCS is disabled while changing advertisement
+                * configuration.
+                */
+               writel(0x0, cp->regs + REG_PCS_CFG);
+
+               /* Advertise all capabilities except half-duplex. */
+               val  = readl(cp->regs + REG_PCS_MII_ADVERT);
+               val &= ~PCS_MII_ADVERT_HD;
+               val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
+                       PCS_MII_ADVERT_ASYM_PAUSE);
+               writel(val, cp->regs + REG_PCS_MII_ADVERT);
+
+               /* enable PCS */
+               writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
+
+               /* pcs workaround: enable sync detect */
+               writel(PCS_SERDES_CTRL_SYNCD_EN,
+                      cp->regs + REG_PCS_SERDES_CTRL);
+       }
+}
+
+
+/* Evaluate the PCS link state and update cp->lstate accordingly,
+ * turning the carrier on/off as needed.  Returns non-zero when the
+ * caller should schedule a chip reset as a link-failure workaround.
+ */
+static int cas_pcs_link_check(struct cas *cp)
+{
+       u32 stat, state_machine;
+       int retval = 0;
+
+       /* The link status bit latches on zero, so you must
+        * read it twice in such a case to see a transition
+        * to the link being up.
+        */
+       stat = readl(cp->regs + REG_PCS_MII_STATUS);
+       if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
+               stat = readl(cp->regs + REG_PCS_MII_STATUS);
+
+       /* The remote-fault indication is only valid
+        * when autoneg has completed.
+        */
+       if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
+                    PCS_MII_STATUS_REMOTE_FAULT)) ==
+           (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
+               netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
+
+       /* work around link detection issue by querying the PCS state
+        * machine directly.
+        */
+       state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
+       if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
+               stat &= ~PCS_MII_STATUS_LINK_STATUS;
+       } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
+               stat |= PCS_MII_STATUS_LINK_STATUS;
+       }
+
+       if (stat & PCS_MII_STATUS_LINK_STATUS) {
+               if (cp->lstate != link_up) {
+                       if (cp->opened) {
+                               cp->lstate = link_up;
+                               cp->link_transition = LINK_TRANSITION_LINK_UP;
+
+                               cas_set_link_modes(cp);
+                               netif_carrier_on(cp->dev);
+                       }
+               }
+       } else if (cp->lstate == link_up) {
+               /* up -> down transition */
+               cp->lstate = link_down;
+               if (link_transition_timeout != 0 &&
+                   cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
+                   !cp->link_transition_jiffies_valid) {
+                       /*
+                        * force a reset, as a workaround for the
+                        * link-failure problem. May want to move this to a
+                        * point a bit earlier in the sequence. If we had
+                        * generated a reset a short time ago, we'll wait for
+                        * the link timer to check the status until a
+                        * timer expires (link_transition_jiffies_valid is
+                        * true when the timer is running.)  Instead of using
+                        * a system timer, we just do a check whenever the
+                        * link timer is running - this clears the flag after
+                        * a suitable delay.
+                        */
+                       retval = 1;
+                       cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
+                       cp->link_transition_jiffies = jiffies;
+                       cp->link_transition_jiffies_valid = 1;
+               } else {
+                       cp->link_transition = LINK_TRANSITION_ON_FAILURE;
+               }
+               netif_carrier_off(cp->dev);
+               if (cp->opened)
+                       netif_info(cp, link, cp->dev, "PCS link down\n");
+
+               /* Cassini only: if you force a mode, there can be
+                * sync problems on link down. to fix that, the following
+                * things need to be checked:
+                * 1) read serialink state register
+                * 2) read pcs status register to verify link down.
+                * 3) if link down and serial link == 0x03, then you need
+                *    to global reset the chip.
+                */
+               if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
+                       /* should check to see if we're in a forced mode */
+                       stat = readl(cp->regs + REG_PCS_SERDES_STATE);
+                       if (stat == 0x03)
+                               return 1;
+               }
+       } else if (cp->lstate == link_down) {
+               /* still down: may still need the reset workaround */
+               if (link_transition_timeout != 0 &&
+                   cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
+                   !cp->link_transition_jiffies_valid) {
+                       /* force a reset, as a workaround for the
+                        * link-failure problem.  May want to move
+                        * this to a point a bit earlier in the
+                        * sequence.
+                        */
+                       retval = 1;
+                       cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
+                       cp->link_transition_jiffies = jiffies;
+                       cp->link_transition_jiffies_valid = 1;
+               } else {
+                       cp->link_transition = LINK_TRANSITION_STILL_FAILED;
+               }
+       }
+
+       return retval;
+}
+
+/* PCS interrupt handler; reading REG_PCS_INTR_STATUS acknowledges the
+ * event.  Only link-change events are acted upon (via
+ * cas_pcs_link_check()); anything else returns 0.
+ */
+static int cas_pcs_interrupt(struct net_device *dev,
+                            struct cas *cp, u32 status)
+{
+       u32 pcs_stat = readl(cp->regs + REG_PCS_INTR_STATUS);
+
+       return (pcs_stat & PCS_INTR_STATUS_LINK_CHANGE) ?
+               cas_pcs_link_check(cp) : 0;
+}
+
+/* TX MAC interrupt: log fatal conditions (underrun, oversize packet)
+ * and fold the hardware's 16-bit collision/abort counters into the
+ * software stats when they wrap (each event means +0x10000).
+ * Always returns 0 — no TX MAC event triggers a reset here.
+ */
+static int cas_txmac_interrupt(struct net_device *dev,
+                              struct cas *cp, u32 status)
+{
+       u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
+
+       if (!txmac_stat)
+               return 0;
+
+       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+                    "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
+
+       /* Defer timer expiration is quite normal,
+        * don't even log the event.
+        */
+       if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
+           !(txmac_stat & ~MAC_TX_DEFER_TIMER))
+               return 0;
+
+       spin_lock(&cp->stat_lock[0]);
+       if (txmac_stat & MAC_TX_UNDERRUN) {
+               netdev_err(dev, "TX MAC xmit underrun\n");
+               cp->net_stats[0].tx_fifo_errors++;
+       }
+
+       if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
+               netdev_err(dev, "TX MAC max packet size error\n");
+               cp->net_stats[0].tx_errors++;
+       }
+
+       /* The rest are all cases of one of the 16-bit TX
+        * counters expiring.
+        */
+       if (txmac_stat & MAC_TX_COLL_NORMAL)
+               cp->net_stats[0].collisions += 0x10000;
+
+       if (txmac_stat & MAC_TX_COLL_EXCESS) {
+               cp->net_stats[0].tx_aborted_errors += 0x10000;
+               cp->net_stats[0].collisions += 0x10000;
+       }
+
+       if (txmac_stat & MAC_TX_COLL_LATE) {
+               cp->net_stats[0].tx_aborted_errors += 0x10000;
+               cp->net_stats[0].collisions += 0x10000;
+       }
+       spin_unlock(&cp->stat_lock[0]);
+
+       /* We do not keep track of MAC_TX_COLL_FIRST and
+        * MAC_TX_PEAK_ATTEMPTS events.
+        */
+       return 0;
+}
+
+/* Program the header-parser (HP) instruction RAM from the @firmware
+ * instruction table.  The table is terminated by an entry with a NULL
+ * ->note.  Each instruction is split across the HI/MID/LOW data
+ * registers after selecting the slot via the address register.
+ */
+static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
+{
+       cas_hp_inst_t *inst;
+       u32 val;
+       int i;
+
+       i = 0;
+       while ((inst = firmware) && inst->note) {
+               writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
+
+               val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
+               val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
+               writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
+
+               val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
+               val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
+               val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
+               val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
+               val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
+               val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
+               val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
+               writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
+
+               val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
+               val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
+               val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
+               val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
+               writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
+               ++firmware;
+               ++i;
+       }
+}
+
+/* Program the RX DMA engine: descriptor and completion ring base
+ * addresses (REG_PLUS chips get extra rings), interrupt alias clears,
+ * pause thresholds, reassembly-buffer zeroing, interrupt mitigation,
+ * page/MTU stride configuration, and finally the header parser.
+ * Must run with the chip quiesced; the kick registers are armed here.
+ */
+static void cas_init_rx_dma(struct cas *cp)
+{
+       u64 desc_dma = cp->block_dvma;
+       u32 val;
+       int i, size;
+
+       /* rx free descriptors */
+       val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
+       val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
+       val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
+       if ((N_RX_DESC_RINGS > 1) &&
+           (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
+               val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
+       writel(val, cp->regs + REG_RX_CFG);
+
+       val = (unsigned long) cp->init_rxds[0] -
+               (unsigned long) cp->init_block;
+       writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
+       writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
+       writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
+
+       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+               /* rx desc 2 is for IPSEC packets; however,
+                * we don't use it for that purpose.
+                */
+               val = (unsigned long) cp->init_rxds[1] -
+                       (unsigned long) cp->init_block;
+               writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
+               writel((desc_dma + val) & 0xffffffff, cp->regs +
+                      REG_PLUS_RX_DB1_LOW);
+               writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
+                      REG_PLUS_RX_KICK1);
+       }
+
+       /* rx completion registers */
+       val = (unsigned long) cp->init_rxcs[0] -
+               (unsigned long) cp->init_block;
+       writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
+       writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
+
+       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+               /* rx comp 2-4 */
+               for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
+                       val = (unsigned long) cp->init_rxcs[i] -
+                               (unsigned long) cp->init_block;
+                       writel((desc_dma + val) >> 32, cp->regs +
+                              REG_PLUS_RX_CBN_HI(i));
+                       writel((desc_dma + val) & 0xffffffff, cp->regs +
+                              REG_PLUS_RX_CBN_LOW(i));
+               }
+       }
+
+       /* read selective clear regs to prevent spurious interrupts
+        * on reset because complete == kick.
+        * selective clear set up to prevent interrupts on resets
+        */
+       readl(cp->regs + REG_INTR_STATUS_ALIAS);
+       writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
+       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+               for (i = 1; i < N_RX_COMP_RINGS; i++)
+                       readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
+
+               /* 2 is different from 3 and 4 */
+               if (N_RX_COMP_RINGS > 1)
+                       writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
+                              cp->regs + REG_PLUS_ALIASN_CLEAR(1));
+
+               for (i = 2; i < N_RX_COMP_RINGS; i++)
+                       writel(INTR_RX_DONE_ALT,
+                              cp->regs + REG_PLUS_ALIASN_CLEAR(i));
+       }
+
+       /* set up pause thresholds */
+       val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
+                       cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
+       val |= CAS_BASE(RX_PAUSE_THRESH_ON,
+                       cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
+       writel(val, cp->regs + REG_RX_PAUSE_THRESH);
+
+       /* zero out dma reassembly buffers */
+       for (i = 0; i < 64; i++) {
+               writel(i, cp->regs + REG_RX_TABLE_ADDR);
+               writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
+               writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
+               writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
+       }
+
+       /* make sure address register is 0 for normal operation */
+       writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
+       writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
+
+       /* interrupt mitigation */
+#ifdef USE_RX_BLANK
+       val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
+       val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
+       writel(val, cp->regs + REG_RX_BLANK);
+#else
+       writel(0x0, cp->regs + REG_RX_BLANK);
+#endif
+
+       /* interrupt generation as a function of low water marks for
+        * free desc and completion entries. these are used to trigger
+        * housekeeping for rx descs. we don't use the free interrupt
+        * as it's not very useful
+        */
+       /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
+       val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
+       writel(val, cp->regs + REG_RX_AE_THRESH);
+       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+               val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
+               writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
+       }
+
+       /* Random early detect registers. useful for congestion avoidance.
+        * this should be tunable.
+        */
+       writel(0x0, cp->regs + REG_RX_RED);
+
+       /* receive page sizes. default == 2K (0x800) */
+       val = 0;
+       if (cp->page_size == 0x1000)
+               val = 0x1;
+       else if (cp->page_size == 0x2000)
+               val = 0x2;
+       else if (cp->page_size == 0x4000)
+               val = 0x3;
+
+       /* round mtu + offset. constrain to page size. */
+       size = cp->dev->mtu + 64;
+       if (size > cp->page_size)
+               size = cp->page_size;
+
+       if (size <= 0x400)
+               i = 0x0;
+       else if (size <= 0x800)
+               i = 0x1;
+       else if (size <= 0x1000)
+               i = 0x2;
+       else
+               i = 0x3;
+
+       /* stride = 2^(i+10): 1K/2K/4K/8K per MTU-sized buffer */
+       cp->mtu_stride = 1 << (i + 10);
+       val  = CAS_BASE(RX_PAGE_SIZE, val);
+       val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
+       val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
+       val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
+       writel(val, cp->regs + REG_RX_PAGE_SIZE);
+
+       /* enable the header parser if desired */
+       if (CAS_HP_FIRMWARE == cas_prog_null)
+               return;
+
+       val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
+       val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
+       val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
+       writel(val, cp->regs + REG_HP_CFG);
+}
+
+/* Reset one RX completion-ring entry to its pristine state: clear all
+ * words, then stamp word4 with RX_COMP4_ZERO -- presumably the marker
+ * the chip uses to recognize an unconsumed entry (TODO confirm against
+ * cassini.h).
+ */
+static inline void cas_rxc_init(struct cas_rx_comp *rxc)
+{
+       memset(rxc, 0, sizeof(*rxc));
+       rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
+}
+
+/* Fetch a spare page for @index.  The otherwise unused ENC RX DESC
+ * ring (rx_pages[1]) holds the spares.  A spare still referenced by
+ * upper layers cannot be handed out again, so it is parked on the
+ * rx_inuse list and a fresh page is pulled from the spare queue
+ * instead.  The rx_page[0,1] flipping is safe because the chip never
+ * hands back a page index while it is still being processed.
+ */
+static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
+{
+       cas_page_t *spare = cp->rx_pages[1][index];
+       cas_page_t *fresh;
+
+       /* sole owner -> the spare can be reused as-is */
+       if (page_count(spare->buffer) == 1)
+               return spare;
+
+       fresh = cas_page_dequeue(cp);
+       if (!fresh)
+               return NULL;
+
+       /* park the busy page until its users release their references */
+       spin_lock(&cp->rx_inuse_lock);
+       list_add(&spare->list, &cp->rx_inuse_list);
+       spin_unlock(&cp->rx_inuse_lock);
+       return fresh;
+}
+
+/* Return the ring-0 page at @index, first rotating in a spare when the
+ * current page is still held by the network stack.  This needs to be
+ * changed if the ENC RX DESC ring is ever actually used.
+ */
+static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
+                                const int index)
+{
+       cas_page_t **primary = cp->rx_pages[0];
+       cas_page_t **spares  = cp->rx_pages[1];
+
+       if (page_count(primary[index]->buffer) > 1) {
+               /* busy -- try to swap a spare into the primary slot */
+               cas_page_t *replacement = cas_page_spare(cp, index);
+
+               if (replacement) {
+                       spares[index]  = primary[index];
+                       primary[index] = replacement;
+               }
+       }
+       RX_USED_SET(primary[index], 0);
+       return primary[index];
+}
+
+/* Re-initialize RX descriptor ring 0 after a reset: drop every queued
+ * flow-reassembly skb, then rearm each descriptor with a (possibly
+ * freshly swapped-in) page.  Ring 1 is only a spare-page pool and is
+ * left alone.
+ */
+static void cas_clean_rxds(struct cas *cp)
+{
+       /* only clean ring 0 as ring 1 is used for spare buffers */
+        struct cas_rx_desc *rxd = cp->init_rxds[0];
+       int i, size;
+
+       /* release all rx flows */
+       for (i = 0; i < N_RX_FLOWS; i++) {
+               struct sk_buff *skb;
+               while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
+                       cas_skb_release(skb);
+               }
+       }
+
+       /* initialize descriptors */
+       size = RX_DESC_RINGN_SIZE(0);
+       for (i = 0; i < size; i++) {
+               cas_page_t *page = cas_page_swap(cp, 0, i);
+               rxd[i].buffer = cpu_to_le64(page->dma_addr);
+               rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
+                                           CAS_BASE(RX_INDEX_RING, 0));
+       }
+
+       /* rx_old trails the ring end by 4 -- presumably to keep a gap
+        * from the kick pointer (cas_post_page kicks on multiples of 4);
+        * TODO confirm.
+        */
+       cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
+       cp->rx_last[0] = 0;
+       cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
+}
+
+/* Take back ownership of all RX completion descriptors: reset the
+ * per-ring cursors and re-initialize every completion entry.
+ */
+static void cas_clean_rxcs(struct cas *cp)
+{
+       int ring, slot;
+
+       memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
+       memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
+       for (ring = 0; ring < N_RX_COMP_RINGS; ring++) {
+               struct cas_rx_comp *rxc = cp->init_rxcs[ring];
+
+               /* each slot is independent, so order doesn't matter */
+               for (slot = RX_COMP_RINGN_SIZE(ring) - 1; slot >= 0; slot--)
+                       cas_rxc_init(&rxc[slot]);
+       }
+}
+
+#if 0
+/* NOTE(review): compiled out -- cas_abnormal_irq schedules a full chip
+ * reset instead of using this targeted RX-unit recovery path.
+ */
+/* When we get a RX fifo overflow, the RX unit is probably hung
+ * so we do the following.
+ *
+ * If any part of the reset goes wrong, we return 1 and that causes the
+ * whole chip to be reset.
+ */
+static int cas_rxmac_reset(struct cas *cp)
+{
+       struct net_device *dev = cp->dev;
+       int limit;
+       u32 val;
+
+       /* First, reset MAC RX. */
+       writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+       for (limit = 0; limit < STOP_TRIES; limit++) {
+               if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
+                       break;
+               udelay(10);
+       }
+       if (limit == STOP_TRIES) {
+               netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
+               return 1;
+       }
+
+       /* Second, disable RX DMA. */
+       writel(0, cp->regs + REG_RX_CFG);
+       for (limit = 0; limit < STOP_TRIES; limit++) {
+               if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
+                       break;
+               udelay(10);
+       }
+       if (limit == STOP_TRIES) {
+               netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
+               return 1;
+       }
+
+       mdelay(5);
+
+       /* Execute RX reset command. */
+       writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
+       for (limit = 0; limit < STOP_TRIES; limit++) {
+               if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
+                       break;
+               udelay(10);
+       }
+       if (limit == STOP_TRIES) {
+               netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
+               return 1;
+       }
+
+       /* reset driver rx state */
+       cas_clean_rxds(cp);
+       cas_clean_rxcs(cp);
+
+       /* Now, reprogram the rest of RX unit. */
+       cas_init_rx_dma(cp);
+
+       /* re-enable */
+       val = readl(cp->regs + REG_RX_CFG);
+       writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
+       writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
+       val = readl(cp->regs + REG_MAC_RX_CFG);
+       writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+       return 0;
+}
+#endif
+
+/* Service an RX MAC status interrupt: fold hardware counter rollovers
+ * into the ring-0 stats.  Each rollover event adds 0x10000, i.e. the
+ * on-chip counters are presumably 16 bits wide (TODO confirm against
+ * the Cassini documentation).  Always returns 0 (no reset needed).
+ */
+static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
+                              u32 status)
+{
+       u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
+
+       if (!stat)
+               return 0;
+
+       netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
+
+       /* these are all rollovers */
+       spin_lock(&cp->stat_lock[0]);
+       if (stat & MAC_RX_ALIGN_ERR)
+               cp->net_stats[0].rx_frame_errors += 0x10000;
+
+       if (stat & MAC_RX_CRC_ERR)
+               cp->net_stats[0].rx_crc_errors += 0x10000;
+
+       if (stat & MAC_RX_LEN_ERR)
+               cp->net_stats[0].rx_length_errors += 0x10000;
+
+       if (stat & MAC_RX_OVERFLOW) {
+               cp->net_stats[0].rx_over_errors++;
+               cp->net_stats[0].rx_fifo_errors++;
+       }
+
+       /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
+        * events.
+        */
+       spin_unlock(&cp->stat_lock[0]);
+       return 0;
+}
+
+/* Service a MAC control interrupt.  Only pause-frame bookkeeping is
+ * done here; these events are useful for diagnostics and debug but are
+ * normally masked.  Always returns 0 (never requests a reset).
+ */
+static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
+                            u32 status)
+{
+       u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
+
+       if (stat == 0)
+               return 0;
+
+       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+                    "mac interrupt, stat: 0x%x\n", stat);
+
+       if (stat & MAC_CTRL_PAUSE_STATE)
+               cp->pause_entered++;
+       if (stat & MAC_CTRL_PAUSE_RECEIVED)
+               cp->pause_last_time_recvd = (stat >> 16);
+
+       return 0;
+}
+
+
+/* Must be invoked under cp->lock. */
+static inline int cas_mdio_link_not_up(struct cas *cp)
+{
+       u16 val;
+
+       switch (cp->lstate) {
+       case link_force_ret:
+               netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
+               cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
+               cp->timer_ticks = 5;
+               cp->lstate = link_force_ok;
+               cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+               break;
+
+       case link_aneg:
+               val = cas_phy_read(cp, MII_BMCR);
+
+               /* Try forced modes. we try things in the following order:
+                * 1000 full -> 100 full/half -> 10 half
+                */
+               val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
+               val |= BMCR_FULLDPLX;
+               val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
+                       CAS_BMCR_SPEED1000 : BMCR_SPEED100;
+               cas_phy_write(cp, MII_BMCR, val);
+               cp->timer_ticks = 5;
+               cp->lstate = link_force_try;
+               cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+               break;
+
+       case link_force_try:
+               /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
+               val = cas_phy_read(cp, MII_BMCR);
+               cp->timer_ticks = 5;
+               if (val & CAS_BMCR_SPEED1000) { /* gigabit */
+                       val &= ~CAS_BMCR_SPEED1000;
+                       val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
+                       cas_phy_write(cp, MII_BMCR, val);
+                       break;
+               }
+
+               if (val & BMCR_SPEED100) {
+                       if (val & BMCR_FULLDPLX) /* fd failed */
+                               val &= ~BMCR_FULLDPLX;
+                       else { /* 100Mbps failed */
+                               val &= ~BMCR_SPEED100;
+                       }
+                       cas_phy_write(cp, MII_BMCR, val);
+                       break;
+               }
+       default:
+               break;
+       }
+       return 0;
+}
+
+
+/* must be invoked with cp->lock held */
+static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
+{
+       int restart;
+
+       if (bmsr & BMSR_LSTATUS) {
+               /* Ok, here we got a link. If we had it due to a forced
+                * fallback, and we were configured for autoneg, we
+                * retry a short autoneg pass. If you know your hub is
+                * broken, use ethtool ;)
+                */
+               if ((cp->lstate == link_force_try) &&
+                   (cp->link_cntl & BMCR_ANENABLE)) {
+                       cp->lstate = link_force_ret;
+                       cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
+                       cas_mif_poll(cp, 0);
+                       cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
+                       cp->timer_ticks = 5;
+                       if (cp->opened)
+                               netif_info(cp, link, cp->dev,
+                                          "Got link after fallback, retrying autoneg once...\n");
+                       cas_phy_write(cp, MII_BMCR,
+                                     cp->link_fcntl | BMCR_ANENABLE |
+                                     BMCR_ANRESTART);
+                       cas_mif_poll(cp, 1);
+
+               } else if (cp->lstate != link_up) {
+                       cp->lstate = link_up;
+                       cp->link_transition = LINK_TRANSITION_LINK_UP;
+
+                       if (cp->opened) {
+                               cas_set_link_modes(cp);
+                               netif_carrier_on(cp->dev);
+                       }
+               }
+               return 0;
+       }
+
+       /* link not up. if the link was previously up, we restart the
+        * whole process
+        */
+       restart = 0;
+       if (cp->lstate == link_up) {
+               cp->lstate = link_down;
+               cp->link_transition = LINK_TRANSITION_LINK_DOWN;
+
+               netif_carrier_off(cp->dev);
+               if (cp->opened)
+                       netif_info(cp, link, cp->dev, "Link down\n");
+               restart = 1;
+
+       } else if (++cp->timer_ticks > 10)
+               cas_mdio_link_not_up(cp);
+
+       return restart;
+}
+
+/* MIF polling noticed activity on the PHY: if the poll reports a
+ * change, extract the polled BMSR value and feed it to the link state
+ * machine.  Returns the restart verdict from cas_mii_link_check().
+ */
+static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
+                            u32 status)
+{
+       u32 stat = readl(cp->regs + REG_MIF_STATUS);
+
+       /* nothing changed -> nothing to do */
+       if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
+               return 0;
+
+       return cas_mii_link_check(cp,
+                                 (u16)CAS_VAL(MIF_STATUS_POLL_DATA, stat));
+}
+
+/* Decode a PCI error interrupt, report its cause(s), clear the sticky
+ * error bits in PCI config space, and always request a chip reset
+ * (returns 1 for any non-zero error status).
+ */
+static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
+                            u32 status)
+{
+       u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
+
+       if (!stat)
+               return 0;
+
+       netdev_err(dev, "PCI error [%04x:%04x]",
+                  stat, readl(cp->regs + REG_BIM_DIAG));
+
+       /* cassini+ has this reserved */
+       if ((stat & PCI_ERR_BADACK) &&
+           ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
+               pr_cont(" <No ACK64# during ABS64 cycle>");
+
+       if (stat & PCI_ERR_DTRTO)
+               pr_cont(" <Delayed transaction timeout>");
+       if (stat & PCI_ERR_OTHER)
+               pr_cont(" <other>");
+       if (stat & PCI_ERR_BIM_DMA_WRITE)
+               pr_cont(" <BIM DMA 0 write req>");
+       if (stat & PCI_ERR_BIM_DMA_READ)
+               pr_cont(" <BIM DMA 0 read req>");
+       pr_cont("\n");
+
+       if (stat & PCI_ERR_OTHER) {
+               u16 cfg;
+
+               /* Interrogate PCI config space for the
+                * true cause.
+                */
+               pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
+               netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
+               if (cfg & PCI_STATUS_PARITY)
+                       netdev_err(dev, "PCI parity error detected\n");
+               if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
+                       netdev_err(dev, "PCI target abort\n");
+               if (cfg & PCI_STATUS_REC_TARGET_ABORT)
+                       netdev_err(dev, "PCI master acks target abort\n");
+               if (cfg & PCI_STATUS_REC_MASTER_ABORT)
+                       netdev_err(dev, "PCI master abort\n");
+               if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
+                       netdev_err(dev, "PCI system error SERR#\n");
+               if (cfg & PCI_STATUS_DETECTED_PARITY)
+                       netdev_err(dev, "PCI parity error\n");
+
+               /* Write the error bits back to clear them. */
+               /* PCI_STATUS error bits are write-one-to-clear */
+               cfg &= (PCI_STATUS_PARITY |
+                       PCI_STATUS_SIG_TARGET_ABORT |
+                       PCI_STATUS_REC_TARGET_ABORT |
+                       PCI_STATUS_REC_MASTER_ABORT |
+                       PCI_STATUS_SIG_SYSTEM_ERROR |
+                       PCI_STATUS_DETECTED_PARITY);
+               pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
+       }
+
+       /* For all PCI errors, we should reset the chip. */
+       return 1;
+}
+
+/* All non-normal interrupt conditions get serviced here.
+ * Returns non-zero if we should just exit the interrupt
+ * handler right now (ie. if we reset the card which invalidates
+ * all of the other original irq status bits).
+ */
+static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
+                           u32 status)
+{
+       if (status & INTR_RX_TAG_ERROR) {
+               /* corrupt RX tag framing */
+               netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+                            "corrupt rx tag framing\n");
+               spin_lock(&cp->stat_lock[0]);
+               cp->net_stats[0].rx_errors++;
+               spin_unlock(&cp->stat_lock[0]);
+               goto do_reset;
+       }
+
+       if (status & INTR_RX_LEN_MISMATCH) {
+               /* length mismatch. */
+               netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+                            "length mismatch for rx frame\n");
+               spin_lock(&cp->stat_lock[0]);
+               cp->net_stats[0].rx_errors++;
+               spin_unlock(&cp->stat_lock[0]);
+               goto do_reset;
+       }
+
+       /* each sub-handler decides whether its condition is fatal */
+       if (status & INTR_PCS_STATUS) {
+               if (cas_pcs_interrupt(dev, cp, status))
+                       goto do_reset;
+       }
+
+       if (status & INTR_TX_MAC_STATUS) {
+               if (cas_txmac_interrupt(dev, cp, status))
+                       goto do_reset;
+       }
+
+       if (status & INTR_RX_MAC_STATUS) {
+               if (cas_rxmac_interrupt(dev, cp, status))
+                       goto do_reset;
+       }
+
+       if (status & INTR_MAC_CTRL_STATUS) {
+               if (cas_mac_interrupt(dev, cp, status))
+                       goto do_reset;
+       }
+
+       if (status & INTR_MIF_STATUS) {
+               if (cas_mif_interrupt(dev, cp, status))
+                       goto do_reset;
+       }
+
+       if (status & INTR_PCI_ERROR_STATUS) {
+               if (cas_pci_interrupt(dev, cp, status))
+                       goto do_reset;
+       }
+       return 0;
+
+do_reset:
+       /* defer the actual reset to process context via the workqueue */
+#if 1
+       atomic_inc(&cp->reset_task_pending);
+       atomic_inc(&cp->reset_task_pending_all);
+       netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
+       schedule_work(&cp->reset_task);
+#else
+       atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
+       netdev_err(dev, "reset called in cas_abnormal_irq\n");
+       schedule_work(&cp->reset_task);
+#endif
+       return 1;
+}
+
+/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
+ *       determining whether to do a netif_stop/wakeup
+ */
+#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
+#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
+/* Decide whether a TX buffer ending within TX_TARGET_ABORT_LEN bytes
+ * of a page boundary needs the target-abort workaround; returns the
+ * extra length to reserve (0 when no workaround is needed).
+ * NOTE(review): presumably works around a chip erratum on buffers
+ * ending near a page boundary -- TODO confirm against the errata.
+ */
+static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
+                                 const int len)
+{
+       unsigned long off = addr + len;
+
+       if (CAS_TABORT(cp) == 1)
+               return 0;
+       if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
+               return 0;
+       return TX_TARGET_ABORT_LEN;
+}
+
+/* Reap completed TX descriptors on @ring up to @limit: unmap each
+ * fragment, account the skb in the per-ring stats, and free it.  Wakes
+ * the queue when enough descriptors become available again.
+ */
+static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
+{
+       struct cas_tx_desc *txds;
+       struct sk_buff **skbs;
+       struct net_device *dev = cp->dev;
+       int entry, count;
+
+       spin_lock(&cp->tx_lock[ring]);
+       txds = cp->init_txds[ring];
+       skbs = cp->tx_skbs[ring];
+       entry = cp->tx_old[ring];
+
+       count = TX_BUFF_COUNT(ring, entry, limit);
+       while (entry != limit) {
+               struct sk_buff *skb = skbs[entry];
+               dma_addr_t daddr;
+               u32 dlen;
+               int frag;
+
+               if (!skb) {
+                       /* this should never occur */
+                       entry = TX_DESC_NEXT(ring, entry);
+                       continue;
+               }
+
+               /* however, we might get only a partial skb release. */
+               /* NOTE(review): the "+ +" below is a stray unary plus
+                * (harmless, same value) left over from a line split.
+                */
+               count -= skb_shinfo(skb)->nr_frags +
+                       + cp->tx_tiny_use[ring][entry].nbufs + 1;
+               if (count < 0)
+                       break;
+
+               netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
+                            "tx[%d] done, slot %d\n", ring, entry);
+
+               skbs[entry] = NULL;
+               cp->tx_tiny_use[ring][entry].nbufs = 0;
+
+               /* one descriptor per fragment, plus the head (<=) */
+               for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+                       struct cas_tx_desc *txd = txds + entry;
+
+                       daddr = le64_to_cpu(txd->buffer);
+                       dlen = CAS_VAL(TX_DESC_BUFLEN,
+                                      le64_to_cpu(txd->control));
+                       pci_unmap_page(cp->pdev, daddr, dlen,
+                                      PCI_DMA_TODEVICE);
+                       entry = TX_DESC_NEXT(ring, entry);
+
+                       /* tiny buffer may follow */
+                       if (cp->tx_tiny_use[ring][entry].used) {
+                               cp->tx_tiny_use[ring][entry].used = 0;
+                               entry = TX_DESC_NEXT(ring, entry);
+                       }
+               }
+
+               spin_lock(&cp->stat_lock[ring]);
+               cp->net_stats[ring].tx_packets++;
+               cp->net_stats[ring].tx_bytes += skb->len;
+               spin_unlock(&cp->stat_lock[ring]);
+               dev_kfree_skb_irq(skb);
+       }
+       cp->tx_old[ring] = entry;
+
+       /* this is wrong for multiple tx rings. the net device needs
+        * multiple queues for this to do the right thing.  we wait
+        * for 2*packets to be available when using tiny buffers
+        */
+       if (netif_queue_stopped(dev) &&
+           (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
+               netif_wake_queue(dev);
+       spin_unlock(&cp->tx_lock[ring]);
+}
+
+/* Reap completed TX descriptors on every ring.
+ *
+ * With USE_TX_COMPWB the per-ring completion indices come from the
+ * DMA'd completion writeback block; otherwise each ring's completion
+ * register is read directly.
+ *
+ * Fix: 'compwb' was declared only under #ifdef USE_TX_COMPWB but
+ * referenced unconditionally in the debug printout, breaking the
+ * build when USE_TX_COMPWB is undefined.  Define it as 0 in that
+ * configuration.  (Also normalizes the space-indented declaration.)
+ */
+static void cas_tx(struct net_device *dev, struct cas *cp,
+                  u32 status)
+{
+       int limit, ring;
+#ifdef USE_TX_COMPWB
+       u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
+#else
+       u64 compwb = 0; /* unused; keeps the debug printout buildable */
+#endif
+       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+                    "tx interrupt, status: 0x%x, %llx\n",
+                    status, (unsigned long long)compwb);
+       /* process all the rings */
+       for (ring = 0; ring < N_TX_RINGS; ring++) {
+#ifdef USE_TX_COMPWB
+               /* use the completion writeback registers */
+               limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
+                       CAS_VAL(TX_COMPWB_LSB, compwb);
+               compwb = TX_COMPWB_NEXT(compwb);
+#else
+               limit = readl(cp->regs + REG_TX_COMPN(ring));
+#endif
+               if (cp->tx_old[ring] != limit)
+                       cas_tx_ringN(cp, ring, limit);
+       }
+}
+
+
+/* Build an skb from one RX completion entry.
+ *
+ * Small packets are copied wholesale into a fresh skb; larger ones get
+ * at least RX_COPY_MIN bytes of header copied and the remaining data
+ * attached as page fragments (up to two pages for split packets).
+ * When cp->crc_size is set, the trailing FCS is folded back out of the
+ * hardware checksum.  Returns the total packet length, or -1 on
+ * allocation failure / page overflow (nothing to free for the caller
+ * in that case -- the skb is released here).
+ */
+static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
+                             int entry, const u64 *words,
+                             struct sk_buff **skbref)
+{
+       int dlen, hlen, len, i, alloclen;
+       int off, swivel = RX_SWIVEL_OFF_VAL;
+       struct cas_page *page;
+       struct sk_buff *skb;
+       void *addr, *crcaddr;
+       __sum16 csum;
+       char *p;
+
+       hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
+       dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
+       len  = hlen + dlen;
+
+       /* small packets are copied entirely; otherwise only the header */
+       if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
+               alloclen = len;
+       else
+               alloclen = max(hlen, RX_COPY_MIN);
+
+       skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
+       if (skb == NULL)
+               return -1;
+
+       *skbref = skb;
+       skb_reserve(skb, swivel);
+
+       p = skb->data;
+       addr = crcaddr = NULL;
+       if (hlen) { /* always copy header pages */
+               i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
+               page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+               off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
+                       swivel;
+
+               i = hlen;
+               if (!dlen) /* attach FCS */
+                       i += cp->crc_size;
+               pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
+                                   PCI_DMA_FROMDEVICE);
+               addr = cas_page_map(page->buffer);
+               memcpy(p, addr + off, i);
+               pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
+                                   PCI_DMA_FROMDEVICE);
+               cas_page_unmap(addr);
+               RX_USED_ADD(page, 0x100);
+               p += hlen;
+               swivel = 0;
+       }
+
+
+       if (alloclen < (hlen + dlen)) {
+               skb_frag_t *frag = skb_shinfo(skb)->frags;
+
+               /* normal or jumbo packets. we use frags */
+               i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
+               page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+               off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
+
+               /* from here on, hlen is the chunk that fits this page */
+               hlen = min(cp->page_size - off, dlen);
+               if (hlen < 0) {
+                       netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+                                    "rx page overflow: %d\n", hlen);
+                       dev_kfree_skb_irq(skb);
+                       return -1;
+               }
+               i = hlen;
+               if (i == dlen)  /* attach FCS */
+                       i += cp->crc_size;
+               pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
+                                   PCI_DMA_FROMDEVICE);
+
+               /* make sure we always copy a header */
+               swivel = 0;
+               if (p == (char *) skb->data) { /* not split */
+                       addr = cas_page_map(page->buffer);
+                       memcpy(p, addr + off, RX_COPY_MIN);
+                       pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
+                                       PCI_DMA_FROMDEVICE);
+                       cas_page_unmap(addr);
+                       off += RX_COPY_MIN;
+                       swivel = RX_COPY_MIN;
+                       RX_USED_ADD(page, cp->mtu_stride);
+               } else {
+                       RX_USED_ADD(page, hlen);
+               }
+               skb_put(skb, alloclen);
+
+               skb_shinfo(skb)->nr_frags++;
+               skb->data_len += hlen - swivel;
+               skb->truesize += hlen - swivel;
+               skb->len      += hlen - swivel;
+
+               /* the frag holds its own page reference */
+               get_page(page->buffer);
+               frag->page = page->buffer;
+               frag->page_offset = off;
+               frag->size = hlen - swivel;
+
+               /* any more data? */
+               if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
+                       hlen = dlen;
+                       off = 0;
+
+                       i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
+                       page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+                       pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+                                           hlen + cp->crc_size,
+                                           PCI_DMA_FROMDEVICE);
+                       pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
+                                           hlen + cp->crc_size,
+                                           PCI_DMA_FROMDEVICE);
+
+                       skb_shinfo(skb)->nr_frags++;
+                       skb->data_len += hlen;
+                       skb->len      += hlen;
+                       frag++;
+
+                       get_page(page->buffer);
+                       frag->page = page->buffer;
+                       frag->page_offset = 0;
+                       frag->size = hlen;
+                       RX_USED_ADD(page, hlen + cp->crc_size);
+               }
+
+               /* FCS lives just past the data in the last page */
+               if (cp->crc_size) {
+                       addr = cas_page_map(page->buffer);
+                       crcaddr  = addr + off + hlen;
+               }
+
+       } else {
+               /* copying packet */
+               if (!dlen)
+                       goto end_copy_pkt;
+
+               i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
+               page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+               off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
+               hlen = min(cp->page_size - off, dlen);
+               if (hlen < 0) {
+                       netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+                                    "rx page overflow: %d\n", hlen);
+                       dev_kfree_skb_irq(skb);
+                       return -1;
+               }
+               i = hlen;
+               if (i == dlen) /* attach FCS */
+                       i += cp->crc_size;
+               pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
+                                   PCI_DMA_FROMDEVICE);
+               addr = cas_page_map(page->buffer);
+               memcpy(p, addr + off, i);
+               pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
+                                   PCI_DMA_FROMDEVICE);
+               cas_page_unmap(addr);
+               if (p == (char *) skb->data) /* not split */
+                       RX_USED_ADD(page, cp->mtu_stride);
+               else
+                       RX_USED_ADD(page, i);
+
+               /* any more data? */
+               if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
+                       p += hlen;
+                       i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
+                       page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
+                       pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
+                                           dlen + cp->crc_size,
+                                           PCI_DMA_FROMDEVICE);
+                       addr = cas_page_map(page->buffer);
+                       memcpy(p, addr, dlen + cp->crc_size);
+                       pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
+                                           dlen + cp->crc_size,
+                                           PCI_DMA_FROMDEVICE);
+                       cas_page_unmap(addr);
+                       RX_USED_ADD(page, dlen + cp->crc_size);
+               }
+end_copy_pkt:
+               if (cp->crc_size) {
+                       addr    = NULL;
+                       crcaddr = skb->data + alloclen;
+               }
+               skb_put(skb, alloclen);
+       }
+
+       csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
+       if (cp->crc_size) {
+               /* checksum includes FCS. strip it out. */
+               csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
+                                             csum_unfold(csum)));
+               if (addr)
+                       cas_page_unmap(addr);
+       }
+       skb->protocol = eth_type_trans(skb, cp->dev);
+       if (skb->protocol == htons(ETH_P_IP)) {
+               skb->csum = csum_unfold(~csum);
+               skb->ip_summed = CHECKSUM_COMPLETE;
+       } else
+               skb_checksum_none_assert(skb);
+       return len;
+}
+
+
+/* we can handle up to 64 rx flows at a time. we do the same thing
+ * as nonreassm except that we batch up the buffers.
+ * NOTE: we currently just treat each flow as a bunch of packets that
+ *       we pass up. a better way would be to coalesce the packets
+ *       into a jumbo packet. to do that, we need to do the following:
+ *       1) the first packet will have a clean split between header and
+ *          data. save both.
+ *       2) each time the next flow packet comes in, extend the
+ *          data length and merge the checksums.
+ *       3) on flow release, fix up the header.
+ *       4) make sure the higher layer doesn't care.
+ * because packets get coalesced, we shouldn't run into fragment count
+ * issues.
+ */
+static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
+                                  struct sk_buff *skb)
+{
+       struct sk_buff_head *flow;
+       int flowid;
+
+       flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
+       flow = &cp->rx_flows[flowid];
+
+       /* serialized at a higher layer, so no extra locking needed;
+        * just append the buffer to its flow queue.
+        */
+       __skb_queue_tail(flow, skb);
+
+       /* flow not done yet -- keep batching */
+       if (!(words[0] & RX_COMP1_RELEASE_FLOW))
+               return;
+
+       /* flow released: pass every batched packet up the stack */
+       while ((skb = __skb_dequeue(flow)) != NULL)
+               cas_skb_release(skb);
+}
+
+/* put rx descriptor back on ring. if a buffer is in use by a higher
+ * layer, this will need to put in a replacement.
+ */
+static void cas_post_page(struct cas *cp, const int ring, const int index)
+{
+       cas_page_t *new;
+       int entry;
+
+       entry = cp->rx_old[ring];
+
+       /* obtain a (possibly replacement) page for this slot and
+        * rewrite the descriptor to point at its DMA address
+        */
+       new = cas_page_swap(cp, ring, index);
+       cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
+       cp->init_rxds[ring][entry].index  =
+               cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
+                           CAS_BASE(RX_INDEX_RING, ring));
+
+       entry = RX_DESC_ENTRY(ring, entry + 1);
+       cp->rx_old[ring] = entry;
+
+       /* only kick the hardware on 4-descriptor boundaries */
+       if (entry % 4)
+               return;
+
+       if (ring == 0)
+               writel(entry, cp->regs + REG_RX_KICK);
+       else if ((N_RX_DESC_RINGS > 1) &&
+                (cp->cas_flags & CAS_FLAG_REG_PLUS))
+               writel(entry, cp->regs + REG_PLUS_RX_KICK1);
+}
+
+
+/* only when things are bad */
+static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
+{
+       unsigned int entry, last, count, released;
+       int cluster;
+       cas_page_t **page = cp->rx_pages[ring];
+
+       entry = cp->rx_old[ring];
+
+       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+                    "rxd[%d] interrupt, done: %d\n", ring, entry);
+
+       cluster = -1;
+       /* count from the last 4-descriptor kick boundary */
+       count = entry & 0x3;
+       last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
+       released = 0;
+       while (entry != last) {
+               /* make a new buffer if it's still in use */
+               if (page_count(page[entry]->buffer) > 1) {
+                       cas_page_t *new = cas_page_dequeue(cp);
+                       if (!new) {
+                               /* let the timer know that we need to
+                                * do this again
+                                */
+                               cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
+                               if (!timer_pending(&cp->link_timer))
+                                       mod_timer(&cp->link_timer, jiffies +
+                                                 CAS_LINK_FAST_TIMEOUT);
+                               cp->rx_old[ring]  = entry;
+                               cp->rx_last[ring] = num ? num - released : 0;
+                               return -ENOMEM;
+                       }
+                       /* park the busy page on the in-use list until the
+                        * upper layer drops its last reference
+                        */
+                       spin_lock(&cp->rx_inuse_lock);
+                       list_add(&page[entry]->list, &cp->rx_inuse_list);
+                       spin_unlock(&cp->rx_inuse_lock);
+                       cp->init_rxds[ring][entry].buffer =
+                               cpu_to_le64(new->dma_addr);
+                       page[entry] = new;
+
+               }
+
+               /* remember the most recent 4-aligned slot for the kick */
+               if (++count == 4) {
+                       cluster = entry;
+                       count = 0;
+               }
+               released++;
+               entry = RX_DESC_ENTRY(ring, entry + 1);
+       }
+       cp->rx_old[ring] = entry;
+
+       /* nothing crossed a kick boundary: hardware already up to date */
+       if (cluster < 0)
+               return 0;
+
+       if (ring == 0)
+               writel(cluster, cp->regs + REG_RX_KICK);
+       else if ((N_RX_DESC_RINGS > 1) &&
+                (cp->cas_flags & CAS_FLAG_REG_PLUS))
+               writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
+       return 0;
+}
+
+
+/* process a completion ring. packets are set up in three basic ways:
+ * small packets: should be copied header + data in single buffer.
+ * large packets: header and data in a single buffer.
+ * split packets: header in a separate buffer from data.
+ *                data may be in multiple pages. data may be > 256
+ *                bytes but in a single page.
+ *
+ * NOTE: RX page posting is done in this routine as well. while there's
+ *       the capability of using multiple RX completion rings, it isn't
+ *       really worthwhile due to the fact that the page posting will
+ *       force serialization on the single descriptor ring.
+ *
+ * Returns the number of completion entries processed (bounded by
+ * @budget when NAPI is in use; budget == 0 means "no limit").
+ */
+static int cas_rx_ringN(struct cas *cp, int ring, int budget)
+{
+       struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
+       int entry, drops;
+       int npackets = 0;
+
+       netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+                    "rx[%d] interrupt, done: %d/%d\n",
+                    ring,
+                    readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
+
+       entry = cp->rx_new[ring];
+       drops = 0;
+       while (1) {
+               struct cas_rx_comp *rxc = rxcs + entry;
+               struct sk_buff *uninitialized_var(skb);
+               int type, len;
+               u64 words[4];
+               int i, dring;
+
+               words[0] = le64_to_cpu(rxc->word1);
+               words[1] = le64_to_cpu(rxc->word2);
+               words[2] = le64_to_cpu(rxc->word3);
+               words[3] = le64_to_cpu(rxc->word4);
+
+               /* don't touch if still owned by hw */
+               type = CAS_VAL(RX_COMP1_TYPE, words[0]);
+               if (type == 0)
+                       break;
+
+               /* hw hasn't cleared the zero bit yet */
+               if (words[3] & RX_COMP4_ZERO) {
+                       break;
+               }
+
+               /* get info on the packet */
+               if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
+                       spin_lock(&cp->stat_lock[ring]);
+                       cp->net_stats[ring].rx_errors++;
+                       if (words[3] & RX_COMP4_LEN_MISMATCH)
+                               cp->net_stats[ring].rx_length_errors++;
+                       if (words[3] & RX_COMP4_BAD)
+                               cp->net_stats[ring].rx_crc_errors++;
+                       spin_unlock(&cp->stat_lock[ring]);
+
+                       /* We'll just return it to Cassini. */
+               drop_it:
+                       spin_lock(&cp->stat_lock[ring]);
+                       ++cp->net_stats[ring].rx_dropped;
+                       spin_unlock(&cp->stat_lock[ring]);
+                       goto next;
+               }
+
+               len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
+               if (len < 0) {
+                       /* skb allocation failure: recycle the buffers */
+                       ++drops;
+                       goto drop_it;
+               }
+
+               /* see if it's a flow re-assembly or not. the driver
+                * itself handles release back up.
+                */
+               if (RX_DONT_BATCH || (type == 0x2)) {
+                       /* non-reassm: these always get released */
+                       cas_skb_release(skb);
+               } else {
+                       cas_rx_flow_pkt(cp, words, skb);
+               }
+
+               spin_lock(&cp->stat_lock[ring]);
+               cp->net_stats[ring].rx_packets++;
+               cp->net_stats[ring].rx_bytes += len;
+               spin_unlock(&cp->stat_lock[ring]);
+
+       next:
+               npackets++;
+
+               /* should it be released? re-post each descriptor page the
+                * hardware says it is done with (header / data / next)
+                */
+               if (words[0] & RX_COMP1_RELEASE_HDR) {
+                       i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
+                       dring = CAS_VAL(RX_INDEX_RING, i);
+                       i = CAS_VAL(RX_INDEX_NUM, i);
+                       cas_post_page(cp, dring, i);
+               }
+
+               if (words[0] & RX_COMP1_RELEASE_DATA) {
+                       i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
+                       dring = CAS_VAL(RX_INDEX_RING, i);
+                       i = CAS_VAL(RX_INDEX_NUM, i);
+                       cas_post_page(cp, dring, i);
+               }
+
+               if (words[0] & RX_COMP1_RELEASE_NEXT) {
+                       i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
+                       dring = CAS_VAL(RX_INDEX_RING, i);
+                       i = CAS_VAL(RX_INDEX_NUM, i);
+                       cas_post_page(cp, dring, i);
+               }
+
+               /* skip to the next entry */
+               entry = RX_COMP_ENTRY(ring, entry + 1 +
+                                     CAS_VAL(RX_COMP1_SKIP, words[0]));
+#ifdef USE_NAPI
+               if (budget && (npackets >= budget))
+                       break;
+#endif
+       }
+       cp->rx_new[ring] = entry;
+
+       if (drops)
+               netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
+       return npackets;
+}
+
+
+/* return consumed completion entries to the hardware: re-initialize
+ * each one and advance the completion tail register.
+ */
+static void cas_post_rxcs_ringN(struct net_device *dev,
+                               struct cas *cp, int ring)
+{
+       struct cas_rx_comp *base = cp->init_rxcs[ring];
+       int cur = cp->rx_cur[ring];
+       int stop = cp->rx_new[ring];
+
+       netif_printk(cp, intr, KERN_DEBUG, dev,
+                    "rxc[%d] interrupt, done: %d/%d\n",
+                    ring, readl(cp->regs + REG_RX_COMP_HEAD), stop);
+
+       /* zero and re-mark every descriptor we have consumed */
+       for (; cur != stop; cur = RX_COMP_ENTRY(ring, cur + 1))
+               cas_rxc_init(base + cur);
+       cp->rx_cur[ring] = cur;
+
+       /* tell the chip how far we got */
+       if (ring == 0)
+               writel(cur, cp->regs + REG_RX_COMP_TAIL);
+       else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
+               writel(cur, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
+}
+
+
+
+/* cassini can use all four PCI interrupts for the completion ring.
+ * rings 3 and 4 are identical
+ */
+#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
+/* housekeeping for the alternate completion rings (2/3): only
+ * completion-ring full / almost-full events need servicing here.
+ */
+static inline void cas_handle_irqN(struct net_device *dev,
+                                  struct cas *cp, const u32 status,
+                                  const int ring)
+{
+       if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
+               cas_post_rxcs_ringN(dev, cp, ring);
+}
+
+static irqreturn_t cas_interruptN(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct cas *cp = netdev_priv(dev);
+       unsigned long flags;
+       int ring;
+       u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
+
+       /* check for shared irq */
+       if (status == 0)
+               return IRQ_NONE;
+
+       ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
+       spin_lock_irqsave(&cp->lock, flags);
+       if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
+#ifdef USE_NAPI
+               cas_mask_intr(cp);
+               napi_schedule(&cp->napi);
+#else
+               cas_rx_ringN(cp, ring, 0);
+#endif
+               status &= ~INTR_RX_DONE_ALT;
+       }
+
+       if (status)
+               cas_handle_irqN(dev, cp, status, ring);
+       spin_unlock_irqrestore(&cp->lock, flags);
+       return IRQ_HANDLED;
+}
+#endif
+
+#ifdef USE_PCI_INTB
+/* everything but rx packets: ring-1 buffer refill and completion
+ * housekeeping.
+ */
+static inline void cas_handle_irq1(struct cas *cp, const u32 status)
+{
+       if (status & INTR_RX_BUF_UNAVAIL_1) {
+               /* Frame arrived, no free RX buffers available.
+                * NOTE: we can get this on a link transition. */
+               cas_post_rxds_ringN(cp, 1, 0);
+               spin_lock(&cp->stat_lock[1]);
+               cp->net_stats[1].rx_dropped++;
+               spin_unlock(&cp->stat_lock[1]);
+       }
+
+       /* almost-empty: refill ring 1 up to its AE watermark */
+       if (status & INTR_RX_BUF_AE_1)
+               cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
+                                   RX_AE_FREEN_VAL(1));
+
+       /* cas_post_rxcs_ringN() takes (dev, cp, ring); the old
+        * two-argument call failed to compile when USE_PCI_INTB
+        * was defined.
+        */
+       if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
+               cas_post_rxcs_ringN(cp->dev, cp, 1);
+}
+
+/* ring 2 handles a few more events than 3 and 4 */
+static irqreturn_t cas_interrupt1(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct cas *cp = netdev_priv(dev);
+       unsigned long flags;
+       u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
+
+       /* check for shared interrupt */
+       if (status == 0)
+               return IRQ_NONE;
+
+       spin_lock_irqsave(&cp->lock, flags);
+       if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
+#ifdef USE_NAPI
+               /* defer rx work to the NAPI poll loop */
+               cas_mask_intr(cp);
+               napi_schedule(&cp->napi);
+#else
+               cas_rx_ringN(cp, 1, 0);
+#endif
+               status &= ~INTR_RX_DONE_ALT;
+       }
+       /* service any remaining ring-1 events */
+       if (status)
+               cas_handle_irq1(cp, status);
+       spin_unlock_irqrestore(&cp->lock, flags);
+       return IRQ_HANDLED;
+}
+#endif
+
+/* service the non-rx-done portion of the primary (ring 0) interrupt:
+ * error conditions, descriptor refill, and completion housekeeping.
+ */
+static inline void cas_handle_irq(struct net_device *dev,
+                                 struct cas *cp, const u32 status)
+{
+       /* housekeeping interrupts */
+       if (status & INTR_ERROR_MASK)
+               cas_abnormal_irq(dev, cp, status);
+
+       if (status & INTR_RX_BUF_UNAVAIL) {
+               /* Frame arrived, no free RX buffers available.
+                * NOTE: we can get this on a link transition.
+                */
+               cas_post_rxds_ringN(cp, 0, 0);
+               spin_lock(&cp->stat_lock[0]);
+               cp->net_stats[0].rx_dropped++;
+               spin_unlock(&cp->stat_lock[0]);
+       } else if (status & INTR_RX_BUF_AE) {
+               /* almost-empty: refill ring 0 up to its AE watermark */
+               cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
+                                   RX_AE_FREEN_VAL(0));
+       }
+
+       if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
+               cas_post_rxcs_ringN(dev, cp, 0);
+}
+
+/* primary interrupt handler (ring 0): TX completions, RX done, then
+ * any leftover housekeeping events. Returns IRQ_NONE when the status
+ * register is clear (shared irq line).
+ */
+static irqreturn_t cas_interrupt(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct cas *cp = netdev_priv(dev);
+       unsigned long flags;
+       u32 status = readl(cp->regs + REG_INTR_STATUS);
+
+       if (status == 0)
+               return IRQ_NONE;
+
+       spin_lock_irqsave(&cp->lock, flags);
+       if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
+               /* reap TX completions */
+               cas_tx(dev, cp, status);
+               status &= ~(INTR_TX_ALL | INTR_TX_INTME);
+       }
+
+       if (status & INTR_RX_DONE) {
+#ifdef USE_NAPI
+               /* defer rx work to the NAPI poll loop */
+               cas_mask_intr(cp);
+               napi_schedule(&cp->napi);
+#else
+               cas_rx_ringN(cp, 0, 0);
+#endif
+               status &= ~INTR_RX_DONE;
+       }
+
+       if (status)
+               cas_handle_irq(dev, cp, status);
+       spin_unlock_irqrestore(&cp->lock, flags);
+       return IRQ_HANDLED;
+}
+
+
+#ifdef USE_NAPI
+static int cas_poll(struct napi_struct *napi, int budget)
+{
+       struct cas *cp = container_of(napi, struct cas, napi);
+       struct net_device *dev = cp->dev;
+       int i, enable_intr, credits;
+       u32 status = readl(cp->regs + REG_INTR_STATUS);
+       unsigned long flags;
+
+       spin_lock_irqsave(&cp->lock, flags);
+       cas_tx(dev, cp, status);
+       spin_unlock_irqrestore(&cp->lock, flags);
+
+       /* NAPI rx packets. we spread the credits across all of the
+        * rxc rings
+        *
+        * to make sure we're fair with the work we loop through each
+        * ring N_RX_COMP_RING times with a request of
+        * budget / N_RX_COMP_RINGS
+        */
+       enable_intr = 1;
+       credits = 0;
+       for (i = 0; i < N_RX_COMP_RINGS; i++) {
+               int j;
+               for (j = 0; j < N_RX_COMP_RINGS; j++) {
+                       credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
+                       if (credits >= budget) {
+                               enable_intr = 0;
+                               goto rx_comp;
+                       }
+               }
+       }
+
+rx_comp:
+       /* final rx completion */
+       spin_lock_irqsave(&cp->lock, flags);
+       if (status)
+               cas_handle_irq(dev, cp, status);
+
+#ifdef USE_PCI_INTB
+       if (N_RX_COMP_RINGS > 1) {
+               status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
+               if (status)
+                       cas_handle_irq1(dev, cp, status);
+       }
+#endif
+
+#ifdef USE_PCI_INTC
+       if (N_RX_COMP_RINGS > 2) {
+               status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
+               if (status)
+                       cas_handle_irqN(dev, cp, status, 2);
+       }
+#endif
+
+#ifdef USE_PCI_INTD
+       if (N_RX_COMP_RINGS > 3) {
+               status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
+               if (status)
+                       cas_handle_irqN(dev, cp, status, 3);
+       }
+#endif
+       spin_unlock_irqrestore(&cp->lock, flags);
+       if (enable_intr) {
+               napi_complete(napi);
+               cas_unmask_intr(cp);
+       }
+       return credits;
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* netconsole/netpoll entry: run the primary interrupt handler with
+ * irqs disabled. NOTE: only ring 0 is serviced; the alternate ring
+ * handlers are stubbed out below.
+ */
+static void cas_netpoll(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+
+       cas_disable_irq(cp, 0);
+       cas_interrupt(cp->pdev->irq, dev);
+       cas_enable_irq(cp, 0);
+
+#ifdef USE_PCI_INTB
+       if (N_RX_COMP_RINGS > 1) {
+               /* cas_interrupt1(); */
+       }
+#endif
+#ifdef USE_PCI_INTC
+       if (N_RX_COMP_RINGS > 2) {
+               /* cas_interruptN(); */
+       }
+#endif
+#ifdef USE_PCI_INTD
+       if (N_RX_COMP_RINGS > 3) {
+               /* cas_interruptN(); */
+       }
+#endif
+}
+#endif
+
+/* TX watchdog: dump the MIF/MAC/TX/RX/HP state machines for
+ * diagnosis, then schedule a full chip reset from process context.
+ */
+static void cas_tx_timeout(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+
+       netdev_err(dev, "transmit timed out, resetting\n");
+       if (!cp->hw_running) {
+               netdev_err(dev, "hrm.. hw not running!\n");
+               return;
+       }
+
+       netdev_err(dev, "MIF_STATE[%08x]\n",
+                  readl(cp->regs + REG_MIF_STATE_MACHINE));
+
+       netdev_err(dev, "MAC_STATE[%08x]\n",
+                  readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+       netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
+                  readl(cp->regs + REG_TX_CFG),
+                  readl(cp->regs + REG_MAC_TX_STATUS),
+                  readl(cp->regs + REG_MAC_TX_CFG),
+                  readl(cp->regs + REG_TX_FIFO_PKT_CNT),
+                  readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
+                  readl(cp->regs + REG_TX_FIFO_READ_PTR),
+                  readl(cp->regs + REG_TX_SM_1),
+                  readl(cp->regs + REG_TX_SM_2));
+
+       netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+                  readl(cp->regs + REG_RX_CFG),
+                  readl(cp->regs + REG_MAC_RX_STATUS),
+                  readl(cp->regs + REG_MAC_RX_CFG));
+
+       netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
+                  readl(cp->regs + REG_HP_STATE_MACHINE),
+                  readl(cp->regs + REG_HP_STATUS0),
+                  readl(cp->regs + REG_HP_STATUS1),
+                  readl(cp->regs + REG_HP_STATUS2));
+
+#if 1
+       /* hand the reset off to the workqueue */
+       atomic_inc(&cp->reset_task_pending);
+       atomic_inc(&cp->reset_task_pending_all);
+       schedule_work(&cp->reset_task);
+#else
+       atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
+       schedule_work(&cp->reset_task);
+#endif
+}
+
+/* decide whether descriptor @entry should request a TX-done
+ * interrupt: fire once per half ring.
+ */
+static inline int cas_intme(int ring, int entry)
+{
+       /* Algorithm: IRQ every 1/2 of descriptors. */
+       return (entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)) == 0;
+}
+
+
+/* fill in one TX descriptor: DMA buffer address plus a control word
+ * carrying the length, an optional interrupt request, and an
+ * optional end-of-frame flag.
+ */
+static void cas_write_txd(struct cas *cp, int ring, int entry,
+                         dma_addr_t mapping, int len, u64 ctrl, int last)
+{
+       struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
+       u64 control = ctrl | CAS_BASE(TX_DESC_BUFLEN, len);
+
+       if (cas_intme(ring, entry))
+               control |= TX_DESC_INTME;
+       if (last)
+               control |= TX_DESC_EOF;
+
+       txd->control = cpu_to_le64(control);
+       txd->buffer = cpu_to_le64(mapping);
+}
+
+/* CPU address of the per-entry "tiny" bounce buffer for @ring */
+static inline void *tx_tiny_buf(struct cas *cp, const int ring,
+                               const int entry)
+{
+       return cp->tx_tiny_bufs[ring] + entry * TX_TINY_BUF_LEN;
+}
+
+/* DMA address of the tiny buffer at @entry. Usage bookkeeping is
+ * charged against @tentry (the frame's first descriptor slot).
+ */
+static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
+                                    const int entry, const int tentry)
+{
+       cp->tx_tiny_use[ring][tentry].nbufs++;
+       cp->tx_tiny_use[ring][entry].used = 1;
+       return cp->tx_tiny_dvma[ring] + entry * TX_TINY_BUF_LEN;
+}
+
+/* map and queue one skb on TX ring @ring. Fragments whose tail would
+ * trigger the target-abort erratum are split: the tail bytes are
+ * copied into a pre-mapped "tiny" bounce buffer and sent as an extra
+ * descriptor. Returns 0 on success, 1 when the ring is full.
+ */
+static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
+                                   struct sk_buff *skb)
+{
+       struct net_device *dev = cp->dev;
+       int entry, nr_frags, frag, tabort, tentry;
+       dma_addr_t mapping;
+       unsigned long flags;
+       u64 ctrl;
+       u32 len;
+
+       spin_lock_irqsave(&cp->tx_lock[ring], flags);
+
+       /* This is a hard error, log it. */
+       if (TX_BUFFS_AVAIL(cp, ring) <=
+           CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
+               netif_stop_queue(dev);
+               spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
+               netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+               return 1;
+       }
+
+       /* set up hardware checksum offload if requested */
+       ctrl = 0;
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               const u64 csum_start_off = skb_checksum_start_offset(skb);
+               const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
+
+               ctrl =  TX_DESC_CSUM_EN |
+                       CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
+                       CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
+       }
+
+       entry = cp->tx_new[ring];
+       cp->tx_skbs[ring][entry] = skb;
+
+       nr_frags = skb_shinfo(skb)->nr_frags;
+       len = skb_headlen(skb);
+       mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
+                              offset_in_page(skb->data), len,
+                              PCI_DMA_TODEVICE);
+
+       /* tabort > 0 means the buffer tail must be bounced through a
+        * tiny buffer to work around the target-abort erratum
+        */
+       tentry = entry;
+       tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
+       if (unlikely(tabort)) {
+               /* NOTE: len is always >  tabort */
+               cas_write_txd(cp, ring, entry, mapping, len - tabort,
+                             ctrl | TX_DESC_SOF, 0);
+               entry = TX_DESC_NEXT(ring, entry);
+
+               skb_copy_from_linear_data_offset(skb, len - tabort,
+                             tx_tiny_buf(cp, ring, entry), tabort);
+               mapping = tx_tiny_map(cp, ring, entry, tentry);
+               cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
+                             (nr_frags == 0));
+       } else {
+               cas_write_txd(cp, ring, entry, mapping, len, ctrl |
+                             TX_DESC_SOF, (nr_frags == 0));
+       }
+       entry = TX_DESC_NEXT(ring, entry);
+
+       /* map each page fragment, applying the same tail bounce */
+       for (frag = 0; frag < nr_frags; frag++) {
+               skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
+
+               len = fragp->size;
+               mapping = pci_map_page(cp->pdev, fragp->page,
+                                      fragp->page_offset, len,
+                                      PCI_DMA_TODEVICE);
+
+               tabort = cas_calc_tabort(cp, fragp->page_offset, len);
+               if (unlikely(tabort)) {
+                       void *addr;
+
+                       /* NOTE: len is always > tabort */
+                       cas_write_txd(cp, ring, entry, mapping, len - tabort,
+                                     ctrl, 0);
+                       entry = TX_DESC_NEXT(ring, entry);
+
+                       addr = cas_page_map(fragp->page);
+                       memcpy(tx_tiny_buf(cp, ring, entry),
+                              addr + fragp->page_offset + len - tabort,
+                              tabort);
+                       cas_page_unmap(addr);
+                       mapping = tx_tiny_map(cp, ring, entry, tentry);
+                       len     = tabort;
+               }
+
+               cas_write_txd(cp, ring, entry, mapping, len, ctrl,
+                             (frag + 1 == nr_frags));
+               entry = TX_DESC_NEXT(ring, entry);
+       }
+
+       cp->tx_new[ring] = entry;
+       /* stop the queue while there may not be room for a worst-case
+        * (fully fragmented, fully bounced) frame
+        */
+       if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
+               netif_stop_queue(dev);
+
+       netif_printk(cp, tx_queued, KERN_DEBUG, dev,
+                    "tx[%d] queued, slot %d, skblen %d, avail %d\n",
+                    ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
+       writel(entry, cp->regs + REG_TX_KICKN(ring));
+       spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
+       return 0;
+}
+
+/* ndo_start_xmit: pad short frames to the hardware minimum and
+ * round-robin frames across the TX rings.
+ */
+static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+
+       /* this is only used as a load-balancing hint, so it doesn't
+        * need to be SMP safe
+        */
+       static int ring;
+
+       /* NOTE: skb_padto frees the skb on failure */
+       if (skb_padto(skb, cp->min_frame_size))
+               return NETDEV_TX_OK;
+
+       /* XXX: we need some higher-level QoS hooks to steer packets to
+        *      individual queues.
+        */
+       if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
+               return NETDEV_TX_BUSY;
+       return NETDEV_TX_OK;
+}
+
+/* program the TX DMA engine: completion-writeback address, per-ring
+ * descriptor bases, and max burst sizes. Must be called with the
+ * chip quiesced.
+ */
+static void cas_init_tx_dma(struct cas *cp)
+{
+       u64 desc_dma = cp->block_dvma;
+       unsigned long off;
+       u32 val;
+       int i;
+
+       /* set up tx completion writeback registers. must be 8-byte aligned */
+#ifdef USE_TX_COMPWB
+       off = offsetof(struct cas_init_block, tx_compwb);
+       writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
+       writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
+#endif
+
+       /* enable completion writebacks, enable paced mode,
+        * disable read pipe, and disable pre-interrupt compwbs
+        */
+       val =   TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
+               TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
+               TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
+               TX_CFG_INTR_COMPWB_DIS;
+
+       /* write out tx ring info and tx desc bases */
+       for (i = 0; i < MAX_TX_RINGS; i++) {
+               off = (unsigned long) cp->init_txds[i] -
+                       (unsigned long) cp->init_block;
+
+               val |= CAS_TX_RINGN_BASE(i);
+               writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
+               writel((desc_dma + off) & 0xffffffff, cp->regs +
+                      REG_TX_DBN_LOW(i));
+               /* don't zero out the kick register here as the system
+                * will wedge
+                */
+       }
+       writel(val, cp->regs + REG_TX_CFG);
+
+       /* program max burst sizes. these numbers should be different
+        * if doing QoS.
+        */
+#ifdef USE_QOS
+       writel(0x800, cp->regs + REG_TX_MAXBURST_0);
+       writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
+       writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
+       writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
+#else
+       writel(0x800, cp->regs + REG_TX_MAXBURST_0);
+       writel(0x800, cp->regs + REG_TX_MAXBURST_1);
+       writel(0x800, cp->regs + REG_TX_MAXBURST_2);
+       writel(0x800, cp->regs + REG_TX_MAXBURST_3);
+#endif
+}
+
+/* Must be invoked under cp->lock. */
+static inline void cas_init_dma(struct cas *cp)
+{
+       /* program the TX engine first, then RX */
+       cas_init_tx_dma(cp);
+       cas_init_rx_dma(cp);
+}
+
+/* load the multicast filter: the first 15 addresses go into the
+ * exact-match alternate MAC address registers; any remainder is
+ * folded into the 256-bit hash filter.
+ */
+static void cas_process_mc_list(struct cas *cp)
+{
+       u16 hash_table[16];
+       u32 crc;
+       struct netdev_hw_addr *ha;
+       int i = 1;
+
+       memset(hash_table, 0, sizeof(hash_table));
+       netdev_for_each_mc_addr(ha, cp->dev) {
+               if (i <= CAS_MC_EXACT_MATCH_SIZE) {
+                       /* use the alternate mac address registers for the
+                        * first 15 multicast addresses
+                        */
+                       writel((ha->addr[4] << 8) | ha->addr[5],
+                              cp->regs + REG_MAC_ADDRN(i*3 + 0));
+                       writel((ha->addr[2] << 8) | ha->addr[3],
+                              cp->regs + REG_MAC_ADDRN(i*3 + 1));
+                       writel((ha->addr[0] << 8) | ha->addr[1],
+                              cp->regs + REG_MAC_ADDRN(i*3 + 2));
+                       i++;
+               }
+               else {
+                       /* use hw hash table for the next series of
+                        * multicast addresses
+                        */
+                       crc = ether_crc_le(ETH_ALEN, ha->addr);
+                       /* top 8 CRC bits select one of 256 filter bits */
+                       crc >>= 24;
+                       hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+               }
+       }
+       for (i = 0; i < 16; i++)
+               writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
+}
+
+/* Must be invoked under cp->lock.
+ * Configure the rx filter according to the interface flags and
+ * return the MAC_RX_CFG bits the caller should program.
+ */
+static u32 cas_setup_multicast(struct cas *cp)
+{
+       unsigned int flags = cp->dev->flags;
+       int i;
+
+       if (flags & IFF_PROMISC)
+               return MAC_RX_CFG_PROMISC_EN;
+
+       if (flags & IFF_ALLMULTI) {
+               /* accept all multicast: saturate the hash filter */
+               for (i = 0; i < 16; i++)
+                       writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
+       } else {
+               cas_process_mc_list(cp);
+       }
+       return MAC_RX_CFG_HASH_FILTER_EN;
+}
+
+/* must be invoked under cp->stat_lock[N_TX_RINGS] */
+static void cas_clear_mac_err(struct cas *cp)
+{
+       /* every MAC error/collision counter register to zero */
+       static const unsigned long err_regs[] = {
+               REG_MAC_COLL_NORMAL,  REG_MAC_COLL_FIRST,
+               REG_MAC_COLL_EXCESS,  REG_MAC_COLL_LATE,
+               REG_MAC_TIMER_DEFER,  REG_MAC_ATTEMPTS_PEAK,
+               REG_MAC_RECV_FRAME,   REG_MAC_LEN_ERR,
+               REG_MAC_ALIGN_ERR,    REG_MAC_FCS_ERR,
+               REG_MAC_RX_CODE_ERR,
+       };
+       unsigned int i;
+
+       for (i = 0; i < sizeof(err_regs) / sizeof(err_regs[0]); i++)
+               writel(0, cp->regs + err_regs[i]);
+}
+
+
+/* reset the MAC TX and RX blocks and poll (up to STOP_TRIES * 10us
+ * each) for the self-clearing reset bits to drop.
+ */
+static void cas_mac_reset(struct cas *cp)
+{
+       int i;
+
+       /* do both TX and RX reset */
+       writel(0x1, cp->regs + REG_MAC_TX_RESET);
+       writel(0x1, cp->regs + REG_MAC_RX_RESET);
+
+       /* wait for TX */
+       i = STOP_TRIES;
+       while (i-- > 0) {
+               if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
+                       break;
+               udelay(10);
+       }
+
+       /* wait for RX */
+       i = STOP_TRIES;
+       while (i-- > 0) {
+               if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
+                       break;
+               udelay(10);
+       }
+
+       /* either bit still set means the reset timed out */
+       if (readl(cp->regs + REG_MAC_TX_RESET) |
+           readl(cp->regs + REG_MAC_RX_RESET))
+               netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
+                          readl(cp->regs + REG_MAC_TX_RESET),
+                          readl(cp->regs + REG_MAC_RX_RESET),
+                          readl(cp->regs + REG_MAC_STATE_MACHINE));
+}
+
+
+/* Must be invoked under cp->lock.
+ * Reset the MAC and program its static configuration: arbitration
+ * weight, inter-packet gaps, frame-size limits, station address and
+ * address filters, and the MAC interrupt masks.
+ */
+static void cas_init_mac(struct cas *cp)
+{
+       unsigned char *e = &cp->dev->dev_addr[0];
+       int i;
+       cas_mac_reset(cp);
+
+       /* setup core arbitration weight register */
+       writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
+
+       /* XXX Use pci_dma_burst_advice() */
+#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
+       /* set the infinite burst register for chips that don't have
+        * pci issues.
+        */
+       if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
+               writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
+#endif
+
+       writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
+
+       /* inter-packet gap timings */
+       writel(0x00, cp->regs + REG_MAC_IPG0);
+       writel(0x08, cp->regs + REG_MAC_IPG1);
+       writel(0x04, cp->regs + REG_MAC_IPG2);
+
+       /* change later for 802.3z */
+       writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
+
+       /* min frame + FCS */
+       writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
+
+       /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
+        * specify the maximum frame size to prevent RX tag errors on
+        * oversized frames.
+        */
+       writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
+              CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
+                       (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
+              cp->regs + REG_MAC_FRAMESIZE_MAX);
+
+       /* NOTE: crc_size is used as a surrogate for half-duplex.
+        * workaround saturn half-duplex issue by increasing preamble
+        * size to 65 bytes.
+        */
+       if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
+               writel(0x41, cp->regs + REG_MAC_PA_SIZE);
+       else
+               writel(0x07, cp->regs + REG_MAC_PA_SIZE);
+       writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
+       writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
+       writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
+
+       /* seed the random backoff generator with the low 10 bits of
+        * the station address
+        */
+       writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
+
+       writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
+       writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
+       writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
+       writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
+       writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
+
+       /* setup mac address in perfect filter array: clear all 45
+        * entries first
+        */
+       for (i = 0; i < 45; i++)
+               writel(0x0, cp->regs + REG_MAC_ADDRN(i));
+
+       /* station address, 16 bits per register, low word first */
+       writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
+       writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
+       writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
+
+       /* 802.3x flow-control address 01:80:c2:00:00:01 */
+       writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
+       writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
+       writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
+
+       cp->mac_rx_cfg = cas_setup_multicast(cp);
+
+       /* zero the MAC error counters under the stats lock */
+       spin_lock(&cp->stat_lock[N_TX_RINGS]);
+       cas_clear_mac_err(cp);
+       spin_unlock(&cp->stat_lock[N_TX_RINGS]);
+
+       /* Setup MAC interrupts.  We want to get all of the interesting
+        * counter expiration events, but we do not want to hear about
+        * normal rx/tx as the DMA engine tells us that.
+        */
+       writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
+       writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
+
+       /* Don't enable even the PAUSE interrupts for now, we
+        * make no use of those events other than to record them.
+        */
+       writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
+}
+
+/* Must be invoked under cp->lock. */
+static void cas_init_pause_thresholds(struct cas *cp)
+{
+       /* Derive the PAUSE on/off watermarks from the RX FIFO size.
+        * Programming the OFF threshold at the full FIFO size
+        * effectively disables PAUSE generation.
+        */
+       if (cp->rx_fifo_size <= (2 * 1024)) {
+               /* tiny FIFO: disable PAUSE generation altogether */
+               cp->rx_pause_on  = cp->rx_fifo_size;
+               cp->rx_pause_off = cp->rx_fifo_size;
+       } else {
+               /* largest frame, rounded up to a 64-byte boundary */
+               int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
+
+               if (cp->rx_fifo_size < max_frame * 3) {
+                       /* FIFO can't hold three max frames: fixed values */
+                       cp->rx_pause_off = 7104;
+                       cp->rx_pause_on  = 960;
+               } else {
+                       /* leave two max frames of headroom above OFF and
+                        * one more frame between OFF and ON
+                        */
+                       cp->rx_pause_off = cp->rx_fifo_size - max_frame * 2;
+                       cp->rx_pause_on  = cp->rx_pause_off - max_frame;
+               }
+       }
+}
+
+/* Compare the NUL-terminated string @str (terminator included)
+ * against bytes read from PROM space at @p.  Returns 1 on match,
+ * 0 otherwise.
+ */
+static int cas_vpd_match(const void __iomem *p, const char *str)
+{
+       int n = strlen(str) + 1; /* include the trailing NUL */
+       int idx = 0;
+
+       while (idx < n) {
+               if (readb(p + idx) != str[idx])
+                       return 0;
+               idx++;
+       }
+       return 1;
+}
+
+
+/* get the mac address by reading the vpd information in the rom.
+ * also get the phy type and determine if there's an entropy generator.
+ * NOTE: this is a bit convoluted for the following reasons:
+ *  1) vpd info has order-dependent mac addresses for multinic cards
+ *  2) the only way to determine the nic order is to use the slot
+ *     number.
+ *  3) fiber cards don't have bridges, so their slot numbers don't
+ *     mean anything.
+ *  4) we don't actually know we have a fiber card until after
+ *     the mac addresses are parsed.
+ */
+static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
+                           const int offset)
+{
+       void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
+       void __iomem *base, *kstart;
+       int i, len;
+       int found = 0;
+#define VPD_FOUND_MAC        0x01
+#define VPD_FOUND_PHY        0x02
+
+       int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
+       int mac_off  = 0;
+
+#if defined(CONFIG_SPARC)
+       const unsigned char *addr;
+#endif
+
+       /* give us access to the PROM */
+       writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
+              cp->regs + REG_BIM_LOCAL_DEV_EN);
+
+       /* check for an expansion rom (0x55, 0xaa signature) */
+       if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
+               goto use_random_mac_addr;
+
+       /* search for beginning of vpd */
+       base = NULL;
+       for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
+               /* check for the PCI data structure signature "PCIR" */
+               if ((readb(p + i + 0) == 0x50) &&
+                   (readb(p + i + 1) == 0x43) &&
+                   (readb(p + i + 2) == 0x49) &&
+                   (readb(p + i + 3) == 0x52)) {
+                       /* bytes 8-9: little-endian offset to the VPD */
+                       base = p + (readb(p + i + 8) |
+                                   (readb(p + i + 9) << 8));
+                       break;
+               }
+       }
+
+       /* 0x82 = VPD large-resource identifier-string tag */
+       if (!base || (readb(base) != 0x82))
+               goto use_random_mac_addr;
+
+       /* skip the identifier string (little-endian length at +1) */
+       i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
+       while (i < EXPANSION_ROM_SIZE) {
+               if (readb(base + i) != 0x90) /* 0x90 = VPD-R tag; else no vpd found */
+                       goto use_random_mac_addr;
+
+               /* found a vpd field */
+               len = readb(base + i + 1) | (readb(base + i + 2) << 8);
+
+               /* extract keywords */
+               kstart = base + i + 3;
+               p = kstart;
+               while ((p - kstart) < len) {
+                       int klen = readb(p + 2);
+                       int j;
+                       char type;
+
+                       p += 3;
+
+                       /* look for the following things:
+                        * -- correct length == 29
+                        * 3 (type) + 2 (size) +
+                        * 18 (strlen("local-mac-address") + 1) +
+                        * 6 (mac addr)
+                        * -- VPD Instance 'I'
+                        * -- VPD Type Bytes 'B'
+                        * -- VPD data length == 6
+                        * -- property string == local-mac-address
+                        *
+                        * -- correct length == 24
+                        * 3 (type) + 2 (size) +
+                        * 12 (strlen("entropy-dev") + 1) +
+                        * 7 (strlen("vms110") + 1)
+                        * -- VPD Instance 'I'
+                        * -- VPD Type String 'B'
+                        * -- VPD data length == 7
+                        * -- property string == entropy-dev
+                        *
+                        * -- correct length == 18
+                        * 3 (type) + 2 (size) +
+                        * 9 (strlen("phy-type") + 1) +
+                        * 4 (strlen("pcs") + 1)
+                        * -- VPD Instance 'I'
+                        * -- VPD Type String 'S'
+                        * -- VPD data length == 4
+                        * -- property string == phy-type
+                        *
+                        * -- correct length == 23
+                        * 3 (type) + 2 (size) +
+                        * 14 (strlen("phy-interface") + 1) +
+                        * 4 (strlen("pcs") + 1)
+                        * -- VPD Instance 'I'
+                        * -- VPD Type String 'S'
+                        * -- VPD data length == 4
+                        * -- property string == phy-interface
+                        */
+                       if (readb(p) != 'I')
+                               goto next;
+
+                       /* finally, check string and length */
+                       type = readb(p + 3);
+                       if (type == 'B') {
+                               if ((klen == 29) && readb(p + 4) == 6 &&
+                                   cas_vpd_match(p + 5,
+                                                 "local-mac-address")) {
+                                       /* multinic: only take the entry
+                                        * matching our slot offset
+                                        */
+                                       if (mac_off++ > offset)
+                                               goto next;
+
+                                       /* set mac address */
+                                       for (j = 0; j < 6; j++)
+                                               dev_addr[j] =
+                                                       readb(p + 23 + j);
+                                       goto found_mac;
+                               }
+                       }
+
+                       if (type != 'S')
+                               goto next;
+
+#ifdef USE_ENTROPY_DEV
+                       if ((klen == 24) &&
+                           cas_vpd_match(p + 5, "entropy-dev") &&
+                           cas_vpd_match(p + 17, "vms110")) {
+                               cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
+                               goto next;
+                       }
+#endif
+
+                       /* only the first phy property wins */
+                       if (found & VPD_FOUND_PHY)
+                               goto next;
+
+                       if ((klen == 18) && readb(p + 4) == 4 &&
+                           cas_vpd_match(p + 5, "phy-type")) {
+                               if (cas_vpd_match(p + 14, "pcs")) {
+                                       phy_type = CAS_PHY_SERDES;
+                                       goto found_phy;
+                               }
+                       }
+
+                       if ((klen == 23) && readb(p + 4) == 4 &&
+                           cas_vpd_match(p + 5, "phy-interface")) {
+                               if (cas_vpd_match(p + 19, "pcs")) {
+                                       phy_type = CAS_PHY_SERDES;
+                                       goto found_phy;
+                               }
+                       }
+found_mac:
+                       found |= VPD_FOUND_MAC;
+                       goto next;
+
+found_phy:
+                       found |= VPD_FOUND_PHY;
+
+next:
+                       p += klen;
+               }
+               i += len + 3;
+       }
+
+use_random_mac_addr:
+       if (found & VPD_FOUND_MAC)
+               goto done;
+
+#if defined(CONFIG_SPARC)
+       /* fall back to the OpenFirmware property on sparc */
+       addr = of_get_property(cp->of_node, "local-mac-address", NULL);
+       if (addr != NULL) {
+               memcpy(dev_addr, addr, 6);
+               goto done;
+       }
+#endif
+
+       /* Sun MAC prefix then 3 random bytes. */
+       pr_info("MAC address not found in ROM VPD\n");
+       dev_addr[0] = 0x08;
+       dev_addr[1] = 0x00;
+       dev_addr[2] = 0x20;
+       get_random_bytes(dev_addr + 3, 3);
+
+done:
+       /* hide the PROM again */
+       writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
+       return phy_type;
+}
+
+/* check pci invariants: derive cp->cas_flags (register layout,
+ * target-abort workaround, HW-checksum availability, Saturn) from
+ * the PCI vendor/device IDs and the chip revision.
+ */
+static void cas_check_pci_invariants(struct cas *cp)
+{
+       struct pci_dev *pdev = cp->pdev;
+
+       cp->cas_flags = 0;
+       if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
+           (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
+               if (pdev->revision >= CAS_ID_REVPLUS)
+                       cp->cas_flags |= CAS_FLAG_REG_PLUS;
+               if (pdev->revision < CAS_ID_REVPLUS02u)
+                       cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
+
+               /* Original Cassini supports HW CSUM, but it's not
+                * enabled by default as it can trigger TX hangs.
+                */
+               if (pdev->revision < CAS_ID_REV2)
+                       cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
+       } else {
+               /* Only sun has original cassini chips.  */
+               cp->cas_flags |= CAS_FLAG_REG_PLUS;
+
+               /* We use a flag because the same phy might be externally
+                * connected.
+                */
+               if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
+                   (pdev->device == PCI_DEVICE_ID_NS_SATURN))
+                       cp->cas_flags |= CAS_FLAG_SATURN;
+       }
+}
+
+
+/* Determine the RX page order/size, the FIFO sizes, and the attached
+ * PHY (SERDES vs MII, and the MII address and ID).  Returns 0 on
+ * success, or -1 when no MII PHY responds.  Sets CAS_FLAG_1000MB_CAP
+ * when gigabit is possible.
+ */
+static int cas_check_invariants(struct cas *cp)
+{
+       struct pci_dev *pdev = cp->pdev;
+       u32 cfg;
+       int i;
+
+       /* get page size for rx buffers. */
+       cp->page_order = 0;
+#ifdef USE_PAGE_ORDER
+       if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
+               /* see if we can allocate larger pages */
+               struct page *page = alloc_pages(GFP_ATOMIC,
+                                               CAS_JUMBO_PAGE_SHIFT -
+                                               PAGE_SHIFT);
+               if (page) {
+                       __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
+                       cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
+               } else {
+                       printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
+               }
+       }
+#endif
+       cp->page_size = (PAGE_SIZE << cp->page_order);
+
+       /* Fetch the FIFO configurations. */
+       cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
+       cp->rx_fifo_size = RX_FIFO_SIZE;
+
+       /* finish phy determination. MDIO1 takes precedence over MDIO0 if
+        * they're both connected.
+        */
+       cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
+                                       PCI_SLOT(pdev->devfn));
+       if (cp->phy_type & CAS_PHY_SERDES) {
+               cp->cas_flags |= CAS_FLAG_1000MB_CAP;
+               return 0; /* no more checking needed */
+       }
+
+       /* MII */
+       cfg = readl(cp->regs + REG_MIF_CFG);
+       if (cfg & MIF_CFG_MDIO_1) {
+               cp->phy_type = CAS_PHY_MII_MDIO1;
+       } else if (cfg & MIF_CFG_MDIO_0) {
+               cp->phy_type = CAS_PHY_MII_MDIO0;
+       }
+
+       cas_mif_poll(cp, 0);
+       writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
+
+       /* probe all 32 MII addresses, retrying each a few times */
+       for (i = 0; i < 32; i++) {
+               u32 phy_id;
+               int j;
+
+               for (j = 0; j < 3; j++) {
+                       cp->phy_addr = i;
+                       phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
+                       phy_id |= cas_phy_read(cp, MII_PHYSID2);
+                       if (phy_id && (phy_id != 0xFFFFFFFF)) {
+                               cp->phy_id = phy_id;
+                               goto done;
+                       }
+               }
+       }
+       pr_err("MII phy did not respond [%08x]\n",
+              readl(cp->regs + REG_MIF_STATE_MACHINE));
+       return -1;
+
+done:
+       /* see if we can do gigabit */
+       cfg = cas_phy_read(cp, MII_BMSR);
+       if ((cfg & CAS_BMSR_1000_EXTEND) &&
+           cas_phy_read(cp, CAS_MII_1000_EXTEND))
+               cp->cas_flags |= CAS_FLAG_1000MB_CAP;
+       return 0;
+}
+
+/* Must be invoked under cp->lock.
+ * Enable the TX/RX DMA engines and the MAC, verify that the MAC
+ * enable bits actually stuck, then unmask interrupts and prime the
+ * RX kick and completion-tail registers.
+ */
+static inline void cas_start_dma(struct cas *cp)
+{
+       int i;
+       u32 val;
+       int txfailed = 0;
+
+       /* enable dma */
+       val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
+       writel(val, cp->regs + REG_TX_CFG);
+       val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
+       writel(val, cp->regs + REG_RX_CFG);
+
+       /* enable the mac */
+       val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
+       writel(val, cp->regs + REG_MAC_TX_CFG);
+       val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
+       writel(val, cp->regs + REG_MAC_RX_CFG);
+
+       /* poll for the TX enable to take effect */
+       i = STOP_TRIES;
+       while (i-- > 0) {
+               val = readl(cp->regs + REG_MAC_TX_CFG);
+               if ((val & MAC_TX_CFG_EN))
+                       break;
+               udelay(10);
+       }
+       /* i is -1 only when the poll loop ran out */
+       if (i < 0) txfailed = 1;
+       i = STOP_TRIES;
+       while (i-- > 0) {
+               val = readl(cp->regs + REG_MAC_RX_CFG);
+               if ((val & MAC_RX_CFG_EN)) {
+                       if (txfailed) {
+                               netdev_err(cp->dev,
+                                          "enabling mac failed [tx:%08x:%08x]\n",
+                                          readl(cp->regs + REG_MIF_STATE_MACHINE),
+                                          readl(cp->regs + REG_MAC_STATE_MACHINE));
+                       }
+                       goto enable_rx_done;
+               }
+               udelay(10);
+       }
+       netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
+                  (txfailed ? "tx,rx" : "rx"),
+                  readl(cp->regs + REG_MIF_STATE_MACHINE),
+                  readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+enable_rx_done:
+       cas_unmask_intr(cp); /* enable interrupts */
+       writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
+       writel(0, cp->regs + REG_RX_COMP_TAIL);
+
+       /* REG_PLUS parts have per-ring kick/tail registers */
+       if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
+               if (N_RX_DESC_RINGS > 1)
+                       writel(RX_DESC_RINGN_SIZE(1) - 4,
+                              cp->regs + REG_PLUS_RX_KICK1);
+
+               for (i = 1; i < N_RX_COMP_RINGS; i++)
+                       writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
+       }
+}
+
+/* Must be invoked under cp->lock.
+ * Decode the PCS link-partner ability register into duplex, speed
+ * (always 1000 for PCS) and pause flags (bit 0 = symmetric,
+ * bit 4 = asymmetric).
+ */
+static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
+                                  int *pause)
+{
+       u32 val = readl(cp->regs + REG_PCS_MII_LPA);
+       *fd     = (val & PCS_MII_LPA_FD) ? 1 : 0;
+       *pause  = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
+       if (val & PCS_MII_LPA_ASYM_PAUSE)
+               *pause |= 0x10;
+       *spd = 1000;
+}
+
+/* Must be invoked under cp->lock.
+ * Read the link partner's abilities via the (G)MII registers and
+ * report duplex, speed (10/100/1000) and pause flags
+ * (bit 0 = symmetric, bit 4 = asymmetric).
+ */
+static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
+                                  int *pause)
+{
+       u32 lpa;
+
+       /* defaults: 10 Mbps, half duplex, no pause */
+       *fd = 0;
+       *spd = 10;
+       *pause = 0;
+
+       /* use GMII registers */
+       lpa = cas_phy_read(cp, MII_LPA);
+       if (lpa & CAS_LPA_PAUSE)
+               *pause = 0x01;
+       if (lpa & CAS_LPA_ASYM_PAUSE)
+               *pause |= 0x10;
+       if (lpa & LPA_DUPLEX)
+               *fd = 1;
+       if (lpa & LPA_100)
+               *spd = 100;
+
+       /* consult 1000BASE-T status only on gigabit-capable parts */
+       if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+               lpa = cas_phy_read(cp, CAS_MII_1000_STATUS);
+               if (lpa & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
+                       *spd = 1000;
+               if (lpa & CAS_LPA_1000FULL)
+                       *fd = 1;
+       }
+}
+
+/* A link-up condition has occurred, initialize and enable the
+ * rest of the chip.
+ *
+ * Must be invoked under cp->lock.
+ */
+static void cas_set_link_modes(struct cas *cp)
+{
+       u32 val;
+       /* pause flags: bit 0 = symmetric, bit 4 = asymmetric */
+       int full_duplex, speed, pause;
+
+       full_duplex = 0;
+       speed = 10;
+       pause = 0;
+
+       if (CAS_PHY_MII(cp->phy_type)) {
+               cas_mif_poll(cp, 0);
+               val = cas_phy_read(cp, MII_BMCR);
+               if (val & BMCR_ANENABLE) {
+                       cas_read_mii_link_mode(cp, &full_duplex, &speed,
+                                              &pause);
+               } else {
+                       /* forced mode: decode BMCR directly */
+                       if (val & BMCR_FULLDPLX)
+                               full_duplex = 1;
+
+                       if (val & BMCR_SPEED100)
+                               speed = 100;
+                       else if (val & CAS_BMCR_SPEED1000)
+                               speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
+                                       1000 : 100;
+               }
+               cas_mif_poll(cp, 1);
+
+       } else {
+               val = readl(cp->regs + REG_PCS_MII_CTRL);
+               cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
+               if ((val & PCS_MII_AUTONEG_EN) == 0) {
+                       if (val & PCS_MII_CTRL_DUPLEX)
+                               full_duplex = 1;
+               }
+       }
+
+       netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
+                  speed, full_duplex ? "full" : "half");
+
+       /* configure the XIF block for the negotiated mode */
+       val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
+       if (CAS_PHY_MII(cp->phy_type)) {
+               val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
+               if (!full_duplex)
+                       val |= MAC_XIF_DISABLE_ECHO;
+       }
+       if (full_duplex)
+               val |= MAC_XIF_FDPLX_LED;
+       if (speed == 1000)
+               val |= MAC_XIF_GMII_MODE;
+       writel(val, cp->regs + REG_MAC_XIF_CFG);
+
+       /* deal with carrier and collision detect. */
+       val = MAC_TX_CFG_IPG_EN;
+       if (full_duplex) {
+               val |= MAC_TX_CFG_IGNORE_CARRIER;
+               val |= MAC_TX_CFG_IGNORE_COLL;
+       } else {
+#ifndef USE_CSMA_CD_PROTO
+               val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
+               val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
+#endif
+       }
+       /* val now set up for REG_MAC_TX_CFG */
+
+       /* If gigabit and half-duplex, enable carrier extension
+        * mode.  increase slot time to 512 bytes as well.
+        * else, disable it and make sure slot time is 64 bytes.
+        * also activate checksum bug workaround
+        */
+       if ((speed == 1000) && !full_duplex) {
+               writel(val | MAC_TX_CFG_CARRIER_EXTEND,
+                      cp->regs + REG_MAC_TX_CFG);
+
+               val = readl(cp->regs + REG_MAC_RX_CFG);
+               val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
+               writel(val | MAC_RX_CFG_CARRIER_EXTEND,
+                      cp->regs + REG_MAC_RX_CFG);
+
+               writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
+
+               cp->crc_size = 4;
+               /* minimum size gigabit frame at half duplex */
+               cp->min_frame_size = CAS_1000MB_MIN_FRAME;
+
+       } else {
+               writel(val, cp->regs + REG_MAC_TX_CFG);
+
+               /* checksum bug workaround. don't strip FCS when in
+                * half-duplex mode
+                */
+               val = readl(cp->regs + REG_MAC_RX_CFG);
+               if (full_duplex) {
+                       val |= MAC_RX_CFG_STRIP_FCS;
+                       cp->crc_size = 0;
+                       cp->min_frame_size = CAS_MIN_MTU;
+               } else {
+                       val &= ~MAC_RX_CFG_STRIP_FCS;
+                       cp->crc_size = 4;
+                       cp->min_frame_size = CAS_MIN_FRAME;
+               }
+               writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
+                      cp->regs + REG_MAC_RX_CFG);
+               writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
+       }
+
+       if (netif_msg_link(cp)) {
+               if (pause & 0x01) {
+                       netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+                                   cp->rx_fifo_size,
+                                   cp->rx_pause_off,
+                                   cp->rx_pause_on);
+               } else if (pause & 0x10) {
+                       netdev_info(cp->dev, "TX pause enabled\n");
+               } else {
+                       netdev_info(cp->dev, "Pause is disabled\n");
+               }
+       }
+
+       val = readl(cp->regs + REG_MAC_CTRL_CFG);
+       val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
+       if (pause) { /* symmetric or asymmetric pause */
+               val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
+               if (pause & 0x01) { /* symmetric pause */
+                       val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
+               }
+       }
+       writel(val, cp->regs + REG_MAC_CTRL_CFG);
+       cas_start_dma(cp);
+}
+
+/* Must be invoked under cp->lock.
+ * Bring the (already reset) chip to an operational state: program
+ * pause thresholds, MAC and DMA, then either restart autonegotiation
+ * or re-apply the current link modes.
+ */
+static void cas_init_hw(struct cas *cp, int restart_link)
+{
+       if (restart_link)
+               cas_phy_init(cp);
+
+       cas_init_pause_thresholds(cp);
+       cas_init_mac(cp);
+       cas_init_dma(cp);
+
+       if (restart_link) {
+               /* Default aneg parameters */
+               cp->timer_ticks = 0;
+               cas_begin_auto_negotiation(cp, NULL);
+       } else if (cp->lstate == link_up) {
+               cas_set_link_modes(cp);
+               netif_carrier_on(cp->dev);
+       }
+}
+
+/* Must be invoked under cp->lock. on earlier cassini boards,
+ * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
+ * let it settle out, and then restore pci state.
+ */
+static void cas_hard_reset(struct cas *cp)
+{
+       /* pulse SOFT_0 (wired to PCI reset on early boards) */
+       writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
+       udelay(20);
+       /* config space was clobbered by the reset; restore it */
+       pci_restore_state(cp->pdev);
+}
+
+
+/* Issue a global software reset and wait for it to complete, then
+ * enable BIM error interrupts, unmask the handled PCI error bits and
+ * default the datapath to MII.  @blkflag (PCS only) preserves the
+ * last autonegotiation results across the reset.
+ */
+static void cas_global_reset(struct cas *cp, int blkflag)
+{
+       int limit;
+
+       /* issue a global reset. don't use RSTOUT. */
+       if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
+               /* For PCS, when the blkflag is set, we should set the
+                * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of
+                * the last autonegotiation from being cleared.  We'll
+                * need some special handling if the chip is set into a
+                * loopback mode.
+                */
+               writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
+                      cp->regs + REG_SW_RESET);
+       } else {
+               writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
+       }
+
+       /* need to wait at least 3ms before polling register */
+       mdelay(3);
+
+       limit = STOP_TRIES;
+       while (limit-- > 0) {
+               u32 val = readl(cp->regs + REG_SW_RESET);
+               if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
+                       goto done;
+               udelay(10);
+       }
+       netdev_err(cp->dev, "sw reset failed\n");
+
+done:
+       /* enable various BIM interrupts */
+       writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
+              BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
+
+       /* clear out pci error status mask for handled errors.
+        * we don't deal with DMA counter overflows as they happen
+        * all the time.
+        */
+       writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
+                              PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
+                              PCI_ERR_BIM_DMA_READ), cp->regs +
+              REG_PCI_ERR_STATUS_MASK);
+
+       /* set up for MII by default to address mac rx reset timeout
+        * issue
+        */
+       writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
+}
+
+/* Quiesce the chip: mask interrupts, perform the global and MAC
+ * resets, disable the DMA engines, reload the header-parser firmware
+ * and clear the MAC error counters.
+ */
+static void cas_reset(struct cas *cp, int blkflag)
+{
+       u32 val;
+
+       cas_mask_intr(cp);
+       cas_global_reset(cp, blkflag);
+       cas_mac_reset(cp);
+       cas_entropy_reset(cp);
+
+       /* disable dma engines. */
+       val = readl(cp->regs + REG_TX_CFG);
+       val &= ~TX_CFG_DMA_EN;
+       writel(val, cp->regs + REG_TX_CFG);
+
+       val = readl(cp->regs + REG_RX_CFG);
+       val &= ~RX_CFG_DMA_EN;
+       writel(val, cp->regs + REG_RX_CFG);
+
+       /* program header parser: use the standard firmware when the
+        * target-abort workaround applies or no alternate exists
+        */
+       if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
+           (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
+               cas_load_firmware(cp, CAS_HP_FIRMWARE);
+       } else {
+               cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
+       }
+
+       /* clear out error registers */
+       spin_lock(&cp->stat_lock[N_TX_RINGS]);
+       cas_clear_mac_err(cp);
+       spin_unlock(&cp->stat_lock[N_TX_RINGS]);
+}
+
+/* Shut down the chip, must be called with pm_mutex held.
+ * Stops the link timer, waits for any pending reset task to finish,
+ * then resets the chip and powers down the PHY on Saturn parts.
+ */
+static void cas_shutdown(struct cas *cp)
+{
+       unsigned long flags;
+
+       /* Make us not-running to avoid timers respawning */
+       cp->hw_running = 0;
+
+       del_timer_sync(&cp->link_timer);
+
+       /* Stop the reset task */
+#if 0
+       while (atomic_read(&cp->reset_task_pending_mtu) ||
+              atomic_read(&cp->reset_task_pending_spare) ||
+              atomic_read(&cp->reset_task_pending_all))
+               schedule();
+
+#else
+       while (atomic_read(&cp->reset_task_pending))
+               schedule();
+#endif
+       /* Actually stop the chip */
+       cas_lock_all_save(cp, flags);
+       cas_reset(cp, 0);
+       if (cp->cas_flags & CAS_FLAG_SATURN)
+               cas_phy_powerdown(cp);
+       cas_unlock_all_restore(cp, flags);
+}
+
+/* net_device ndo_change_mtu hook: validate the requested MTU and, if
+ * the interface is up, hand the reprogramming off to the reset task
+ * and wait for it to complete.
+ */
+static int cas_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct cas *cp = netdev_priv(dev);
+
+       if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
+               return -EINVAL;
+
+       dev->mtu = new_mtu;
+       if (!netif_running(dev) || !netif_device_present(dev))
+               return 0;
+
+       /* let the reset task handle it */
+#if 1
+       atomic_inc(&cp->reset_task_pending);
+       if ((cp->phy_type & CAS_PHY_SERDES)) {
+               /* serdes gets a full reset rather than an mtu-only one */
+               atomic_inc(&cp->reset_task_pending_all);
+       } else {
+               atomic_inc(&cp->reset_task_pending_mtu);
+       }
+       schedule_work(&cp->reset_task);
+#else
+       atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
+                  CAS_RESET_ALL : CAS_RESET_MTU);
+       pr_err("reset called in cas_change_mtu\n");
+       schedule_work(&cp->reset_task);
+#endif
+
+       /* wait for the deferred reset to actually run */
+       flush_work_sync(&cp->reset_task);
+       return 0;
+}
+
+/* Release every skb still attached to TX ring @ring: unmap each
+ * fragment's DMA buffer (skipping past the driver's "tiny" bounce
+ * buffers), free the skbs, and clear the tiny-buffer usage map.
+ * NOTE(review): no locking here - assumes the ring is quiesced.
+ */
+static void cas_clean_txd(struct cas *cp, int ring)
+{
+       struct cas_tx_desc *txd = cp->init_txds[ring];
+       struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
+       u64 daddr, dlen;
+       int i, size;
+
+       size = TX_DESC_RINGN_SIZE(ring);
+       for (i = 0; i < size; i++) {
+               int frag;
+
+               if (skbs[i] == NULL)
+                       continue;
+
+               skb = skbs[i];
+               skbs[i] = NULL;
+
+               for (frag = 0; frag <= skb_shinfo(skb)->nr_frags;  frag++) {
+                       int ent = i & (size - 1);
+
+                       /* first buffer is never a tiny buffer and so
+                        * needs to be unmapped.
+                        */
+                       daddr = le64_to_cpu(txd[ent].buffer);
+                       dlen  =  CAS_VAL(TX_DESC_BUFLEN,
+                                        le64_to_cpu(txd[ent].control));
+                       pci_unmap_page(cp->pdev, daddr, dlen,
+                                      PCI_DMA_TODEVICE);
+
+                       if (frag != skb_shinfo(skb)->nr_frags) {
+                               i++;
+
+                               /* next buffer might by a tiny buffer.
+                                * skip past it.
+                                */
+                               ent = i & (size - 1);
+                               if (cp->tx_tiny_use[ring][ent].used)
+                                       i++;
+                       }
+               }
+               dev_kfree_skb_any(skb);
+       }
+
+       /* zero out tiny buf usage */
+       memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
+}
+
+/* freed on close */
+static inline void cas_free_rx_desc(struct cas *cp, int ring)
+{
+       cas_page_t **page = cp->rx_pages[ring];
+       int n = RX_DESC_RINGN_SIZE(ring);
+       int idx;
+
+       /* release every page still attached to this RX descriptor ring */
+       for (idx = 0; idx < n; idx++) {
+               if (!page[idx])
+                       continue;
+               cas_page_free(cp, page[idx]);
+               page[idx] = NULL;
+       }
+}
+
+static void cas_free_rxds(struct cas *cp)
+{
+       int ring;
+
+       /* tear down the pages of every RX descriptor ring */
+       for (ring = 0; ring < N_RX_DESC_RINGS; ring++)
+               cas_free_rx_desc(cp, ring);
+}
+
+/* Must be invoked under cp->lock.
+ * Reset every TX ring (freeing queued skbs), clear the init block
+ * and recycle all RX descriptor/completion state.
+ */
+static void cas_clean_rings(struct cas *cp)
+{
+       int i;
+
+       /* need to clean all tx rings */
+       memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
+       memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
+       for (i = 0; i < N_TX_RINGS; i++)
+               cas_clean_txd(cp, i);
+
+       /* zero out init block */
+       memset(cp->init_block, 0, sizeof(struct cas_init_block));
+       cas_clean_rxds(cp);
+       cas_clean_rxcs(cp);
+}
+
+/* Allocate a fresh page for every slot of one RX descriptor ring
+ * (done on open).  Returns 0 on success or -1 on the first failed
+ * allocation; already-filled slots are left for the caller to free.
+ */
+static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
+{
+       cas_page_t **pages = cp->rx_pages[ring];
+       const int count = RX_DESC_RINGN_SIZE(ring);
+       int idx;
+
+       for (idx = 0; idx < count; idx++) {
+               pages[idx] = cas_page_alloc(cp, GFP_KERNEL);
+               if (pages[idx] == NULL)
+                       return -1;
+       }
+       return 0;
+}
+
+/* Populate all RX descriptor rings.  On any failure every ring is
+ * torn back down and -1 is returned.
+ */
+static int cas_alloc_rxds(struct cas *cp)
+{
+       int ring = 0;
+
+       while (ring < N_RX_DESC_RINGS) {
+               if (cas_alloc_rx_desc(cp, ring) < 0) {
+                       cas_free_rxds(cp);
+                       return -1;
+               }
+               ring++;
+       }
+       return 0;
+}
+
+/* Deferred-work handler that resets the chip after an error, a link
+ * transition, a spare-buffer shortage, or an MTU change.  The
+ * reset_task_pending_{all,spare,mtu} counters sampled at entry tell
+ * it why it was scheduled and how much work to acknowledge on exit.
+ */
+static void cas_reset_task(struct work_struct *work)
+{
+       struct cas *cp = container_of(work, struct cas, reset_task);
+#if 0
+       int pending = atomic_read(&cp->reset_task_pending);
+#else
+       int pending_all = atomic_read(&cp->reset_task_pending_all);
+       int pending_spare = atomic_read(&cp->reset_task_pending_spare);
+       int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
+
+       if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
+               /* We can have more tasks scheduled than actually
+                * needed.
+                */
+               atomic_dec(&cp->reset_task_pending);
+               return;
+       }
+#endif
+       /* The link went down, we reset the ring, but keep
+        * DMA stopped. Use this function for reset
+        * on error as well.
+        */
+       if (cp->hw_running) {
+               unsigned long flags;
+
+               /* Make sure we don't get interrupts or tx packets */
+               netif_device_detach(cp->dev);
+               cas_lock_all_save(cp, flags);
+
+               if (cp->opened) {
+                       /* We call cas_spare_recover when we call cas_open.
+                        * but we do not initialize the lists cas_spare_recover
+                        * uses until cas_open is called.
+                        */
+                       cas_spare_recover(cp, GFP_ATOMIC);
+               }
+#if 1
+               /* test => only pending_spare set */
+               if (!pending_all && !pending_mtu)
+                       goto done;
+#else
+               if (pending == CAS_RESET_SPARE)
+                       goto done;
+#endif
+               /* when pending == CAS_RESET_ALL, the following
+                * call to cas_init_hw will restart auto negotiation.
+                * Setting the second argument of cas_reset to
+                * !(pending == CAS_RESET_ALL) will set this argument
+                * to 1 (avoiding reinitializing the PHY for the normal
+                * PCS case) when auto negotiation is not restarted.
+                */
+#if 1
+               cas_reset(cp, !(pending_all > 0));
+               if (cp->opened)
+                       cas_clean_rings(cp);
+               cas_init_hw(cp, (pending_all > 0));
+#else
+               cas_reset(cp, !(pending == CAS_RESET_ALL));
+               if (cp->opened)
+                       cas_clean_rings(cp);
+               cas_init_hw(cp, pending == CAS_RESET_ALL);
+#endif
+
+done:
+               cas_unlock_all_restore(cp, flags);
+               netif_device_attach(cp->dev);
+       }
+#if 1
+       /* acknowledge only the work sampled at entry; requests that
+        * arrived since will be handled by an already-scheduled rerun
+        */
+       atomic_sub(pending_all, &cp->reset_task_pending_all);
+       atomic_sub(pending_spare, &cp->reset_task_pending_spare);
+       atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
+       atomic_dec(&cp->reset_task_pending);
+#else
+       atomic_set(&cp->reset_task_pending, 0);
+#endif
+}
+
+/* Periodic link watchdog (rescheduled every CAS_LINK_TIMEOUT).
+ * Gathers entropy, refills RX rings that ran dry, polls the MII or
+ * PCS link state, and checks for a wedged TX state machine; when a
+ * reset is required it schedules cas_reset_task with "all" scope.
+ */
+static void cas_link_timer(unsigned long data)
+{
+       struct cas *cp = (struct cas *) data;
+       int mask, pending = 0, reset = 0;
+       unsigned long flags;
+
+       if (link_transition_timeout != 0 &&
+           cp->link_transition_jiffies_valid &&
+           ((jiffies - cp->link_transition_jiffies) >
+             (link_transition_timeout))) {
+               /* One-second counter so link-down workaround doesn't
+                * cause resets to occur so fast as to fool the switch
+                * into thinking the link is down.
+                */
+               cp->link_transition_jiffies_valid = 0;
+       }
+
+       if (!cp->hw_running)
+               return;
+
+       spin_lock_irqsave(&cp->lock, flags);
+       cas_lock_tx(cp);
+       cas_entropy_gather(cp);
+
+       /* If the link task is still pending, we just
+        * reschedule the link timer
+        */
+#if 1
+       if (atomic_read(&cp->reset_task_pending_all) ||
+           atomic_read(&cp->reset_task_pending_spare) ||
+           atomic_read(&cp->reset_task_pending_mtu))
+               goto done;
+#else
+       if (atomic_read(&cp->reset_task_pending))
+               goto done;
+#endif
+
+       /* check for rx cleaning */
+       if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
+               int i, rmask;
+
+               for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
+                       rmask = CAS_FLAG_RXD_POST(i);
+                       if ((mask & rmask) == 0)
+                               continue;
+
+                       /* post_rxds will do a mod_timer */
+                       if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
+                               pending = 1;
+                               continue;
+                       }
+                       cp->cas_flags &= ~rmask;
+               }
+       }
+
+       if (CAS_PHY_MII(cp->phy_type)) {
+               u16 bmsr;
+               cas_mif_poll(cp, 0);
+               bmsr = cas_phy_read(cp, MII_BMSR);
+               /* WTZ: Solaris driver reads this twice, but that
+                * may be due to the PCS case and the use of a
+                * common implementation. Read it twice here to be
+                * safe.
+                */
+               bmsr = cas_phy_read(cp, MII_BMSR);
+               cas_mif_poll(cp, 1);
+               readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
+               reset = cas_mii_link_check(cp, bmsr);
+       } else {
+               reset = cas_pcs_link_check(cp);
+       }
+
+       if (reset)
+               goto done;
+
+       /* check for tx state machine confusion */
+       if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
+               u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
+               u32 wptr, rptr;
+               int tlm  = CAS_VAL(MAC_SM_TLM, val);
+
+               if (((tlm == 0x5) || (tlm == 0x3)) &&
+                   (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
+                       netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+                                    "tx err: MAC_STATE[%08x]\n", val);
+                       reset = 1;
+                       goto done;
+               }
+
+               /* packet count of zero with write != read pointer means
+                * the FIFO holds data but nothing is being transmitted
+                */
+               val  = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
+               wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
+               rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
+               if ((val == 0) && (wptr != rptr)) {
+                       netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+                                    "tx err: TX_FIFO[%08x:%08x:%08x]\n",
+                                    val, wptr, rptr);
+                       reset = 1;
+               }
+
+               if (reset)
+                       cas_hard_reset(cp);
+       }
+
+done:
+       if (reset) {
+#if 1
+               atomic_inc(&cp->reset_task_pending);
+               atomic_inc(&cp->reset_task_pending_all);
+               schedule_work(&cp->reset_task);
+#else
+               atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
+               pr_err("reset called in cas_link_timer\n");
+               schedule_work(&cp->reset_task);
+#endif
+       }
+
+       /* if a ring refill is still pending, cas_post_rxds_ringN has
+        * already re-armed the timer for us
+        */
+       if (!pending)
+               mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
+       cas_unlock_tx(cp);
+       spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+/* "tiny" buffers work around target-abort issues seen on older
+ * cassini revisions.  Release the per-ring DMA-coherent blocks.
+ */
+static void cas_tx_tiny_free(struct cas *cp)
+{
+       struct pci_dev *pdev = cp->pdev;
+       int ring;
+
+       for (ring = 0; ring < N_TX_RINGS; ring++) {
+               if (cp->tx_tiny_bufs[ring]) {
+                       pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
+                                           cp->tx_tiny_bufs[ring],
+                                           cp->tx_tiny_dvma[ring]);
+                       cp->tx_tiny_bufs[ring] = NULL;
+               }
+       }
+}
+
+/* Allocate one DMA-coherent tiny-buffer block per TX ring.  On the
+ * first failure all previously allocated blocks are freed and -1 is
+ * returned.
+ */
+static int cas_tx_tiny_alloc(struct cas *cp)
+{
+       struct pci_dev *pdev = cp->pdev;
+       int ring;
+
+       for (ring = 0; ring < N_TX_RINGS; ring++) {
+               cp->tx_tiny_bufs[ring] =
+                       pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
+                                            &cp->tx_tiny_dvma[ring]);
+               if (cp->tx_tiny_bufs[ring] == NULL) {
+                       cas_tx_tiny_free(cp);
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+
+/* ndo_open: bring the interface up.  Resets the chip if it was not
+ * already running, allocates tiny TX buffers, RX descriptor pages
+ * and spare buffers, requests the IRQ, then initializes the hw.
+ * pm_mutex serializes against close and suspend/resume.
+ */
+static int cas_open(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+       int hw_was_up, err;
+       unsigned long flags;
+
+       mutex_lock(&cp->pm_mutex);
+
+       hw_was_up = cp->hw_running;
+
+       /* The power-management mutex protects the hw_running
+        * etc. state so it is safe to do this bit without cp->lock
+        */
+       if (!cp->hw_running) {
+               /* Reset the chip */
+               cas_lock_all_save(cp, flags);
+               /* We set the second arg to cas_reset to zero
+                * because cas_init_hw below will have its second
+                * argument set to non-zero, which will force
+                * autonegotiation to start.
+                */
+               cas_reset(cp, 0);
+               cp->hw_running = 1;
+               cas_unlock_all_restore(cp, flags);
+       }
+
+       err = -ENOMEM;
+       if (cas_tx_tiny_alloc(cp) < 0)
+               goto err_unlock;
+
+       /* alloc rx descriptors */
+       if (cas_alloc_rxds(cp) < 0)
+               goto err_tx_tiny;
+
+       /* allocate spares */
+       cas_spare_init(cp);
+       cas_spare_recover(cp, GFP_KERNEL);
+
+       /* We can now request the interrupt as we know it's masked
+        * on the controller. cassini+ has up to 4 interrupts
+        * that can be used, but you need to do explicit pci interrupt
+        * mapping to expose them
+        */
+       if (request_irq(cp->pdev->irq, cas_interrupt,
+                       IRQF_SHARED, dev->name, (void *) dev)) {
+               netdev_err(cp->dev, "failed to request irq !\n");
+               err = -EAGAIN;
+               goto err_spare;
+       }
+
+#ifdef USE_NAPI
+       napi_enable(&cp->napi);
+#endif
+       /* init hw */
+       cas_lock_all_save(cp, flags);
+       cas_clean_rings(cp);
+       cas_init_hw(cp, !hw_was_up);
+       cp->opened = 1;
+       cas_unlock_all_restore(cp, flags);
+
+       netif_start_queue(dev);
+       mutex_unlock(&cp->pm_mutex);
+       return 0;
+
+err_spare:
+       cas_spare_free(cp);
+       cas_free_rxds(cp);
+err_tx_tiny:
+       cas_tx_tiny_free(cp);
+err_unlock:
+       mutex_unlock(&cp->pm_mutex);
+       return err;
+}
+
+/* ndo_stop: tear down in reverse order of cas_open.  The chip is
+ * reset (stopping DMA) under the all-rings lock before the IRQ and
+ * buffer resources are released.
+ */
+static int cas_close(struct net_device *dev)
+{
+       unsigned long flags;
+       struct cas *cp = netdev_priv(dev);
+
+#ifdef USE_NAPI
+       napi_disable(&cp->napi);
+#endif
+       /* Make sure we don't get distracted by suspend/resume */
+       mutex_lock(&cp->pm_mutex);
+
+       netif_stop_queue(dev);
+
+       /* Stop traffic, mark us closed */
+       cas_lock_all_save(cp, flags);
+       cp->opened = 0;
+       cas_reset(cp, 0);
+       cas_phy_init(cp);
+       cas_begin_auto_negotiation(cp, NULL);
+       cas_clean_rings(cp);
+       cas_unlock_all_restore(cp, flags);
+
+       free_irq(cp->pdev->irq, (void *) dev);
+       cas_spare_free(cp);
+       cas_free_rxds(cp);
+       cas_tx_tiny_free(cp);
+       mutex_unlock(&cp->pm_mutex);
+       return 0;
+}
+
+/* ETH_SS_STATS key strings.  The order here must match the order in
+ * which cas_get_ethtool_stats() fills its data array.
+ */
+static struct {
+       const char name[ETH_GSTRING_LEN];
+} ethtool_cassini_statnames[] = {
+       {"collisions"},
+       {"rx_bytes"},
+       {"rx_crc_errors"},
+       {"rx_dropped"},
+       {"rx_errors"},
+       {"rx_fifo_errors"},
+       {"rx_frame_errors"},
+       {"rx_length_errors"},
+       {"rx_over_errors"},
+       {"rx_packets"},
+       {"tx_aborted_errors"},
+       {"tx_bytes"},
+       {"tx_dropped"},
+       {"tx_errors"},
+       {"tx_fifo_errors"},
+       {"tx_packets"}
+};
+#define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
+
+/* Registers exposed through ethtool register dumps (see
+ * cas_read_regs).  Negative entries are MII PHY register numbers
+ * read via cas_phy_read; non-negative entries are chip offsets.
+ */
+static struct {
+       const int offsets;      /* neg. values for 2nd arg to cas_read_phy */
+} ethtool_register_table[] = {
+       {-MII_BMSR},
+       {-MII_BMCR},
+       {REG_CAWR},
+       {REG_INF_BURST},
+       {REG_BIM_CFG},
+       {REG_RX_CFG},
+       {REG_HP_CFG},
+       {REG_MAC_TX_CFG},
+       {REG_MAC_RX_CFG},
+       {REG_MAC_CTRL_CFG},
+       {REG_MAC_XIF_CFG},
+       {REG_MIF_CFG},
+       {REG_PCS_CFG},
+       {REG_SATURN_PCFG},
+       {REG_PCS_MII_STATUS},
+       {REG_PCS_STATE_MACHINE},
+       {REG_MAC_COLL_EXCESS},
+       {REG_MAC_COLL_LATE}
+};
+#define CAS_REG_LEN    ARRAY_SIZE(ethtool_register_table)
+#define CAS_MAX_REGS   (sizeof (u32)*CAS_REG_LEN)
+
+/* Copy @len 32-bit register values into @ptr, walking
+ * ethtool_register_table.  Negative table entries are PHY registers
+ * read through the MIF; the rest are direct chip reads.
+ */
+static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
+{
+       u8 *p;
+       int i;
+       unsigned long flags;
+
+       /* Defensively clamp to the table size: the original indexed
+        * ethtool_register_table[i] for i < len with no bound check,
+        * reading past the table if a caller ever passed an oversized
+        * len.  cas_get_regs_len() normally bounds it, but be safe.
+        */
+       if (len > (int)CAS_REG_LEN)
+               len = (int)CAS_REG_LEN;
+
+       spin_lock_irqsave(&cp->lock, flags);
+       for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
+               u16 hval;
+               u32 val;
+               if (ethtool_register_table[i].offsets < 0) {
+                       hval = cas_phy_read(cp,
+                                   -ethtool_register_table[i].offsets);
+                       val = hval;
+               } else {
+                       val= readl(cp->regs+ethtool_register_table[i].offsets);
+               }
+               memcpy(p, (u8 *)&val, sizeof(u32));
+       }
+       spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+/* ndo_get_stats: fold the hardware MAC error counters and every
+ * per-ring software counter set into the aggregate slot
+ * net_stats[N_TX_RINGS] and return a pointer to it.
+ */
+static struct net_device_stats *cas_get_stats(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+       struct net_device_stats *stats = cp->net_stats;
+       unsigned long flags;
+       int i;
+       unsigned long tmp;
+
+       /* we collate all of the stats into net_stats[N_TX_RING] */
+       if (!cp->hw_running)
+               return stats + N_TX_RINGS;
+
+       /* collect outstanding stats */
+       /* WTZ: the Cassini spec gives these as 16 bit counters but
+        * stored in 32-bit words.  Added a mask of 0xffff to be safe,
+        * in case the chip somehow puts any garbage in the other bits.
+        * Also, counter usage didn't seem to mach what Adrian did
+        * in the parts of the code that set these quantities. Made
+        * that consistent.
+        */
+       spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
+       stats[N_TX_RINGS].rx_crc_errors +=
+         readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
+       stats[N_TX_RINGS].rx_frame_errors +=
+               readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
+       stats[N_TX_RINGS].rx_length_errors +=
+               readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
+#if 1
+       /* excess + late collisions count both as aborts and as
+        * collisions; normal collisions are added on top
+        */
+       tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
+               (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
+       stats[N_TX_RINGS].tx_aborted_errors += tmp;
+       stats[N_TX_RINGS].collisions +=
+         tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
+#else
+       stats[N_TX_RINGS].tx_aborted_errors +=
+               readl(cp->regs + REG_MAC_COLL_EXCESS);
+       stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
+               readl(cp->regs + REG_MAC_COLL_LATE);
+#endif
+       cas_clear_mac_err(cp);
+
+       /* saved bits that are unique to ring 0 */
+       spin_lock(&cp->stat_lock[0]);
+       stats[N_TX_RINGS].collisions        += stats[0].collisions;
+       stats[N_TX_RINGS].rx_over_errors    += stats[0].rx_over_errors;
+       stats[N_TX_RINGS].rx_frame_errors   += stats[0].rx_frame_errors;
+       stats[N_TX_RINGS].rx_fifo_errors    += stats[0].rx_fifo_errors;
+       stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
+       stats[N_TX_RINGS].tx_fifo_errors    += stats[0].tx_fifo_errors;
+       spin_unlock(&cp->stat_lock[0]);
+
+       /* accumulate and then zero each per-ring counter set */
+       for (i = 0; i < N_TX_RINGS; i++) {
+               spin_lock(&cp->stat_lock[i]);
+               stats[N_TX_RINGS].rx_length_errors +=
+                       stats[i].rx_length_errors;
+               stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
+               stats[N_TX_RINGS].rx_packets    += stats[i].rx_packets;
+               stats[N_TX_RINGS].tx_packets    += stats[i].tx_packets;
+               stats[N_TX_RINGS].rx_bytes      += stats[i].rx_bytes;
+               stats[N_TX_RINGS].tx_bytes      += stats[i].tx_bytes;
+               stats[N_TX_RINGS].rx_errors     += stats[i].rx_errors;
+               stats[N_TX_RINGS].tx_errors     += stats[i].tx_errors;
+               stats[N_TX_RINGS].rx_dropped    += stats[i].rx_dropped;
+               stats[N_TX_RINGS].tx_dropped    += stats[i].tx_dropped;
+               memset(stats + i, 0, sizeof(struct net_device_stats));
+               spin_unlock(&cp->stat_lock[i]);
+       }
+       spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
+       return stats + N_TX_RINGS;
+}
+
+
+/* ndo_set_multicast_list: quiesce the RX MAC and hash filter, then
+ * program the promiscuous/hash configuration computed by
+ * cas_setup_multicast().
+ */
+static void cas_set_multicast(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+       u32 rxcfg, rxcfg_new;
+       unsigned long flags;
+       int limit = STOP_TRIES;
+
+       if (!cp->hw_running)
+               return;
+
+       spin_lock_irqsave(&cp->lock, flags);
+       rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
+
+       /* disable RX MAC and wait for completion */
+       writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+       while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
+               if (!limit--)
+                       break;
+               udelay(10);
+       }
+
+       /* disable hash filter and wait for completion */
+       limit = STOP_TRIES;
+       rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
+       writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
+       while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
+               if (!limit--)
+                       break;
+               udelay(10);
+       }
+
+       /* program hash filters */
+       cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
+       rxcfg |= rxcfg_new;
+       writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
+       spin_unlock_irqrestore(&cp->lock, flags);
+}
+
+/* ethtool get_drvinfo: report driver name/version, bus address, and
+ * the sizes advertised for register dumps and statistics.
+ */
+static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+       struct cas *cp = netdev_priv(dev);
+
+       /* Bound each copy by its own destination field; the old code
+        * used ETHTOOL_BUSINFO_LEN for every field (only correct for
+        * bus_info) and strncpy does not guarantee NUL termination.
+        * strlcpy always terminates.
+        */
+       strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+       info->fw_version[0] = '\0';
+       strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
+       info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
+               cp->casreg_len : CAS_MAX_REGS;
+       info->n_stats = CAS_NUM_STAT_KEYS;
+}
+
+/* ethtool get_settings: report supported/advertised modes and the
+ * current speed/duplex/autoneg state for either the MII (copper) or
+ * PCS (fibre) path.  Speed and duplex are forced to "unknown" when
+ * autoneg is on but the link is down.
+ */
+static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct cas *cp = netdev_priv(dev);
+       u16 bmcr;
+       int full_duplex, speed, pause;
+       unsigned long flags;
+       enum link_state linkstate = link_up;
+
+       cmd->advertising = 0;
+       cmd->supported = SUPPORTED_Autoneg;
+       if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
+               cmd->supported |= SUPPORTED_1000baseT_Full;
+               cmd->advertising |= ADVERTISED_1000baseT_Full;
+       }
+
+       /* Record PHY settings if HW is on. */
+       spin_lock_irqsave(&cp->lock, flags);
+       /* bmcr stays 0 if the hw is down, which reports as forced
+        * 10/half in the non-autoneg branch below
+        */
+       bmcr = 0;
+       linkstate = cp->lstate;
+       if (CAS_PHY_MII(cp->phy_type)) {
+               cmd->port = PORT_MII;
+               cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
+                       XCVR_INTERNAL : XCVR_EXTERNAL;
+               cmd->phy_address = cp->phy_addr;
+               cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
+                       ADVERTISED_10baseT_Half |
+                       ADVERTISED_10baseT_Full |
+                       ADVERTISED_100baseT_Half |
+                       ADVERTISED_100baseT_Full;
+
+               cmd->supported |=
+                       (SUPPORTED_10baseT_Half |
+                        SUPPORTED_10baseT_Full |
+                        SUPPORTED_100baseT_Half |
+                        SUPPORTED_100baseT_Full |
+                        SUPPORTED_TP | SUPPORTED_MII);
+
+               if (cp->hw_running) {
+                       cas_mif_poll(cp, 0);
+                       bmcr = cas_phy_read(cp, MII_BMCR);
+                       cas_read_mii_link_mode(cp, &full_duplex,
+                                              &speed, &pause);
+                       cas_mif_poll(cp, 1);
+               }
+
+       } else {
+               cmd->port = PORT_FIBRE;
+               cmd->transceiver = XCVR_INTERNAL;
+               cmd->phy_address = 0;
+               cmd->supported   |= SUPPORTED_FIBRE;
+               cmd->advertising |= ADVERTISED_FIBRE;
+
+               if (cp->hw_running) {
+                       /* pcs uses the same bits as mii */
+                       bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
+                       cas_read_pcs_link_mode(cp, &full_duplex,
+                                              &speed, &pause);
+               }
+       }
+       spin_unlock_irqrestore(&cp->lock, flags);
+
+       if (bmcr & BMCR_ANENABLE) {
+               cmd->advertising |= ADVERTISED_Autoneg;
+               cmd->autoneg = AUTONEG_ENABLE;
+               ethtool_cmd_speed_set(cmd, ((speed == 10) ?
+                                           SPEED_10 :
+                                           ((speed == 1000) ?
+                                            SPEED_1000 : SPEED_100)));
+               cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+       } else {
+               cmd->autoneg = AUTONEG_DISABLE;
+               ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
+                                           SPEED_1000 :
+                                           ((bmcr & BMCR_SPEED100) ?
+                                            SPEED_100 : SPEED_10)));
+               cmd->duplex =
+                       (bmcr & BMCR_FULLDPLX) ?
+                       DUPLEX_FULL : DUPLEX_HALF;
+       }
+       if (linkstate != link_up) {
+               /* Force these to "unknown" if the link is not up and
+                * autonogotiation in enabled. We can set the link
+                * speed to 0, but not cmd->duplex,
+                * because its legal values are 0 and 1.  Ethtool will
+                * print the value reported in parentheses after the
+                * word "Unknown" for unrecognized values.
+                *
+                * If in forced mode, we report the speed and duplex
+                * settings that we configured.
+                */
+               if (cp->link_cntl & BMCR_ANENABLE) {
+                       ethtool_cmd_speed_set(cmd, 0);
+                       cmd->duplex = 0xff;
+               } else {
+                       ethtool_cmd_speed_set(cmd, SPEED_10);
+                       if (cp->link_cntl & BMCR_SPEED100) {
+                               ethtool_cmd_speed_set(cmd, SPEED_100);
+                       } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
+                               ethtool_cmd_speed_set(cmd, SPEED_1000);
+                       }
+                       cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
+                               DUPLEX_FULL : DUPLEX_HALF;
+               }
+       }
+       return 0;
+}
+
+/* ethtool set_settings: validate the requested autoneg/speed/duplex
+ * combination, then restart the link state machine with it.
+ */
+static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct cas *cp = netdev_priv(dev);
+       unsigned long flags;
+       u32 speed = ethtool_cmd_speed(cmd);
+
+       /* autoneg must be explicitly on or off */
+       if (cmd->autoneg != AUTONEG_ENABLE &&
+           cmd->autoneg != AUTONEG_DISABLE)
+               return -EINVAL;
+
+       /* in forced mode both speed and duplex must be legal values */
+       if (cmd->autoneg == AUTONEG_DISABLE) {
+               int speed_ok = (speed == SPEED_1000 ||
+                               speed == SPEED_100 ||
+                               speed == SPEED_10);
+               int duplex_ok = (cmd->duplex == DUPLEX_HALF ||
+                                cmd->duplex == DUPLEX_FULL);
+
+               if (!speed_ok || !duplex_ok)
+                       return -EINVAL;
+       }
+
+       /* Apply settings and restart link process. */
+       spin_lock_irqsave(&cp->lock, flags);
+       cas_begin_auto_negotiation(cp, cmd);
+       spin_unlock_irqrestore(&cp->lock, flags);
+       return 0;
+}
+
+/* ethtool nway_reset: restart autonegotiation; fails with -EINVAL
+ * when autoneg is not currently enabled.
+ */
+static int cas_nway_reset(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+       unsigned long flags;
+
+       if (!(cp->link_cntl & BMCR_ANENABLE))
+               return -EINVAL;
+
+       /* kick the link state machine with the current settings */
+       spin_lock_irqsave(&cp->lock, flags);
+       cas_begin_auto_negotiation(cp, NULL);
+       spin_unlock_irqrestore(&cp->lock, flags);
+
+       return 0;
+}
+
+/* ethtool get_link: 1 while the driver's link state is link_up. */
+static u32 cas_get_link(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+
+       return (cp->lstate == link_up) ? 1 : 0;
+}
+
+/* ethtool get_msglevel: report the current netif message mask. */
+static u32 cas_get_msglevel(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+
+       return cp->msg_enable;
+}
+
+/* ethtool set_msglevel: install a new netif message mask. */
+static void cas_set_msglevel(struct net_device *dev, u32 level)
+{
+       struct cas *cp = netdev_priv(dev);
+
+       cp->msg_enable = level;
+}
+
+/* ethtool get_regs_len: dump size, capped at the register table. */
+static int cas_get_regs_len(struct net_device *dev)
+{
+       struct cas *cp = netdev_priv(dev);
+
+       if (cp->casreg_len < CAS_MAX_REGS)
+               return cp->casreg_len;
+       return CAS_MAX_REGS;
+}
+
+/* ethtool get_regs: dump version 0; regs->len is bounded by
+ * cas_get_regs_len() in the ethtool core.
+ */
+static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+                            void *p)
+{
+       struct cas *cp = netdev_priv(dev);
+       regs->version = 0;
+       /* cas_read_regs handles locks (cp->lock).  */
+       cas_read_regs(cp, p, regs->len / sizeof(u32));
+}
+
+/* ethtool get_sset_count: only the statistics set is implemented. */
+static int cas_get_sset_count(struct net_device *dev, int sset)
+{
+       if (sset == ETH_SS_STATS)
+               return CAS_NUM_STAT_KEYS;
+       return -EOPNOTSUPP;
+}
+
+/* ethtool get_strings: the statnames table is already laid out as
+ * contiguous ETH_GSTRING_LEN-wide entries, so one memcpy suffices.
+ */
+static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+       memcpy(data, &ethtool_cassini_statnames,
+              CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
+}
+
+/* ethtool get_ethtool_stats: the assignment order below must stay in
+ * sync with ethtool_cassini_statnames.
+ */
+static void cas_get_ethtool_stats(struct net_device *dev,
+                                     struct ethtool_stats *estats, u64 *data)
+{
+       struct cas *cp = netdev_priv(dev);
+       struct net_device_stats *stats = cas_get_stats(cp->dev);
+       int i = 0;
+       data[i++] = stats->collisions;
+       data[i++] = stats->rx_bytes;
+       data[i++] = stats->rx_crc_errors;
+       data[i++] = stats->rx_dropped;
+       data[i++] = stats->rx_errors;
+       data[i++] = stats->rx_fifo_errors;
+       data[i++] = stats->rx_frame_errors;
+       data[i++] = stats->rx_length_errors;
+       data[i++] = stats->rx_over_errors;
+       data[i++] = stats->rx_packets;
+       data[i++] = stats->tx_aborted_errors;
+       data[i++] = stats->tx_bytes;
+       data[i++] = stats->tx_dropped;
+       data[i++] = stats->tx_errors;
+       data[i++] = stats->tx_fifo_errors;
+       data[i++] = stats->tx_packets;
+       BUG_ON(i != CAS_NUM_STAT_KEYS);
+}
+
+/* ethtool operations implemented by this driver */
+static const struct ethtool_ops cas_ethtool_ops = {
+       .get_drvinfo            = cas_get_drvinfo,
+       .get_settings           = cas_get_settings,
+       .set_settings           = cas_set_settings,
+       .nway_reset             = cas_nway_reset,
+       .get_link               = cas_get_link,
+       .get_msglevel           = cas_get_msglevel,
+       .set_msglevel           = cas_set_msglevel,
+       .get_regs_len           = cas_get_regs_len,
+       .get_regs               = cas_get_regs,
+       .get_sset_count         = cas_get_sset_count,
+       .get_strings            = cas_get_strings,
+       .get_ethtool_stats      = cas_get_ethtool_stats,
+};
+
+/* ndo_do_ioctl: MII register access (SIOCGMIIPHY / SIOCGMIIREG /
+ * SIOCSMIIREG).  Anything else returns -EOPNOTSUPP.
+ */
+static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+       struct cas *cp = netdev_priv(dev);
+       struct mii_ioctl_data *data = if_mii(ifr);
+       unsigned long flags;
+       int rc = -EOPNOTSUPP;
+
+       /* Hold the PM mutex while doing ioctl's or we may collide
+        * with open/close and power management and oops.
+        */
+       mutex_lock(&cp->pm_mutex);
+       switch (cmd) {
+       case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
+               data->phy_id = cp->phy_addr;
+               /* Fallthrough... */
+
+       case SIOCGMIIREG:               /* Read MII PHY register. */
+               spin_lock_irqsave(&cp->lock, flags);
+               /* MIF polling is paused around the manual register access */
+               cas_mif_poll(cp, 0);
+               data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
+               cas_mif_poll(cp, 1);
+               spin_unlock_irqrestore(&cp->lock, flags);
+               rc = 0;
+               break;
+
+       case SIOCSMIIREG:               /* Write MII PHY register. */
+               spin_lock_irqsave(&cp->lock, flags);
+               cas_mif_poll(cp, 0);
+               rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
+               cas_mif_poll(cp, 1);
+               spin_unlock_irqrestore(&cp->lock, flags);
+               break;
+       default:
+               break;
+       }
+
+       mutex_unlock(&cp->pm_mutex);
+       return rc;
+}
+
+/* When this chip sits underneath an Intel 31154 bridge, it is the
+ * only subordinate device and we can tweak the bridge settings to
+ * reflect that fact: relax arbitration, extend grant times, enable
+ * aggressive prefetch, and max out the latency timer.  A no-op for
+ * any other (or no) parent bridge.
+ */
+static void __devinit cas_program_bridge(struct pci_dev *cas_pdev)
+{
+       struct pci_dev *pdev = cas_pdev->bus->self;
+       u32 val;
+
+       if (!pdev)
+               return;
+
+       /* only the Intel 31154 bridge gets this treatment */
+       if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
+               return;
+
+       /* Clear bit 10 (Bus Parking Control) in the Secondary
+        * Arbiter Control/Status Register which lives at offset
+        * 0x41.  Using a 32-bit word read/modify/write at 0x40
+        * is much simpler so that's how we do this.
+        * (bit 10 of the byte at 0x41 == bit 18 of the dword at 0x40,
+        * hence the 0x00040000 mask.)
+        */
+       pci_read_config_dword(pdev, 0x40, &val);
+       val &= ~0x00040000;
+       pci_write_config_dword(pdev, 0x40, val);
+
+       /* Max out the Multi-Transaction Timer settings since
+        * Cassini is the only device present.
+        *
+        * The register is 16-bit and lives at 0x50.  When the
+        * settings are enabled, it extends the GRANT# signal
+        * for a requestor after a transaction is complete.  This
+        * allows the next request to run without first needing
+        * to negotiate the GRANT# signal back.
+        *
+        * Bits 12:10 define the grant duration:
+        *
+        *      1       --      16 clocks
+        *      2       --      32 clocks
+        *      3       --      64 clocks
+        *      4       --      128 clocks
+        *      5       --      256 clocks
+        *
+        * All other values are illegal.
+        *
+        * Bits 09:00 define which REQ/GNT signal pairs get the
+        * GRANT# signal treatment.  We set them all.
+        */
+       pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
+
+       /* The Read Prefecth Policy register is 16-bit and sits at
+        * offset 0x52.  It enables a "smart" pre-fetch policy.  We
+        * enable it and max out all of the settings since only one
+        * device is sitting underneath and thus bandwidth sharing is
+        * not an issue.
+        *
+        * The register has several 3 bit fields, which indicates a
+        * multiplier applied to the base amount of prefetching the
+        * chip would do.  These fields are at:
+        *
+        *      15:13   ---     ReRead Primary Bus
+        *      12:10   ---     FirstRead Primary Bus
+        *      09:07   ---     ReRead Secondary Bus
+        *      06:04   ---     FirstRead Secondary Bus
+        *
+        * Bits 03:00 control which REQ/GNT pairs the prefetch settings
+        * get enabled on.  Bit 3 is a grouped enabler which controls
+        * all of the REQ/GNT pairs from [8:3].  Bits 2 to 0 control
+        * the individual REQ/GNT pairs [2:0].
+        */
+       pci_write_config_word(pdev, 0x52,
+                             (0x7 << 13) |
+                             (0x7 << 10) |
+                             (0x7 <<  7) |
+                             (0x7 <<  4) |
+                             (0xf <<  0));
+
+       /* Force cacheline size to 0x8 */
+       pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
+
+       /* Force latency timer to maximum setting so Cassini can
+        * sit on the bus as long as it likes.
+        */
+       pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
+}
+
+/* net_device callback table; wired up to the netdev in cas_init_one(). */
+static const struct net_device_ops cas_netdev_ops = {
+       .ndo_open               = cas_open,
+       .ndo_stop               = cas_close,
+       .ndo_start_xmit         = cas_start_xmit,
+       .ndo_get_stats          = cas_get_stats,
+       .ndo_set_multicast_list = cas_set_multicast,
+       .ndo_do_ioctl           = cas_ioctl,
+       .ndo_tx_timeout         = cas_tx_timeout,
+       .ndo_change_mtu         = cas_change_mtu,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = cas_netpoll,
+#endif
+};
+
+static int __devinit cas_init_one(struct pci_dev *pdev,
+                                 const struct pci_device_id *ent)
+{
+       static int cas_version_printed = 0;
+       unsigned long casreg_len;
+       struct net_device *dev;
+       struct cas *cp;
+       int i, err, pci_using_dac;
+       u16 pci_cmd;
+       u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
+
+       if (cas_version_printed++ == 0)
+               pr_info("%s", version);
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+               return err;
+       }
+
+       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+               dev_err(&pdev->dev, "Cannot find proper PCI device "
+                      "base address, aborting\n");
+               err = -ENODEV;
+               goto err_out_disable_pdev;
+       }
+
+       dev = alloc_etherdev(sizeof(*cp));
+       if (!dev) {
+               dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
+               err = -ENOMEM;
+               goto err_out_disable_pdev;
+       }
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       err = pci_request_regions(pdev, dev->name);
+       if (err) {
+               dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+               goto err_out_free_netdev;
+       }
+       pci_set_master(pdev);
+
+       /* we must always turn on parity response or else parity
+        * doesn't get generated properly. disable SERR/PERR as well.
+        * in addition, we want to turn MWI on.
+        */
+       pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+       pci_cmd &= ~PCI_COMMAND_SERR;
+       pci_cmd |= PCI_COMMAND_PARITY;
+       pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+       if (pci_try_set_mwi(pdev))
+               pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
+
+       cas_program_bridge(pdev);
+
+       /*
+        * On some architectures, the default cache line size set
+        * by pci_try_set_mwi reduces perforamnce.  We have to increase
+        * it for this case.  To start, we'll print some configuration
+        * data.
+        */
+#if 1
+       pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+                            &orig_cacheline_size);
+       if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
+               cas_cacheline_size =
+                       (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
+                       CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
+               if (pci_write_config_byte(pdev,
+                                         PCI_CACHE_LINE_SIZE,
+                                         cas_cacheline_size)) {
+                       dev_err(&pdev->dev, "Could not set PCI cache "
+                              "line size\n");
+                       goto err_write_cacheline;
+               }
+       }
+#endif
+
+
+       /* Configure DMA attributes. */
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+               pci_using_dac = 1;
+               err = pci_set_consistent_dma_mask(pdev,
+                                                 DMA_BIT_MASK(64));
+               if (err < 0) {
+                       dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
+                              "for consistent allocations\n");
+                       goto err_out_free_res;
+               }
+
+       } else {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err) {
+                       dev_err(&pdev->dev, "No usable DMA configuration, "
+                              "aborting\n");
+                       goto err_out_free_res;
+               }
+               pci_using_dac = 0;
+       }
+
+       casreg_len = pci_resource_len(pdev, 0);
+
+       cp = netdev_priv(dev);
+       cp->pdev = pdev;
+#if 1
+       /* A value of 0 indicates we never explicitly set it */
+       cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
+#endif
+       cp->dev = dev;
+       cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
+         cassini_debug;
+
+#if defined(CONFIG_SPARC)
+       cp->of_node = pci_device_to_OF_node(pdev);
+#endif
+
+       cp->link_transition = LINK_TRANSITION_UNKNOWN;
+       cp->link_transition_jiffies_valid = 0;
+
+       spin_lock_init(&cp->lock);
+       spin_lock_init(&cp->rx_inuse_lock);
+       spin_lock_init(&cp->rx_spare_lock);
+       for (i = 0; i < N_TX_RINGS; i++) {
+               spin_lock_init(&cp->stat_lock[i]);
+               spin_lock_init(&cp->tx_lock[i]);
+       }
+       spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
+       mutex_init(&cp->pm_mutex);
+
+       init_timer(&cp->link_timer);
+       cp->link_timer.function = cas_link_timer;
+       cp->link_timer.data = (unsigned long) cp;
+
+#if 1
+       /* Just in case the implementation of atomic operations
+        * change so that an explicit initialization is necessary.
+        */
+       atomic_set(&cp->reset_task_pending, 0);
+       atomic_set(&cp->reset_task_pending_all, 0);
+       atomic_set(&cp->reset_task_pending_spare, 0);
+       atomic_set(&cp->reset_task_pending_mtu, 0);
+#endif
+       INIT_WORK(&cp->reset_task, cas_reset_task);
+
+       /* Default link parameters */
+       if (link_mode >= 0 && link_mode < 6)
+               cp->link_cntl = link_modes[link_mode];
+       else
+               cp->link_cntl = BMCR_ANENABLE;
+       cp->lstate = link_down;
+       cp->link_transition = LINK_TRANSITION_LINK_DOWN;
+       netif_carrier_off(cp->dev);
+       cp->timer_ticks = 0;
+
+       /* give us access to cassini registers */
+       cp->regs = pci_iomap(pdev, 0, casreg_len);
+       if (!cp->regs) {
+               dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+               goto err_out_free_res;
+       }
+       cp->casreg_len = casreg_len;
+
+       pci_save_state(pdev);
+       cas_check_pci_invariants(cp);
+       cas_hard_reset(cp);
+       cas_reset(cp, 0);
+       if (cas_check_invariants(cp))
+               goto err_out_iounmap;
+       if (cp->cas_flags & CAS_FLAG_SATURN)
+               if (cas_saturn_firmware_init(cp))
+                       goto err_out_iounmap;
+
+       cp->init_block = (struct cas_init_block *)
+               pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
+                                    &cp->block_dvma);
+       if (!cp->init_block) {
+               dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
+               goto err_out_iounmap;
+       }
+
+       for (i = 0; i < N_TX_RINGS; i++)
+               cp->init_txds[i] = cp->init_block->txds[i];
+
+       for (i = 0; i < N_RX_DESC_RINGS; i++)
+               cp->init_rxds[i] = cp->init_block->rxds[i];
+
+       for (i = 0; i < N_RX_COMP_RINGS; i++)
+               cp->init_rxcs[i] = cp->init_block->rxcs[i];
+
+       for (i = 0; i < N_RX_FLOWS; i++)
+               skb_queue_head_init(&cp->rx_flows[i]);
+
+       dev->netdev_ops = &cas_netdev_ops;
+       dev->ethtool_ops = &cas_ethtool_ops;
+       dev->watchdog_timeo = CAS_TX_TIMEOUT;
+
+#ifdef USE_NAPI
+       netif_napi_add(dev, &cp->napi, cas_poll, 64);
+#endif
+       dev->irq = pdev->irq;
+       dev->dma = 0;
+
+       /* Cassini features. */
+       if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
+               dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+
+       if (pci_using_dac)
+               dev->features |= NETIF_F_HIGHDMA;
+
+       if (register_netdev(dev)) {
+               dev_err(&pdev->dev, "Cannot register net device, aborting\n");
+               goto err_out_free_consistent;
+       }
+
+       i = readl(cp->regs + REG_BIM_CFG);
+       netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
+                   (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
+                   (i & BIM_CFG_32BIT) ? "32" : "64",
+                   (i & BIM_CFG_66MHZ) ? "66" : "33",
+                   (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
+                   dev->dev_addr);
+
+       pci_set_drvdata(pdev, dev);
+       cp->hw_running = 1;
+       cas_entropy_reset(cp);
+       cas_phy_init(cp);
+       cas_begin_auto_negotiation(cp, NULL);
+       return 0;
+
+err_out_free_consistent:
+       pci_free_consistent(pdev, sizeof(struct cas_init_block),
+                           cp->init_block, cp->block_dvma);
+
+err_out_iounmap:
+       mutex_lock(&cp->pm_mutex);
+       if (cp->hw_running)
+               cas_shutdown(cp);
+       mutex_unlock(&cp->pm_mutex);
+
+       pci_iounmap(pdev, cp->regs);
+
+
+err_out_free_res:
+       pci_release_regions(pdev);
+
+err_write_cacheline:
+       /* Try to restore it in case the error occurred after we
+        * set it.
+        */
+       pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
+
+err_out_free_netdev:
+       free_netdev(dev);
+
+err_out_disable_pdev:
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+       return -ENODEV;
+}
+
+static void __devexit cas_remove_one(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct cas *cp;
+       if (!dev)
+               return;
+
+       cp = netdev_priv(dev);
+       unregister_netdev(dev);
+
+       if (cp->fw_data)
+               vfree(cp->fw_data);
+
+       mutex_lock(&cp->pm_mutex);
+       cancel_work_sync(&cp->reset_task);
+       if (cp->hw_running)
+               cas_shutdown(cp);
+       mutex_unlock(&cp->pm_mutex);
+
+#if 1
+       if (cp->orig_cacheline_size) {
+               /* Restore the cache line size if we had modified
+                * it.
+                */
+               pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+                                     cp->orig_cacheline_size);
+       }
+#endif
+       pci_free_consistent(pdev, sizeof(struct cas_init_block),
+                           cp->init_block, cp->block_dvma);
+       pci_iounmap(pdev, cp->regs);
+       free_netdev(dev);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+/* Legacy PCI PM suspend hook.  Under pm_mutex: detach and quiesce the
+ * device if it is open, then shut the hardware down via cas_shutdown().
+ * Always returns 0.
+ */
+static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct cas *cp = netdev_priv(dev);
+       unsigned long flags;
+
+       mutex_lock(&cp->pm_mutex);
+
+       /* If the driver is opened, we stop the DMA */
+       if (cp->opened) {
+               netif_device_detach(dev);
+
+               cas_lock_all_save(cp, flags);
+
+               /* We can set the second arg of cas_reset to 0
+                * because on resume, we'll call cas_init_hw with
+                * its second arg set so that autonegotiation is
+                * restarted.
+                */
+               cas_reset(cp, 0);
+               cas_clean_rings(cp);
+               cas_unlock_all_restore(cp, flags);
+       }
+
+       if (cp->hw_running)
+               cas_shutdown(cp);
+       mutex_unlock(&cp->pm_mutex);
+
+       return 0;
+}
+
+/* Legacy PCI PM resume hook.  Under pm_mutex: hard-reset the chip and,
+ * if the interface was open at suspend time, reinitialize the hardware
+ * (cas_init_hw with restart_aneg set) and re-attach the netdev.
+ */
+static int cas_resume(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct cas *cp = netdev_priv(dev);
+
+       netdev_info(dev, "resuming\n");
+
+       mutex_lock(&cp->pm_mutex);
+       cas_hard_reset(cp);
+       if (cp->opened) {
+               unsigned long flags;
+               cas_lock_all_save(cp, flags);
+               cas_reset(cp, 0);
+               cp->hw_running = 1;
+               cas_clean_rings(cp);
+               cas_init_hw(cp, 1);
+               cas_unlock_all_restore(cp, flags);
+
+               netif_device_attach(dev);
+       }
+       mutex_unlock(&cp->pm_mutex);
+       return 0;
+}
+#endif /* CONFIG_PM */
+
+/* PCI driver glue: probe/remove plus legacy suspend/resume when CONFIG_PM. */
+static struct pci_driver cas_driver = {
+       .name           = DRV_MODULE_NAME,
+       .id_table       = cas_pci_tbl,
+       .probe          = cas_init_one,
+       .remove         = __devexit_p(cas_remove_one),
+#ifdef CONFIG_PM
+       .suspend        = cas_suspend,
+       .resume         = cas_resume
+#endif
+};
+
+/* Module init: convert the linkdown_timeout module parameter (seconds,
+ * given the HZ multiply) to jiffies, then register the PCI driver.
+ */
+static int __init cas_init(void)
+{
+       if (linkdown_timeout > 0)
+               link_transition_timeout = linkdown_timeout * HZ;
+       else
+               link_transition_timeout = 0;
+
+       return pci_register_driver(&cas_driver);
+}
+
+/* Module exit: unregister the PCI driver (invokes cas_remove_one). */
+static void __exit cas_cleanup(void)
+{
+       pci_unregister_driver(&cas_driver);
+}
+
+module_init(cas_init);
+module_exit(cas_cleanup);
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
new file mode 100644 (file)
index 0000000..b361424
--- /dev/null
@@ -0,0 +1,2914 @@
+/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
+ * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
+ *
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ * vendor id: 0x108E (Sun Microsystems, Inc.)
+ * device id: 0xabba (Cassini)
+ * revision ids: 0x01 = Cassini
+ *               0x02 = Cassini rev 2
+ *               0x10 = Cassini+
+ *               0x11 = Cassini+ 0.2u
+ *
+ * vendor id: 0x100b (National Semiconductor)
+ * device id: 0x0035 (DP83065/Saturn)
+ * revision ids: 0x30 = Saturn B2
+ *
+ * rings are all offset from 0.
+ *
+ * there are two clock domains:
+ * PCI:  33/66MHz clock
+ * chip: 125MHz clock
+ */
+
+#ifndef _CASSINI_H
+#define _CASSINI_H
+
+/* cassini register map: 2M memory mapped in 32-bit memory space accessible as
+ * 32-bit words. there is no i/o port access. REG_ addresses are
+ * shared between cassini and cassini+. REG_PLUS_ addresses only
+ * appear in cassini+. REG_MINUS_ addresses only appear in cassini.
+ */
+#define CAS_ID_REV2          0x02
+#define CAS_ID_REVPLUS       0x10
+#define CAS_ID_REVPLUS02u    0x11
+#define CAS_ID_REVSATURNB2   0x30
+
+/** global resources **/
+
+/* this register sets the weights for the weighted round robin arbiter. e.g.,
+ * if rx weight == 1 and tx weight == 0, rx == 2x tx transfer credit
+ * for its next turn to access the pci bus.
+ * map: 0x0 = x1, 0x1 = x2, 0x2 = x4, 0x3 = x8
+ * DEFAULT: 0x0, SIZE: 5 bits
+ */
+#define  REG_CAWR                     0x0004  /* core arbitration weight */
+#define    CAWR_RX_DMA_WEIGHT_SHIFT    0
+#define    CAWR_RX_DMA_WEIGHT_MASK     0x03    /* [0:1] */
+#define    CAWR_TX_DMA_WEIGHT_SHIFT    2
+#define    CAWR_TX_DMA_WEIGHT_MASK     0x0C    /* [3:2] */
+#define    CAWR_RR_DIS                 0x10    /* [4] */
+
+/* if enabled, BIM can send bursts across PCI bus > cacheline size. burst
+ * sizes determined by length of packet or descriptor transfer and the
+ * max length allowed by the target.
+ * DEFAULT: 0x0, SIZE: 1 bit
+ */
+#define  REG_INF_BURST                 0x0008  /* infinite burst enable reg */
+#define    INF_BURST_EN                0x1     /* enable */
+
+/* top level interrupts [0-9] are auto-cleared to 0 when the status
+ * register is read. second level interrupts [13 - 18] are cleared at
+ * the source. tx completion register 3 is replicated in [19 - 31]
+ * DEFAULT: 0x00000000, SIZE: 29 bits
+ */
+#define  REG_INTR_STATUS               0x000C  /* interrupt status register */
+#define    INTR_TX_INTME               0x00000001  /* frame w/ INT ME desc bit set
+                                                     xferred from host queue to
+                                                     TX FIFO */
+#define    INTR_TX_ALL                 0x00000002  /* all xmit frames xferred into
+                                                     TX FIFO. i.e.,
+                                                     TX Kick == TX complete. if
+                                                     PACED_MODE set, then TX FIFO
+                                                     also empty */
+#define    INTR_TX_DONE                0x00000004  /* any frame xferred into tx
+                                                     FIFO */
+#define    INTR_TX_TAG_ERROR           0x00000008  /* TX FIFO tag framing
+                                                     corrupted. FATAL ERROR */
+#define    INTR_RX_DONE                0x00000010  /* at least 1 frame xferred
+                                                     from RX FIFO to host mem.
+                                                     RX completion reg updated.
+                                                     may be delayed by recv
+                                                     intr blanking. */
+#define    INTR_RX_BUF_UNAVAIL         0x00000020  /* no more receive buffers.
+                                                     RX Kick == RX complete */
+#define    INTR_RX_TAG_ERROR           0x00000040  /* RX FIFO tag framing
+                                                     corrupted. FATAL ERROR */
+#define    INTR_RX_COMP_FULL           0x00000080  /* no more room in completion
+                                                     ring to post descriptors.
+                                                     RX complete head incr to
+                                                     almost reach RX complete
+                                                     tail */
+#define    INTR_RX_BUF_AE              0x00000100  /* less than the
+                                                     programmable threshold #
+                                                     of free descr avail for
+                                                     hw use */
+#define    INTR_RX_COMP_AF             0x00000200  /* less than the
+                                                     programmable threshold #
+                                                     of descr spaces for hw
+                                                     use in completion descr
+                                                     ring */
+#define    INTR_RX_LEN_MISMATCH        0x00000400  /* len field from MAC !=
+                                                     len of non-reassembly pkt
+                                                     from fifo during DMA or
+                                                     header parser provides TCP
+                                                     header and payload size >
+                                                     MAC packet size.
+                                                     FATAL ERROR */
+#define    INTR_SUMMARY                0x00001000  /* summary interrupt bit. this
+                                                     bit will be set if an interrupt
+                                                     generated on the pci bus. useful
+                                                     when driver is polling for
+                                                     interrupts */
+#define    INTR_PCS_STATUS             0x00002000  /* PCS interrupt status register */
+#define    INTR_TX_MAC_STATUS          0x00004000  /* TX MAC status register has at
+                                                     least 1 unmasked interrupt set */
+#define    INTR_RX_MAC_STATUS          0x00008000  /* RX MAC status register has at
+                                                     least 1 unmasked interrupt set */
+#define    INTR_MAC_CTRL_STATUS        0x00010000  /* MAC control status register has
+                                                     at least 1 unmasked interrupt
+                                                     set */
+#define    INTR_MIF_STATUS             0x00020000  /* MIF status register has at least
+                                                     1 unmasked interrupt set */
+#define    INTR_PCI_ERROR_STATUS       0x00040000  /* PCI error status register in the
+                                                     BIF has at least 1 unmasked
+                                                     interrupt set */
+#define    INTR_TX_COMP_3_MASK         0xFFF80000  /* mask for TX completion
+                                                     3 reg data */
+#define    INTR_TX_COMP_3_SHIFT        19
+#define    INTR_ERROR_MASK (INTR_MIF_STATUS | INTR_PCI_ERROR_STATUS | \
+                            INTR_PCS_STATUS | INTR_RX_LEN_MISMATCH | \
+                            INTR_TX_MAC_STATUS | INTR_RX_MAC_STATUS | \
+                            INTR_TX_TAG_ERROR | INTR_RX_TAG_ERROR | \
+                            INTR_MAC_CTRL_STATUS)
+
+/* determines which status events will cause an interrupt. layout same
+ * as REG_INTR_STATUS.
+ * DEFAULT: 0xFFFFFFFF, SIZE: 16 bits
+ */
+#define  REG_INTR_MASK                 0x0010  /* Interrupt mask */
+
+/* top level interrupt bits that are cleared during read of REG_INTR_STATUS_ALIAS.
+ * useful when driver is polling for interrupts. layout same as REG_INTR_MASK.
+ * DEFAULT: 0x00000000, SIZE: 12 bits
+ */
+#define  REG_ALIAS_CLEAR               0x0014  /* alias clear mask
+                                                 (used w/ status alias) */
+/* same as REG_INTR_STATUS except that only bits cleared are those selected by
+ * REG_ALIAS_CLEAR
+ * DEFAULT: 0x00000000, SIZE: 29 bits
+ */
+#define  REG_INTR_STATUS_ALIAS         0x001C  /* interrupt status alias
+                                                 (selective clear) */
+
+/* DEFAULT: 0x0, SIZE: 3 bits */
+#define  REG_PCI_ERR_STATUS            0x1000  /* PCI error status */
+#define    PCI_ERR_BADACK              0x01    /* reserved in Cassini+.
+                                                 set if no ACK64# during ABS64 cycle
+                                                 in Cassini. */
+#define    PCI_ERR_DTRTO               0x02    /* delayed xaction timeout. set if
+                                                 no read retry after 2^15 clocks */
+#define    PCI_ERR_OTHER               0x04    /* other PCI errors */
+#define    PCI_ERR_BIM_DMA_WRITE       0x08    /* BIM received 0 count DMA write req.
+                                                 unused in Cassini. */
+#define    PCI_ERR_BIM_DMA_READ        0x10    /* BIM received 0 count DMA read req.
+                                                 unused in Cassini. */
+#define    PCI_ERR_BIM_DMA_TIMEOUT     0x20    /* BIM received 255 retries during
+                                                 DMA. unused in cassini. */
+
+/* mask for PCI status events that will set PCI_ERR_STATUS. if cleared, event
+ * causes an interrupt to be generated.
+ * DEFAULT: 0x7, SIZE: 3 bits
+ */
+#define  REG_PCI_ERR_STATUS_MASK       0x1004  /* PCI Error status mask */
+
+/* used to configure PCI related parameters that are not in PCI config space.
+ * DEFAULT: 0bxx000, SIZE: 5 bits
+ */
+#define  REG_BIM_CFG                0x1008  /* BIM Configuration */
+#define    BIM_CFG_RESERVED0        0x001   /* reserved */
+#define    BIM_CFG_RESERVED1        0x002   /* reserved */
+#define    BIM_CFG_64BIT_DISABLE    0x004   /* disable 64-bit mode */
+#define    BIM_CFG_66MHZ            0x008   /* (ro) 1 = 66MHz, 0 = < 66MHz */
+#define    BIM_CFG_32BIT            0x010   /* (ro) 1 = 32-bit slot, 0 = 64-bit */
+#define    BIM_CFG_DPAR_INTR_ENABLE 0x020   /* detected parity err enable */
+#define    BIM_CFG_RMA_INTR_ENABLE  0x040   /* master abort intr enable */
+#define    BIM_CFG_RTA_INTR_ENABLE  0x080   /* target abort intr enable */
+#define    BIM_CFG_RESERVED2        0x100   /* reserved */
+#define    BIM_CFG_BIM_DISABLE      0x200   /* stop BIM DMA. use before global
+                                              reset. reserved in Cassini. */
+#define    BIM_CFG_BIM_STATUS       0x400   /* (ro) 1 = BIM DMA suspended.
+                                                 reserved in Cassini. */
+#define    BIM_CFG_PERROR_BLOCK     0x800  /* block PERR# to pci bus. def: 0.
+                                                reserved in Cassini. */
+
+/* DEFAULT: 0x00000000, SIZE: 32 bits */
+#define  REG_BIM_DIAG                  0x100C  /* BIM Diagnostic */
+#define    BIM_DIAG_MSTR_SM_MASK       0x3FFFFF00 /* PCI master controller state
+                                                    machine bits [21:0] */
+#define    BIM_DIAG_BRST_SM_MASK       0x7F    /* PCI burst controller state
+                                                 machine bits [6:0] */
+
+/* writing to SW_RESET_TX and SW_RESET_RX will issue a global
+ * reset. poll until TX and RX read back as 0's for completion.
+ */
+#define  REG_SW_RESET                  0x1010  /* Software reset */
+#define    SW_RESET_TX                 0x00000001  /* reset TX DMA engine. poll until
+                                                     cleared to 0.  */
+#define    SW_RESET_RX                 0x00000002  /* reset RX DMA engine. poll until
+                                                     cleared to 0. */
+#define    SW_RESET_RSTOUT             0x00000004  /* force RSTOUT# pin active (low).
+                                                     resets PHY and anything else
+                                                     connected to RSTOUT#. RSTOUT#
+                                                     is also activated by local PCI
+                                                     reset when hot-swap is being
+                                                     done. */
+#define    SW_RESET_BLOCK_PCS_SLINK    0x00000008  /* if a global reset is done with
+                                                     this bit set, PCS and SLINK
+                                                     modules won't be reset.
+                                                     i.e., link won't drop. */
+#define    SW_RESET_BREQ_SM_MASK       0x00007F00  /* breq state machine [6:0] */
+#define    SW_RESET_PCIARB_SM_MASK     0x00070000  /* pci arbitration state bits:
+                                                     0b000: ARB_IDLE1
+                                                     0b001: ARB_IDLE2
+                                                     0b010: ARB_WB_ACK
+                                                     0b011: ARB_WB_WAT
+                                                     0b100: ARB_RB_ACK
+                                                     0b101: ARB_RB_WAT
+                                                     0b110: ARB_RB_END
+                                                     0b111: ARB_WB_END */
+#define    SW_RESET_RDPCI_SM_MASK      0x00300000  /* read pci state bits:
+                                                     0b00: RD_PCI_WAT
+                                                     0b01: RD_PCI_RDY
+                                                     0b11: RD_PCI_ACK */
+#define    SW_RESET_RDARB_SM_MASK      0x00C00000  /* read arbitration state bits:
+                                                     0b00: AD_IDL_RX
+                                                     0b01: AD_ACK_RX
+                                                     0b10: AD_ACK_TX
+                                                     0b11: AD_IDL_TX */
+#define    SW_RESET_WRPCI_SM_MASK      0x06000000  /* write pci state bits
+                                                     0b00: WR_PCI_WAT
+                                                     0b01: WR_PCI_RDY
+                                                     0b11: WR_PCI_ACK */
+#define    SW_RESET_WRARB_SM_MASK      0x38000000  /* write arbitration state bits:
+                                                     0b000: ARB_IDLE1
+                                                     0b001: ARB_IDLE2
+                                                     0b010: ARB_TX_ACK
+                                                     0b011: ARB_TX_WAT
+                                                     0b100: ARB_RX_ACK
+                                                     0b110: ARB_RX_WAT */
+
+/* Cassini only. 64-bit register used to check PCI datapath. when read,
+ * value written has both lower and upper 32-bit halves rotated to the right
+ * one bit position. e.g., FFFFFFFF FFFFFFFF -> 7FFFFFFF 7FFFFFFF
+ */
+#define  REG_MINUS_BIM_DATAPATH_TEST   0x1018  /* Cassini: BIM datapath test
+                                                 Cassini+: reserved */
+
+/* output enables are provided for each device's chip select and for the rest
+ * of the outputs from cassini to its local bus devices. two sw programmable
+ * bits are connected to general purpose control/status bits.
+ * DEFAULT: 0x7
+ */
+#define  REG_BIM_LOCAL_DEV_EN          0x1020  /* BIM local device
+                                                 output EN. default: 0x7 */
+#define    BIM_LOCAL_DEV_PAD           0x01    /* address bus, RW signal, and
+                                                 OE signal output enable on the
+                                                 local bus interface. these
+                                                 are shared between both local
+                                                 bus devices. tristate when 0. */
+#define    BIM_LOCAL_DEV_PROM          0x02    /* PROM chip select */
+#define    BIM_LOCAL_DEV_EXT           0x04    /* secondary local bus device chip
+                                                 select output enable */
+#define    BIM_LOCAL_DEV_SOFT_0        0x08    /* sw programmable ctrl bit 0 */
+#define    BIM_LOCAL_DEV_SOFT_1        0x10    /* sw programmable ctrl bit 1 */
+#define    BIM_LOCAL_DEV_HW_RESET      0x20    /* internal hw reset. Cassini+ only. */
+
+/* access 24 entry BIM read and write buffers. put address in REG_BIM_BUFFER_ADDR
+ * and read/write from/to it REG_BIM_BUFFER_DATA_LOW and _DATA_HI.
+ * _DATA_HI should be the last access of the sequence.
+ * DEFAULT: undefined
+ */
+#define  REG_BIM_BUFFER_ADDR           0x1024  /* BIM buffer address. for
+                                                 purposes. */
+#define    BIM_BUFFER_ADDR_MASK        0x3F    /* index (0 - 23) of buffer  */
+#define    BIM_BUFFER_WR_SELECT        0x40    /* write buffer access = 1
+                                                 read buffer access = 0 */
+/* DEFAULT: undefined */
+#define  REG_BIM_BUFFER_DATA_LOW       0x1028  /* BIM buffer data low */
+#define  REG_BIM_BUFFER_DATA_HI        0x102C  /* BIM buffer data high */
+
+/* set BIM_RAM_BIST_START to start built-in self test for BIM read buffer.
+ * bit auto-clears when done with status read from _SUMMARY and _PASS bits.
+ */
+#define  REG_BIM_RAM_BIST              0x102C  /* BIM RAM (read buffer) BIST
+                                                 control/status */
+#define    BIM_RAM_BIST_RD_START       0x01    /* start BIST for BIM read buffer */
+#define    BIM_RAM_BIST_WR_START       0x02    /* start BIST for BIM write buffer.
+                                                 Cassini only. reserved in
+                                                 Cassini+. */
+#define    BIM_RAM_BIST_RD_PASS        0x04    /* summary BIST pass status for read
+                                                 buffer. */
+#define    BIM_RAM_BIST_WR_PASS        0x08    /* summary BIST pass status for write
+                                                 buffer. Cassini only. reserved
+                                                 in Cassini+. */
+#define    BIM_RAM_BIST_RD_LOW_PASS    0x10    /* read low bank passes BIST */
+#define    BIM_RAM_BIST_RD_HI_PASS     0x20    /* read high bank passes BIST */
+#define    BIM_RAM_BIST_WR_LOW_PASS    0x40    /* write low bank passes BIST.
+                                                 Cassini only. reserved in
+                                                 Cassini+. */
+#define    BIM_RAM_BIST_WR_HI_PASS     0x80    /* write high bank passes BIST.
+                                                 Cassini only. reserved in
+                                                 Cassini+. */
+
+/* ASUN: i'm not sure what this does as it's not in the spec.
+ * DEFAULT: 0xFC
+ */
+#define  REG_BIM_DIAG_MUX              0x1030  /* BIM diagnostic probe mux
+                                                 select register */
+
+/* enable probe monitoring mode and select data appearing on the P_A* bus. bit
+ * values for _SEL_HI_MASK and _SEL_LOW_MASK:
+ * 0x0: internal probe[7:0] (pci arb state, wtc empty w, wtc full w, wtc empty w,
+ *                           wtc empty r, post pci)
+ * 0x1: internal probe[15:8] (pci wbuf comp, pci wpkt comp, pci rbuf comp,
+ *                            pci rpkt comp, txdma wr req, txdma wr ack,
+ *                           txdma wr rdy, txdma wr xfr done)
+ * 0x2: internal probe[23:16] (txdma rd req, txdma rd ack, txdma rd rdy, rxdma rd,
+ *                             rd arb state, rd pci state)
+ * 0x3: internal probe[31:24] (rxdma req, rxdma ack, rxdma rdy, wrarb state,
+ *                             wrpci state)
+ * 0x4: pci io probe[7:0]     0x5: pci io probe[15:8]
+ * 0x6: pci io probe[23:16]   0x7: pci io probe[31:24]
+ * 0x8: pci io probe[39:32]   0x9: pci io probe[47:40]
+ * 0xa: pci io probe[55:48]   0xb: pci io probe[63:56]
+ * the following are not available in Cassini:
+ * 0xc: rx probe[7:0]         0xd: tx probe[7:0]
+ * 0xe: hp probe[7:0]        0xf: mac probe[7:0]
+ */
+#define  REG_PLUS_PROBE_MUX_SELECT     0x1034 /* Cassini+: PROBE MUX SELECT */
+#define    PROBE_MUX_EN                0x80000000 /* allow probe signals to be
+                                                    driven on local bus P_A[15:0]
+                                                    for debugging */
+#define    PROBE_MUX_SUB_MUX_MASK      0x0000FF00 /* select sub module probe signals:
+                                                    0x03 = mac[1:0]
+                                                    0x0C = rx[1:0]
+                                                    0x30 = tx[1:0]
+                                                    0xC0 = hp[1:0] */
+#define    PROBE_MUX_SEL_HI_MASK       0x000000F0 /* select which module to appear
+                                                    on P_A[15:8]. see above for
+                                                    values. */
+#define    PROBE_MUX_SEL_LOW_MASK      0x0000000F /* select which module to appear
+                                                    on P_A[7:0]. see above for
+                                                    values. */
+
+/* values mean the same thing as REG_INTR_MASK excep that it's for INTB.
+ DEFAULT: 0x1F */
+#define  REG_PLUS_INTR_MASK_1          0x1038 /* Cassini+: interrupt mask
+                                                register 2 for INTB */
+#define  REG_PLUS_INTRN_MASK(x)       (REG_PLUS_INTR_MASK_1 + ((x) - 1)*16)
+/* bits correspond to both _MASK and _STATUS registers. _ALT corresponds to
+ * all of the alternate (2-4) INTR registers while _1 corresponds to only
+ * _MASK_1 and _STATUS_1 registers.
+ * DEFAULT: 0x7 for MASK registers, 0x0 for ALIAS_CLEAR registers
+ */
+#define    INTR_RX_DONE_ALT              0x01
+#define    INTR_RX_COMP_FULL_ALT         0x02
+#define    INTR_RX_COMP_AF_ALT           0x04
+#define    INTR_RX_BUF_UNAVAIL_1         0x08
+#define    INTR_RX_BUF_AE_1              0x10 /* almost empty */
+#define    INTRN_MASK_RX_EN              0x80
+#define    INTRN_MASK_CLEAR_ALL          (INTR_RX_DONE_ALT | \
+                                          INTR_RX_COMP_FULL_ALT | \
+                                          INTR_RX_COMP_AF_ALT | \
+                                          INTR_RX_BUF_UNAVAIL_1 | \
+                                          INTR_RX_BUF_AE_1)
+#define  REG_PLUS_INTR_STATUS_1        0x103C /* Cassini+: interrupt status
+                                                register 2 for INTB. default: 0x1F */
+#define  REG_PLUS_INTRN_STATUS(x)       (REG_PLUS_INTR_STATUS_1 + ((x) - 1)*16)
+#define    INTR_STATUS_ALT_INTX_EN     0x80   /* generate INTX when one of the
+                                                flags are set. enables desc ring. */
+
+#define  REG_PLUS_ALIAS_CLEAR_1        0x1040 /* Cassini+: alias clear mask
+                                                register 2 for INTB */
+#define  REG_PLUS_ALIASN_CLEAR(x)      (REG_PLUS_ALIAS_CLEAR_1 + ((x) - 1)*16)
+
+#define  REG_PLUS_INTR_STATUS_ALIAS_1  0x1044 /* Cassini+: interrupt status
+                                                register alias 2 for INTB */
+#define  REG_PLUS_INTRN_STATUS_ALIAS(x) (REG_PLUS_INTR_STATUS_ALIAS_1 + ((x) - 1)*16)
+
+#define REG_SATURN_PCFG               0x106c /* pin configuration register for
+                                               integrated macphy */
+
+#define   SATURN_PCFG_TLA             0x00000001 /* 1 = phy actled */
+#define   SATURN_PCFG_FLA             0x00000002 /* 1 = phy link10led */
+#define   SATURN_PCFG_CLA             0x00000004 /* 1 = phy link100led */
+#define   SATURN_PCFG_LLA             0x00000008 /* 1 = phy link1000led */
+#define   SATURN_PCFG_RLA             0x00000010 /* 1 = phy duplexled */
+#define   SATURN_PCFG_PDS             0x00000020 /* phy debug mode.
+                                                   0 = normal */
+#define   SATURN_PCFG_MTP             0x00000080 /* test point select */
+#define   SATURN_PCFG_GMO             0x00000100 /* GMII observe. 1 =
+                                                   GMII on SERDES pins for
+                                                   monitoring. */
+#define   SATURN_PCFG_FSI             0x00000200 /* 1 = freeze serdes/gmii. all
+                                                   pins configed as outputs.
+                                                   for power saving when using
+                                                   internal phy. */
+#define   SATURN_PCFG_LAD             0x00000800 /* 0 = mac core led ctrl
+                                                   polarity from strapping
+                                                   value.
+                                                   1 = mac core led ctrl
+                                                   polarity active low. */
+
+
+/** transmit dma registers **/
+#define MAX_TX_RINGS_SHIFT            2
+#define MAX_TX_RINGS                  (1 << MAX_TX_RINGS_SHIFT)
+#define MAX_TX_RINGS_MASK             (MAX_TX_RINGS - 1)
+
+/* TX configuration.
+ * descr ring sizes: size = 32 * (1 << n), n < 9. e.g., 0x8 = 8k. default: 0x8
+ * DEFAULT: 0x3F000001
+ */
+#define  REG_TX_CFG                    0x2004  /* TX config */
+#define    TX_CFG_DMA_EN               0x00000001  /* enable TX DMA. if cleared, DMA
+                                                     will stop after xfer of current
+                                                     buffer has been completed. */
+#define    TX_CFG_FIFO_PIO_SEL         0x00000002  /* TX DMA FIFO can be
+                                                     accessed w/ FIFO addr
+                                                     and data registers.
+                                                     TX DMA should be
+                                                     disabled. */
+#define    TX_CFG_DESC_RING0_MASK      0x0000003C  /* # desc entries in
+                                                     ring 1. */
+#define    TX_CFG_DESC_RING0_SHIFT     2
+#define    TX_CFG_DESC_RINGN_MASK(a)   (TX_CFG_DESC_RING0_MASK << (a)*4)
+#define    TX_CFG_DESC_RINGN_SHIFT(a)  (TX_CFG_DESC_RING0_SHIFT + (a)*4)
+#define    TX_CFG_PACED_MODE           0x00100000  /* TX_ALL only set after
+                                                     TX FIFO becomes empty.
+                                                     if 0, TX_ALL set
+                                                     if descr queue empty. */
+#define    TX_CFG_DMA_RDPIPE_DIS       0x01000000  /* always set to 1 */
+#define    TX_CFG_COMPWB_Q1            0x02000000  /* completion writeback happens at
+                                                     the end of every packet kicked
+                                                     through Q1. */
+#define    TX_CFG_COMPWB_Q2            0x04000000  /* completion writeback happens at
+                                                     the end of every packet kicked
+                                                     through Q2. */
+#define    TX_CFG_COMPWB_Q3            0x08000000  /* completion writeback happens at
+                                                     the end of every packet kicked
+                                                     through Q3 */
+#define    TX_CFG_COMPWB_Q4            0x10000000  /* completion writeback happens at
+                                                     the end of every packet kicked
+                                                     through Q4 */
+#define    TX_CFG_INTR_COMPWB_DIS      0x20000000  /* disable pre-interrupt completion
+                                                     writeback */
+#define    TX_CFG_CTX_SEL_MASK         0xC0000000  /* selects tx test port
+                                                     connection
+                                                     0b00: tx mac req,
+                                                           tx mac retry req,
+                                                           tx ack and tx tag.
+                                                     0b01: txdma rd req,
+                                                           txdma rd ack,
+                                                           txdma rd rdy,
+                                                           txdma rd type0
+                                                     0b11: txdma wr req,
+                                                           txdma wr ack,
+                                                           txdma wr rdy,
+                                                           txdma wr xfr done. */
+#define    TX_CFG_CTX_SEL_SHIFT        30
+
+/* 11-bit counters that point to next location in FIFO to be loaded/retrieved.
+ * used for diagnostics only.
+ */
+#define  REG_TX_FIFO_WRITE_PTR         0x2014  /* TX FIFO write pointer */
+#define  REG_TX_FIFO_SHADOW_WRITE_PTR  0x2018  /* TX FIFO shadow write
+                                                 pointer. temp hold reg.
+                                                 diagnostics only. */
+#define  REG_TX_FIFO_READ_PTR          0x201C  /* TX FIFO read pointer */
+#define  REG_TX_FIFO_SHADOW_READ_PTR   0x2020  /* TX FIFO shadow read
+                                                 pointer */
+
+/* (ro) 11-bit up/down counter w/ # of frames currently in TX FIFO */
+#define  REG_TX_FIFO_PKT_CNT           0x2024  /* TX FIFO packet counter */
+
+/* current state of all state machines in TX */
+#define  REG_TX_SM_1                   0x2028  /* TX state machine reg #1 */
+#define    TX_SM_1_CHAIN_MASK          0x000003FF   /* chaining state machine */
+#define    TX_SM_1_CSUM_MASK           0x00000C00   /* checksum state machine */
+#define    TX_SM_1_FIFO_LOAD_MASK      0x0003F000   /* FIFO load state machine.
+                                                      = 0x01 when TX disabled. */
+#define    TX_SM_1_FIFO_UNLOAD_MASK    0x003C0000   /* FIFO unload state machine */
+#define    TX_SM_1_CACHE_MASK          0x03C00000   /* desc. prefetch cache controller
+                                                      state machine */
+#define    TX_SM_1_CBQ_ARB_MASK        0xF8000000   /* CBQ arbiter state machine */
+
+#define  REG_TX_SM_2                   0x202C  /* TX state machine reg #2 */
+#define    TX_SM_2_COMP_WB_MASK        0x07    /* completion writeback sm */
+#define           TX_SM_2_SUB_LOAD_MASK       0x38    /* sub load state machine */
+#define           TX_SM_2_KICK_MASK           0xC0    /* kick state machine */
+
+/* 64-bit pointer to the transmit data buffer. only the 50 LSB are incremented
+ * while the upper 23 bits are taken from the TX descriptor
+ */
+#define  REG_TX_DATA_PTR_LOW           0x2030  /* TX data pointer low */
+#define  REG_TX_DATA_PTR_HI            0x2034  /* TX data pointer high */
+
+/* 13 bit registers written by driver w/ descriptor value that follows
+ * last valid xmit descriptor. kick # and complete # values are used by
+ * the xmit dma engine to control tx descr fetching. if > 1 valid
+ * tx descr is available within the cache line being read, cassini will
+ * internally cache up to 4 of them. 0 on reset. _KICK = rw, _COMP = ro.
+ */
+#define  REG_TX_KICK0                  0x2038  /* TX kick reg #1 */
+#define  REG_TX_KICKN(x)               (REG_TX_KICK0 + (x)*4)
+#define  REG_TX_COMP0                  0x2048  /* TX completion reg #1 */
+#define  REG_TX_COMPN(x)               (REG_TX_COMP0 + (x)*4)
+
+/* values of TX_COMPLETE_1-4 are written. each completion register
+ * is 2 bytes in size and contiguous. 8B allocation w/ 8B alignment.
+ * NOTE: completion reg values are only written back prior to TX_INTME and
+ * TX_ALL interrupts. at all other times, the most up-to-date index values
+ * should be obtained from the REG_TX_COMPLETE_# registers.
+ * here's the layout:
+ * offset from base addr      completion # byte
+ *           0                TX_COMPLETE_1_MSB
+ *          1                TX_COMPLETE_1_LSB
+ *           2                TX_COMPLETE_2_MSB
+ *          3                TX_COMPLETE_2_LSB
+ *           4                TX_COMPLETE_3_MSB
+ *          5                TX_COMPLETE_3_LSB
+ *           6                TX_COMPLETE_4_MSB
+ *          7                TX_COMPLETE_4_LSB
+ */
+#define  TX_COMPWB_SIZE             8
+#define  REG_TX_COMPWB_DB_LOW       0x2058  /* TX completion write back
+                                              base low */
+#define  REG_TX_COMPWB_DB_HI        0x205C  /* TX completion write back
+                                              base high */
+#define    TX_COMPWB_MSB_MASK       0x00000000000000FFULL
+#define    TX_COMPWB_MSB_SHIFT      0
+#define    TX_COMPWB_LSB_MASK       0x000000000000FF00ULL
+#define    TX_COMPWB_LSB_SHIFT      8
+#define    TX_COMPWB_NEXT(x)        ((x) >> 16)
+
+/* 53 MSB used as base address. 11 LSB assumed to be 0. TX desc pointer must
+ * be 2KB-aligned. */
+#define  REG_TX_DB0_LOW         0x2060  /* TX descriptor base low #1 */
+#define  REG_TX_DB0_HI          0x2064  /* TX descriptor base hi #1 */
+#define  REG_TX_DBN_LOW(x)      (REG_TX_DB0_LOW + (x)*8)
+#define  REG_TX_DBN_HI(x)       (REG_TX_DB0_HI + (x)*8)
+
+/* 16-bit registers hold weights for the weighted round-robin of the
+ * four CBQ TX descr rings. weights correspond to # bytes xferred from
+ * host to TXFIFO in a round of WRR arbitration. can be set
+ * dynamically with new weights set upon completion of the current
+ * packet transfer from host memory to TXFIFO. a dummy write to any of
+ * these registers causes a queue1 pre-emption with all historical bw
+ * deficit data reset to 0 (useful when congestion requires a
+ * pre-emption/re-allocation of network bandwidth)
+ */
+#define  REG_TX_MAXBURST_0             0x2080  /* TX MaxBurst #1 */
+#define  REG_TX_MAXBURST_1             0x2084  /* TX MaxBurst #2 */
+#define  REG_TX_MAXBURST_2             0x2088  /* TX MaxBurst #3 */
+#define  REG_TX_MAXBURST_3             0x208C  /* TX MaxBurst #4 */
+
+/* diagnostics access to any TX FIFO location. every access is 65
+ * bits.  _DATA_LOW = 32 LSB, _DATA_HI_T1/T0 = 32 MSB. _TAG = tag bit.
+ * writing _DATA_HI_T0 sets tag bit low, writing _DATA_HI_T1 sets tag
+ * bit high.  TX_FIFO_PIO_SEL must be set for TX FIFO PIO access. if
+ * TX FIFO data integrity is desired, TX DMA should be
+ * disabled. _DATA_HI_Tx should be the last access of the sequence.
+ */
+#define  REG_TX_FIFO_ADDR              0x2104  /* TX FIFO address */
+#define  REG_TX_FIFO_TAG               0x2108  /* TX FIFO tag */
+#define  REG_TX_FIFO_DATA_LOW          0x210C  /* TX FIFO data low */
+#define  REG_TX_FIFO_DATA_HI_T1        0x2110  /* TX FIFO data high t1 */
+#define  REG_TX_FIFO_DATA_HI_T0        0x2114  /* TX FIFO data high t0 */
+#define  REG_TX_FIFO_SIZE              0x2118  /* (ro) TX FIFO size = 0x090 = 9KB */
+
+/* 9-bit register controls BIST of TX FIFO. bit set indicates that the BIST
+ * passed for the specified memory
+ */
+#define  REG_TX_RAMBIST                0x211C /* TX RAMBIST control/status */
+#define    TX_RAMBIST_STATE            0x01C0 /* progress state of RAMBIST
+                                                controller state machine */
+#define    TX_RAMBIST_RAM33A_PASS      0x0020 /* RAM33A passed */
+#define    TX_RAMBIST_RAM32A_PASS      0x0010 /* RAM32A passed */
+#define    TX_RAMBIST_RAM33B_PASS      0x0008 /* RAM33B passed */
+#define    TX_RAMBIST_RAM32B_PASS      0x0004 /* RAM32B passed */
+#define    TX_RAMBIST_SUMMARY          0x0002 /* all RAM passed */
+#define    TX_RAMBIST_START            0x0001 /* write 1 to start BIST. self
+                                                clears on completion. */
+
+/** receive dma registers **/
+#define MAX_RX_DESC_RINGS              2
+#define MAX_RX_COMP_RINGS              4
+
+/* receive DMA channel configuration. default: 0x80910
+ * free ring size       = (1 << n)*32  -> [32 - 8k]
+ * completion ring size = (1 << n)*128 -> [128 - 32k], n < 9
+ * DEFAULT: 0x80910
+ */
+#define  REG_RX_CFG                     0x4000  /* RX config */
+#define    RX_CFG_DMA_EN                0x00000001 /* enable RX DMA. 0 stops
+                                                        channel as soon as current
+                                                        frame xfer has completed.
+                                                        driver should disable MAC
+                                                        for 200ms before disabling
+                                                        RX */
+#define    RX_CFG_DESC_RING_MASK        0x0000001E /* # desc entries in RX
+                                                        free desc ring.
+                                                        def: 0x8 = 8k */
+#define    RX_CFG_DESC_RING_SHIFT       1
+#define    RX_CFG_COMP_RING_MASK        0x000001E0 /* # desc entries in RX complete
+                                                        ring. def: 0x8 = 32k */
+#define    RX_CFG_COMP_RING_SHIFT       5
+#define    RX_CFG_BATCH_DIS             0x00000200 /* disable receive desc
+                                                     batching. def: 0x0 =
+                                                     enabled */
+#define    RX_CFG_SWIVEL_MASK           0x00001C00 /* byte offset of the 1st
+                                                     data byte of the packet
+                                                     w/in 8 byte boundaries.
+                                                     this swivels the data
+                                                     DMA'ed to header
+                                                     buffers, jumbo buffers
+                                                     when header split is not
+                                                     requested and MTU sized
+                                                     buffers. def: 0x2 */
+#define    RX_CFG_SWIVEL_SHIFT          10
+
+/* cassini+ only */
+#define    RX_CFG_DESC_RING1_MASK       0x000F0000 /* # of desc entries in
+                                                        RX free desc ring 2.
+                                                        def: 0x8 = 8k */
+#define    RX_CFG_DESC_RING1_SHIFT      16
+
+
+/* the page size register allows cassini chips to do the following with
+ * received data:
+ * [--------------------------------------------------------------] page
+ * [off][buf1][pad][off][buf2][pad][off][buf3][pad][off][buf4][pad]
+ * |--------------| = PAGE_SIZE_BUFFER_STRIDE
+ * page = PAGE_SIZE
+ * offset = PAGE_SIZE_MTU_OFF
+ * for the above example, MTU_BUFFER_COUNT = 4.
+ * NOTE: as is apparent, you need to ensure that the following holds:
+ * MTU_BUFFER_COUNT <= PAGE_SIZE/PAGE_SIZE_BUFFER_STRIDE
+ * DEFAULT: 0x48002002 (8k pages)
+ */
+#define  REG_RX_PAGE_SIZE               0x4004  /* RX page size */
+#define    RX_PAGE_SIZE_MASK            0x00000003 /* size of pages pointed to
+                                                     by receive descriptors.
+                                                     if jumbo buffers are
+                                                     supported the page size
+                                                     should not be < 8k.
+                                                     0b00 = 2k, 0b01 = 4k
+                                                     0b10 = 8k, 0b11 = 16k
+                                                     DEFAULT: 8k */
+#define    RX_PAGE_SIZE_SHIFT           0
+#define    RX_PAGE_SIZE_MTU_COUNT_MASK  0x00007800 /* # of MTU buffers the hw
+                                                     packs into a page.
+                                                     DEFAULT: 4 */
+#define    RX_PAGE_SIZE_MTU_COUNT_SHIFT 11
+#define    RX_PAGE_SIZE_MTU_STRIDE_MASK 0x18000000 /* # of bytes that separate
+                                                        each MTU buffer +
+                                                        offset from each
+                                                        other.
+                                                        0b00 = 1k, 0b01 = 2k
+                                                        0b10 = 4k, 0b11 = 8k
+                                                        DEFAULT: 0x1 */
+#define    RX_PAGE_SIZE_MTU_STRIDE_SHIFT 27
+#define    RX_PAGE_SIZE_MTU_OFF_MASK    0xC0000000 /* offset in each page that
+                                                     hw writes the MTU buffer
+                                                     into.
+                                                     0b00 = 0,
+                                                     0b01 = 64 bytes
+                                                     0b10 = 96, 0b11 = 128
+                                                     DEFAULT: 0x1 */
+#define    RX_PAGE_SIZE_MTU_OFF_SHIFT   30
+
+/* 11-bit counter points to next location in RX FIFO to be loaded/read.
+ * shadow write pointers enable retries in case of early receive aborts.
+ * DEFAULT: 0x0. generated on 64-bit boundaries.
+ */
+#define  REG_RX_FIFO_WRITE_PTR             0x4008  /* RX FIFO write pointer */
+#define  REG_RX_FIFO_READ_PTR              0x400C  /* RX FIFO read pointer */
+#define  REG_RX_IPP_FIFO_SHADOW_WRITE_PTR  0x4010  /* RX IPP FIFO shadow write
+                                                     pointer */
+#define  REG_RX_IPP_FIFO_SHADOW_READ_PTR   0x4014  /* RX IPP FIFO shadow read
+                                                     pointer */
+#define  REG_RX_IPP_FIFO_READ_PTR          0x400C  /* RX IPP FIFO read
+                                                     pointer. (8-bit counter)
+                                                     NOTE(review): offset 0x400C
+                                                     duplicates REG_RX_FIFO_READ_PTR
+                                                     above - confirm against the
+                                                     Cassini register spec */
+
+/* current state of RX DMA state engines + other info
+ * DEFAULT: 0x0
+ */
+#define  REG_RX_DEBUG                      0x401C  /* RX debug */
+#define    RX_DEBUG_LOAD_STATE_MASK        0x0000000F /* load state machine w/ MAC:
+                                                        0x0 = idle,   0x1 = load_bop
+                                                        0x2 = load 1, 0x3 = load 2
+                                                        0x4 = load 3, 0x5 = load 4
+                                                        0x6 = last detect
+                                                        0x7 = wait req
+                                                        0x8 = wait req status 1st
+                                                        0x9 = load st
+                                                        0xa = bubble mac
+                                                        0xb = error */
+#define    RX_DEBUG_LM_STATE_MASK          0x00000070 /* load state machine w/ HP and
+                                                        RX FIFO:
+                                                        0x0 = idle,   0x1 = hp xfr
+                                                        0x2 = wait hp ready
+                                                        0x3 = wait flow code
+                                                        0x4 = fifo xfer
+                                                        0x5 = make status
+                                                        0x6 = csum ready
+                                                        0x7 = error */
+#define    RX_DEBUG_FC_STATE_MASK          0x000000180 /* flow control state machine
+                                                        w/ MAC:
+                                                        0x0 = idle
+                                                        0x1 = wait xoff ack
+                                                        0x2 = wait xon
+                                                        0x3 = wait xon ack */
+#define    RX_DEBUG_DATA_STATE_MASK        0x000001E00 /* unload data state machine
+                                                        states:
+                                                        0x0 = idle data
+                                                        0x1 = header begin
+                                                        0x2 = xfer header
+                                                        0x3 = xfer header ld
+                                                        0x4 = mtu begin
+                                                        0x5 = xfer mtu
+                                                        0x6 = xfer mtu ld
+                                                        0x7 = jumbo begin
+                                                        0x8 = xfer jumbo
+                                                        0x9 = xfer jumbo ld
+                                                        0xa = reas begin
+                                                        0xb = xfer reas
+                                                        0xc = flush tag
+                                                        0xd = xfer reas ld
+                                                        0xe = error
+                                                        0xf = bubble idle */
+#define    RX_DEBUG_DESC_STATE_MASK        0x0001E000 /* unload desc state machine
+                                                        states:
+                                                        0x0 = idle desc
+                                                        0x1 = wait ack
+                                                        0x9 = wait ack 2
+                                                        0x2 = fetch desc 1
+                                                        0xa = fetch desc 2
+                                                        0x3 = load ptrs
+                                                        0x4 = wait dma
+                                                        0x5 = wait ack batch
+                                                        0x6 = post batch
+                                                        0x7 = xfr done */
+#define    RX_DEBUG_INTR_READ_PTR_MASK     0x30000000 /* interrupt read ptr of the
+                                                        interrupt queue */
+#define    RX_DEBUG_INTR_WRITE_PTR_MASK    0xC0000000 /* interrupt write pointer
+                                                        of the interrupt queue */
+
+/* flow control frames are emitted using two PAUSE thresholds:
+ * XOFF PAUSE uses pause time value pre-programmed in the Send PAUSE MAC reg
+ * XON PAUSE uses a pause time of 0. granularity of threshold is 64bytes.
+ * PAUSE thresholds defined in terms of FIFO occupancy and may be translated
+ * into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames
+ * when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max
+ * value is is 0x6F.
+ * DEFAULT: 0x00078
+ */
+#define  REG_RX_PAUSE_THRESH               0x4020  /* RX pause thresholds */
+#define    RX_PAUSE_THRESH_QUANTUM         64
+#define    RX_PAUSE_THRESH_OFF_MASK        0x000001FF /* XOFF PAUSE emitted when
+                                                        RX FIFO occupancy >
+                                                        value*64B */
+#define    RX_PAUSE_THRESH_OFF_SHIFT       0
+#define    RX_PAUSE_THRESH_ON_MASK         0x001FF000 /* XON PAUSE emitted after
+                                                        emitting XOFF PAUSE when RX
+                                                        FIFO occupancy falls below
+                                                        this value*64B. must be
+                                                        < XOFF threshold. if =
+                                                        RX_FIFO_SIZE< XON frames are
+                                                        never emitted. */
+#define    RX_PAUSE_THRESH_ON_SHIFT        12
+
+/* 13-bit register used to control RX desc fetching and intr generation. if 4+
+ * valid RX descriptors are available, Cassini will read 4 at a time.
+ * writing N means that all desc up to *but* excluding N are available. N must
+ * be a multiple of 4 (N % 4 = 0). first desc should be cache-line aligned.
+ * DEFAULT: 0 on reset
+ */
+#define  REG_RX_KICK                    0x4024  /* RX kick reg */
+
+/* 8KB aligned 64-bit pointer to the base of the RX free/completion rings.
+ * lower 13 bits of the low register are hard-wired to 0.
+ */
+#define  REG_RX_DB_LOW                     0x4028  /* RX descriptor ring
+                                                        base low */
+#define  REG_RX_DB_HI                      0x402C  /* RX descriptor ring
+                                                        base hi */
+#define  REG_RX_CB_LOW                     0x4030  /* RX completion ring
+                                                        base low */
+#define  REG_RX_CB_HI                      0x4034  /* RX completion ring
+                                                        base hi */
+/* 13-bit register indicates desc used by cassini for receive frames. used
+ * for diagnostic purposes.
+ * DEFAULT: 0 on reset
+ */
+#define  REG_RX_COMP                       0x4038  /* (ro) RX completion */
+
+/* HEAD and TAIL are used to control RX desc posting and interrupt
+ * generation.  hw moves the head register to pass ownership to sw. sw
+ * moves the tail register to pass ownership back to hw. to give all
+ * entries to hw, set TAIL = HEAD.  if HEAD and TAIL indicate that no
+ * more entries are available, DMA will pause and an interrupt will be
+ * generated to indicate no more entries are available.  sw can use
+ * this interrupt to reduce the # of times it must update the
+ * completion tail register.
+ * DEFAULT: 0 on reset
+ */
+#define  REG_RX_COMP_HEAD                  0x403C  /* RX completion head */
+#define  REG_RX_COMP_TAIL                  0x4040  /* RX completion tail */
+
+/* values used for receive interrupt blanking. loaded each time the ISR is read
+ * DEFAULT: 0x00000000
+ */
+#define  REG_RX_BLANK                      0x4044  /* RX blanking register
+                                                        for ISR read */
+#define    RX_BLANK_INTR_PKT_MASK          0x000001FF /* RX_DONE intr asserted if
+                                                        this many sets of completion
+                                                        writebacks (up to 2 packets)
+                                                        occur since the last time
+                                                        the ISR was read. 0 = no
+                                                        packet blanking */
+#define    RX_BLANK_INTR_PKT_SHIFT         0
+#define    RX_BLANK_INTR_TIME_MASK         0x3FFFF000 /* RX_DONE interrupt asserted
+                                                        if that many clocks were
+                                                        counted since last time the
+                                                        ISR was read.
+                                                        each count is 512 core
+                                                        clocks (125MHz). 0 = no
+                                                        time blanking */
+#define    RX_BLANK_INTR_TIME_SHIFT        12
+
+/* values used for interrupt generation based on threshold values of how
+ * many free desc and completion entries are available for hw use.
+ * DEFAULT: 0x00000000
+ */
+#define  REG_RX_AE_THRESH                  0x4048  /* RX almost empty
+                                                        thresholds */
+#define    RX_AE_THRESH_FREE_MASK          0x00001FFF /* RX_BUF_AE will be
+                                                        generated if # desc
+                                                        avail for hw use <=
+                                                        # */
+#define    RX_AE_THRESH_FREE_SHIFT         0
+#define    RX_AE_THRESH_COMP_MASK          0x0FFFE000 /* RX_COMP_AE will be
+                                                        generated if # of
+                                                        completion entries
+                                                        avail for hw use <=
+                                                        # */
+#define    RX_AE_THRESH_COMP_SHIFT         13
+
+/* probabilities for random early drop (RED) thresholds on a FIFO threshold
+ * basis. probability should increase when the FIFO level increases. control
+ * packets are never dropped and not counted in stats. probability programmed
+ * on a 12.5% granularity. e.g., 0x1 = 1/8 packets dropped.
+ * DEFAULT: 0x00000000
+ */
+#define  REG_RX_RED                      0x404C  /* RX random early detect enable */
+#define    RX_RED_4K_6K_FIFO_MASK        0x000000FF /*  4KB < FIFO thresh < 6KB */
+#define    RX_RED_6K_8K_FIFO_MASK        0x0000FF00 /*  6KB < FIFO thresh < 8KB */
+#define    RX_RED_8K_10K_FIFO_MASK       0x00FF0000 /*  8KB < FIFO thresh < 10KB */
+#define    RX_RED_10K_12K_FIFO_MASK      0xFF000000 /* 10KB < FIFO thresh < 12KB */
+
+/* FIFO fullness levels for RX FIFO, RX control FIFO, and RX IPP FIFO.
+ * RX control FIFO = # of packets in RX FIFO.
+ * DEFAULT: 0x0
+ */
+#define  REG_RX_FIFO_FULLNESS              0x4050  /* (ro) RX FIFO fullness */
+#define    RX_FIFO_FULLNESS_RX_FIFO_MASK   0x3FF80000 /* level w/ 8B granularity */
+#define    RX_FIFO_FULLNESS_IPP_FIFO_MASK  0x0007FF00 /* level w/ 8B granularity */
+#define    RX_FIFO_FULLNESS_RX_PKT_MASK    0x000000FF /* # packets in RX FIFO */
+#define  REG_RX_IPP_PACKET_COUNT           0x4054  /* RX IPP packet counter */
+#define  REG_RX_WORK_DMA_PTR_LOW           0x4058  /* RX working DMA ptr low */
+#define  REG_RX_WORK_DMA_PTR_HI            0x405C  /* RX working DMA ptr
+                                                     high */
+
+/* BIST testing of RX FIFO, RX control FIFO, and RX IPP FIFO. only RX BIST
+ * START/COMPLETE is writeable. START will clear when the BIST has completed
+ * checking all 17 RAMS.
+ * DEFAULT: 0bxxxx xxxxx xxxx xxxx xxxx x000 0000 0000 00x0
+ */
+#define  REG_RX_BIST                       0x4060  /* (ro) RX BIST */
+#define    RX_BIST_32A_PASS                0x80000000 /* RX FIFO 32A passed */
+#define    RX_BIST_33A_PASS                0x40000000 /* RX FIFO 33A passed */
+#define    RX_BIST_32B_PASS                0x20000000 /* RX FIFO 32B passed */
+#define    RX_BIST_33B_PASS                0x10000000 /* RX FIFO 33B passed */
+#define    RX_BIST_32C_PASS                0x08000000 /* RX FIFO 32C passed */
+#define    RX_BIST_33C_PASS                0x04000000 /* RX FIFO 33C passed */
+#define    RX_BIST_IPP_32A_PASS            0x02000000 /* RX IPP FIFO 32A passed */
+#define    RX_BIST_IPP_33A_PASS            0x01000000 /* RX IPP FIFO 33A passed */
+#define    RX_BIST_IPP_32B_PASS            0x00800000 /* RX IPP FIFO 32B passed */
+#define    RX_BIST_IPP_33B_PASS            0x00400000 /* RX IPP FIFO 33B passed */
+#define    RX_BIST_IPP_32C_PASS            0x00200000 /* RX IPP FIFO 32C passed */
+#define    RX_BIST_IPP_33C_PASS            0x00100000 /* RX IPP FIFO 33C passed */
+/* NOTE(review): the CTRL/REAS pass bits below reuse the same bit values as
+ * the IPP 32B/33B/32C/33C bits above -- looks suspicious but matches the
+ * historical header; verify against the Cassini register spec. */
+#define    RX_BIST_CTRL_32_PASS            0x00800000 /* RX CTRL FIFO 32 passed */
+#define    RX_BIST_CTRL_33_PASS            0x00400000 /* RX CTRL FIFO 33 passed */
+#define    RX_BIST_REAS_26A_PASS           0x00200000 /* RX Reas 26A passed */
+#define    RX_BIST_REAS_26B_PASS           0x00100000 /* RX Reas 26B passed */
+#define    RX_BIST_REAS_27_PASS            0x00080000 /* RX Reas 27 passed */
+#define    RX_BIST_STATE_MASK              0x00078000 /* BIST state machine */
+#define    RX_BIST_SUMMARY                 0x00000002 /* when BIST complete,
+                                                        summary pass bit
+                                                        contains AND of BIST
+                                                        results of all 16
+                                                        RAMS */
+#define    RX_BIST_START                   0x00000001 /* write 1 to start
+                                                        BIST. self clears
+                                                        on completion. */
+
+/* next location in RX CTRL FIFO that will be loaded w/ data from RX IPP/read
+ * from to retrieve packet control info.
+ * DEFAULT: 0
+ */
+#define  REG_RX_CTRL_FIFO_WRITE_PTR        0x4064  /* (ro) RX control FIFO
+                                                     write ptr */
+#define  REG_RX_CTRL_FIFO_READ_PTR         0x4068  /* (ro) RX control FIFO read
+                                                     ptr */
+
+/* receive interrupt blanking. loaded each time interrupt alias register is
+ * read.
+ * DEFAULT: 0x0
+ */
+#define  REG_RX_BLANK_ALIAS_READ           0x406C  /* RX blanking register for
+                                                     alias read */
+#define    RX_BAR_INTR_PACKET_MASK         0x000001FF /* assert RX_DONE if #
+                                                        completion writebacks
+                                                        > # since last ISR
+                                                        read. 0 = no
+                                                        blanking. up to 2
+                                                        packets per
+                                                        completion wb. */
+#define    RX_BAR_INTR_TIME_MASK           0x3FFFF000 /* assert RX_DONE if #
+                                                        clocks > # since last
+                                                        ISR read. each count
+                                                        is 512 core clocks
+                                                        (125MHz). 0 = no
+                                                        blanking. */
+
+/* diagnostic access to RX FIFO. 32 LSB accessed via DATA_LOW. 32 MSB accessed
+ * via DATA_HI_T0 or DATA_HI_T1. TAG reads the tag bit. writing HI_T0
+ * will unset the tag bit while writing HI_T1 will set the tag bit. to reset
+ * to normal operation after diagnostics, write to address location 0x0.
+ * RX_DMA_EN bit must be set to 0x0 for RX FIFO PIO access. DATA_HI should
+ * be the last write access of a write sequence.
+ * DEFAULT: undefined
+ */
+#define  REG_RX_FIFO_ADDR                  0x4080  /* RX FIFO address */
+#define  REG_RX_FIFO_TAG                   0x4084  /* RX FIFO tag */
+#define  REG_RX_FIFO_DATA_LOW              0x4088  /* RX FIFO data low */
+#define  REG_RX_FIFO_DATA_HI_T0            0x408C  /* RX FIFO data high T0 */
+#define  REG_RX_FIFO_DATA_HI_T1            0x4090  /* RX FIFO data high T1 */
+
+/* diagnostic access to RX CTRL FIFO. 8-bit FIFO_ADDR holds address of
+ * 81 bit control entry and 6 bit flow id. LOW and MID are both 32-bit
+ * accesses. HI is 7-bits with 6-bit flow id and 1 bit control
+ * word. RX_DMA_EN must be 0 for RX CTRL FIFO PIO access. DATA_HI
+ * should be last write access of the write sequence.
+ * DEFAULT: undefined
+ */
+#define  REG_RX_CTRL_FIFO_ADDR             0x4094  /* RX Control FIFO and
+                                                     Batching FIFO addr */
+#define  REG_RX_CTRL_FIFO_DATA_LOW         0x4098  /* RX Control FIFO data
+                                                     low */
+#define  REG_RX_CTRL_FIFO_DATA_MID         0x409C  /* RX Control FIFO data
+                                                     mid */
+#define  REG_RX_CTRL_FIFO_DATA_HI          0x4100  /* RX Control FIFO data
+                                                     hi and flow id */
+#define    RX_CTRL_FIFO_DATA_HI_CTRL       0x0001  /* upper bit of ctrl word */
+#define    RX_CTRL_FIFO_DATA_HI_FLOW_MASK  0x007E  /* flow id */
+
+/* diagnostic access to RX IPP FIFO. same semantics as RX_FIFO.
+ * DEFAULT: undefined
+ */
+#define  REG_RX_IPP_FIFO_ADDR              0x4104  /* RX IPP FIFO address */
+#define  REG_RX_IPP_FIFO_TAG               0x4108  /* RX IPP FIFO tag */
+#define  REG_RX_IPP_FIFO_DATA_LOW          0x410C  /* RX IPP FIFO data low */
+#define  REG_RX_IPP_FIFO_DATA_HI_T0        0x4110  /* RX IPP FIFO data high
+                                                     T0 */
+#define  REG_RX_IPP_FIFO_DATA_HI_T1        0x4114  /* RX IPP FIFO data high
+                                                     T1 */
+
+/* 64-bit pointer to receive data buffer in host memory used for headers and
+ * small packets. MSB in high register. loaded by DMA state machine and
+ * increments as DMA writes receive data. only 50 LSB are incremented. top
+ * 13 bits taken from RX descriptor.
+ * DEFAULT: undefined
+ */
+#define  REG_RX_HEADER_PAGE_PTR_LOW        0x4118  /* (ro) RX header page ptr
+                                                     low */
+#define  REG_RX_HEADER_PAGE_PTR_HI         0x411C  /* (ro) RX header page ptr
+                                                     high */
+#define  REG_RX_MTU_PAGE_PTR_LOW           0x4120  /* (ro) RX MTU page pointer
+                                                     low */
+#define  REG_RX_MTU_PAGE_PTR_HI            0x4124  /* (ro) RX MTU page pointer
+                                                     high */
+
+/* PIO diagnostic access to RX reassembly DMA Table RAM. 6-bit register holds
+ * one of 64 79-bit locations in the RX Reassembly DMA table and the addr of
+ * one of the 64 byte locations in the Batching table. LOW holds 32 LSB.
+ * MID holds the next 32 LSB. HIGH holds the 15 MSB. RX_DMA_EN must be set
+ * to 0 for PIO access. DATA_HIGH should be last write of write sequence.
+ * layout:
+ * reassembly ptr [78:15] | reassembly index [14:1] | reassembly entry valid [0]
+ * DEFAULT: undefined
+ */
+#define  REG_RX_TABLE_ADDR             0x4128  /* RX reassembly DMA table
+                                                 address */
+#define    RX_TABLE_ADDR_MASK          0x0000003F /* address mask */
+
+#define  REG_RX_TABLE_DATA_LOW         0x412C  /* RX reassembly DMA table
+                                                 data low */
+#define  REG_RX_TABLE_DATA_MID         0x4130  /* RX reassembly DMA table
+                                                 data mid */
+#define  REG_RX_TABLE_DATA_HI          0x4134  /* RX reassembly DMA table
+                                                 data high */
+
+/* cassini+ only */
+/* 8KB aligned 64-bit pointer to base of RX rings. lower 13 bits hardwired to
+ * 0. same semantics as primary desc/complete rings.
+ */
+#define  REG_PLUS_RX_DB1_LOW            0x4200  /* RX descriptor ring
+                                                  2 base low */
+#define  REG_PLUS_RX_DB1_HI             0x4204  /* RX descriptor ring
+                                                  2 base high */
+#define  REG_PLUS_RX_CB1_LOW            0x4208  /* RX completion ring
+                                                  2 base low. 4 total */
+#define  REG_PLUS_RX_CB1_HI             0x420C  /* RX completion ring
+                                                  2 base high. 4 total */
+#define  REG_PLUS_RX_CBN_LOW(x)        (REG_PLUS_RX_CB1_LOW + 8*((x) - 1))
+#define  REG_PLUS_RX_CBN_HI(x)         (REG_PLUS_RX_CB1_HI + 8*((x) - 1))
+#define  REG_PLUS_RX_KICK1             0x4220  /* RX Kick 2 register */
+#define  REG_PLUS_RX_COMP1             0x4224  /* (ro) RX completion 2
+                                                 reg */
+#define  REG_PLUS_RX_COMP1_HEAD        0x4228  /* (ro) RX completion 2
+                                                 head reg. 4 total. */
+#define  REG_PLUS_RX_COMP1_TAIL        0x422C  /* RX completion 2
+                                                 tail reg. 4 total. */
+#define  REG_PLUS_RX_COMPN_HEAD(x)    (REG_PLUS_RX_COMP1_HEAD + 8*((x) - 1))
+#define  REG_PLUS_RX_COMPN_TAIL(x)    (REG_PLUS_RX_COMP1_TAIL + 8*((x) - 1))
+#define  REG_PLUS_RX_AE1_THRESH        0x4240  /* RX almost empty 2
+                                                 thresholds */
+#define    RX_AE1_THRESH_FREE_MASK     RX_AE_THRESH_FREE_MASK
+#define    RX_AE1_THRESH_FREE_SHIFT    RX_AE_THRESH_FREE_SHIFT
+
+/** header parser registers **/
+
+/* RX parser configuration register.
+ * DEFAULT: 0x1651004
+ */
+#define  REG_HP_CFG                       0x4140  /* header parser
+                                                    configuration reg */
+#define    HP_CFG_PARSE_EN                0x00000001 /* enab header parsing */
+#define    HP_CFG_NUM_CPU_MASK            0x000000FC /* # processors
+                                                     0 = 64. 0x3f = 63 */
+#define    HP_CFG_NUM_CPU_SHIFT           2
+#define    HP_CFG_SYN_INC_MASK            0x00000100 /* SYN bit won't increment
+                                                       TCP seq # by one when
+                                                       stored in FDBM */
+#define    HP_CFG_TCP_THRESH_MASK         0x000FFE00 /* # bytes of TCP data
+                                                       needed to be considered
+                                                       for reassembly */
+#define    HP_CFG_TCP_THRESH_SHIFT        9
+
+/* access to RX Instruction RAM. 5-bit register/counter holds addr
+ * of 39 bit entry to be read/written. 32 LSB in _DATA_LOW. 7 MSB in _DATA_HI.
+ * RX_DMA_EN must be 0 for RX instr PIO access. DATA_HI should be last access
+ * of sequence.
+ * DEFAULT: undefined
+ */
+#define  REG_HP_INSTR_RAM_ADDR             0x4144  /* HP instruction RAM
+                                                     address */
+#define    HP_INSTR_RAM_ADDR_MASK          0x01F   /* 5-bit mask */
+#define  REG_HP_INSTR_RAM_DATA_LOW         0x4148  /* HP instruction RAM
+                                                     data low */
+#define    HP_INSTR_RAM_LOW_OUTMASK_MASK   0x0000FFFF
+#define    HP_INSTR_RAM_LOW_OUTMASK_SHIFT  0
+#define    HP_INSTR_RAM_LOW_OUTSHIFT_MASK  0x000F0000
+#define    HP_INSTR_RAM_LOW_OUTSHIFT_SHIFT 16
+#define    HP_INSTR_RAM_LOW_OUTEN_MASK     0x00300000
+#define    HP_INSTR_RAM_LOW_OUTEN_SHIFT    20
+#define    HP_INSTR_RAM_LOW_OUTARG_MASK    0xFFC00000
+#define    HP_INSTR_RAM_LOW_OUTARG_SHIFT   22
+#define  REG_HP_INSTR_RAM_DATA_MID         0x414C  /* HP instruction RAM
+                                                     data mid */
+#define    HP_INSTR_RAM_MID_OUTARG_MASK    0x00000003
+#define    HP_INSTR_RAM_MID_OUTARG_SHIFT   0
+#define    HP_INSTR_RAM_MID_OUTOP_MASK     0x0000003C
+#define    HP_INSTR_RAM_MID_OUTOP_SHIFT    2
+#define    HP_INSTR_RAM_MID_FNEXT_MASK     0x000007C0
+#define    HP_INSTR_RAM_MID_FNEXT_SHIFT    6
+#define    HP_INSTR_RAM_MID_FOFF_MASK      0x0003F800
+#define    HP_INSTR_RAM_MID_FOFF_SHIFT     11
+#define    HP_INSTR_RAM_MID_SNEXT_MASK     0x007C0000
+#define    HP_INSTR_RAM_MID_SNEXT_SHIFT    18
+#define    HP_INSTR_RAM_MID_SOFF_MASK      0x3F800000
+#define    HP_INSTR_RAM_MID_SOFF_SHIFT     23
+#define    HP_INSTR_RAM_MID_OP_MASK        0xC0000000
+#define    HP_INSTR_RAM_MID_OP_SHIFT       30
+#define  REG_HP_INSTR_RAM_DATA_HI          0x4150  /* HP instruction RAM
+                                                     data high */
+#define    HP_INSTR_RAM_HI_VAL_MASK        0x0000FFFF
+#define    HP_INSTR_RAM_HI_VAL_SHIFT       0
+#define    HP_INSTR_RAM_HI_MASK_MASK       0xFFFF0000
+#define    HP_INSTR_RAM_HI_MASK_SHIFT      16
+
+/* PIO access into RX Header parser data RAM and flow database.
+ * 11-bit register. Data fills the LSB portion of bus if less than 32 bits.
+ * DATA_RAM: write RAM_FDB_DATA with index to access DATA_RAM.
+ * RAM bytes = 4*(x - 1) + [3:0]. e.g., 0 -> [3:0], 31 -> [123:120]
+ * FLOWDB: write DATA_RAM_FDB register and then read/write FDB1-12 to access
+ * flow database.
+ * RX_DMA_EN must be 0 for RX parser RAM PIO access. RX Parser RAM data reg
+ * should be the last write access of the write sequence.
+ * DEFAULT: undefined
+ */
+#define  REG_HP_DATA_RAM_FDB_ADDR          0x4154  /* HP data and FDB
+                                                     RAM address */
+#define    HP_DATA_RAM_FDB_DATA_MASK       0x001F  /* select 1 of 86 byte
+                                                     locations in header
+                                                     parser data ram to
+                                                     read/write */
+#define    HP_DATA_RAM_FDB_FDB_MASK        0x3F00  /* 1 of 64 353-bit locations
+                                                     in the flow database */
+#define  REG_HP_DATA_RAM_DATA              0x4158  /* HP data RAM data */
+
+/* HP flow database registers: 1 - 12, 0x415C - 0x4188, 4 8-bit bytes
+ * FLOW_DB(1) = IP_SA[127:96], FLOW_DB(2) = IP_SA[95:64]
+ * FLOW_DB(3) = IP_SA[63:32],  FLOW_DB(4) = IP_SA[31:0]
+ * FLOW_DB(5) = IP_DA[127:96], FLOW_DB(6) = IP_DA[95:64]
+ * FLOW_DB(7) = IP_DA[63:32],  FLOW_DB(8) = IP_DA[31:0]
+ * FLOW_DB(9) = {TCP_SP[15:0],TCP_DP[15:0]}
+ * FLOW_DB(10) = bit 0 has value for flow valid
+ * FLOW_DB(11) = TCP_SEQ[63:32], FLOW_DB(12) = TCP_SEQ[31:0]
+ */
+#define  REG_HP_FLOW_DB0                   0x415C  /* HP flow database 1 reg */
+#define  REG_HP_FLOW_DBN(x)                (REG_HP_FLOW_DB0 + (x)*4)
+
+/* diagnostics for RX Header Parser block.
+ * ASUN: the header parser state machine register is used for diagnostics
+ * purposes. however, the spec doesn't have any details on it.
+ */
+#define  REG_HP_STATE_MACHINE              0x418C  /* (ro) HP state machine */
+#define  REG_HP_STATUS0                    0x4190  /* (ro) HP status 1 */
+#define    HP_STATUS0_SAP_MASK             0xFFFF0000 /* SAP */
+#define    HP_STATUS0_L3_OFF_MASK          0x0000FE00 /* L3 offset */
+#define    HP_STATUS0_LB_CPUNUM_MASK       0x000001F8 /* load balancing CPU
+                                                        number */
+#define    HP_STATUS0_HRP_OPCODE_MASK      0x00000007 /* HRP opcode */
+
+#define  REG_HP_STATUS1                    0x4194  /* (ro) HP status 2 */
+#define    HP_STATUS1_ACCUR2_MASK          0xE0000000 /* accu R2[6:4] */
+#define    HP_STATUS1_FLOWID_MASK          0x1F800000 /* flow id */
+#define    HP_STATUS1_TCP_OFF_MASK         0x007F0000 /* tcp payload offset */
+#define    HP_STATUS1_TCP_SIZE_MASK        0x0000FFFF /* tcp payload size */
+
+#define  REG_HP_STATUS2                    0x4198  /* (ro) HP status 3 */
+#define    HP_STATUS2_ACCUR2_MASK          0xF0000000 /* accu R2[3:0] */
+#define    HP_STATUS2_CSUM_OFF_MASK        0x07F00000 /* checksum start
+                                                        offset */
+#define    HP_STATUS2_ACCUR1_MASK          0x000FE000 /* accu R1 */
+#define    HP_STATUS2_FORCE_DROP           0x00001000 /* force drop */
+#define    HP_STATUS2_BWO_REASSM           0x00000800 /* batching w/o
+                                                        reassembly */
+#define    HP_STATUS2_JH_SPLIT_EN          0x00000400 /* jumbo header split
+                                                        enable */
+#define    HP_STATUS2_FORCE_TCP_NOCHECK    0x00000200 /* force tcp no payload
+                                                        check */
+#define    HP_STATUS2_DATA_MASK_ZERO       0x00000100 /* mask of data length
+                                                        equal to zero */
+#define    HP_STATUS2_FORCE_TCP_CHECK      0x00000080 /* force tcp payload
+                                                        chk */
+#define    HP_STATUS2_MASK_TCP_THRESH      0x00000040 /* mask of payload
+                                                        threshold */
+#define    HP_STATUS2_NO_ASSIST            0x00000020 /* no assist */
+#define    HP_STATUS2_CTRL_PACKET_FLAG     0x00000010 /* control packet flag */
+#define    HP_STATUS2_TCP_FLAG_CHECK       0x00000008 /* tcp flag check */
+#define    HP_STATUS2_SYN_FLAG             0x00000004 /* syn flag */
+#define    HP_STATUS2_TCP_CHECK            0x00000002 /* tcp payload chk */
+#define    HP_STATUS2_TCP_NOCHECK          0x00000001 /* tcp no payload chk */
+
+/* BIST for header parser(HP) and flow database memories (FDBM). set _START
+ * to start BIST. controller clears _START on completion. _START can also
+ * be cleared to force termination of BIST. a bit set indicates that that
+ * memory passed its BIST.
+ */
+#define  REG_HP_RAM_BIST                   0x419C  /* HP RAM BIST reg */
+#define    HP_RAM_BIST_HP_DATA_PASS        0x80000000 /* HP data ram */
+#define    HP_RAM_BIST_HP_INSTR0_PASS      0x40000000 /* HP instr ram 0 */
+#define    HP_RAM_BIST_HP_INSTR1_PASS      0x20000000 /* HP instr ram 1 */
+#define    HP_RAM_BIST_HP_INSTR2_PASS      0x10000000 /* HP instr ram 2 */
+#define    HP_RAM_BIST_FDBM_AGE0_PASS      0x08000000 /* FDBM aging RAM0 */
+#define    HP_RAM_BIST_FDBM_AGE1_PASS      0x04000000 /* FDBM aging RAM1 */
+#define    HP_RAM_BIST_FDBM_FLOWID00_PASS  0x02000000 /* FDBM flowid RAM0
+                                                        bank 0 */
+#define    HP_RAM_BIST_FDBM_FLOWID10_PASS  0x01000000 /* FDBM flowid RAM1
+                                                        bank 0 */
+#define    HP_RAM_BIST_FDBM_FLOWID20_PASS  0x00800000 /* FDBM flowid RAM2
+                                                        bank 0 */
+#define    HP_RAM_BIST_FDBM_FLOWID30_PASS  0x00400000 /* FDBM flowid RAM3
+                                                        bank 0 */
+#define    HP_RAM_BIST_FDBM_FLOWID01_PASS  0x00200000 /* FDBM flowid RAM0
+                                                        bank 1 */
+#define    HP_RAM_BIST_FDBM_FLOWID11_PASS  0x00100000 /* FDBM flowid RAM1
+                                                        bank 1 */
+#define    HP_RAM_BIST_FDBM_FLOWID21_PASS  0x00080000 /* FDBM flowid RAM2
+                                                        bank 1 */
+#define    HP_RAM_BIST_FDBM_FLOWID31_PASS  0x00040000 /* FDBM flowid RAM3
+                                                        bank 1 */
+#define    HP_RAM_BIST_FDBM_TCPSEQ_PASS    0x00020000 /* FDBM tcp sequence
+                                                        RAM */
+#define    HP_RAM_BIST_SUMMARY             0x00000002 /* all BIST tests */
+#define    HP_RAM_BIST_START               0x00000001 /* start/stop BIST */
+
+
+/** MAC registers.  **/
+/* reset bits are set using a PIO write and self-cleared after the command
+ * execution has completed.
+ */
+#define  REG_MAC_TX_RESET                  0x6000  /* TX MAC software reset
+                                                     command (default: 0x0) */
+#define  REG_MAC_RX_RESET                  0x6004  /* RX MAC software reset
+                                                     command (default: 0x0) */
+/* execute a pause flow control frame transmission
+ DEFAULT: 0x0XXXX */
+#define  REG_MAC_SEND_PAUSE                0x6008  /* send pause command reg */
+#define    MAC_SEND_PAUSE_TIME_MASK        0x0000FFFF /* value of pause time
+                                                        to be sent on network
+                                                        in units of slot
+                                                        times */
+#define    MAC_SEND_PAUSE_SEND             0x00010000 /* send pause flow ctrl
+                                                        frame on network */
+
+/* bit set indicates that event occurred. auto-cleared when status register
+ * is read and have corresponding mask bits in mask register. events will
+ * trigger an interrupt if the corresponding mask bit is 0.
+ * status register default: 0x00000000
+ * mask register default = 0xFFFFFFFF on reset
+ */
+#define  REG_MAC_TX_STATUS                 0x6010  /* TX MAC status reg */
+#define    MAC_TX_FRAME_XMIT               0x0001  /* successful frame
+                                                     transmission */
+#define    MAC_TX_UNDERRUN                 0x0002  /* terminated frame
+                                                     transmission due to
+                                                     data starvation in the
+                                                     xmit data path */
+#define    MAC_TX_MAX_PACKET_ERR           0x0004  /* frame exceeds max allowed
+                                                     length passed to TX MAC
+                                                     by the DMA engine */
+#define    MAC_TX_COLL_NORMAL              0x0008  /* rollover of the normal
+                                                     collision counter */
+#define    MAC_TX_COLL_EXCESS              0x0010  /* rollover of the excessive
+                                                     collision counter */
+#define    MAC_TX_COLL_LATE                0x0020  /* rollover of the late
+                                                     collision counter */
+#define    MAC_TX_COLL_FIRST               0x0040  /* rollover of the first
+                                                     collision counter */
+#define    MAC_TX_DEFER_TIMER              0x0080  /* rollover of the defer
+                                                     timer */
+#define    MAC_TX_PEAK_ATTEMPTS            0x0100  /* rollover of the peak
+                                                     attempts counter */
+
+#define  REG_MAC_RX_STATUS                 0x6014  /* RX MAC status reg */
+#define    MAC_RX_FRAME_RECV               0x0001  /* successful receipt of
+                                                     a frame */
+#define    MAC_RX_OVERFLOW                 0x0002  /* dropped frame due to
+                                                     RX FIFO overflow */
+#define    MAC_RX_FRAME_COUNT              0x0004  /* rollover of receive frame
+                                                     counter */
+#define    MAC_RX_ALIGN_ERR                0x0008  /* rollover of alignment
+                                                     error counter */
+#define    MAC_RX_CRC_ERR                  0x0010  /* rollover of crc error
+                                                     counter */
+#define    MAC_RX_LEN_ERR                  0x0020  /* rollover of length
+                                                     error counter */
+#define    MAC_RX_VIOL_ERR                 0x0040  /* rollover of code
+                                                     violation error */
+
+/* DEFAULT: 0xXXXX0000 on reset */
+#define  REG_MAC_CTRL_STATUS               0x6018  /* MAC control status reg */
+#define    MAC_CTRL_PAUSE_RECEIVED         0x00000001  /* successful
+                                                         reception of a
+                                                         pause control
+                                                         frame */
+#define    MAC_CTRL_PAUSE_STATE            0x00000002  /* MAC has made a
+                                                         transition from
+                                                         "not paused" to
+                                                         "paused" */
+#define    MAC_CTRL_NOPAUSE_STATE          0x00000004  /* MAC has made a
+                                                         transition from
+                                                         "paused" to "not
+                                                         paused" */
+#define    MAC_CTRL_PAUSE_TIME_MASK        0xFFFF0000  /* value of pause time
+                                                         operand that was
+                                                         received in the last
+                                                         pause flow control
+                                                         frame */
+
+/* layout identical to TX MAC[8:0] */
+#define  REG_MAC_TX_MASK                   0x6020  /* TX MAC mask reg */
+/* layout identical to RX MAC[6:0] */
+#define  REG_MAC_RX_MASK                   0x6024  /* RX MAC mask reg */
+/* layout identical to CTRL MAC[2:0] */
+#define  REG_MAC_CTRL_MASK                 0x6028  /* MAC control mask reg */
+
+/* to ensure proper operation, CFG_EN must be cleared to 0 and a delay
+ * imposed before writes to other bits in the TX_MAC_CFG register or any of
+ * the MAC parameters is performed. delay dependent upon time required to
+ * transmit a maximum size frame (= MAC_FRAMESIZE_MAX*8/Mbps). e.g.,
+ * the delay for a 1518-byte frame on a 100Mbps network is 125us.
+ * alternatively, just poll TX_CFG_EN until it reads back as 0.
+ * NOTE: on half-duplex 1Gbps, TX_CFG_CARRIER_EXTEND and
+ * RX_CFG_CARRIER_EXTEND should be set and the SLOT_TIME register should
+ * be 0x200 (slot time of 512 bytes)
+ */
+#define  REG_MAC_TX_CFG                 0x6030  /* TX MAC config reg */
+#define    MAC_TX_CFG_EN                0x0001  /* enable TX MAC. 0 will
+                                                     force TXMAC state
+                                                     machine to remain in
+                                                     idle state or to
+                                                     transition to idle state
+                                                     on completion of an
+                                                     ongoing packet. */
+#define    MAC_TX_CFG_IGNORE_CARRIER    0x0002  /* disable CSMA/CD deferral
+                                                  process. set to 1 when
+                                                  full duplex and 0 when
+                                                  half duplex */
+#define    MAC_TX_CFG_IGNORE_COLL       0x0004  /* disable CSMA/CD backoff
+                                                  algorithm. set to 1 when
+                                                  full duplex and 0 when
+                                                  half duplex */
+#define    MAC_TX_CFG_IPG_EN            0x0008  /* enable extension of the
+                                                  Rx-to-TX IPG. after
+                                                  receiving a frame, TX
+                                                  MAC will reset its
+                                                  deferral process to
+                                                  carrier sense for the
+                                                  amount of time = IPG0 +
+                                                  IPG1 and commit to
+                                                  transmission for time
+                                                  specified in IPG2. when
+                                                  0 or when xmitting frames
+                                                  back-to-back (Tx-to-Tx
+                                                  IPG), TX MAC ignores
+                                                  IPG0 and will only use
+                                                  IPG1 for deferral time.
+                                                  IPG2 still used. */
+#define    MAC_TX_CFG_NEVER_GIVE_UP_EN  0x0010  /* TX MAC will not easily
+                                                  give up on frame
+                                                  xmission. if backoff
+                                                  algorithm reaches the
+                                                  ATTEMPT_LIMIT, it will
+                                                  clear attempts counter
+                                                  and continue trying to
+                                                  send the frame as
+                                                  specified by
+                                                  GIVE_UP_LIM. when 0,
+                                                  TX MAC will execute
+                                                  standard CSMA/CD prot. */
+#define    MAC_TX_CFG_NEVER_GIVE_UP_LIM 0x0020  /* when set, TX MAC will
+                                                  continue to try to xmit
+                                                  until successful. when
+                                                  0, TX MAC will continue
+                                                  to try xmitting until
+                                                  successful or backoff
+                                                  algorithm reaches
+                                                  ATTEMPT_LIMIT*16 */
+#define    MAC_TX_CFG_NO_BACKOFF        0x0040  /* modify CSMA/CD to disable
+                                                  backoff algorithm. TX
+                                                  MAC will not back off
+                                                  after a xmission attempt
+                                                  that resulted in a
+                                                  collision. */
+#define    MAC_TX_CFG_SLOW_DOWN         0x0080  /* modify CSMA/CD so that
+                                                  deferral process is reset
+                                                  in response to carrier
+                                                  sense during the entire
+                                                  duration of IPG. TX MAC
+                                                  will only commit to frame
+                                                  xmission after frame
+                                                  xmission has actually
+                                                  begun. */
+#define    MAC_TX_CFG_NO_FCS            0x0100  /* TX MAC will not generate
+                                                  CRC for all xmitted
+                                                  packets. when clear, CRC
+                                                  generation is dependent
+                                                  upon NO_CRC bit in the
+                                                  xmit control word from
+                                                  TX DMA */
+#define    MAC_TX_CFG_CARRIER_EXTEND    0x0200  /* enables xmit part of the
+                                                  carrier extension
+                                                  feature. this allows for
+                                                  longer collision domains
+                                                  by extending the carrier
+                                                  and collision window
+                                                  from the end of FCS until
+                                                  the end of the slot time
+                                                  if necessary. Required
+                                                  for half-duplex at 1Gbps,
+                                                  clear otherwise. */
+
+/* when CRC is not stripped, reassembly packets will not contain the CRC.
+ * these will be stripped by HRP because it reassembles layer 4 data, and the
+ * CRC is layer 2. however, non-reassembly packets will still contain the CRC
+ * when passed to the host. to ensure proper operation, need to wait 3.2ms
+ * after clearing RX_CFG_EN before writing to any other RX MAC registers
+ * or other MAC parameters. alternatively, poll RX_CFG_EN until it clears
+ * to 0. similarly, HASH_FILTER_EN and ADDR_FILTER_EN have the same
+ * restrictions as CFG_EN.
+ */
+#define  REG_MAC_RX_CFG                 0x6034  /* RX MAC config reg */
+#define    MAC_RX_CFG_EN                0x0001  /* enable RX MAC */
+#define    MAC_RX_CFG_STRIP_PAD         0x0002  /* always program to 0.
+                                                  feature not supported */
+#define    MAC_RX_CFG_STRIP_FCS         0x0004  /* RX MAC will strip the
+                                                  last 4 bytes of a
+                                                  received frame. */
+#define    MAC_RX_CFG_PROMISC_EN        0x0008  /* promiscuous mode */
+#define    MAC_RX_CFG_PROMISC_GROUP_EN  0x0010  /* accept all valid
+                                                  multicast frames (group
+                                                  bit in DA field set) */
+#define    MAC_RX_CFG_HASH_FILTER_EN    0x0020  /* use hash table to filter
+                                                  multicast addresses */
+#define    MAC_RX_CFG_ADDR_FILTER_EN    0x0040  /* cause RX MAC to use
+                                                  address filtering regs
+                                                  to filter both unicast
+                                                  and multicast
+                                                  addresses */
+#define    MAC_RX_CFG_DISABLE_DISCARD   0x0080  /* pass errored frames to
+                                                  RX DMA by setting BAD
+                                                  bit but not Abort bit
+                                                  in the status. CRC,
+                                                  framing, and length errs
+                                                  will not increment
+                                                  error counters. frames
+                                                  which don't match dest
+                                                  addr will be passed up
+                                                  w/ BAD bit set. */
+#define    MAC_RX_CFG_CARRIER_EXTEND    0x0100  /* enable reception of
+                                                  packet bursts generated
+                                                  by carrier extension
+                                                  with packet bursting
+                                                  senders. only applies
+                                                  to half-duplex 1Gbps */
+
+/* DEFAULT: 0x0 */
+#define  REG_MAC_CTRL_CFG               0x6038  /* MAC control config reg */
+#define    MAC_CTRL_CFG_SEND_PAUSE_EN   0x0001  /* respond to requests for
+                                                  sending pause flow ctrl
+                                                  frames */
+#define    MAC_CTRL_CFG_RECV_PAUSE_EN   0x0002  /* respond to received
+                                                  pause flow ctrl frames */
+#define    MAC_CTRL_CFG_PASS_CTRL       0x0004  /* pass valid MAC ctrl
+                                                  packets to RX DMA */
+
+/* to ensure proper operation, a global initialization sequence should be
+ * performed when a loopback config is entered or exited. if programmed after
+ * a hw or global sw reset, RX/TX MAC software reset and initialization
+ * should be done to ensure stable clocking.
+ * DEFAULT: 0x0
+ */
+#define  REG_MAC_XIF_CFG                0x603C  /* XIF config reg */
+#define    MAC_XIF_TX_MII_OUTPUT_EN        0x0001  /* enable output drivers
+                                                     on MII xmit bus */
+#define    MAC_XIF_MII_INT_LOOPBACK        0x0002  /* loopback GMII xmit data
+                                                     path to GMII recv data
+                                                     path. phy mode register
+                                                     clock selection must be
+                                                     set to GMII mode and
+                                                     GMII_MODE should be set
+                                                     to 1. in loopback mode,
+                                                     REFCLK will drive the
+                                                     entire mac core. 0 for
+                                                     normal operation. */
+#define    MAC_XIF_DISABLE_ECHO            0x0004  /* disables receive data
+                                                     path during packet
+                                                     xmission. clear to 0
+                                                     in any full duplex mode,
+                                                     in any loopback mode,
+                                                     or in half-duplex SERDES
+                                                     or SLINK modes. set when
+                                                     in half-duplex when
+                                                     using external phy. */
+#define    MAC_XIF_GMII_MODE               0x0008  /* MAC operates with GMII
+                                                     clocks and datapath */
+#define    MAC_XIF_MII_BUFFER_OUTPUT_EN    0x0010  /* MII_BUF_EN pin. enable
+                                                     external tristate buffer
+                                                     on the MII receive
+                                                     bus. */
+#define    MAC_XIF_LINK_LED                0x0020  /* LINKLED# active (low) */
+#define    MAC_XIF_FDPLX_LED               0x0040  /* FDPLXLED# active (low) */
+
+#define  REG_MAC_IPG0                      0x6040  /* inter-packet gap0 reg.
+                                                     recommended: 0x00 */
+#define  REG_MAC_IPG1                      0x6044  /* inter-packet gap1 reg
+                                                     recommended: 0x08 */
+#define  REG_MAC_IPG2                      0x6048  /* inter-packet gap2 reg
+                                                     recommended: 0x04 */
+#define  REG_MAC_SLOT_TIME                 0x604C  /* slot time reg
+                                                     recommended: 0x40 */
+#define  REG_MAC_FRAMESIZE_MIN             0x6050  /* min frame size reg
+                                                     recommended: 0x40 */
+
+/* FRAMESIZE_MAX holds both the max frame size as well as the max burst size.
+ * recommended value:  0x2000.05EE
+ */
+#define  REG_MAC_FRAMESIZE_MAX             0x6054  /* max frame size reg */
+#define    MAC_FRAMESIZE_MAX_BURST_MASK    0x3FFF0000 /* max burst size */
+#define    MAC_FRAMESIZE_MAX_BURST_SHIFT   16
+#define    MAC_FRAMESIZE_MAX_FRAME_MASK    0x00007FFF /* max frame size */
+#define    MAC_FRAMESIZE_MAX_FRAME_SHIFT   0
+#define  REG_MAC_PA_SIZE                   0x6058  /* PA size reg. number of
+                                                     preamble bytes that the
+                                                     TX MAC will xmit at the
+                                                     beginning of each frame
+                                                     value should be 2 or
+                                                     greater. recommended
+                                                     value: 0x07 */
+#define  REG_MAC_JAM_SIZE                  0x605C  /* jam size reg. duration
+                                                     of jam in units of media
+                                                     byte time. recommended
+                                                     value: 0x04 */
+#define  REG_MAC_ATTEMPT_LIMIT             0x6060  /* attempt limit reg. #
+                                                     of attempts TX MAC will
+                                                     make to xmit a frame
+                                                     before it resets its
+                                                     attempts counter. after
+                                                     the limit has been
+                                                     reached, TX MAC may or
+                                                     may not drop the frame
+                                                     dependent upon value
+                                                     in TX_MAC_CFG.
+                                                     recommended
+                                                     value: 0x10 */
+#define  REG_MAC_CTRL_TYPE                 0x6064  /* MAC control type reg.
+                                                     type field of a MAC
+                                                     ctrl frame. recommended
+                                                     value: 0x8808 */
+
+/* mac address registers: 0 - 44, 0x6080 - 0x6130, 4 8-bit bytes.
+ * register           contains                   comparison
+ *    0        16 MSB of primary MAC addr        [47:32] of DA field
+ *    1        16 middle bits ""                 [31:16] of DA field
+ *    2        16 LSB ""                         [15:0] of DA field
+ *    3*x      16MSB of alt MAC addr 1-15        [47:32] of DA field
+ *    4*x      16 middle bits ""                 [31:16]
+ *    5*x      16 LSB ""                         [15:0]
+ *    42       16 MSB of MAC CTRL addr           [47:32] of DA.
+ *    43       16 middle bits ""                 [31:16]
+ *    44       16 LSB ""                         [15:0]
+ *    MAC CTRL addr must be the reserved multicast addr for MAC CTRL frames.
+ *    if there is a match, MAC will set the bit for alternative address
+ *    filter pass [15]
+
+ *    here is the map of registers given MAC address notation: a:b:c:d:e:f
+ *                     ab             cd             ef
+ *    primary addr     reg 2          reg 1          reg 0
+ *    alt addr 1       reg 5          reg 4          reg 3
+ *    alt addr x       reg 5*x        reg 4*x        reg 3*x
+ *    ctrl addr        reg 44         reg 43         reg 42
+ */
+#define  REG_MAC_ADDR0                     0x6080  /* MAC address 0 reg */
+#define  REG_MAC_ADDRN(x)                  (REG_MAC_ADDR0 + (x)*4)
+#define  REG_MAC_ADDR_FILTER0              0x614C  /* address filter 0 reg
+                                                     [47:32] */
+#define  REG_MAC_ADDR_FILTER1              0x6150  /* address filter 1 reg
+                                                     [31:16] */
+#define  REG_MAC_ADDR_FILTER2              0x6154  /* address filter 2 reg
+                                                     [15:0] */
+#define  REG_MAC_ADDR_FILTER2_1_MASK       0x6158  /* address filter 2 and 1
+                                                     mask reg. 8-bit reg
+                                                     contains nibble mask for
+                                                     reg 2 and 1. */
+#define  REG_MAC_ADDR_FILTER0_MASK         0x615C  /* address filter 0 mask
+                                                     reg */
+
+/* hash table registers: 0 - 15, 0x6160 - 0x619C, 4 8-bit bytes
+ * 16-bit registers contain bits of the hash table.
+ * reg x  -> [16*(15 - x) + 15 : 16*(15 - x)].
+ * e.g., 15 -> [15:0], 0 -> [255:240]
+ */
+#define  REG_MAC_HASH_TABLE0               0x6160  /* hash table 0 reg */
+#define  REG_MAC_HASH_TABLEN(x)            (REG_MAC_HASH_TABLE0 + (x)*4)
+
+/* statistics registers. these registers generate an interrupt on
+ * overflow. recommended initialization: 0x0000. most are 16-bits except
+ * for PEAK_ATTEMPTS register which is 8 bits.
+ */
+#define  REG_MAC_COLL_NORMAL               0x61A0 /* normal collision
+                                                    counter. */
+#define  REG_MAC_COLL_FIRST                0x61A4 /* first attempt
+                                                    successful collision
+                                                    counter */
+#define  REG_MAC_COLL_EXCESS               0x61A8 /* excessive collision
+                                                    counter */
+#define  REG_MAC_COLL_LATE                 0x61AC /* late collision counter */
+#define  REG_MAC_TIMER_DEFER               0x61B0 /* defer timer. time base
+                                                    is the media byte
+                                                    clock/256 */
+#define  REG_MAC_ATTEMPTS_PEAK             0x61B4 /* peak attempts reg */
+#define  REG_MAC_RECV_FRAME                0x61B8 /* receive frame counter */
+#define  REG_MAC_LEN_ERR                   0x61BC /* length error counter */
+#define  REG_MAC_ALIGN_ERR                 0x61C0 /* alignment error counter */
+#define  REG_MAC_FCS_ERR                   0x61C4 /* FCS error counter */
+#define  REG_MAC_RX_CODE_ERR               0x61C8 /* RX code violation
+                                                    error counter */
+
+/* misc registers */
+#define  REG_MAC_RANDOM_SEED               0x61CC /* random number seed reg.
+                                                  10-bit register used as a
+                                                  seed for the random number
+                                                  generator for the CSMA/CD
+                                                  backoff algorithm. only
+                                                  programmed after power-on
+                                                  reset and should be a
+                                                  random value which has a
+                                                  high likelihood of being
+                                                  unique for each MAC
+                                                  attached to a network
+                                                  segment (e.g., 10 LSB of
+                                                  MAC address) */
+
+/* ASUN: there's a PAUSE_TIMER (ro) described, but it's not in the address
+ *       map
+ */
+
+/* 27-bit register has the current state for key state machines in the MAC */
+#define  REG_MAC_STATE_MACHINE             0x61D0 /* (ro) state machine reg */
+#define    MAC_SM_RLM_MASK                 0x07800000
+#define    MAC_SM_RLM_SHIFT                23
+#define    MAC_SM_RX_FC_MASK               0x00700000
+#define    MAC_SM_RX_FC_SHIFT              20
+#define    MAC_SM_TLM_MASK                 0x000F0000
+#define    MAC_SM_TLM_SHIFT                16
+#define    MAC_SM_ENCAP_SM_MASK            0x0000F000
+#define    MAC_SM_ENCAP_SM_SHIFT           12
+#define    MAC_SM_TX_REQ_MASK              0x00000C00
+#define    MAC_SM_TX_REQ_SHIFT             10
+#define    MAC_SM_TX_FC_MASK               0x000003C0
+#define    MAC_SM_TX_FC_SHIFT              6
+#define    MAC_SM_FIFO_WRITE_SEL_MASK      0x00000038
+#define    MAC_SM_FIFO_WRITE_SEL_SHIFT     3
+#define    MAC_SM_TX_FIFO_EMPTY_MASK       0x00000007
+#define    MAC_SM_TX_FIFO_EMPTY_SHIFT      0
+
+/** MIF registers. the MIF can be programmed in either bit-bang or
+ *  frame mode.
+ **/
+#define  REG_MIF_BIT_BANG_CLOCK            0x6200 /* MIF bit-bang clock.
+                                                  1 -> 0 will generate a
+                                                  rising edge. 0 -> 1 will
+                                                  generate a falling edge. */
+#define  REG_MIF_BIT_BANG_DATA             0x6204 /* MIF bit-bang data. 1-bit
+                                                    register generates data */
+#define  REG_MIF_BIT_BANG_OUTPUT_EN        0x6208 /* MIF bit-bang output
+                                                    enable. enable when
+                                                    xmitting data from MIF to
+                                                    transceiver. */
+
+/* 32-bit register serves as an instruction register when the MIF is
+ * programmed in frame mode. load this register w/ a valid instruction
+ * (as per IEEE 802.3u MII spec). poll this register to check for instruction
+ * execution completion. during a read operation, this register will also
+ * contain the 16-bit data returned by the transceiver. unless specified
+ * otherwise, fields are considered "don't care" when polling for
+ * completion.
+ */
+#define  REG_MIF_FRAME                     0x620C /* MIF frame/output reg */
+#define    MIF_FRAME_START_MASK            0xC0000000 /* start of frame.
+                                                        load w/ 01 when
+                                                        issuing an instr */
+#define    MIF_FRAME_ST                    0x40000000 /* STart of frame */
+#define    MIF_FRAME_OPCODE_MASK           0x30000000 /* opcode. 01 for a
+                                                        write. 10 for a
+                                                        read */
+#define    MIF_FRAME_OP_READ               0x20000000 /* read OPcode */
+#define    MIF_FRAME_OP_WRITE              0x10000000 /* write OPcode */
+#define    MIF_FRAME_PHY_ADDR_MASK         0x0F800000 /* phy address. when
+                                                        issuing an instr,
+                                                        this field should be
+                                                        loaded w/ the XCVR
+                                                        addr */
+#define    MIF_FRAME_PHY_ADDR_SHIFT        23
+#define    MIF_FRAME_REG_ADDR_MASK         0x007C0000 /* register address.
+                                                        when issuing an instr,
+                                                        addr of register
+                                                        to be read/written */
+#define    MIF_FRAME_REG_ADDR_SHIFT        18
+#define    MIF_FRAME_TURN_AROUND_MSB       0x00020000 /* turn around, MSB.
+                                                        when issuing an instr,
+                                                        set this bit to 1 */
+#define    MIF_FRAME_TURN_AROUND_LSB       0x00010000 /* turn around, LSB.
+                                                        when issuing an instr,
+                                                        set this bit to 0.
+                                                        when polling for
+                                                        completion, 1 means
+                                                        that instr execution
+                                                        has been completed */
+#define    MIF_FRAME_DATA_MASK             0x0000FFFF /* instruction payload
+                                                        load with 16-bit data
+                                                        to be written in
+                                                        transceiver reg for a
+                                                        write. doesn't matter
+                                                        in a read. when
+                                                        polling for
+                                                        completion, field is
+                                                        "don't care" for write
+                                                        and 16-bit data
+                                                        returned by the
+                                                        transceiver for a
+                                                        read (if valid bit
+                                                        is set) */
+#define  REG_MIF_CFG                    0x6210 /* MIF config reg */
+#define    MIF_CFG_PHY_SELECT           0x0001 /* 1 -> select MDIO_1
+                                                 0 -> select MDIO_0 */
+#define    MIF_CFG_POLL_EN              0x0002 /* enable polling
+                                                 mechanism. if set,
+                                                 BB_MODE should be 0 */
+#define    MIF_CFG_BB_MODE              0x0004 /* 1 -> bit-bang mode
+                                                 0 -> frame mode */
+#define    MIF_CFG_POLL_REG_MASK        0x00F8 /* register address to be
+                                                 used by polling mode.
+                                                 only meaningful if POLL_EN
+                                                 is set to 1 */
+#define    MIF_CFG_POLL_REG_SHIFT       3
+#define    MIF_CFG_MDIO_0               0x0100 /* (ro) dual purpose.
+                                                 when MDIO_0 is idle,
+                                                 1 -> transceiver is
+                                                 connected to MDIO_0.
+                                                 when MIF is communicating
+                                                 w/ MDIO_0 in bit-bang
+                                                 mode, this bit indicates
+                                                 the incoming bit stream
+                                                 during a read op */
+#define    MIF_CFG_MDIO_1               0x0200 /* (ro) dual purpose.
+                                                 when MDIO_1 is idle,
+                                                 1 -> transceiver is
+                                                 connected to MDIO_1.
+                                                 when MIF is communicating
+                                                 w/ MDIO_1 in bit-bang
+                                                 mode, this bit indicates
+                                                 the incoming bit stream
+                                                 during a read op */
+#define    MIF_CFG_POLL_PHY_MASK        0x7C00 /* transceiver address to
+                                                 be polled */
+#define    MIF_CFG_POLL_PHY_SHIFT       10
+
+/* 16-bit register used to determine which bits in the POLL_STATUS portion of
+ * the MIF_STATUS register will cause an interrupt. if a mask bit is 0,
+ * corresponding bit of the POLL_STATUS will generate a MIF interrupt when
+ * set. DEFAULT: 0xFFFF
+ */
+#define  REG_MIF_MASK                      0x6214 /* MIF mask reg */
+
+/* 32-bit register used when in poll mode. auto-cleared after being read */
+#define  REG_MIF_STATUS                    0x6218 /* MIF status reg */
+#define    MIF_STATUS_POLL_DATA_MASK       0xFFFF0000 /* poll data contains
+                                                        the "latest image"
+                                                        update of the XCVR
+                                                        reg being read */
+#define    MIF_STATUS_POLL_DATA_SHIFT      16
+#define    MIF_STATUS_POLL_STATUS_MASK     0x0000FFFF /* poll status indicates
+                                                        which bits in the
+                                                        POLL_DATA field have
+                                                        changed since the
+                                                        MIF_STATUS reg was
+                                                        last read */
+#define    MIF_STATUS_POLL_STATUS_SHIFT    0
+
+/* 7-bit register has current state for all state machines in the MIF */
+#define  REG_MIF_STATE_MACHINE             0x621C /* MIF state machine reg */
+#define    MIF_SM_CONTROL_MASK             0x07   /* control state machine
+                                                    state */
+#define    MIF_SM_EXECUTION_MASK           0x60   /* execution state machine
+                                                    state */
+
+/** PCS/Serialink. the following registers are equivalent to the standard
+ *  MII management registers except that they're directly mapped in
+ *  Cassini's register space.
+ **/
+
+/* the auto-negotiation enable bit should be programmed the same at
+ * the link partner as in the local device to enable auto-negotiation to
+ * complete. when that bit is reprogrammed, auto-neg/manual config is
+ * restarted automatically.
+ * DEFAULT: 0x1040
+ */
+#define  REG_PCS_MII_CTRL                  0x9000 /* PCS MII control reg */
+#define    PCS_MII_CTRL_1000_SEL           0x0040 /* reads 1. ignored on
+                                                    writes */
+#define    PCS_MII_CTRL_COLLISION_TEST     0x0080 /* COL signal at the PCS
+                                                    to MAC interface is
+                                                    activated regardless
+                                                    of activity */
+#define    PCS_MII_CTRL_DUPLEX             0x0100 /* forced 0x0. PCS
+                                                    behaviour same for
+                                                    half and full dplx */
+#define    PCS_MII_RESTART_AUTONEG         0x0200 /* self clearing.
+                                                    restart auto-
+                                                    negotiation */
+#define    PCS_MII_ISOLATE                 0x0400 /* read as 0. ignored
+                                                    on writes */
+#define    PCS_MII_POWER_DOWN              0x0800 /* read as 0. ignored
+                                                    on writes */
+#define    PCS_MII_AUTONEG_EN              0x1000 /* default 1. PCS goes
+                                                    through automatic
+                                                    link config before it
+                                                    can be used. when 0,
+                                                    link can be used
+                                                    w/out any link config
+                                                    phase */
+#define    PCS_MII_10_100_SEL              0x2000 /* read as 0. ignored on
+                                                    writes */
+#define    PCS_MII_RESET                   0x8000 /* reset PCS. self-clears
+                                                    when done */
+
+/* DEFAULT: 0x0108 */
+#define  REG_PCS_MII_STATUS                0x9004 /* PCS MII status reg */
+#define    PCS_MII_STATUS_EXTEND_CAP       0x0001 /* reads 0 */
+#define    PCS_MII_STATUS_JABBER_DETECT    0x0002 /* reads 0 */
+#define    PCS_MII_STATUS_LINK_STATUS      0x0004 /* 1 -> link up.
+                                                    0 -> link down. 0 is
+                                                    latched so that 0 is
+                                                    kept until read. read
+                                                    2x to determine if the
+                                                    link has gone up again */
+#define    PCS_MII_STATUS_AUTONEG_ABLE     0x0008 /* reads 1 (able to perform
+                                                    auto-neg) */
+#define    PCS_MII_STATUS_REMOTE_FAULT     0x0010 /* 1 -> remote fault detected
+                                                    from received link code
+                                                    word. only valid after
+                                                    auto-neg completed */
+#define    PCS_MII_STATUS_AUTONEG_COMP     0x0020 /* 1 -> auto-negotiation
+                                                         completed
+                                                    0 -> auto-negotiation not
+                                                    completed */
+#define    PCS_MII_STATUS_EXTEND_STATUS    0x0100 /* reads as 1. used as an
+                                                    indication that this is
+                                                    a 1000 Base-X PHY. writes
+                                                    to it are ignored */
+
+/* used during auto-negotiation.
+ * DEFAULT: 0x00E0
+ */
+#define  REG_PCS_MII_ADVERT                0x9008 /* PCS MII advertisement
+                                                    reg */
+#define    PCS_MII_ADVERT_FD               0x0020  /* advertise full duplex
+                                                     1000 Base-X */
+#define    PCS_MII_ADVERT_HD               0x0040  /* advertise half-duplex
+                                                     1000 Base-X */
+#define    PCS_MII_ADVERT_SYM_PAUSE        0x0080  /* advertise PAUSE
+                                                     symmetric capability */
+#define    PCS_MII_ADVERT_ASYM_PAUSE       0x0100  /* advertises PAUSE
+                                                     asymmetric capability */
+#define    PCS_MII_ADVERT_RF_MASK          0x3000 /* remote fault. write bit13
+                                                    to optionally indicate to
+                                                    link partner that chip is
+                                                    going off-line. bit12 will
+                                                    get set when signal
+                                                    detect == FAIL and will
+                                                    remain set until
+                                                    successful negotiation */
+#define    PCS_MII_ADVERT_ACK              0x4000 /* (ro) */
+#define    PCS_MII_ADVERT_NEXT_PAGE        0x8000 /* (ro) forced 0x0 */
+
+/* contents updated as a result of autonegotiation. layout and definitions
+ * identical to PCS_MII_ADVERT
+ */
+#define  REG_PCS_MII_LPA                   0x900C /* PCS MII link partner
+                                                    ability reg */
+#define    PCS_MII_LPA_FD             PCS_MII_ADVERT_FD
+#define    PCS_MII_LPA_HD             PCS_MII_ADVERT_HD
+#define    PCS_MII_LPA_SYM_PAUSE      PCS_MII_ADVERT_SYM_PAUSE
+#define    PCS_MII_LPA_ASYM_PAUSE     PCS_MII_ADVERT_ASYM_PAUSE
+#define    PCS_MII_LPA_RF_MASK        PCS_MII_ADVERT_RF_MASK
+#define    PCS_MII_LPA_ACK            PCS_MII_ADVERT_ACK
+#define    PCS_MII_LPA_NEXT_PAGE      PCS_MII_ADVERT_NEXT_PAGE
+
+/* DEFAULT: 0x0 */
+#define  REG_PCS_CFG                       0x9010 /* PCS config reg */
+#define    PCS_CFG_EN                      0x01   /* enable PCS. must be
+                                                    0 when modifying
+                                                    PCS_MII_ADVERT */
+#define    PCS_CFG_SD_OVERRIDE             0x02   /* sets signal detect to
+                                                    OK. bit is
+                                                    non-resettable */
+#define    PCS_CFG_SD_ACTIVE_LOW           0x04   /* changes interpretation
+                                                    of optical signal to make
+                                                    signal detect okay when
+                                                    signal is low */
+#define    PCS_CFG_JITTER_STUDY_MASK       0x18   /* used to make jitter
+                                                    measurements. a single
+                                                    code group is xmitted
+                                                    regularly.
+                                                    0x0 = normal operation
+                                                    0x1 = high freq test
+                                                          pattern, D21.5
+                                                    0x2 = low freq test
+                                                          pattern, K28.7
+                                                    0x3 = reserved */
+#define    PCS_CFG_10MS_TIMER_OVERRIDE     0x20   /* shortens 10-20ms auto-
+                                                    negotiation timer to
+                                                    a few cycles for test
+                                                    purposes */
+
+/* used for diagnostic purposes. bits 20-22 autoclear on read */
+#define  REG_PCS_STATE_MACHINE             0x9014 /* (ro) PCS state machine
+                                                    and diagnostic reg */
+#define    PCS_SM_TX_STATE_MASK            0x0000000F /* 0 and 1 indicate
+                                                        xmission of idle.
+                                                        otherwise, xmission of
+                                                        a packet */
+#define    PCS_SM_RX_STATE_MASK            0x000000F0 /* 0 indicates reception
+                                                        of idle. otherwise,
+                                                        reception of packet */
+#define    PCS_SM_WORD_SYNC_STATE_MASK     0x00000700 /* 0 indicates loss of
+                                                        sync */
+#define    PCS_SM_SEQ_DETECT_STATE_MASK    0x00001800 /* cycling through 0-3
+                                                        indicates reception of
+                                                        Config codes. cycling
+                                                        through 0-1 indicates
+                                                        reception of idles */
+#define    PCS_SM_LINK_STATE_MASK          0x0001E000
+#define        SM_LINK_STATE_UP            0x00016000 /* link state is up */
+
+#define    PCS_SM_LOSS_LINK_C              0x00100000 /* loss of link due to
+                                                        recept of Config
+                                                        codes */
+#define    PCS_SM_LOSS_LINK_SYNC           0x00200000 /* loss of link due to
+                                                        loss of sync */
+#define    PCS_SM_LOSS_SIGNAL_DETECT       0x00400000 /* signal detect goes
+                                                        from OK to FAIL. bit29
+                                                        will also be set if
+                                                        this is set */
+#define    PCS_SM_NO_LINK_BREAKLINK        0x01000000 /* link not up due to
+                                                       receipt of breaklink
+                                                       C codes from partner.
+                                                       C codes w/ 0 content
+                                                       received triggering
+                                                       start/restart of
+                                                       autonegotiation.
+                                                       should be sent for
+                                                       no longer than 20ms */
+#define    PCS_SM_NO_LINK_SERDES           0x02000000 /* serdes being
+                                                       initialized. see serdes
+                                                       state reg */
+#define    PCS_SM_NO_LINK_C                0x04000000 /* C codes not stable or
+                                                        not received */
+#define    PCS_SM_NO_LINK_SYNC             0x08000000 /* word sync not
+                                                        achieved */
+#define    PCS_SM_NO_LINK_WAIT_C           0x10000000 /* waiting for C codes
+                                                        w/ ack bit set */
+#define    PCS_SM_NO_LINK_NO_IDLE          0x20000000 /* link partner continues
+                                                        to send C codes
+                                                        instead of idle
+                                                        symbols or pkt data */
+
+/* this register indicates interrupt changes in specific PCS MII status bits.
+ * PCS_INT may be masked at the ISR level. only a single bit is implemented
+ * for link status change.
+ */
+#define  REG_PCS_INTR_STATUS               0x9018 /* PCS interrupt status */
+#define    PCS_INTR_STATUS_LINK_CHANGE     0x04   /* link status has changed
+                                                    since last read */
+
+/* control which network interface is used. no more than one bit should
+ * be set.
+ * DEFAULT: none
+ */
+#define  REG_PCS_DATAPATH_MODE             0x9050 /* datapath mode reg */
+#define    PCS_DATAPATH_MODE_MII           0x00 /* PCS is not used and
+                                                  MII/GMII is selected.
+                                                  selection between MII and
+                                                  GMII is controlled by
+                                                  XIF_CFG */
+#define    PCS_DATAPATH_MODE_SERDES        0x02 /* PCS is used via the
+                                                  10-bit interface */
+
+/* input to serdes chip or serialink block */
+#define  REG_PCS_SERDES_CTRL              0x9054 /* serdes control reg */
+#define    PCS_SERDES_CTRL_LOOPBACK       0x01   /* enable loopback on
+                                                   serdes interface */
+#define    PCS_SERDES_CTRL_SYNCD_EN       0x02   /* enable sync carrier
+                                                   detection. should be
+                                                   0x0 for normal
+                                                   operation */
+#define    PCS_SERDES_CTRL_LOCKREF       0x04   /* frequency-lock RBC[0:1]
+                                                  to REFCLK when set.
+                                                  when clear, receiver
+                                                  clock locks to incoming
+                                                  serial data */
+
+/* multiplex test outputs into the PROM address (PA_3 through PA_0) pins.
+ * should be 0x0 for normal operations.
+ * 0b000          normal operation, PROM address[3:0] selected
+ * 0b001          rxdma req, rxdma ack, rxdma ready, rxdma read
+ * 0b010          rxmac req, rx ack, rx tag, rx clk shared
+ * 0b011          txmac req, tx ack, tx tag, tx retry req
+ * 0b100          tx tp3, tx tp2, tx tp1, tx tp0
+ * 0b101          R period RX, R period TX, R period HP, R period BIM
+ * DEFAULT: 0x0
+ */
+#define  REG_PCS_SHARED_OUTPUT_SEL         0x9058 /* shared output select */
+#define    PCS_SOS_PROM_ADDR_MASK          0x0007
+
+/* used for diagnostics. this register indicates progress of the SERDES
+ * boot up.
+ * 0b00       undergoing reset
+ * 0b01       waiting 500us while lockrefn is asserted
+ * 0b10       waiting for comma detect
+ * 0b11       receive data is synchronized
+ * DEFAULT: 0x0
+ */
+#define  REG_PCS_SERDES_STATE              0x905C /* (ro) serdes state */
+#define    PCS_SERDES_STATE_MASK           0x03
+
+/* used for diagnostics. indicates number of packets transmitted or received.
+ * counters rollover w/out generating an interrupt.
+ * DEFAULT: 0x0
+ */
+#define  REG_PCS_PACKET_COUNT              0x9060 /* (ro) PCS packet counter */
+#define    PCS_PACKET_COUNT_TX             0x000007FF /* pkts xmitted by PCS */
+#define    PCS_PACKET_COUNT_RX             0x07FF0000 /* pkts recvd by PCS
+                                                        whether they
+                                                        encountered an error
+                                                        or not */
+
+/** LocalBus Devices. the following provides run-time access to the
+ *  Cassini's PROM
+ **/
+#define  REG_EXPANSION_ROM_RUN_START       0x100000 /* expansion rom run time
+                                                      access */
+#define  REG_EXPANSION_ROM_RUN_END         0x17FFFF
+
+#define  REG_SECOND_LOCALBUS_START         0x180000 /* secondary local bus
+                                                      device */
+#define  REG_SECOND_LOCALBUS_END           0x1FFFFF
+
+/* entropy device */
+#define  REG_ENTROPY_START                 REG_SECOND_LOCALBUS_START
+#define  REG_ENTROPY_DATA                  (REG_ENTROPY_START + 0x00)
+#define  REG_ENTROPY_STATUS                (REG_ENTROPY_START + 0x04)
+#define      ENTROPY_STATUS_DRDY           0x01
+#define      ENTROPY_STATUS_BUSY           0x02
+#define      ENTROPY_STATUS_CIPHER         0x04
+#define      ENTROPY_STATUS_BYPASS_MASK    0x18
+#define  REG_ENTROPY_MODE                  (REG_ENTROPY_START + 0x05)
+#define      ENTROPY_MODE_KEY_MASK         0x07
+#define      ENTROPY_MODE_ENCRYPT          0x40
+#define  REG_ENTROPY_RAND_REG              (REG_ENTROPY_START + 0x06)
+#define  REG_ENTROPY_RESET                 (REG_ENTROPY_START + 0x07)
+#define      ENTROPY_RESET_DES_IO          0x01
+#define      ENTROPY_RESET_STC_MODE        0x02
+#define      ENTROPY_RESET_KEY_CACHE       0x04
+#define      ENTROPY_RESET_IV              0x08
+#define  REG_ENTROPY_IV                    (REG_ENTROPY_START + 0x08)
+#define  REG_ENTROPY_KEY0                  (REG_ENTROPY_START + 0x10)
+#define  REG_ENTROPY_KEYN(x)               (REG_ENTROPY_KEY0 + 4*(x))
+
+/* phys of interest w/ their special mii registers */
+#define PHY_LUCENT_B0     0x00437421
+#define   LUCENT_MII_REG      0x1F
+
+#define PHY_NS_DP83065    0x20005c78
+#define   DP83065_MII_MEM     0x16
+#define   DP83065_MII_REGD    0x1D
+#define   DP83065_MII_REGE    0x1E
+
+#define PHY_BROADCOM_5411 0x00206071
+#define PHY_BROADCOM_B0   0x00206050
+#define   BROADCOM_MII_REG4   0x14
+#define   BROADCOM_MII_REG5   0x15
+#define   BROADCOM_MII_REG7   0x17
+#define   BROADCOM_MII_REG8   0x18
+
+#define   CAS_MII_ANNPTR          0x07
+#define   CAS_MII_ANNPRR          0x08
+#define   CAS_MII_1000_CTRL       0x09
+#define   CAS_MII_1000_STATUS     0x0A
+#define   CAS_MII_1000_EXTEND     0x0F
+
+#define   CAS_BMSR_1000_EXTEND    0x0100 /* supports 1000Base-T extended status */
+/*
+ * if autoneg is disabled, here's the table:
+ * BMCR_SPEED100 = 100Mbps
+ * BMCR_SPEED1000 = 1000Mbps
+ * ~(BMCR_SPEED100 | BMCR_SPEED1000) = 10Mbps
+ */
+#define   CAS_BMCR_SPEED1000      0x0040  /* Select 1000Mbps */
+
+#define   CAS_ADVERTISE_1000HALF   0x0100
+#define   CAS_ADVERTISE_1000FULL   0x0200
+#define   CAS_ADVERTISE_PAUSE      0x0400
+#define   CAS_ADVERTISE_ASYM_PAUSE 0x0800
+
+/* regular lpa register */
+#define   CAS_LPA_PAUSE                   CAS_ADVERTISE_PAUSE
+#define   CAS_LPA_ASYM_PAUSE       CAS_ADVERTISE_ASYM_PAUSE
+
+/* 1000_STATUS register */
+#define   CAS_LPA_1000HALF        0x0400
+#define   CAS_LPA_1000FULL        0x0800
+
+#define   CAS_EXTEND_1000XFULL    0x8000
+#define   CAS_EXTEND_1000XHALF    0x4000
+#define   CAS_EXTEND_1000TFULL    0x2000
+#define   CAS_EXTEND_1000THALF    0x1000
+
+/* cassini header parser firmware: one instruction of the header-parser
+ * match program (see the cas_prog_* tables below). each step compares a
+ * packet word against (mask, val) with the comparison in 'op' and then
+ * branches to a success or failure step, optionally emitting an output.
+ */
+typedef struct cas_hp_inst {
+       const char *note; /* human-readable description of this step */
+
+       u16 mask, val;  /* compare (word & mask) against val, per 'op' */
+
+       u8 op;          /* comparison opcode: OP_EQ/OP_LT/OP_GT/OP_NP */
+       u8 soff, snext; /* if match succeeds, new offset and match */
+       u8 foff, fnext; /* if match fails, new offset and match */
+       /* output info */
+       u8 outop;    /* output opcode */
+
+       u16 outarg;  /* output argument */
+       u8 outenab;  /* output enable: 0 = not, 1 = if match
+                        2 = if !match, 3 = always */
+       u8 outshift; /* barrel shift right, 4 bits */
+       u16 outmask;
+} cas_hp_inst_t;
+
+/* comparison */
+#define OP_EQ     0 /* packet == value */
+#define OP_LT     1 /* packet < value */
+#define OP_GT     2 /* packet > value */
+#define OP_NP     3 /* new packet */
+
+/* output opcodes */
+#define        CL_REG  0
+#define        LD_FID  1
+#define        LD_SEQ  2
+#define        LD_CTL  3
+#define        LD_SAP  4
+#define        LD_R1   5
+#define        LD_L3   6
+#define        LD_SUM  7
+#define        LD_HDR  8
+#define        IM_FID  9
+#define        IM_SEQ  10
+#define        IM_SAP  11
+#define        IM_R1   12
+#define        IM_CTL  13
+#define        LD_LEN  14
+#define        ST_FLG  15
+
+/* match step #s for IP4TCP4 */
+#define S1_PCKT         0
+#define S1_VLAN         1
+#define S1_CFI          2
+#define S1_8023         3
+#define S1_LLC          4
+#define S1_LLCc         5
+#define S1_IPV4         6
+#define S1_IPV4c        7
+#define S1_IPV4F        8
+#define S1_TCP44        9
+#define S1_IPV6         10
+#define S1_IPV6L        11
+#define S1_IPV6c        12
+#define S1_TCP64        13
+#define S1_TCPSQ        14
+#define S1_TCPFG        15
+#define        S1_TCPHL        16
+#define        S1_TCPHc        17
+#define        S1_CLNP         18
+#define        S1_CLNP2        19
+#define        S1_DROP         20
+#define        S2_HTTP         21
+#define        S1_ESP4         22
+#define        S1_AH4          23
+#define        S1_ESP6         24
+#define        S1_AH6          25
+
+#define CAS_PROG_IP46TCP4_PREAMBLE \
+{ "packet arrival?", 0xffff, 0x0000, OP_NP,  6, S1_VLAN,  0, S1_PCKT,  \
+  CL_REG, 0x3ff, 1, 0x0, 0x0000}, \
+{ "VLAN?", 0xffff, 0x8100, OP_EQ,  1, S1_CFI,   0, S1_8023,  \
+  IM_CTL, 0x00a,  3, 0x0, 0xffff}, \
+{ "CFI?", 0x1000, 0x1000, OP_EQ,  0, S1_DROP,  1, S1_8023, \
+  CL_REG, 0x000,  0, 0x0, 0x0000}, \
+{ "8023?", 0xffff, 0x0600, OP_LT,  1, S1_LLC,   0, S1_IPV4, \
+  CL_REG, 0x000,  0, 0x0, 0x0000}, \
+{ "LLC?", 0xffff, 0xaaaa, OP_EQ,  1, S1_LLCc,  0, S1_CLNP, \
+  CL_REG, 0x000,  0, 0x0, 0x0000}, \
+{ "LLCc?", 0xff00, 0x0300, OP_EQ,  2, S1_IPV4,  0, S1_CLNP, \
+  CL_REG, 0x000,  0, 0x0, 0x0000}, \
+{ "IPV4?", 0xffff, 0x0800, OP_EQ,  1, S1_IPV4c, 0, S1_IPV6, \
+  LD_SAP, 0x100,  3, 0x0, 0xffff}, \
+{ "IPV4 cont?", 0xff00, 0x4500, OP_EQ,  3, S1_IPV4F, 0, S1_CLNP, \
+  LD_SUM, 0x00a,  1, 0x0, 0x0000}, \
+{ "IPV4 frag?", 0x3fff, 0x0000, OP_EQ,  1, S1_TCP44, 0, S1_CLNP, \
+  LD_LEN, 0x03e,  1, 0x0, 0xffff}, \
+{ "TCP44?", 0x00ff, 0x0006, OP_EQ,  7, S1_TCPSQ, 0, S1_CLNP, \
+  LD_FID, 0x182,  1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ \
+{ "IPV6?", 0xffff, 0x86dd, OP_EQ,  1, S1_IPV6L, 0, S1_CLNP,  \
+  LD_SUM, 0x015,  1, 0x0, 0x0000}, \
+{ "IPV6 len", 0xf000, 0x6000, OP_EQ,  0, S1_IPV6c, 0, S1_CLNP, \
+  IM_R1,  0x128,  1, 0x0, 0xffff}, \
+{ "IPV6 cont?", 0x0000, 0x0000, OP_EQ,  3, S1_TCP64, 0, S1_CLNP, \
+  LD_FID, 0x484,  1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ \
+{ "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, \
+  LD_LEN, 0x03f,  1, 0x0, 0xffff}
+
+#ifdef USE_HP_IP46TCP4
+/* basic header-parser program: recognizes IPv4/IPv6 TCP (via the shared
+ * preamble) and loads TCP sequence number, flags, and header length so
+ * the chip can identify and track the flow.
+ */
+static cas_hp_inst_t cas_prog_ip46tcp4tab[] = {
+       CAS_PROG_IP46TCP4_PREAMBLE,
+       { "TCP seq", /* DADDR should point to dest port */
+         0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ,
+         0x081,  3, 0x0, 0xffff}, /* Load TCP seq # */
+       { "TCP control flags", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHL, 0,
+         S1_TCPHL, ST_FLG, 0x045,  3, 0x0, 0x002f}, /* Load TCP flags */
+       { "TCP length", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHc, 0,
+         S1_TCPHc, LD_R1,  0x205,  3, 0xB, 0xf000},
+       { "TCP length cont", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0,
+         S1_PCKT,  LD_HDR, 0x0ff,  3, 0x0, 0xffff},
+       { "Cleanup", 0x0000, 0x0000, OP_EQ,  0, S1_CLNP2,  0, S1_CLNP2,
+         IM_CTL, 0x001,  3, 0x0, 0x0001},
+       { "Cleanup 2", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         IM_CTL, 0x000,  0, 0x0, 0x0000},
+       { "Drop packet", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         IM_CTL, 0x080,  3, 0x0, 0xffff},
+       { NULL },
+};
+#ifdef HP_IP46TCP4_DEFAULT
+#define CAS_HP_FIRMWARE               cas_prog_ip46tcp4tab
+#endif
+#endif
+
+/*
+ * Alternate table load which excludes HTTP server traffic from reassembly.
+ * It is substantially similar to the basic table, with one extra state
+ * and a few extra compares. */
+#ifdef USE_HP_IP46TCP4NOHTTP
+/* like cas_prog_ip46tcp4tab, but the "TCP seq"/"TCP control flags" steps
+ * that match the S2_HTTP pattern branch to the extra "No HTTP" step
+ * (IM_CTL 0x044), excluding those flows from reassembly.
+ */
+static cas_hp_inst_t cas_prog_ip46tcp4nohttptab[] = {
+       CAS_PROG_IP46TCP4_PREAMBLE,
+       { "TCP seq", /* DADDR should point to dest port */
+         0xFFFF, 0x0080, OP_EQ,  0, S2_HTTP,  0, S1_TCPFG, LD_SEQ,
+         0x081,  3, 0x0, 0xffff} , /* Load TCP seq # */
+       { "TCP control flags", 0xFFFF, 0x8080, OP_EQ,  0, S2_HTTP,  0,
+         S1_TCPHL, ST_FLG, 0x145,  2, 0x0, 0x002f, }, /* Load TCP flags */
+       { "TCP length", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHc, 0, S1_TCPHc,
+         LD_R1,  0x205,  3, 0xB, 0xf000},
+       { "TCP length cont", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         LD_HDR, 0x0ff,  3, 0x0, 0xffff},
+       { "Cleanup", 0x0000, 0x0000, OP_EQ,  0, S1_CLNP2,  0, S1_CLNP2,
+         IM_CTL, 0x001,  3, 0x0, 0x0001},
+       { "Cleanup 2", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         CL_REG, 0x002,  3, 0x0, 0x0000},
+       { "Drop packet", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         IM_CTL, 0x080,  3, 0x0, 0xffff},
+       { "No HTTP", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         IM_CTL, 0x044,  3, 0x0, 0xffff},
+       { NULL },
+};
+#ifdef HP_IP46TCP4NOHTTP_DEFAULT
+#define CAS_HP_FIRMWARE               cas_prog_ip46tcp4nohttptab
+#endif
+#endif
+
+/* match step #s for IP4FRAG */
+#define        S3_IPV6c        11
+#define        S3_TCP64        12
+#define        S3_TCPSQ        13
+#define        S3_TCPFG        14
+#define        S3_TCPHL        15
+#define        S3_TCPHc        16
+#define        S3_FRAG         17
+#define        S3_FOFF         18
+#define        S3_CLNP         19
+
+#ifdef USE_HP_IP4FRAG
+/* variant that additionally recognizes IPv4 fragments: non-first
+ * fragments branch to the S3_FRAG/S3_FOFF steps, which load an
+ * IP4 src+dst flow ID (LD_FID) and the fragment offset (LD_SEQ).
+ */
+static cas_hp_inst_t cas_prog_ip4fragtab[] = {
+       { "packet arrival?", 0xffff, 0x0000, OP_NP,  6, S1_VLAN,  0, S1_PCKT,
+         CL_REG, 0x3ff, 1, 0x0, 0x0000},
+       { "VLAN?", 0xffff, 0x8100, OP_EQ,  1, S1_CFI,   0, S1_8023,
+         IM_CTL, 0x00a,  3, 0x0, 0xffff},
+       { "CFI?", 0x1000, 0x1000, OP_EQ,  0, S3_CLNP,  1, S1_8023,
+         CL_REG, 0x000,  0, 0x0, 0x0000},
+       { "8023?", 0xffff, 0x0600, OP_LT,  1, S1_LLC,   0, S1_IPV4,
+         CL_REG, 0x000,  0, 0x0, 0x0000},
+       { "LLC?", 0xffff, 0xaaaa, OP_EQ,  1, S1_LLCc,  0, S3_CLNP,
+         CL_REG, 0x000,  0, 0x0, 0x0000},
+       { "LLCc?",0xff00, 0x0300, OP_EQ,  2, S1_IPV4,  0, S3_CLNP,
+         CL_REG, 0x000,  0, 0x0, 0x0000},
+       { "IPV4?", 0xffff, 0x0800, OP_EQ,  1, S1_IPV4c, 0, S1_IPV6,
+         LD_SAP, 0x100,  3, 0x0, 0xffff},
+       { "IPV4 cont?", 0xff00, 0x4500, OP_EQ,  3, S1_IPV4F, 0, S3_CLNP,
+         LD_SUM, 0x00a,  1, 0x0, 0x0000},
+       { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ,  1, S1_TCP44, 0, S3_FRAG,
+         LD_LEN, 0x03e,  3, 0x0, 0xffff},
+       { "TCP44?", 0x00ff, 0x0006, OP_EQ,  7, S3_TCPSQ, 0, S3_CLNP,
+         LD_FID, 0x182,  3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
+       { "IPV6?", 0xffff, 0x86dd, OP_EQ,  1, S3_IPV6c, 0, S3_CLNP,
+         LD_SUM, 0x015,  1, 0x0, 0x0000},
+       { "IPV6 cont?", 0xf000, 0x6000, OP_EQ,  3, S3_TCP64, 0, S3_CLNP,
+         LD_FID, 0x484,  1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
+       { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S3_TCPSQ, 0, S3_CLNP,
+         LD_LEN, 0x03f,  1, 0x0, 0xffff},
+       { "TCP seq",    /* DADDR should point to dest port */
+         0x0000, 0x0000, OP_EQ,  0, S3_TCPFG, 4, S3_TCPFG, LD_SEQ,
+         0x081,  3, 0x0, 0xffff}, /* Load TCP seq # */
+       { "TCP control flags", 0x0000, 0x0000, OP_EQ,  0, S3_TCPHL, 0,
+         S3_TCPHL, ST_FLG, 0x045,  3, 0x0, 0x002f}, /* Load TCP flags */
+       { "TCP length", 0x0000, 0x0000, OP_EQ,  0, S3_TCPHc, 0, S3_TCPHc,
+         LD_R1,  0x205,  3, 0xB, 0xf000},
+       { "TCP length cont", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         LD_HDR, 0x0ff,  3, 0x0, 0xffff},
+       { "IP4 Fragment", 0x0000, 0x0000, OP_EQ,  0, S3_FOFF,  0, S3_FOFF,
+         LD_FID, 0x103,  3, 0x0, 0xffff}, /* FID IP4 src+dst */
+       { "IP4 frag offset", 0x0000, 0x0000, OP_EQ,  0, S3_FOFF,  0, S3_FOFF,
+         LD_SEQ, 0x040,  1, 0xD, 0xfff8},
+       { "Cleanup", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         IM_CTL, 0x001,  3, 0x0, 0x0001},
+       { NULL },
+};
+#ifdef HP_IP4FRAG_DEFAULT
+#define CAS_HP_FIRMWARE               cas_prog_ip4fragtab
+#endif
+#endif
+
+/*
+ * Alternate table which does batching without reassembly
+ */
+#ifdef USE_HP_IP46TCP4BATCH
+/* batching-only variant: instead of loading the full header (LD_HDR),
+ * the final TCP step sets the batch bit via IM_CTL 0x040 so packets
+ * are batched but not reassembled.
+ */
+static cas_hp_inst_t cas_prog_ip46tcp4batchtab[] = {
+       CAS_PROG_IP46TCP4_PREAMBLE,
+       { "TCP seq",    /* DADDR should point to dest port */
+         0x0000, 0x0000, OP_EQ,  0, S1_TCPFG, 0, S1_TCPFG, LD_SEQ,
+         0x081,  3, 0x0, 0xffff}, /* Load TCP seq # */
+       { "TCP control flags", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHL, 0,
+         S1_TCPHL, ST_FLG, 0x000,  3, 0x0, 0x0000}, /* Load TCP flags */
+       { "TCP length", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHc, 0,
+         S1_TCPHc, LD_R1,  0x205,  3, 0xB, 0xf000},
+       { "TCP length cont", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0,
+         S1_PCKT,  IM_CTL, 0x040,  3, 0x0, 0xffff}, /* set batch bit */
+       { "Cleanup", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         IM_CTL, 0x001,  3, 0x0, 0x0001},
+       { "Drop packet", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0,
+         S1_PCKT,  IM_CTL, 0x080,  3, 0x0, 0xffff},
+       { NULL },
+};
+#ifdef HP_IP46TCP4BATCH_DEFAULT
+#define CAS_HP_FIRMWARE               cas_prog_ip46tcp4batchtab
+#endif
+#endif
+
+/* Workaround for Cassini rev2 descriptor corruption problem.
+ * Does batching without reassembly, and sets the SAP to a known
+ * data pattern for all packets.
+ */
+#ifdef USE_HP_WORKAROUND
+/* rev2 workaround program: the IM_SAP 0x6AE steps ("IPV4?"/"Cleanup")
+ * force the SAP to the known data pattern described in the comment
+ * above; batching is done without reassembly.
+ */
+static cas_hp_inst_t  cas_prog_workaroundtab[] = {
+       { "packet arrival?", 0xffff, 0x0000, OP_NP,  6, S1_VLAN,  0,
+         S1_PCKT,  CL_REG, 0x3ff,  1, 0x0, 0x0000} ,
+       { "VLAN?", 0xffff, 0x8100, OP_EQ,  1, S1_CFI, 0, S1_8023,
+         IM_CTL, 0x04a,  3, 0x0, 0xffff},
+       { "CFI?", 0x1000, 0x1000, OP_EQ,  0, S1_CLNP,  1, S1_8023,
+         CL_REG, 0x000,  0, 0x0, 0x0000},
+       { "8023?", 0xffff, 0x0600, OP_LT,  1, S1_LLC,   0, S1_IPV4,
+         CL_REG, 0x000,  0, 0x0, 0x0000},
+       { "LLC?", 0xffff, 0xaaaa, OP_EQ,  1, S1_LLCc,  0, S1_CLNP,
+         CL_REG, 0x000,  0, 0x0, 0x0000},
+       { "LLCc?", 0xff00, 0x0300, OP_EQ,  2, S1_IPV4,  0, S1_CLNP,
+         CL_REG, 0x000,  0, 0x0, 0x0000},
+       { "IPV4?", 0xffff, 0x0800, OP_EQ,  1, S1_IPV4c, 0, S1_IPV6,
+         IM_SAP, 0x6AE,  3, 0x0, 0xffff},
+       { "IPV4 cont?", 0xff00, 0x4500, OP_EQ,  3, S1_IPV4F, 0, S1_CLNP,
+         LD_SUM, 0x00a,  1, 0x0, 0x0000},
+       { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ,  1, S1_TCP44, 0, S1_CLNP,
+         LD_LEN, 0x03e,  1, 0x0, 0xffff},
+       { "TCP44?", 0x00ff, 0x0006, OP_EQ,  7, S1_TCPSQ, 0, S1_CLNP,
+         LD_FID, 0x182,  3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
+       { "IPV6?", 0xffff, 0x86dd, OP_EQ,  1, S1_IPV6L, 0, S1_CLNP,
+         LD_SUM, 0x015,  1, 0x0, 0x0000},
+       { "IPV6 len", 0xf000, 0x6000, OP_EQ,  0, S1_IPV6c, 0, S1_CLNP,
+         IM_R1,  0x128,  1, 0x0, 0xffff},
+       { "IPV6 cont?", 0x0000, 0x0000, OP_EQ,  3, S1_TCP64, 0, S1_CLNP,
+         LD_FID, 0x484,  1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
+       { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP,
+         LD_LEN, 0x03f,  1, 0x0, 0xffff},
+       { "TCP seq",      /* DADDR should point to dest port */
+         0x0000, 0x0000, OP_EQ,  0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ,
+         0x081,  3, 0x0, 0xffff}, /* Load TCP seq # */
+       { "TCP control flags", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHL, 0,
+         S1_TCPHL, ST_FLG, 0x045,  3, 0x0, 0x002f}, /* Load TCP flags */
+       { "TCP length", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHc, 0, S1_TCPHc,
+         LD_R1,  0x205,  3, 0xB, 0xf000},
+       { "TCP length cont", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0,
+         S1_PCKT,  LD_HDR, 0x0ff,  3, 0x0, 0xffff},
+       { "Cleanup", 0x0000, 0x0000, OP_EQ,  0, S1_CLNP2, 0, S1_CLNP2,
+         IM_SAP, 0x6AE,  3, 0x0, 0xffff} ,
+       { "Cleanup 2", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         IM_CTL, 0x001,  3, 0x0, 0x0001},
+       { NULL },
+};
+#ifdef HP_WORKAROUND_DEFAULT
+#define CAS_HP_FIRMWARE               cas_prog_workaroundtab
+#endif
+#endif
+
+#ifdef USE_HP_ENCRYPT
+static cas_hp_inst_t  cas_prog_encryptiontab[] = {
+       { "packet arrival?", 0xffff, 0x0000, OP_NP,  6, S1_VLAN,  0,
+         S1_PCKT,  CL_REG, 0x3ff,  1, 0x0, 0x0000},
+       { "VLAN?", 0xffff, 0x8100, OP_EQ,  1, S1_CFI,   0, S1_8023,
+         IM_CTL, 0x00a,  3, 0x0, 0xffff},
+#if 0
+//"CFI?", /* 02 FIND CFI and If FIND go to S1_DROP */
+//0x1000, 0x1000, OP_EQ,  0, S1_DROP,  1, S1_8023,  CL_REG, 0x000,  0, 0x0, 0x0000,
+#endif
+       { "CFI?", /* FIND CFI and If FIND go to CleanUP1 (ignore and send to host) */
+         0x1000, 0x1000, OP_EQ,  0, S1_CLNP,  1, S1_8023,
+         CL_REG, 0x000,  0, 0x0, 0x0000},
+       { "8023?", 0xffff, 0x0600, OP_LT,  1, S1_LLC,   0, S1_IPV4,
+         CL_REG, 0x000,  0, 0x0, 0x0000},
+       { "LLC?", 0xffff, 0xaaaa, OP_EQ,  1, S1_LLCc,  0, S1_CLNP,
+         CL_REG, 0x000,  0, 0x0, 0x0000},
+       { "LLCc?", 0xff00, 0x0300, OP_EQ,  2, S1_IPV4,  0, S1_CLNP,
+         CL_REG, 0x000,  0, 0x0, 0x0000},
+       { "IPV4?", 0xffff, 0x0800, OP_EQ,  1, S1_IPV4c, 0, S1_IPV6,
+         LD_SAP, 0x100,  3, 0x0, 0xffff},
+       { "IPV4 cont?", 0xff00, 0x4500, OP_EQ,  3, S1_IPV4F, 0, S1_CLNP,
+         LD_SUM, 0x00a,  1, 0x0, 0x0000},
+       { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ,  1, S1_TCP44, 0, S1_CLNP,
+         LD_LEN, 0x03e,  1, 0x0, 0xffff},
+       { "TCP44?", 0x00ff, 0x0006, OP_EQ,  7, S1_TCPSQ, 0, S1_ESP4,
+         LD_FID, 0x182,  1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
+       { "IPV6?", 0xffff, 0x86dd, OP_EQ,  1, S1_IPV6L, 0, S1_CLNP,
+         LD_SUM, 0x015,  1, 0x0, 0x0000},
+       { "IPV6 len", 0xf000, 0x6000, OP_EQ,  0, S1_IPV6c, 0, S1_CLNP,
+         IM_R1,  0x128,  1, 0x0, 0xffff},
+       { "IPV6 cont?", 0x0000, 0x0000, OP_EQ,  3, S1_TCP64, 0, S1_CLNP,
+         LD_FID, 0x484,  1, 0x0, 0xffff}, /*  FID IP6&TCP src+dst */
+       { "TCP64?",
+#if 0
+//@@@0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_ESP6,  LD_LEN, 0x03f,  1, 0x0, 0xffff,
+#endif
+         0xff00, 0x0600, OP_EQ, 12, S1_TCPSQ, 0, S1_ESP6,  LD_LEN,
+         0x03f,  1, 0x0, 0xffff},
+       { "TCP seq", /* 14:DADDR should point to dest port */
+         0xFFFF, 0x0080, OP_EQ,  0, S2_HTTP,  0, S1_TCPFG, LD_SEQ,
+         0x081,  3, 0x0, 0xffff}, /* Load TCP seq # */
+       { "TCP control flags", 0xFFFF, 0x8080, OP_EQ,  0, S2_HTTP,  0,
+         S1_TCPHL, ST_FLG, 0x145,  2, 0x0, 0x002f}, /* Load TCP flags */
+       { "TCP length", 0x0000, 0x0000, OP_EQ,  0, S1_TCPHc, 0, S1_TCPHc,
+         LD_R1,  0x205,  3, 0xB, 0xf000} ,
+       { "TCP length cont", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0,
+         S1_PCKT,  LD_HDR, 0x0ff,  3, 0x0, 0xffff},
+       { "Cleanup", 0x0000, 0x0000, OP_EQ,  0, S1_CLNP2,  0, S1_CLNP2,
+         IM_CTL, 0x001,  3, 0x0, 0x0001},
+       { "Cleanup 2", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         CL_REG, 0x002,  3, 0x0, 0x0000},
+       { "Drop packet", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         IM_CTL, 0x080,  3, 0x0, 0xffff},
+       { "No HTTP", 0x0000, 0x0000, OP_EQ,  0, S1_PCKT,  0, S1_PCKT,
+         IM_CTL, 0x044,  3, 0x0, 0xffff},
+       { "IPV4 ESP encrypted?",  /* S1_ESP4 */
+         0x00ff, 0x0032, OP_EQ,  0, S1_CLNP2, 0, S1_AH4, IM_CTL,
+         0x021, 1,  0x0, 0xffff},
+       { "IPV4 AH encrypted?",   /* S1_AH4 */
+         0x00ff, 0x0033, OP_EQ,  0, S1_CLNP2, 0, S1_CLNP, IM_CTL,
+         0x021, 1,  0x0, 0xffff},
+       { "IPV6 ESP encrypted?",  /* S1_ESP6 */
+#if 0
+//@@@0x00ff, 0x0032, OP_EQ,  0, S1_CLNP2, 0, S1_AH6, IM_CTL, 0x021, 1,  0x0, 0xffff,
+#endif
+         0xff00, 0x3200, OP_EQ,  0, S1_CLNP2, 0, S1_AH6, IM_CTL,
+         0x021, 1,  0x0, 0xffff},
+       { "IPV6 AH encrypted?",   /* S1_AH6 */
+#if 0
+//@@@0x00ff, 0x0033, OP_EQ,  0, S1_CLNP2, 0, S1_CLNP, IM_CTL, 0x021, 1,  0x0, 0xffff,
+#endif
+         0xff00, 0x3300, OP_EQ,  0, S1_CLNP2, 0, S1_CLNP, IM_CTL,
+         0x021, 1,  0x0, 0xffff},
+       { NULL },
+};
+#ifdef HP_ENCRYPT_DEFAULT
+#define CAS_HP_FIRMWARE               cas_prog_encryptiontab
+#endif
+#endif
+
+static cas_hp_inst_t cas_prog_null[] = { {NULL} };
+#ifdef HP_NULL_DEFAULT
+#define CAS_HP_FIRMWARE               cas_prog_null
+#endif
+
+/* phy types */
+#define   CAS_PHY_UNKNOWN       0x00
+#define   CAS_PHY_SERDES        0x01
+#define   CAS_PHY_MII_MDIO0     0x02
+#define   CAS_PHY_MII_MDIO1     0x04
+#define   CAS_PHY_MII(x)        ((x) & (CAS_PHY_MII_MDIO0 | CAS_PHY_MII_MDIO1))
+
+/* _RING_INDEX is the index for the ring sizes to be used.  _RING_SIZE
+ * is the actual size. the default index for the various rings is
+ * 8. NOTE: there a bunch of alignment constraints for the rings. to
+ * deal with that, i just allocate rings to create the desired
+ * alignment. here are the constraints:
+ *   RX DESC and COMP rings must be 8KB aligned
+ *   TX DESC must be 2KB aligned.
+ * if you change the numbers, be cognizant of how the alignment will change
+ * in INIT_BLOCK as well.
+ */
+
+#define DESC_RING_I_TO_S(x)  (32*(1 << (x)))
+#define COMP_RING_I_TO_S(x)  (128*(1 << (x)))
+#define TX_DESC_RING_INDEX 4  /* 512 = 8k */
+#define RX_DESC_RING_INDEX 4  /* 512 = 8k */
+#define RX_COMP_RING_INDEX 4  /* 2048 = 64k: should be 4x rx ring size */
+
+#if (TX_DESC_RING_INDEX > 8) || (TX_DESC_RING_INDEX < 0)
+#error TX_DESC_RING_INDEX must be between 0 and 8
+#endif
+
+#if (RX_DESC_RING_INDEX > 8) || (RX_DESC_RING_INDEX < 0)
+#error RX_DESC_RING_INDEX must be between 0 and 8
+#endif
+
+#if (RX_COMP_RING_INDEX > 8) || (RX_COMP_RING_INDEX < 0)
+#error RX_COMP_RING_INDEX must be between 0 and 8
+#endif
+
+#define N_TX_RINGS                    MAX_TX_RINGS      /* for QoS */
+#define N_TX_RINGS_MASK               MAX_TX_RINGS_MASK
+#define N_RX_DESC_RINGS               MAX_RX_DESC_RINGS /* 1 for ipsec */
+#define N_RX_COMP_RINGS               0x1 /* for mult. PCI interrupts */
+
+/* number of flows that can go through re-assembly */
+#define N_RX_FLOWS                    64
+
+#define TX_DESC_RING_SIZE  DESC_RING_I_TO_S(TX_DESC_RING_INDEX)
+#define RX_DESC_RING_SIZE  DESC_RING_I_TO_S(RX_DESC_RING_INDEX)
+#define RX_COMP_RING_SIZE  COMP_RING_I_TO_S(RX_COMP_RING_INDEX)
+#define TX_DESC_RINGN_INDEX(x) TX_DESC_RING_INDEX
+#define RX_DESC_RINGN_INDEX(x) RX_DESC_RING_INDEX
+#define RX_COMP_RINGN_INDEX(x) RX_COMP_RING_INDEX
+#define TX_DESC_RINGN_SIZE(x)  TX_DESC_RING_SIZE
+#define RX_DESC_RINGN_SIZE(x)  RX_DESC_RING_SIZE
+#define RX_COMP_RINGN_SIZE(x)  RX_COMP_RING_SIZE
+
+/* convert values */
+#define CAS_BASE(x, y)                (((y) << (x ## _SHIFT)) & (x ## _MASK))
+#define CAS_VAL(x, y)                 (((y) & (x ## _MASK)) >> (x ## _SHIFT))
+#define CAS_TX_RINGN_BASE(y)          ((TX_DESC_RINGN_INDEX(y) << \
+                                        TX_CFG_DESC_RINGN_SHIFT(y)) & \
+                                        TX_CFG_DESC_RINGN_MASK(y))
+
+/* min is 2k, but we can't do jumbo frames unless it's at least 8k */
+#define CAS_MIN_PAGE_SHIFT            11 /* 2048 */
+#define CAS_JUMBO_PAGE_SHIFT          13 /* 8192 */
+#define CAS_MAX_PAGE_SHIFT            14 /* 16384 */
+
+#define TX_DESC_BUFLEN_MASK         0x0000000000003FFFULL /* buffer length in
+                                                            bytes. 0 - 9256 */
+#define TX_DESC_BUFLEN_SHIFT        0
+#define TX_DESC_CSUM_START_MASK     0x00000000001F8000ULL /* checksum start. #
+                                                            of bytes to be
+                                                            skipped before
+                                                            csum calc begins.
+                                                            value must be
+                                                            even */
+#define TX_DESC_CSUM_START_SHIFT    15
+#define TX_DESC_CSUM_STUFF_MASK     0x000000001FE00000ULL /* checksum stuff.
+                                                            byte offset w/in
+                                                            the pkt for the
+                                                            1st csum byte.
+                                                            must be > 8 */
+#define TX_DESC_CSUM_STUFF_SHIFT    21
+#define TX_DESC_CSUM_EN             0x0000000020000000ULL /* enable checksum */
+#define TX_DESC_EOF                 0x0000000040000000ULL /* end of frame */
+#define TX_DESC_SOF                 0x0000000080000000ULL /* start of frame */
+#define TX_DESC_INTME               0x0000000100000000ULL /* interrupt me */
+#define TX_DESC_NO_CRC              0x0000000200000000ULL /* debugging only.
+                                                            CRC will not be
+                                                            inserted into
+                                                            outgoing frame. */
+/* one TX descriptor-ring entry (both words little-endian for the hw) */
+struct cas_tx_desc {
+       __le64     control;    /* TX_DESC_* length/csum/SOF/EOF control bits */
+       __le64     buffer;     /* DMA address of the buffer to transmit */
+};
+
+/* descriptor ring for free buffers contains page-sized buffers. the index
+ * value is not used by the hw in any way. it's just stored and returned in
+ * the completion ring.
+ */
+struct cas_rx_desc {
+       __le64     index;      /* sw cookie, echoed back in the completion ring */
+       __le64     buffer;     /* DMA address of a page-sized rx buffer */
+};
+
+/* received packets are put on the completion ring. */
+/* word 1 */
+#define RX_COMP1_DATA_SIZE_MASK           0x0000000007FFE000ULL
+#define RX_COMP1_DATA_SIZE_SHIFT          13
+#define RX_COMP1_DATA_OFF_MASK            0x000001FFF8000000ULL
+#define RX_COMP1_DATA_OFF_SHIFT           27
+#define RX_COMP1_DATA_INDEX_MASK          0x007FFE0000000000ULL
+#define RX_COMP1_DATA_INDEX_SHIFT         41
+#define RX_COMP1_SKIP_MASK                0x0180000000000000ULL
+#define RX_COMP1_SKIP_SHIFT               55
+#define RX_COMP1_RELEASE_NEXT             0x0200000000000000ULL
+#define RX_COMP1_SPLIT_PKT                0x0400000000000000ULL
+#define RX_COMP1_RELEASE_FLOW             0x0800000000000000ULL
+#define RX_COMP1_RELEASE_DATA             0x1000000000000000ULL
+#define RX_COMP1_RELEASE_HDR              0x2000000000000000ULL
+#define RX_COMP1_TYPE_MASK                0xC000000000000000ULL
+#define RX_COMP1_TYPE_SHIFT               62
+
+/* word 2 */
+#define RX_COMP2_NEXT_INDEX_MASK          0x00000007FFE00000ULL
+#define RX_COMP2_NEXT_INDEX_SHIFT         21
+#define RX_COMP2_HDR_SIZE_MASK            0x00000FF800000000ULL
+#define RX_COMP2_HDR_SIZE_SHIFT           35
+#define RX_COMP2_HDR_OFF_MASK             0x0003F00000000000ULL
+#define RX_COMP2_HDR_OFF_SHIFT            44
+#define RX_COMP2_HDR_INDEX_MASK           0xFFFC000000000000ULL
+#define RX_COMP2_HDR_INDEX_SHIFT          50
+
+/* word 3 */
+#define RX_COMP3_SMALL_PKT                0x0000000000000001ULL
+#define RX_COMP3_JUMBO_PKT                0x0000000000000002ULL
+#define RX_COMP3_JUMBO_HDR_SPLIT_EN       0x0000000000000004ULL
+#define RX_COMP3_CSUM_START_MASK          0x000000000007F000ULL
+#define RX_COMP3_CSUM_START_SHIFT         12
+#define RX_COMP3_FLOWID_MASK              0x0000000001F80000ULL
+#define RX_COMP3_FLOWID_SHIFT             19
+#define RX_COMP3_OPCODE_MASK              0x000000000E000000ULL
+#define RX_COMP3_OPCODE_SHIFT             25
+#define RX_COMP3_FORCE_FLAG               0x0000000010000000ULL
+#define RX_COMP3_NO_ASSIST                0x0000000020000000ULL
+#define RX_COMP3_LOAD_BAL_MASK            0x000001F800000000ULL
+#define RX_COMP3_LOAD_BAL_SHIFT           35
+#define RX_PLUS_COMP3_ENC_PKT             0x0000020000000000ULL /* cas+ */
+#define RX_COMP3_L3_HEAD_OFF_MASK         0x0000FE0000000000ULL /* cas */
+#define RX_COMP3_L3_HEAD_OFF_SHIFT        41
+#define RX_PLUS_COMP_L3_HEAD_OFF_MASK     0x0000FC0000000000ULL /* cas+ */
+#define RX_PLUS_COMP_L3_HEAD_OFF_SHIFT    42
+#define RX_COMP3_SAP_MASK                 0xFFFF000000000000ULL
+#define RX_COMP3_SAP_SHIFT                48
+
+/* word 4 */
+#define RX_COMP4_TCP_CSUM_MASK            0x000000000000FFFFULL
+#define RX_COMP4_TCP_CSUM_SHIFT           0
+#define RX_COMP4_PKT_LEN_MASK             0x000000003FFF0000ULL
+#define RX_COMP4_PKT_LEN_SHIFT            16
+#define RX_COMP4_PERFECT_MATCH_MASK       0x00000003C0000000ULL
+#define RX_COMP4_PERFECT_MATCH_SHIFT      30
+#define RX_COMP4_ZERO                     0x0000080000000000ULL
+#define RX_COMP4_HASH_VAL_MASK            0x0FFFF00000000000ULL
+#define RX_COMP4_HASH_VAL_SHIFT           44
+#define RX_COMP4_HASH_PASS                0x1000000000000000ULL
+#define RX_COMP4_BAD                      0x4000000000000000ULL
+#define RX_COMP4_LEN_MISMATCH             0x8000000000000000ULL
+
+/* we encode the following: ring/index/release. only 14 bits
+ * are usable.
+ * NOTE: the encoding is dependent upon RX_DESC_RING_SIZE and
+ *       MAX_RX_DESC_RINGS. */
+#define RX_INDEX_NUM_MASK                 0x0000000000000FFFULL
+#define RX_INDEX_NUM_SHIFT                0
+#define RX_INDEX_RING_MASK                0x0000000000001000ULL
+#define RX_INDEX_RING_SHIFT               12
+#define RX_INDEX_RELEASE                  0x0000000000002000ULL
+
+/* one RX completion-ring entry; decode the words with the
+ * RX_COMP1_* .. RX_COMP4_* masks/shifts defined above.
+ */
+struct cas_rx_comp {
+       __le64     word1;
+       __le64     word2;
+       __le64     word3;
+       __le64     word4;
+};
+
+/* link-negotiation state machine driven by the link timer */
+enum link_state {
+       link_down = 0,  /* No link, will retry */
+       link_aneg,      /* Autoneg in progress */
+       link_force_try, /* Try Forced link speed */
+       link_force_ret, /* Forced mode worked, retrying autoneg */
+       link_force_ok,  /* Stay in forced mode */
+       link_up         /* Link is up */
+};
+
+/* an rx buffer page tracked on the driver's spare/inuse lists.
+ * NOTE(review): 'used' appears to count outstanding hw/flow references
+ * to the page — confirm the exact semantics in cassini.c.
+ */
+typedef struct cas_page {
+       struct list_head list;     /* linkage on rx_spare_list/rx_inuse_list */
+       struct page *buffer;
+       dma_addr_t dma_addr;       /* mapped DMA address of 'buffer' */
+       int used;
+} cas_page_t;
+
+
+/* some alignment constraints:
+ * TX DESC, RX DESC, and RX COMP must each be 8K aligned.
+ * TX COMPWB must be 8-byte aligned.
+ * to accomplish this, here's what we do:
+ *
+ * INIT_BLOCK_RX_COMP  = 64k (already aligned)
+ * INIT_BLOCK_RX_DESC  = 8k
+ * INIT_BLOCK_TX       = 8k
+ * INIT_BLOCK_RX1_DESC = 8k
+ * TX COMPWB
+ */
+#define INIT_BLOCK_TX           (TX_DESC_RING_SIZE)
+#define INIT_BLOCK_RX_DESC      (RX_DESC_RING_SIZE)
+#define INIT_BLOCK_RX_COMP      (RX_COMP_RING_SIZE)
+
+/* single contiguous DMA allocation holding all rings; member order is
+ * chosen so the alignment constraints described above (8K for desc/comp
+ * rings, 8 bytes for tx_compwb) fall out of the layout itself.
+ */
+struct cas_init_block {
+       struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP];
+       struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC];
+       struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX];
+       __le64 tx_compwb;
+};
+
+/* tiny buffers to deal with target abort issue. we allocate a bit
+ * over so that we don't have target abort issues with these buffers
+ * as well.
+ */
+#define TX_TINY_BUF_LEN    0x100
+#define TX_TINY_BUF_BLOCK  ((INIT_BLOCK_TX + 1)*TX_TINY_BUF_LEN)
+
+/* bookkeeping for the per-ring "tiny" copy buffers above */
+struct cas_tiny_count {
+       int nbufs;     /* tiny buffers consumed by this tx descriptor */
+       int used;      /* bytes used within the current tiny buffer */
+};
+
+struct cas {
+       spinlock_t lock; /* for most bits */
+       spinlock_t tx_lock[N_TX_RINGS]; /* tx bits */
+       spinlock_t stat_lock[N_TX_RINGS + 1]; /* for stat gathering */
+       spinlock_t rx_inuse_lock; /* rx inuse list */
+       spinlock_t rx_spare_lock; /* rx spare list */
+
+       void __iomem *regs;
+       int tx_new[N_TX_RINGS], tx_old[N_TX_RINGS];
+       int rx_old[N_RX_DESC_RINGS];
+       int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS];
+       int rx_last[N_RX_DESC_RINGS];
+
+       struct napi_struct napi;
+
+       /* Set when chip is actually in operational state
+        * (ie. not power managed) */
+       int hw_running;
+       int opened;
+       struct mutex pm_mutex; /* open/close/suspend/resume */
+
+       struct cas_init_block *init_block;
+       struct cas_tx_desc *init_txds[MAX_TX_RINGS];
+       struct cas_rx_desc *init_rxds[MAX_RX_DESC_RINGS];
+       struct cas_rx_comp *init_rxcs[MAX_RX_COMP_RINGS];
+
+       /* we use sk_buffs for tx and pages for rx. the rx skbuffs
+        * are there for flow re-assembly. */
+       struct sk_buff      *tx_skbs[N_TX_RINGS][TX_DESC_RING_SIZE];
+       struct sk_buff_head  rx_flows[N_RX_FLOWS];
+       cas_page_t          *rx_pages[N_RX_DESC_RINGS][RX_DESC_RING_SIZE];
+       struct list_head     rx_spare_list, rx_inuse_list;
+       int                  rx_spares_needed;
+
+       /* for small packets when copying would be quicker than
+          mapping */
+       struct cas_tiny_count tx_tiny_use[N_TX_RINGS][TX_DESC_RING_SIZE];
+       u8 *tx_tiny_bufs[N_TX_RINGS];
+
+       u32                     msg_enable;
+
+       /* N_TX_RINGS must be >= N_RX_DESC_RINGS */
+       struct net_device_stats net_stats[N_TX_RINGS + 1];
+
+       u32                     pci_cfg[64 >> 2];
+       u8                      pci_revision;
+
+       int                     phy_type;
+       int                     phy_addr;
+       u32                     phy_id;
+#define CAS_FLAG_1000MB_CAP     0x00000001
+#define CAS_FLAG_REG_PLUS       0x00000002
+#define CAS_FLAG_TARGET_ABORT   0x00000004
+#define CAS_FLAG_SATURN         0x00000008
+#define CAS_FLAG_RXD_POST_MASK  0x000000F0
+#define CAS_FLAG_RXD_POST_SHIFT 4
+#define CAS_FLAG_RXD_POST(x)    ((1 << (CAS_FLAG_RXD_POST_SHIFT + (x))) & \
+                                 CAS_FLAG_RXD_POST_MASK)
+#define CAS_FLAG_ENTROPY_DEV    0x00000100
+#define CAS_FLAG_NO_HW_CSUM     0x00000200
+       u32                     cas_flags;
+       int                     packet_min; /* minimum packet size */
+       int                     tx_fifo_size;
+       int                     rx_fifo_size;
+       int                     rx_pause_off;
+       int                     rx_pause_on;
+       int                     crc_size;      /* 4 if half-duplex */
+
+       int                     pci_irq_INTC;
+       int                     min_frame_size; /* for tx fifo workaround */
+
+       /* page size allocation */
+       int                     page_size;
+       int                     page_order;
+       int                     mtu_stride;
+
+       u32                     mac_rx_cfg;
+
+       /* Autoneg & PHY control */
+       int                     link_cntl;
+       int                     link_fcntl;
+       enum link_state         lstate;
+       struct timer_list       link_timer;
+       int                     timer_ticks;
+       struct work_struct      reset_task;
+#if 0
+       atomic_t                reset_task_pending;
+#else
+       atomic_t                reset_task_pending;
+       atomic_t                reset_task_pending_mtu;
+       atomic_t                reset_task_pending_spare;
+       atomic_t                reset_task_pending_all;
+#endif
+
+       /* Link-down problem workaround */
+#define LINK_TRANSITION_UNKNOWN        0
+#define LINK_TRANSITION_ON_FAILURE     1
+#define LINK_TRANSITION_STILL_FAILED   2
+#define LINK_TRANSITION_LINK_UP        3
+#define LINK_TRANSITION_LINK_CONFIG    4
+#define LINK_TRANSITION_LINK_DOWN      5
+#define LINK_TRANSITION_REQUESTED_RESET        6
+       int                     link_transition;
+       int                     link_transition_jiffies_valid;
+       unsigned long           link_transition_jiffies;
+
+       /* Tuning */
+       u8 orig_cacheline_size; /* value when loaded */
+#define CAS_PREF_CACHELINE_SIZE         0x20   /* Minimum desired */
+
+       /* Diagnostic counters and state. */
+       int                     casreg_len; /* reg-space size for dumping */
+       u64                     pause_entered;
+       u16                     pause_last_time_recvd;
+
+       dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS];
+       struct pci_dev *pdev;
+       struct net_device *dev;
+#if defined(CONFIG_OF)
+       struct device_node      *of_node;
+#endif
+
+       /* Firmware Info */
+       u16                     fw_load_addr;
+       u32                     fw_size;
+       u8                      *fw_data;
+};
+
+#define TX_DESC_NEXT(r, x)  (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1))
+#define RX_DESC_ENTRY(r, x) ((x) & (RX_DESC_RINGN_SIZE(r) - 1))
+#define RX_COMP_ENTRY(r, x) ((x) & (RX_COMP_RINGN_SIZE(r) - 1))
+
+#define TX_BUFF_COUNT(r, x, y)    ((x) <= (y) ? ((y) - (x)) : \
+        (TX_DESC_RINGN_SIZE(r) - (x) + (y)))
+
+#define TX_BUFFS_AVAIL(cp, i)  ((cp)->tx_old[(i)] <= (cp)->tx_new[(i)] ? \
+        (cp)->tx_old[(i)] + (TX_DESC_RINGN_SIZE(i) - 1) - (cp)->tx_new[(i)] : \
+        (cp)->tx_old[(i)] - (cp)->tx_new[(i)] - 1)
+
+#define CAS_ALIGN(addr, align) \
+     (((unsigned long) (addr) + ((align) - 1UL)) & ~((align) - 1))
+
+#define RX_FIFO_SIZE                  16384
+#define EXPANSION_ROM_SIZE            65536
+
+#define CAS_MC_EXACT_MATCH_SIZE       15
+#define CAS_MC_HASH_SIZE              256
+#define CAS_MC_HASH_MAX              (CAS_MC_EXACT_MATCH_SIZE + \
+                                      CAS_MC_HASH_SIZE)
+
+#define TX_TARGET_ABORT_LEN           0x20
+#define RX_SWIVEL_OFF_VAL             0x2
+#define RX_AE_FREEN_VAL(x)            (RX_DESC_RINGN_SIZE(x) >> 1)
+#define RX_AE_COMP_VAL                (RX_COMP_RING_SIZE >> 1)
+#define RX_BLANK_INTR_PKT_VAL         0x05
+#define RX_BLANK_INTR_TIME_VAL        0x0F
+#define HP_TCP_THRESH_VAL             1530 /* reduce to enable reassembly */
+
+#define RX_SPARE_COUNT                (RX_DESC_RING_SIZE >> 1)
+#define RX_SPARE_RECOVER_VAL          (RX_SPARE_COUNT >> 2)
+
+#endif /* _CASSINI_H */
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
new file mode 100644 (file)
index 0000000..ed47585
--- /dev/null
@@ -0,0 +1,10263 @@
+/* niu.c: Neptune ethernet driver.
+ *
+ * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/mii.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/ipv6.h>
+#include <linux/log2.h>
+#include <linux/jiffies.h>
+#include <linux/crc32.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <linux/io.h>
+#include <linux/of_device.h>
+
+#include "niu.h"
+
+#define DRV_MODULE_NAME                "niu"
+#define DRV_MODULE_VERSION     "1.1"
+#define DRV_MODULE_RELDATE     "Apr 22, 2010"
+
+static char version[] __devinitdata =
+       DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("NIU ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#ifndef readq
+/* Fallback 64-bit MMIO accessors for platforms without native readq/
+ * writeq: emulated with two 32-bit accesses, low word first.  NOTE: the
+ * combined 64-bit access is not atomic with respect to the hardware.
+ */
+static u64 readq(void __iomem *reg)
+{
+       return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
+}
+
+static void writeq(u64 val, void __iomem *reg)
+{
+       writel(val & 0xffffffff, reg);
+       writel(val >> 32, reg + 0x4UL);
+}
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
+       {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
+       {}
+};
+
+MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
+
+#define NIU_TX_TIMEOUT                 (5 * HZ)
+
+#define nr64(reg)              readq(np->regs + (reg))
+#define nw64(reg, val)         writeq((val), np->regs + (reg))
+
+#define nr64_mac(reg)          readq(np->mac_regs + (reg))
+#define nw64_mac(reg, val)     writeq((val), np->mac_regs + (reg))
+
+#define nr64_ipp(reg)          readq(np->regs + np->ipp_off + (reg))
+#define nw64_ipp(reg, val)     writeq((val), np->regs + np->ipp_off + (reg))
+
+#define nr64_pcs(reg)          readq(np->regs + np->pcs_off + (reg))
+#define nw64_pcs(reg, val)     writeq((val), np->regs + np->pcs_off + (reg))
+
+#define nr64_xpcs(reg)         readq(np->regs + np->xpcs_off + (reg))
+#define nw64_xpcs(reg, val)    writeq((val), np->regs + np->xpcs_off + (reg))
+
+#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+static int niu_debug;
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "NIU debug level");
+
+#define niu_lock_parent(np, flags) \
+       spin_lock_irqsave(&np->parent->lock, flags)
+#define niu_unlock_parent(np, flags) \
+       spin_unlock_irqrestore(&np->parent->lock, flags)
+
+static int serdes_init_10g_serdes(struct niu *np);
+
+/* Poll MAC register @reg until all of @bits are clear, checking up to
+ * @limit times with @delay microseconds between reads.
+ * Returns 0 on success, -ENODEV if the bits never cleared.
+ */
+static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
+                                    u64 bits, int limit, int delay)
+{
+       while (--limit >= 0) {
+               u64 val = nr64_mac(reg);
+
+               if (!(val & bits))
+                       break;
+               udelay(delay);
+       }
+       /* limit went negative only if the loop never hit the break */
+       if (limit < 0)
+               return -ENODEV;
+       return 0;
+}
+
+/* Write @bits (self-clearing command bits) to MAC register @reg, then
+ * wait for the hardware to clear them.  Logs an error naming @reg_name
+ * on timeout.  Returns 0 on success, -ENODEV on timeout.
+ * Note: unlike the IPP variant below, this overwrites the whole register.
+ */
+static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
+                                       u64 bits, int limit, int delay,
+                                       const char *reg_name)
+{
+       int err;
+
+       nw64_mac(reg, bits);
+       err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
+       if (err)
+               netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+                          (unsigned long long)bits, reg_name,
+                          (unsigned long long)nr64_mac(reg));
+       return err;
+}
+
+#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
+({     BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
+       __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
+})
+
+/* Poll IPP register @reg until all of @bits are clear (@limit tries,
+ * @delay us apart).  Returns 0 on success, -ENODEV on timeout.
+ */
+static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
+                                    u64 bits, int limit, int delay)
+{
+       while (--limit >= 0) {
+               u64 val = nr64_ipp(reg);
+
+               if (!(val & bits))
+                       break;
+               udelay(delay);
+       }
+       if (limit < 0)
+               return -ENODEV;
+       return 0;
+}
+
+/* Set @bits in IPP register @reg and wait for the hardware to clear
+ * them.  Unlike the MAC variant, this is a read-modify-write so the
+ * other bits already set in the register are preserved.
+ * Returns 0 on success, -ENODEV on timeout (error logged via @reg_name).
+ */
+static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
+                                       u64 bits, int limit, int delay,
+                                       const char *reg_name)
+{
+       int err;
+       u64 val;
+
+       val = nr64_ipp(reg);
+       val |= bits;
+       nw64_ipp(reg, val);
+
+       err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
+       if (err)
+               netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+                          (unsigned long long)bits, reg_name,
+                          (unsigned long long)nr64_ipp(reg));
+       return err;
+}
+
+#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
+({     BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
+       __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
+})
+
+/* Poll a plain (np->regs-relative) register @reg until all of @bits are
+ * clear (@limit tries, @delay us apart).  Returns 0 or -ENODEV.
+ */
+static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
+                                u64 bits, int limit, int delay)
+{
+       while (--limit >= 0) {
+               u64 val = nr64(reg);
+
+               if (!(val & bits))
+                       break;
+               udelay(delay);
+       }
+       if (limit < 0)
+               return -ENODEV;
+       return 0;
+}
+
+#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
+({     BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
+       __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
+})
+
+/* Write @bits (self-clearing command bits) to register @reg and wait for
+ * the hardware to clear them; logs @reg_name on timeout.
+ * Returns 0 on success, -ENODEV on timeout.
+ */
+static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
+                                   u64 bits, int limit, int delay,
+                                   const char *reg_name)
+{
+       int err;
+
+       nw64(reg, bits);
+       err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
+       if (err)
+               netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+                          (unsigned long long)bits, reg_name,
+                          (unsigned long long)nr64(reg));
+       return err;
+}
+
+#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
+({     BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
+       __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
+})
+
+/* Program a logical device group's interrupt management register with
+ * its timer value; when @on, also set the ARM bit so the group may
+ * raise its interrupt again.
+ */
+static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
+{
+       u64 val = (u64) lp->timer;
+
+       if (on)
+               val |= LDG_IMGMT_ARM;
+
+       nw64(LDG_IMGMT(lp->ldg_num), val);
+}
+
+/* Mask or unmask the interrupt for logical device number @ldn.
+ * Devices 0-63 are masked via LD_IM0, 64..LDN_MAX via LD_IM1.
+ * @on clears the mask bits (enables the irq); !@on sets them.
+ * Returns 0, or -EINVAL for an out-of-range @ldn.
+ */
+static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
+{
+       unsigned long mask_reg, bits;
+       u64 val;
+
+       if (ldn < 0 || ldn > LDN_MAX)
+               return -EINVAL;
+
+       if (ldn < 64) {
+               mask_reg = LD_IM0(ldn);
+               bits = LD_IM0_MASK;
+       } else {
+               mask_reg = LD_IM1(ldn - 64);
+               bits = LD_IM1_MASK;
+       }
+
+       /* read-modify-write: only touch this device's mask bits */
+       val = nr64(mask_reg);
+       if (on)
+               val &= ~bits;
+       else
+               val |= bits;
+       nw64(mask_reg, val);
+
+       return 0;
+}
+
+/* Enable (or disable) the interrupt of every logical device that the
+ * parent's ldg_map assigns to group @lp.  Stops at the first failure
+ * and returns its error; 0 on success.
+ */
+static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
+{
+       struct niu_parent *parent = np->parent;
+       int i;
+
+       for (i = 0; i <= LDN_MAX; i++) {
+               int err;
+
+               if (parent->ldg_map[i] != lp->ldg_num)
+                       continue;
+
+               err = niu_ldn_irq_enable(np, i, on);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+/* Enable (or disable) interrupts for all logical device groups: first
+ * set every group's per-device masks, then re-arm each group.  Returns
+ * 0 on success or the first error from niu_enable_ldn_in_ldg().
+ */
+static int niu_enable_interrupts(struct niu *np, int on)
+{
+       int i;
+
+       for (i = 0; i < np->num_ldg; i++) {
+               struct niu_ldg *lp = &np->ldg[i];
+               int err;
+
+               err = niu_enable_ldn_in_ldg(np, lp, on);
+               if (err)
+                       return err;
+       }
+       /* arm only after all masks are in place */
+       for (i = 0; i < np->num_ldg; i++)
+               niu_ldg_rearm(np, &np->ldg[i], on);
+
+       return 0;
+}
+
+/* PHY port types are packed two bits per port into one u32; these two
+ * helpers place/extract a port's type field.
+ */
+static u32 phy_encode(u32 type, int port)
+{
+       return type << (port * 2);
+}
+
+static u32 phy_decode(u32 val, int port)
+{
+       return (val >> (port * 2)) & PORT_TYPE_MASK;
+}
+
+/* Busy-poll the MIF frame register (up to 1000 x 10us) for the
+ * turnaround bit that signals frame completion.  Returns the 16-bit
+ * frame data (>= 0) on success, -ENODEV on timeout — callers rely on
+ * this positive-data / negative-errno encoding.
+ */
+static int mdio_wait(struct niu *np)
+{
+       int limit = 1000;
+       u64 val;
+
+       while (--limit > 0) {
+               val = nr64(MIF_FRAME_OUTPUT);
+               if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
+                       return val & MIF_FRAME_OUTPUT_DATA;
+
+               udelay(10);
+       }
+
+       return -ENODEV;
+}
+
+/* MDIO (clause-45 style, two-frame) register read: latch the register
+ * address with an address frame, then issue the read frame.  Returns
+ * the register value (>= 0) or a negative errno on timeout.
+ */
+static int mdio_read(struct niu *np, int port, int dev, int reg)
+{
+       int err;
+
+       nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
+       err = mdio_wait(np);
+       if (err < 0)
+               return err;
+
+       nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
+       return mdio_wait(np);
+}
+
+/* MDIO (clause-45 style, two-frame) register write: address frame
+ * first, then the write frame carrying @data.  Returns 0 on success or
+ * a negative errno if either frame times out.
+ */
+static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
+{
+       int err;
+
+       nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
+       err = mdio_wait(np);
+       if (err < 0)
+               return err;
+
+       nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
+       err = mdio_wait(np);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+/* MII (clause-22 style, single-frame) register read; returns the value
+ * (>= 0) or -ENODEV on timeout.
+ */
+static int mii_read(struct niu *np, int port, int reg)
+{
+       nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
+       return mdio_wait(np);
+}
+
+/* MII (clause-22 style, single-frame) register write; returns 0 or a
+ * negative errno on timeout.
+ */
+static int mii_write(struct niu *np, int port, int reg, int data)
+{
+       int err;
+
+       nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
+       err = mdio_wait(np);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+/* Program a SERDES lane's 32-bit TX PLL configuration.  The ESR2 config
+ * registers are 16 bits wide, so @val is split into low/high halves.
+ * Returns 0 or the first mdio_write() error.
+ */
+static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
+{
+       int err;
+
+       err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+                        ESR2_TI_PLL_TX_CFG_L(channel),
+                        val & 0xffff);
+       if (!err)
+               err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+                                ESR2_TI_PLL_TX_CFG_H(channel),
+                                val >> 16);
+       return err;
+}
+
+/* As esr2_set_tx_cfg(), but for the lane's RX PLL configuration. */
+static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
+{
+       int err;
+
+       err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+                        ESR2_TI_PLL_RX_CFG_L(channel),
+                        val & 0xffff);
+       if (!err)
+               err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+                                ESR2_TI_PLL_RX_CFG_H(channel),
+                                val >> 16);
+       return err;
+}
+
+/* Mode is always 10G fiber.  */
+/* Initialize the SERDES for 10G fiber operation: build the TX/RX lane
+ * configuration (adding test-mode bits when PHY loopback is requested)
+ * and program it into all four lanes.  Returns 0 or the first
+ * esr2_set_*_cfg() error.
+ */
+static int serdes_init_niu_10g_fiber(struct niu *np)
+{
+       struct niu_link_config *lp = &np->link_config;
+       u32 tx_cfg, rx_cfg;
+       unsigned long i;
+
+       tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
+       rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
+                 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
+                 PLL_RX_CFG_EQ_LP_ADAPTIVE);
+
+       if (lp->loopback_mode == LOOPBACK_PHY) {
+               u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
+
+               /* NOTE(review): this mdio_write()'s return value is
+                * ignored, unlike the lane writes below. */
+               mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+                          ESR2_TI_PLL_TEST_CFG_L, test_cfg);
+
+               tx_cfg |= PLL_TX_CFG_ENTEST;
+               rx_cfg |= PLL_RX_CFG_ENTEST;
+       }
+
+       /* Initialize all 4 lanes of the SERDES.  */
+       for (i = 0; i < 4; i++) {
+               int err = esr2_set_tx_cfg(np, i, tx_cfg);
+               if (err)
+                       return err;
+       }
+
+       for (i = 0; i < 4; i++) {
+               int err = esr2_set_rx_cfg(np, i, rx_cfg);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+/* Bring up the NIU-internal SERDES at 1G: program the TI PLL for 8x
+ * multiply (half rate), configure all four lanes, then poll
+ * ESR_INT_SIGNALS until the port's ready/detect bits appear.
+ * Only ports 0 and 1 are valid.  Returns 0, -EINVAL for a bad port,
+ * -ENODEV if the link signal bits never settle, or an MDIO error.
+ */
+static int serdes_init_niu_1g_serdes(struct niu *np)
+{
+       struct niu_link_config *lp = &np->link_config;
+       u16 pll_cfg, pll_sts;
+       int max_retry = 100;
+       u64 uninitialized_var(sig), mask, val;
+       u32 tx_cfg, rx_cfg;
+       unsigned long i;
+       int err;
+
+       tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
+                 PLL_TX_CFG_RATE_HALF);
+       rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
+                 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
+                 PLL_RX_CFG_RATE_HALF);
+
+       /* NOTE(review): adaptive low-power equalization is enabled only on
+        * port 0 here -- presumably a board-specific requirement; confirm
+        * against the hardware documentation.
+        */
+       if (np->port == 0)
+               rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;
+
+       if (lp->loopback_mode == LOOPBACK_PHY) {
+               u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
+
+               mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+                          ESR2_TI_PLL_TEST_CFG_L, test_cfg);
+
+               tx_cfg |= PLL_TX_CFG_ENTEST;
+               rx_cfg |= PLL_RX_CFG_ENTEST;
+       }
+
+       /* Initialize PLL for 1G */
+       pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);
+
+       err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+                        ESR2_TI_PLL_CFG_L, pll_cfg);
+       if (err) {
+               netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
+                          np->port, __func__);
+               return err;
+       }
+
+       pll_sts = PLL_CFG_ENPLL;
+
+       err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+                        ESR2_TI_PLL_STS_L, pll_sts);
+       if (err) {
+               netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
+                          np->port, __func__);
+               return err;
+       }
+
+       /* Let the PLL settle before touching the lanes.  */
+       udelay(200);
+
+       /* Initialize all 4 lanes of the SERDES.  */
+       for (i = 0; i < 4; i++) {
+               err = esr2_set_tx_cfg(np, i, tx_cfg);
+               if (err)
+                       return err;
+       }
+
+       for (i = 0; i < 4; i++) {
+               err = esr2_set_rx_cfg(np, i, rx_cfg);
+               if (err)
+                       return err;
+       }
+
+       /* Pick the signal-ready/detect bits for this port.  */
+       switch (np->port) {
+       case 0:
+               val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
+               mask = val;
+               break;
+
+       case 1:
+               val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
+               mask = val;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       /* Poll up to 100 times, 500ms apart, for the bits to come up.  */
+       while (max_retry--) {
+               sig = nr64(ESR_INT_SIGNALS);
+               if ((sig & mask) == val)
+                       break;
+
+               mdelay(500);
+       }
+
+       if ((sig & mask) != val) {
+               netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+                          np->port, (int)(sig & mask), (int)val);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/* Bring up the NIU-internal SERDES at 10G: program the TI PLL for 10x
+ * multiply, configure all four lanes, then poll ESR_INT_SIGNALS for
+ * the port's full set of ready/detect bits.  If 10G does not come up,
+ * fall back to 1G via serdes_init_niu_1g_serdes() and clear
+ * NIU_FLAGS_10G / switch the transceiver to PCS on success.
+ * Returns 0, -EINVAL for a bad port, -ENODEV when both 10G and the 1G
+ * fallback fail, or an MDIO error.
+ */
+static int serdes_init_niu_10g_serdes(struct niu *np)
+{
+       struct niu_link_config *lp = &np->link_config;
+       u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
+       int max_retry = 100;
+       u64 uninitialized_var(sig), mask, val;
+       unsigned long i;
+       int err;
+
+       tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
+       rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
+                 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
+                 PLL_RX_CFG_EQ_LP_ADAPTIVE);
+
+       if (lp->loopback_mode == LOOPBACK_PHY) {
+               u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
+
+               mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+                          ESR2_TI_PLL_TEST_CFG_L, test_cfg);
+
+               tx_cfg |= PLL_TX_CFG_ENTEST;
+               rx_cfg |= PLL_RX_CFG_ENTEST;
+       }
+
+       /* Initialize PLL for 10G */
+       pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);
+
+       err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+                        ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
+       if (err) {
+               netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
+                          np->port, __func__);
+               return err;
+       }
+
+       pll_sts = PLL_CFG_ENPLL;
+
+       err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+                        ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
+       if (err) {
+               netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
+                          np->port, __func__);
+               return err;
+       }
+
+       /* Let the PLL settle before touching the lanes.  */
+       udelay(200);
+
+       /* Initialize all 4 lanes of the SERDES.  */
+       for (i = 0; i < 4; i++) {
+               err = esr2_set_tx_cfg(np, i, tx_cfg);
+               if (err)
+                       return err;
+       }
+
+       for (i = 0; i < 4; i++) {
+               err = esr2_set_rx_cfg(np, i, rx_cfg);
+               if (err)
+                       return err;
+       }
+
+       /* check if serdes is ready */
+
+       switch (np->port) {
+       case 0:
+               mask = ESR_INT_SIGNALS_P0_BITS;
+               val = (ESR_INT_SRDY0_P0 |
+                      ESR_INT_DET0_P0 |
+                      ESR_INT_XSRDY_P0 |
+                      ESR_INT_XDP_P0_CH3 |
+                      ESR_INT_XDP_P0_CH2 |
+                      ESR_INT_XDP_P0_CH1 |
+                      ESR_INT_XDP_P0_CH0);
+               break;
+
+       case 1:
+               mask = ESR_INT_SIGNALS_P1_BITS;
+               val = (ESR_INT_SRDY0_P1 |
+                      ESR_INT_DET0_P1 |
+                      ESR_INT_XSRDY_P1 |
+                      ESR_INT_XDP_P1_CH3 |
+                      ESR_INT_XDP_P1_CH2 |
+                      ESR_INT_XDP_P1_CH1 |
+                      ESR_INT_XDP_P1_CH0);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       /* Poll up to 100 times, 500ms apart, for the bits to come up.  */
+       while (max_retry--) {
+               sig = nr64(ESR_INT_SIGNALS);
+               if ((sig & mask) == val)
+                       break;
+
+               mdelay(500);
+       }
+
+       if ((sig & mask) != val) {
+               pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
+                       np->port, (int)(sig & mask), (int)val);
+
+               /* 10G failed, try initializing at 1G */
+               err = serdes_init_niu_1g_serdes(np);
+               if (!err) {
+                       np->flags &= ~NIU_FLAGS_10G;
+                       np->mac_xcvr = MAC_XCVR_PCS;
+               }  else {
+                       netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
+                                  np->port);
+                       return -ENODEV;
+               }
+       }
+       return 0;
+}
+
+/* Read the 32-bit RX/TX control word of one ESR SERDES channel,
+ * assembled from the low and high 16-bit MDIO registers.
+ *
+ * Returns 0 and stores the word in *val on success, or a negative
+ * MDIO error (on failure *val may hold only the low half).
+ */
+static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
+{
+       int err;
+
+       err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
+       if (err >= 0) {
+               *val = (err & 0xffff);
+               err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
+                               ESR_RXTX_CTRL_H(chan));
+               if (err >= 0) {
+                       *val |= ((err & 0xffff) << 16);
+                       /* Only report success when both halves were read;
+                        * previously "err = 0" sat outside this branch and
+                        * silently discarded a failed high-half read.
+                        * This matches esr_read_glue0()/esr_read_reset().
+                        */
+                       err = 0;
+               }
+       }
+       return err;
+}
+
+/* Read the 32-bit GLUE_CTRL0 word of one ESR SERDES channel from its
+ * low and high 16-bit MDIO registers.  Returns 0 and fills *val on
+ * success, or a negative MDIO error.
+ */
+static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
+{
+       int err;
+
+       err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
+                       ESR_GLUE_CTRL0_L(chan));
+       if (err < 0)
+               return err;
+       *val = (err & 0xffff);
+
+       err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
+                       ESR_GLUE_CTRL0_H(chan));
+       if (err < 0)
+               return err;
+       *val |= ((err & 0xffff) << 16);
+
+       return 0;
+}
+
+/* Read the 32-bit ESR RX/TX reset control word from its low and high
+ * 16-bit MDIO registers.  Returns 0 and fills *val on success, or a
+ * negative MDIO error.
+ */
+static int esr_read_reset(struct niu *np, u32 *val)
+{
+       int err;
+
+       err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
+                       ESR_RXTX_RESET_CTRL_L);
+       if (err < 0)
+               return err;
+       *val = (err & 0xffff);
+
+       err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
+                       ESR_RXTX_RESET_CTRL_H);
+       if (err < 0)
+               return err;
+       *val |= ((err & 0xffff) << 16);
+
+       return 0;
+}
+
+/* Write the 32-bit RX/TX control word of one ESR SERDES channel as two
+ * 16-bit MDIO writes (low half first).  Returns 0 or the first failing
+ * mdio_write() error.
+ */
+static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
+{
+       int err;
+
+       err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+                        ESR_RXTX_CTRL_L(chan), val & 0xffff);
+       if (err)
+               return err;
+
+       return mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+                         ESR_RXTX_CTRL_H(chan), (val >> 16));
+}
+
+/* Write the 32-bit GLUE_CTRL0 word of one ESR SERDES channel as two
+ * 16-bit MDIO writes (low half first).  Returns 0 or the first failing
+ * mdio_write() error.
+ */
+static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
+{
+       int err;
+
+       err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+                        ESR_GLUE_CTRL0_L(chan), val & 0xffff);
+       if (err)
+               return err;
+
+       return mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+                         ESR_GLUE_CTRL0_H(chan), (val >> 16));
+}
+
+/* Pulse the ESR RX/TX reset lines (low half then high half, with 200us
+ * settle delays between each step) and verify that the reset register
+ * reads back as zero afterwards.  Returns 0, -ENODEV if the reset bits
+ * fail to clear, or an MDIO error.
+ */
+static int esr_reset(struct niu *np)
+{
+       u32 uninitialized_var(reset);
+       int err;
+
+       err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+                        ESR_RXTX_RESET_CTRL_L, 0x0000);
+       if (err)
+               return err;
+       err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+                        ESR_RXTX_RESET_CTRL_H, 0xffff);
+       if (err)
+               return err;
+       udelay(200);
+
+       err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+                        ESR_RXTX_RESET_CTRL_L, 0xffff);
+       if (err)
+               return err;
+       udelay(200);
+
+       err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
+                        ESR_RXTX_RESET_CTRL_H, 0x0000);
+       if (err)
+               return err;
+       udelay(200);
+
+       /* All reset bits must have deasserted by now.  */
+       err = esr_read_reset(np, &reset);
+       if (err)
+               return err;
+       if (reset != 0) {
+               netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
+                          np->port, reset);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/* Bring up the external (ENET) SERDES for 10G operation on port 0 or 1:
+ * program the control/test config registers (pad loopback when
+ * LOOPBACK_PHY is requested), tune RX/TX and glue settings on all four
+ * lanes, reset the ESR, and finally check the signal-ready bits.
+ * For hotplug-PHY configurations a missing signal just clears the
+ * PHY-present flag and returns 0; otherwise it is -ENODEV.
+ */
+static int serdes_init_10g(struct niu *np)
+{
+       struct niu_link_config *lp = &np->link_config;
+       unsigned long ctrl_reg, test_cfg_reg, i;
+       u64 ctrl_val, test_cfg_val, sig, mask, val;
+       int err;
+
+       switch (np->port) {
+       case 0:
+               ctrl_reg = ENET_SERDES_0_CTRL_CFG;
+               test_cfg_reg = ENET_SERDES_0_TEST_CFG;
+               break;
+       case 1:
+               ctrl_reg = ENET_SERDES_1_CTRL_CFG;
+               test_cfg_reg = ENET_SERDES_1_TEST_CFG;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+       /* Signal detect on all lanes, plus emphasis and level
+        * adjustment values for each lane.
+        */
+       ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
+                   ENET_SERDES_CTRL_SDET_1 |
+                   ENET_SERDES_CTRL_SDET_2 |
+                   ENET_SERDES_CTRL_SDET_3 |
+                   (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
+                   (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
+                   (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
+                   (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
+                   (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
+                   (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
+                   (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
+                   (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
+       test_cfg_val = 0;
+
+       if (lp->loopback_mode == LOOPBACK_PHY) {
+               test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
+                                 ENET_SERDES_TEST_MD_0_SHIFT) |
+                                (ENET_TEST_MD_PAD_LOOPBACK <<
+                                 ENET_SERDES_TEST_MD_1_SHIFT) |
+                                (ENET_TEST_MD_PAD_LOOPBACK <<
+                                 ENET_SERDES_TEST_MD_2_SHIFT) |
+                                (ENET_TEST_MD_PAD_LOOPBACK <<
+                                 ENET_SERDES_TEST_MD_3_SHIFT));
+       }
+
+       nw64(ctrl_reg, ctrl_val);
+       nw64(test_cfg_reg, test_cfg_val);
+
+       /* Initialize all 4 lanes of the SERDES.  */
+       for (i = 0; i < 4; i++) {
+               u32 rxtx_ctrl, glue0;
+
+               err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
+               if (err)
+                       return err;
+               err = esr_read_glue0(np, i, &glue0);
+               if (err)
+                       return err;
+
+               rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
+               rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
+                             (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
+
+               glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
+                          ESR_GLUE_CTRL0_THCNT |
+                          ESR_GLUE_CTRL0_BLTIME);
+               glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
+                         (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
+                         (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
+                         (BLTIME_300_CYCLES <<
+                          ESR_GLUE_CTRL0_BLTIME_SHIFT));
+
+               err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
+               if (err)
+                       return err;
+               err = esr_write_glue0(np, i, glue0);
+               if (err)
+                       return err;
+       }
+
+       err = esr_reset(np);
+       if (err)
+               return err;
+
+       /* Single (non-polled) check of the signal-ready bits.  */
+       sig = nr64(ESR_INT_SIGNALS);
+       switch (np->port) {
+       case 0:
+               mask = ESR_INT_SIGNALS_P0_BITS;
+               val = (ESR_INT_SRDY0_P0 |
+                      ESR_INT_DET0_P0 |
+                      ESR_INT_XSRDY_P0 |
+                      ESR_INT_XDP_P0_CH3 |
+                      ESR_INT_XDP_P0_CH2 |
+                      ESR_INT_XDP_P0_CH1 |
+                      ESR_INT_XDP_P0_CH0);
+               break;
+
+       case 1:
+               mask = ESR_INT_SIGNALS_P1_BITS;
+               val = (ESR_INT_SRDY0_P1 |
+                      ESR_INT_DET0_P1 |
+                      ESR_INT_XSRDY_P1 |
+                      ESR_INT_XDP_P1_CH3 |
+                      ESR_INT_XDP_P1_CH2 |
+                      ESR_INT_XDP_P1_CH1 |
+                      ESR_INT_XDP_P1_CH0);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       if ((sig & mask) != val) {
+               /* With a hotplug PHY the signal may legitimately be
+                * absent; just note it and report success.
+                */
+               if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
+                       np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+                       return 0;
+               }
+               netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+                          np->port, (int)(sig & mask), (int)val);
+               return -ENODEV;
+       }
+       if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
+               np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
+       return 0;
+}
+
+/* Configure the ENET SERDES PLL for 1G operation on the given port:
+ * clear FBDIV2 and set the per-port half-rate bit.
+ * Returns 0, or -EINVAL for ports other than 0-3.
+ */
+static int serdes_init_1g(struct niu *np)
+{
+       u64 val;
+
+       /* NOTE(review): ENET_SERDES_1_PLL_CFG is read/written for every
+        * port, not just port 1 -- presumably this one PLL config register
+        * carries the HRATE bits for all four ports; confirm against the
+        * NIU documentation before changing.
+        */
+       val = nr64(ENET_SERDES_1_PLL_CFG);
+       val &= ~ENET_SERDES_PLL_FBDIV2;
+       switch (np->port) {
+       case 0:
+               val |= ENET_SERDES_PLL_HRATE0;
+               break;
+       case 1:
+               val |= ENET_SERDES_PLL_HRATE1;
+               break;
+       case 2:
+               val |= ENET_SERDES_PLL_HRATE2;
+               break;
+       case 3:
+               val |= ENET_SERDES_PLL_HRATE3;
+               break;
+       default:
+               return -EINVAL;
+       }
+       nw64(ENET_SERDES_1_PLL_CFG, val);
+
+       return 0;
+}
+
+/* Bring up the external (ENET) SERDES for 1G operation on port 0 or 1:
+ * assert the per-port SERDES reset, program the PLL/control/test
+ * registers while in reset, release the reset, tune all four lanes,
+ * and check the signal-ready/detect bits once.
+ * Returns 0, -EINVAL for a bad port, -ENODEV if the signal bits are
+ * wrong, or an ESR access error.
+ */
+static int serdes_init_1g_serdes(struct niu *np)
+{
+       struct niu_link_config *lp = &np->link_config;
+       unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
+       u64 ctrl_val, test_cfg_val, sig, mask, val;
+       int err;
+       u64 reset_val, val_rd;
+
+       /* Half rate on all ports plus feedback divider 0 for 1G.  */
+       val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
+               ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
+               ENET_SERDES_PLL_FBDIV0;
+       switch (np->port) {
+       case 0:
+               reset_val =  ENET_SERDES_RESET_0;
+               ctrl_reg = ENET_SERDES_0_CTRL_CFG;
+               test_cfg_reg = ENET_SERDES_0_TEST_CFG;
+               pll_cfg = ENET_SERDES_0_PLL_CFG;
+               break;
+       case 1:
+               reset_val =  ENET_SERDES_RESET_1;
+               ctrl_reg = ENET_SERDES_1_CTRL_CFG;
+               test_cfg_reg = ENET_SERDES_1_TEST_CFG;
+               pll_cfg = ENET_SERDES_1_PLL_CFG;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+       /* Signal detect on all lanes, plus emphasis and level
+        * adjustment values for each lane.
+        */
+       ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
+                   ENET_SERDES_CTRL_SDET_1 |
+                   ENET_SERDES_CTRL_SDET_2 |
+                   ENET_SERDES_CTRL_SDET_3 |
+                   (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
+                   (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
+                   (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
+                   (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
+                   (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
+                   (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
+                   (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
+                   (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
+       test_cfg_val = 0;
+
+       if (lp->loopback_mode == LOOPBACK_PHY) {
+               test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
+                                 ENET_SERDES_TEST_MD_0_SHIFT) |
+                                (ENET_TEST_MD_PAD_LOOPBACK <<
+                                 ENET_SERDES_TEST_MD_1_SHIFT) |
+                                (ENET_TEST_MD_PAD_LOOPBACK <<
+                                 ENET_SERDES_TEST_MD_2_SHIFT) |
+                                (ENET_TEST_MD_PAD_LOOPBACK <<
+                                 ENET_SERDES_TEST_MD_3_SHIFT));
+       }
+
+       /* Assert reset, configure while held in reset, then release
+        * the reset bit and give the SERDES time to come up.
+        */
+       nw64(ENET_SERDES_RESET, reset_val);
+       mdelay(20);
+       val_rd = nr64(ENET_SERDES_RESET);
+       val_rd &= ~reset_val;
+       nw64(pll_cfg, val);
+       nw64(ctrl_reg, ctrl_val);
+       nw64(test_cfg_reg, test_cfg_val);
+       nw64(ENET_SERDES_RESET, val_rd);
+       mdelay(2000);
+
+       /* Initialize all 4 lanes of the SERDES.  */
+       for (i = 0; i < 4; i++) {
+               u32 rxtx_ctrl, glue0;
+
+               err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
+               if (err)
+                       return err;
+               err = esr_read_glue0(np, i, &glue0);
+               if (err)
+                       return err;
+
+               rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
+               rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
+                             (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
+
+               glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
+                          ESR_GLUE_CTRL0_THCNT |
+                          ESR_GLUE_CTRL0_BLTIME);
+               glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
+                         (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
+                         (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
+                         (BLTIME_300_CYCLES <<
+                          ESR_GLUE_CTRL0_BLTIME_SHIFT));
+
+               err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
+               if (err)
+                       return err;
+               err = esr_write_glue0(np, i, glue0);
+               if (err)
+                       return err;
+       }
+
+
+       /* Single (non-polled) check of the signal-ready bits.  */
+       sig = nr64(ESR_INT_SIGNALS);
+       switch (np->port) {
+       case 0:
+               val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
+               mask = val;
+               break;
+
+       case 1:
+               val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
+               mask = val;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       if ((sig & mask) != val) {
+               netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+                          np->port, (int)(sig & mask), (int)val);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/* Report 1G SERDES link state from the PCS MII status register.
+ * Updates lp->active_speed/active_duplex under np->lock and stores
+ * 0/1 into *link_up_p.  Always returns 0.
+ */
+static int link_status_1g_serdes(struct niu *np, int *link_up_p)
+{
+       struct niu_link_config *lp = &np->link_config;
+       u16 speed = SPEED_INVALID;
+       u8 duplex = DUPLEX_INVALID;
+       unsigned long flags;
+       int link_up = 0;
+       u64 val;
+
+       spin_lock_irqsave(&np->lock, flags);
+
+       val = nr64_pcs(PCS_MII_STAT);
+       if (val & PCS_MII_STAT_LINK_STATUS) {
+               /* 1G SERDES links are always 1000/full when up.  */
+               link_up = 1;
+               speed = SPEED_1000;
+               duplex = DUPLEX_FULL;
+       }
+
+       lp->active_speed = speed;
+       lp->active_duplex = duplex;
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       *link_up_p = link_up;
+       return 0;
+}
+
+/* Report 10G SERDES link state.  Falls through to the 1G variant when
+ * the device is not in 10G mode.  Updates lp->active_speed and
+ * lp->active_duplex under np->lock and stores 0/1 into *link_up_p.
+ * Always returns 0 (or whatever link_status_1g_serdes() returns).
+ */
+static int link_status_10g_serdes(struct niu *np, int *link_up_p)
+{
+       unsigned long flags;
+       struct niu_link_config *lp = &np->link_config;
+       int link_up = 0;
+       int link_ok = 1;
+       u64 val, val2;
+       u16 current_speed;
+       u8 current_duplex;
+
+       if (!(np->flags & NIU_FLAGS_10G))
+               return link_status_1g_serdes(np, link_up_p);
+
+       current_speed = SPEED_INVALID;
+       current_duplex = DUPLEX_INVALID;
+       spin_lock_irqsave(&np->lock, flags);
+
+       val = nr64_xpcs(XPCS_STATUS(0));
+       val2 = nr64_mac(XMAC_INTER2);
+       /* NOTE(review): bit 24 of XMAC_INTER2 is treated as "link fault";
+        * the magic constant presumably corresponds to a named bit in
+        * niu.h -- confirm before relying on it.
+        */
+       if (val2 & 0x01000000)
+               link_ok = 0;
+
+       /* NOTE(review): bit 12 of XPCS_STATUS(0) is treated as
+        * "lane alignment/link up" -- confirm against the XPCS spec.
+        */
+       if ((val & 0x1000ULL) && link_ok) {
+               link_up = 1;
+               current_speed = SPEED_10000;
+               current_duplex = DUPLEX_FULL;
+       }
+       lp->active_speed = current_speed;
+       lp->active_duplex = current_duplex;
+       spin_unlock_irqrestore(&np->lock, flags);
+       *link_up_p = link_up;
+       return 0;
+}
+
+/* Decode the copper-PHY link state from the standard MII register set:
+ * read BMCR/BMSR/ADVERTISE/LPA (plus ESTATUS/CTRL1000/STAT1000 when
+ * the PHY has extended status), derive supported/advertised masks and
+ * the negotiated (or forced) speed and duplex, and store them into
+ * np->link_config.  *link_up_p is set from BMSR_LSTATUS.
+ * Returns 0 or a negative mii_read() error.  Caller holds np->lock.
+ */
+static int link_status_mii(struct niu *np, int *link_up_p)
+{
+       struct niu_link_config *lp = &np->link_config;
+       int err;
+       int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
+       int supported, advertising, active_speed, active_duplex;
+
+       err = mii_read(np, np->phy_addr, MII_BMCR);
+       if (unlikely(err < 0))
+               return err;
+       bmcr = err;
+
+       err = mii_read(np, np->phy_addr, MII_BMSR);
+       if (unlikely(err < 0))
+               return err;
+       bmsr = err;
+
+       err = mii_read(np, np->phy_addr, MII_ADVERTISE);
+       if (unlikely(err < 0))
+               return err;
+       advert = err;
+
+       err = mii_read(np, np->phy_addr, MII_LPA);
+       if (unlikely(err < 0))
+               return err;
+       lpa = err;
+
+       /* The gigabit registers only exist when the PHY advertises
+        * extended status capability.
+        */
+       if (likely(bmsr & BMSR_ESTATEN)) {
+               err = mii_read(np, np->phy_addr, MII_ESTATUS);
+               if (unlikely(err < 0))
+                       return err;
+               estatus = err;
+
+               err = mii_read(np, np->phy_addr, MII_CTRL1000);
+               if (unlikely(err < 0))
+                       return err;
+               ctrl1000 = err;
+
+               err = mii_read(np, np->phy_addr, MII_STAT1000);
+               if (unlikely(err < 0))
+                       return err;
+               stat1000 = err;
+       } else
+               estatus = ctrl1000 = stat1000 = 0;
+
+       /* Translate BMSR/ESTATUS capability bits to ethtool SUPPORTED_*.  */
+       supported = 0;
+       if (bmsr & BMSR_ANEGCAPABLE)
+               supported |= SUPPORTED_Autoneg;
+       if (bmsr & BMSR_10HALF)
+               supported |= SUPPORTED_10baseT_Half;
+       if (bmsr & BMSR_10FULL)
+               supported |= SUPPORTED_10baseT_Full;
+       if (bmsr & BMSR_100HALF)
+               supported |= SUPPORTED_100baseT_Half;
+       if (bmsr & BMSR_100FULL)
+               supported |= SUPPORTED_100baseT_Full;
+       if (estatus & ESTATUS_1000_THALF)
+               supported |= SUPPORTED_1000baseT_Half;
+       if (estatus & ESTATUS_1000_TFULL)
+               supported |= SUPPORTED_1000baseT_Full;
+       lp->supported = supported;
+
+       /* Translate ADVERTISE/CTRL1000 bits to ethtool ADVERTISED_*.  */
+       advertising = 0;
+       if (advert & ADVERTISE_10HALF)
+               advertising |= ADVERTISED_10baseT_Half;
+       if (advert & ADVERTISE_10FULL)
+               advertising |= ADVERTISED_10baseT_Full;
+       if (advert & ADVERTISE_100HALF)
+               advertising |= ADVERTISED_100baseT_Half;
+       if (advert & ADVERTISE_100FULL)
+               advertising |= ADVERTISED_100baseT_Full;
+       if (ctrl1000 & ADVERTISE_1000HALF)
+               advertising |= ADVERTISED_1000baseT_Half;
+       if (ctrl1000 & ADVERTISE_1000FULL)
+               advertising |= ADVERTISED_1000baseT_Full;
+
+       if (bmcr & BMCR_ANENABLE) {
+               int neg, neg1000;
+
+               lp->active_autoneg = 1;
+               advertising |= ADVERTISED_Autoneg;
+
+               /* Negotiated abilities are the intersection of what we
+                * advertised and what the link partner advertised.
+                */
+               neg = advert & lpa;
+               neg1000 = (ctrl1000 << 2) & stat1000;
+
+               if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
+                       active_speed = SPEED_1000;
+               else if (neg & LPA_100)
+                       active_speed = SPEED_100;
+               else if (neg & (LPA_10HALF | LPA_10FULL))
+                       active_speed = SPEED_10;
+               else
+                       active_speed = SPEED_INVALID;
+
+               if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
+                       active_duplex = DUPLEX_FULL;
+               else if (active_speed != SPEED_INVALID)
+                       active_duplex = DUPLEX_HALF;
+               else
+                       active_duplex = DUPLEX_INVALID;
+       } else {
+               /* Autoneg disabled: speed/duplex are forced via BMCR.  */
+               lp->active_autoneg = 0;
+
+               if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
+                       active_speed = SPEED_1000;
+               else if (bmcr & BMCR_SPEED100)
+                       active_speed = SPEED_100;
+               else
+                       active_speed = SPEED_10;
+
+               if (bmcr & BMCR_FULLDPLX)
+                       active_duplex = DUPLEX_FULL;
+               else
+                       active_duplex = DUPLEX_HALF;
+       }
+
+       lp->active_advertising = advertising;
+       lp->active_speed = active_speed;
+       lp->active_duplex = active_duplex;
+       *link_up_p = !!(bmsr & BMSR_LSTATUS);
+
+       return 0;
+}
+
+/* Report link state for a 1G RGMII-attached PHY.  Reads BMSR and, when
+ * the link is up, the advertisement/partner/extended-status registers
+ * (the ADVERTISE/LPA/ESTATUS values themselves are read but not
+ * decoded; the link is assumed to be 1000/full when up).  Updates
+ * lp->active_speed/active_duplex under np->lock and stores 0/1 into
+ * *link_up_p.  Returns 0 or a negative mii_read() error.
+ */
+static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
+{
+       struct niu_link_config *lp = &np->link_config;
+       u16 current_speed, bmsr;
+       unsigned long flags;
+       u8 current_duplex;
+       int err, link_up;
+
+       link_up = 0;
+       current_speed = SPEED_INVALID;
+       current_duplex = DUPLEX_INVALID;
+
+       spin_lock_irqsave(&np->lock, flags);
+
+       /* (Removed a dead "err = -EINVAL;" store that was immediately
+        * overwritten by the mii_read() below.)
+        */
+       err = mii_read(np, np->phy_addr, MII_BMSR);
+       if (err < 0)
+               goto out;
+
+       bmsr = err;
+       if (bmsr & BMSR_LSTATUS) {
+               u16 adv, lpa;
+
+               err = mii_read(np, np->phy_addr, MII_ADVERTISE);
+               if (err < 0)
+                       goto out;
+               adv = err;
+
+               err = mii_read(np, np->phy_addr, MII_LPA);
+               if (err < 0)
+                       goto out;
+               lpa = err;
+
+               /* Read ESTATUS for its error status only; the value is
+                * intentionally discarded.
+                */
+               err = mii_read(np, np->phy_addr, MII_ESTATUS);
+               if (err < 0)
+                       goto out;
+               link_up = 1;
+               current_speed = SPEED_1000;
+               current_duplex = DUPLEX_FULL;
+
+       }
+       lp->active_speed = current_speed;
+       lp->active_duplex = current_duplex;
+       err = 0;
+
+out:
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       *link_up_p = link_up;
+       return err;
+}
+
+/* Locked wrapper around link_status_mii() for 1G copper ports: takes
+ * np->lock, decodes the MII state, and additionally marks the link as
+ * twisted-pair (TP) in the supported/advertised masks.
+ * Returns the link_status_mii() result.
+ */
+static int link_status_1g(struct niu *np, int *link_up_p)
+{
+       struct niu_link_config *lp = &np->link_config;
+       unsigned long flags;
+       int err;
+
+       spin_lock_irqsave(&np->lock, flags);
+
+       err = link_status_mii(np, link_up_p);
+       lp->supported |= SUPPORTED_TP;
+       lp->active_advertising |= ADVERTISED_TP;
+
+       spin_unlock_irqrestore(&np->lock, flags);
+       return err;
+}
+
+/* Soft-reset the BCM8704 PHY via the PHYXS BMCR and poll (up to 1000
+ * reads) for the reset bit to self-clear.  A read of 0xffff (bus reads
+ * all-ones, i.e. no device) is returned to the caller as-is.
+ * Returns 0, -ENODEV if the PHY never leaves reset, or an MDIO error.
+ */
+static int bcm8704_reset(struct niu *np)
+{
+       int err, limit;
+
+       err = mdio_read(np, np->phy_addr,
+                       BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
+       if (err < 0 || err == 0xffff)
+               return err;
+       err |= BMCR_RESET;
+       err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
+                        MII_BMCR, err);
+       if (err)
+               return err;
+
+       limit = 1000;
+       while (--limit >= 0) {
+               err = mdio_read(np, np->phy_addr,
+                               BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
+               if (err < 0)
+                       return err;
+               if (!(err & BMCR_RESET))
+                       break;
+       }
+       if (limit < 0) {
+               netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
+                          np->port, (err & 0xffff));
+               return -ENODEV;
+       }
+       return 0;
+}
+
+/* When written, certain PHY registers need to be read back twice
+ * in order for the bits to settle properly.  This helper performs the
+ * two settling reads, discarding the values.
+ * Returns 0 or the first failing mdio_read() error.
+ */
+static int bcm8704_user_dev3_readback(struct niu *np, int reg)
+{
+       int i;
+
+       for (i = 0; i < 2; i++) {
+               int err = mdio_read(np, np->phy_addr,
+                                   BCM8704_USER_DEV3_ADDR, reg);
+               if (err < 0)
+                       return err;
+       }
+       return 0;
+}
+
+/* Initialize the BCM8706 user device-3 block: read-modify-write the
+ * optical digital control register to set the GPIO field to 0x3 and
+ * the reserved RESV2 bit, then wait 1s for the PHY to settle.
+ * Returns 0 or an MDIO error.
+ */
+static int bcm8706_init_user_dev3(struct niu *np)
+{
+       int err;
+
+
+       err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+                       BCM8704_USER_OPT_DIGITAL_CTRL);
+       if (err < 0)
+               return err;
+       err &= ~USER_ODIG_CTRL_GPIOS;
+       err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
+       err |=  USER_ODIG_CTRL_RESV2;
+       err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+                        BCM8704_USER_OPT_DIGITAL_CTRL, err);
+       if (err)
+               return err;
+
+       /* Give the PHY time to apply the new settings.  */
+       mdelay(1000);
+
+       return 0;
+}
+
+/* Initialize the BCM8704 user device-3 block: program the user control
+ * and PMD TX control registers, perform the mandatory double read-back
+ * of each (see bcm8704_user_dev3_readback()), set the optical digital
+ * control GPIO field to 0x3, then wait 1s for the PHY to settle.
+ * Returns 0 or an MDIO error.
+ */
+static int bcm8704_init_user_dev3(struct niu *np)
+{
+       int err;
+
+       err = mdio_write(np, np->phy_addr,
+                        BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
+                        (USER_CONTROL_OPTXRST_LVL |
+                         USER_CONTROL_OPBIASFLT_LVL |
+                         USER_CONTROL_OBTMPFLT_LVL |
+                         USER_CONTROL_OPPRFLT_LVL |
+                         USER_CONTROL_OPTXFLT_LVL |
+                         USER_CONTROL_OPRXLOS_LVL |
+                         USER_CONTROL_OPRXFLT_LVL |
+                         USER_CONTROL_OPTXON_LVL |
+                         (0x3f << USER_CONTROL_RES1_SHIFT)));
+       if (err)
+               return err;
+
+       err = mdio_write(np, np->phy_addr,
+                        BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
+                        (USER_PMD_TX_CTL_XFP_CLKEN |
+                         (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
+                         (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
+                         USER_PMD_TX_CTL_TSCK_LPWREN));
+       if (err)
+               return err;
+
+       err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
+       if (err)
+               return err;
+       err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
+       if (err)
+               return err;
+
+       err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+                       BCM8704_USER_OPT_DIGITAL_CTRL);
+       if (err < 0)
+               return err;
+       err &= ~USER_ODIG_CTRL_GPIOS;
+       err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
+       err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
+                        BCM8704_USER_OPT_DIGITAL_CTRL, err);
+       if (err)
+               return err;
+
+       /* Give the PHY time to apply the new settings.  */
+       mdelay(1000);
+
+       return 0;
+}
+
+static int mrvl88x2011_act_led(struct niu *np, int val)
+{
+       int     err;
+
+       err  = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
+               MRVL88X2011_LED_8_TO_11_CTL);
+       if (err < 0)
+               return err;
+
+       err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
+       err |=  MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);
+
+       return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
+                         MRVL88X2011_LED_8_TO_11_CTL, err);
+}
+
+static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
+{
+       int     err;
+
+       err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
+                       MRVL88X2011_LED_BLINK_CTL);
+       if (err >= 0) {
+               err &= ~MRVL88X2011_LED_BLKRATE_MASK;
+               err |= (rate << 4);
+
+               err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
+                                MRVL88X2011_LED_BLINK_CTL, err);
+       }
+
+       return err;
+}
+
/* Bring up a Marvell 88X2011 10G transceiver: configure the LEDs,
 * enable the XFP reference clock, set or clear PMA loopback to match
 * the configured loopback mode, and finally enable the PMD transmitter.
 * Returns 0 or a negative mdio error.
 */
static int xcvr_init_10g_mrvl88x2011(struct niu *np)
{
	int	err;

	/* Set LED functions */
	err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
	if (err)
		return err;

	/* led activity */
	err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
	if (err)
		return err;

	/* Enable the XFP reference clock output. */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_GENERAL_CTL);
	if (err < 0)
		return err;

	err |= MRVL88X2011_ENA_XFPREFCLK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			 MRVL88X2011_GENERAL_CTL, err);
	if (err < 0)
		return err;

	/* Mirror the requested MAC loopback mode into the PMA/PMD
	 * control register.
	 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_CTL_1);
	if (err < 0)
		return err;

	if (np->link_config.loopback_mode == LOOPBACK_MAC)
		err |= MRVL88X2011_LOOPBACK;
	else
		err &= ~MRVL88X2011_LOOPBACK;

	err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			 MRVL88X2011_PMA_PMD_CTL_1, err);
	if (err < 0)
		return err;

	/* Enable PMD  */
	return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			  MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
}
+
+
/* Read back BCM870x diagnostic registers and log anything that looks
 * like a cabling or optics problem.  Purely informational: only mdio
 * failures are returned as errors, the diagnosed conditions are not.
 */
static int xcvr_diag_bcm870x(struct niu *np)
{
	u16 analog_stat0, tx_alarm_status;
	int err = 0;

#if 1
	/* Debug dump of a few status registers; compiled in
	 * unconditionally as inherited from the original driver.
	 */
	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			MII_STAT1000);
	if (err < 0)
		return err;
	pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
	if (err < 0)
		return err;
	pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			MII_NWAYTEST);
	if (err < 0)
		return err;
	pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
#endif

	/* XXX dig this out it might not be so useful XXX */
	/* Both status registers are read twice and the second value is
	 * used; presumably the registers are latching -- confirm with
	 * the BCM8704 datasheet.
	 */
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_ANALOG_STATUS0);
	if (err < 0)
		return err;
	analog_stat0 = err;

	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
			BCM8704_USER_TX_ALARM_STATUS);
	if (err < 0)
		return err;
	tx_alarm_status = err;

	/* 0x03fc is treated as the "all good" analog status; the other
	 * two magic values identify known failure signatures.
	 */
	if (analog_stat0 != 0x03fc) {
		if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
			pr_info("Port %u cable not connected or bad cable\n",
				np->port);
		} else if (analog_stat0 == 0x639c) {
			pr_info("Port %u optical module is bad or missing\n",
				np->port);
		}
	}

	return 0;
}
+
+static int xcvr_10g_set_lb_bcm870x(struct niu *np)
+{
+       struct niu_link_config *lp = &np->link_config;
+       int err;
+
+       err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
+                       MII_BMCR);
+       if (err < 0)
+               return err;
+
+       err &= ~BMCR_LOOPBACK;
+
+       if (lp->loopback_mode == LOOPBACK_MAC)
+               err |= BMCR_LOOPBACK;
+
+       err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
+                        MII_BMCR, err);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+static int xcvr_init_10g_bcm8706(struct niu *np)
+{
+       int err = 0;
+       u64 val;
+
+       if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
+           (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
+                       return err;
+
+       val = nr64_mac(XMAC_CONFIG);
+       val &= ~XMAC_CONFIG_LED_POLARITY;
+       val |= XMAC_CONFIG_FORCE_LED_ON;
+       nw64_mac(XMAC_CONFIG, val);
+
+       val = nr64(MIF_CONFIG);
+       val |= MIF_CONFIG_INDIRECT_MODE;
+       nw64(MIF_CONFIG, val);
+
+       err = bcm8704_reset(np);
+       if (err)
+               return err;
+
+       err = xcvr_10g_set_lb_bcm870x(np);
+       if (err)
+               return err;
+
+       err = bcm8706_init_user_dev3(np);
+       if (err)
+               return err;
+
+       err = xcvr_diag_bcm870x(np);
+       if (err)
+               return err;
+
+       return 0;
+}
+
/* Bring up a BCM8704 transceiver: reset it, program the vendor
 * device-3 registers, apply the loopback setting, then run the
 * diagnostic readback.  Returns 0 or a negative error.
 */
static int xcvr_init_10g_bcm8704(struct niu *np)
{
	int err;

	err = bcm8704_reset(np);
	if (!err)
		err = bcm8704_init_user_dev3(np);
	if (!err)
		err = xcvr_10g_set_lb_bcm870x(np);
	if (!err)
		err = xcvr_diag_bcm870x(np);

	return err;
}
+
/* Generic 10G transceiver init: force the XMAC LED on, put the MIF in
 * indirect (clause-45) mode, then dispatch to the PHY-specific init
 * based on the probed PHY id.  Returns 0 or a negative error.
 */
static int xcvr_init_10g(struct niu *np)
{
	int phy_id, err;
	u64 val;

	val = nr64_mac(XMAC_CONFIG);
	val &= ~XMAC_CONFIG_LED_POLARITY;
	val |= XMAC_CONFIG_FORCE_LED_ON;
	nw64_mac(XMAC_CONFIG, val);

	/* XXX shared resource, lock parent XXX */
	val = nr64(MIF_CONFIG);
	val |= MIF_CONFIG_INDIRECT_MODE;
	nw64(MIF_CONFIG, val);

	/* Decode this port's slot in the parent's PHY probe table. */
	phy_id = phy_decode(np->parent->port_phy, np->port);
	phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];

	/* handle different phy types */
	switch (phy_id & NIU_PHY_ID_MASK) {
	case NIU_PHY_ID_MRVL88X2011:
		err = xcvr_init_10g_mrvl88x2011(np);
		break;

	default: /* bcom 8704 */
		err = xcvr_init_10g_bcm8704(np);
		break;
	}

	return err;
}
+
+static int mii_reset(struct niu *np)
+{
+       int limit, err;
+
+       err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
+       if (err)
+               return err;
+
+       limit = 1000;
+       while (--limit >= 0) {
+               udelay(500);
+               err = mii_read(np, np->phy_addr, MII_BMCR);
+               if (err < 0)
+                       return err;
+               if (!(err & BMCR_RESET))
+                       break;
+       }
+       if (limit < 0) {
+               netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
+                          np->port, err);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int xcvr_init_1g_rgmii(struct niu *np)
+{
+       int err;
+       u64 val;
+       u16 bmcr, bmsr, estat;
+
+       val = nr64(MIF_CONFIG);
+       val &= ~MIF_CONFIG_INDIRECT_MODE;
+       nw64(MIF_CONFIG, val);
+
+       err = mii_reset(np);
+       if (err)
+               return err;
+
+       err = mii_read(np, np->phy_addr, MII_BMSR);
+       if (err < 0)
+               return err;
+       bmsr = err;
+
+       estat = 0;
+       if (bmsr & BMSR_ESTATEN) {
+               err = mii_read(np, np->phy_addr, MII_ESTATUS);
+               if (err < 0)
+                       return err;
+               estat = err;
+       }
+
+       bmcr = 0;
+       err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
+       if (err)
+               return err;
+
+       if (bmsr & BMSR_ESTATEN) {
+               u16 ctrl1000 = 0;
+
+               if (estat & ESTATUS_1000_TFULL)
+                       ctrl1000 |= ADVERTISE_1000FULL;
+               err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
+               if (err)
+                       return err;
+       }
+
+       bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
+
+       err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
+       if (err)
+               return err;
+
+       err = mii_read(np, np->phy_addr, MII_BMCR);
+       if (err < 0)
+               return err;
+       bmcr = mii_read(np, np->phy_addr, MII_BMCR);
+
+       err = mii_read(np, np->phy_addr, MII_BMSR);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
/* Common MII PHY bring-up used by the 1G transceiver paths: reset the
 * PHY, cache BMSR/ESTATUS capabilities, apply MAC/PHY loopback if
 * requested, then either program autonegotiation advertisement or
 * force the configured speed/duplex (validated against the PHY's
 * capability bits).  Returns 0, a negative MII error, or -EINVAL for
 * an unsupported forced speed/duplex combination.
 */
static int mii_init_common(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;
	u16 bmcr, bmsr, adv, estat;
	int err;

	err = mii_reset(np);
	if (err)
		return err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	/* Extended status (gigabit capability bits) only exists when
	 * BMSR advertises it.
	 */
	estat = 0;
	if (bmsr & BMSR_ESTATEN) {
		err = mii_read(np, np->phy_addr, MII_ESTATUS);
		if (err < 0)
			return err;
		estat = err;
	}

	/* Start from a cleared BMCR; bits are accumulated below and
	 * written once at the end.
	 */
	bmcr = 0;
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

	if (lp->loopback_mode == LOOPBACK_MAC) {
		bmcr |= BMCR_LOOPBACK;
		if (lp->active_speed == SPEED_1000)
			bmcr |= BMCR_SPEED1000;
		if (lp->active_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	if (lp->loopback_mode == LOOPBACK_PHY) {
		u16 aux;

		/* External loopback via the BCM5464R auxiliary control
		 * register.
		 */
		aux = (BCM5464R_AUX_CTL_EXT_LB |
		       BCM5464R_AUX_CTL_WRITE_1);
		err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
		if (err)
			return err;
	}

	if (lp->autoneg) {
		u16 ctrl1000;

		/* Advertise the intersection of PHY capability (bmsr)
		 * and the configured advertising mask.
		 */
		adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
		if ((bmsr & BMSR_10HALF) &&
			(lp->advertising & ADVERTISED_10baseT_Half))
			adv |= ADVERTISE_10HALF;
		if ((bmsr & BMSR_10FULL) &&
			(lp->advertising & ADVERTISED_10baseT_Full))
			adv |= ADVERTISE_10FULL;
		if ((bmsr & BMSR_100HALF) &&
			(lp->advertising & ADVERTISED_100baseT_Half))
			adv |= ADVERTISE_100HALF;
		if ((bmsr & BMSR_100FULL) &&
			(lp->advertising & ADVERTISED_100baseT_Full))
			adv |= ADVERTISE_100FULL;
		err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
		if (err)
			return err;

		if (likely(bmsr & BMSR_ESTATEN)) {
			ctrl1000 = 0;
			if ((estat & ESTATUS_1000_THALF) &&
				(lp->advertising & ADVERTISED_1000baseT_Half))
				ctrl1000 |= ADVERTISE_1000HALF;
			if ((estat & ESTATUS_1000_TFULL) &&
				(lp->advertising & ADVERTISED_1000baseT_Full))
				ctrl1000 |= ADVERTISE_1000FULL;
			err = mii_write(np, np->phy_addr,
					MII_CTRL1000, ctrl1000);
			if (err)
				return err;
		}

		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		/* !lp->autoneg */
		int fulldpx;

		if (lp->duplex == DUPLEX_FULL) {
			bmcr |= BMCR_FULLDPLX;
			fulldpx = 1;
		} else if (lp->duplex == DUPLEX_HALF)
			fulldpx = 0;
		else
			return -EINVAL;

		if (lp->speed == SPEED_1000) {
			/* if X-full requested while not supported, or
			   X-half requested while not supported... */
			if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
				(!fulldpx && !(estat & ESTATUS_1000_THALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED1000;
		} else if (lp->speed == SPEED_100) {
			if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
				(!fulldpx && !(bmsr & BMSR_100HALF)))
				return -EINVAL;
			bmcr |= BMCR_SPEED100;
		} else if (lp->speed == SPEED_10) {
			if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
				(!fulldpx && !(bmsr & BMSR_10HALF)))
				return -EINVAL;
		} else
			return -EINVAL;
	}

	/* Commit the accumulated control bits. */
	err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
	if (err)
		return err;

#if 0
	err = mii_read(np, np->phy_addr, MII_BMCR);
	if (err < 0)
		return err;
	bmcr = err;

	err = mii_read(np, np->phy_addr, MII_BMSR);
	if (err < 0)
		return err;
	bmsr = err;

	pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
		np->port, bmcr, bmsr);
#endif

	return 0;
}
+
+static int xcvr_init_1g(struct niu *np)
+{
+       u64 val;
+
+       /* XXX shared resource, lock parent XXX */
+       val = nr64(MIF_CONFIG);
+       val &= ~MIF_CONFIG_INDIRECT_MODE;
+       nw64(MIF_CONFIG, val);
+
+       return mii_init_common(np);
+}
+
+static int niu_xcvr_init(struct niu *np)
+{
+       const struct niu_phy_ops *ops = np->phy_ops;
+       int err;
+
+       err = 0;
+       if (ops->xcvr_init)
+               err = ops->xcvr_init(np);
+
+       return err;
+}
+
+static int niu_serdes_init(struct niu *np)
+{
+       const struct niu_phy_ops *ops = np->phy_ops;
+       int err;
+
+       err = 0;
+       if (ops->serdes_init)
+               err = ops->serdes_init(np);
+
+       return err;
+}
+
+static void niu_init_xif(struct niu *);
+static void niu_handle_led(struct niu *, int status);
+
/* Propagate a polled link state into the netdev carrier state.  On a
 * down->up transition, log the negotiated speed/duplex, reprogram the
 * XIF and LED under np->lock, and turn the carrier on; on up->down,
 * turn the LED and carrier off.  No-op when the state is unchanged.
 * Always returns 0.
 */
static int niu_link_status_common(struct niu *np, int link_up)
{
	struct niu_link_config *lp = &np->link_config;
	struct net_device *dev = np->dev;
	unsigned long flags;

	if (!netif_carrier_ok(dev) && link_up) {
		netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
			   lp->active_speed == SPEED_10000 ? "10Gb/sec" :
			   lp->active_speed == SPEED_1000 ? "1Gb/sec" :
			   lp->active_speed == SPEED_100 ? "100Mbit/sec" :
			   "10Mbit/sec",
			   lp->active_duplex == DUPLEX_FULL ? "full" : "half");

		spin_lock_irqsave(&np->lock, flags);
		niu_init_xif(np);
		niu_handle_led(np, 1);
		spin_unlock_irqrestore(&np->lock, flags);

		netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev) && !link_up) {
		netif_warn(np, link, dev, "Link is down\n");
		spin_lock_irqsave(&np->lock, flags);
		niu_handle_led(np, 0);
		spin_unlock_irqrestore(&np->lock, flags);
		netif_carrier_off(dev);
	}

	return 0;
}
+
/* Poll link state on a Marvell 88X2011: the link is reported up only
 * when the PMA/PMD and PCS status bits agree and all four XGXS lanes
 * are aligned.  The activity LED is updated to match, *link_up_p is
 * set, and 0 or a negative mdio error is returned.
 */
static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
{
	int err, link_up, pma_status, pcs_status;

	link_up = 0;

	/* First read is discarded; presumably clears latched status --
	 * confirm with the 88X2011 datasheet.
	 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_10G_PMD_STATUS_2);
	if (err < 0)
		goto out;

	/* Check PMA/PMD Register: 1.0001.2 == 1 */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check PMC Register : 3.0001.2 == 1: read twice */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
			MRVL88X2011_PMA_PMD_STATUS_1);
	if (err < 0)
		goto out;

	pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);

	/* Check XGXS Register : 4.0018.[0-3,12] */
	err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
			MRVL88X2011_10G_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
		    0x800))
		link_up = (pma_status && pcs_status) ? 1 : 0;

	/* Fixed 10G/full; this PHY mode does not negotiate. */
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;
out:
	/* LED follows the final link verdict, even on mdio failure. */
	mrvl88x2011_act_led(np, (link_up ?
				 MRVL88X2011_LED_CTL_PCS_ACT :
				 MRVL88X2011_LED_CTL_OFF));

	*link_up_p = link_up;
	return err;
}
+
/* Poll link state on a BCM8706: signal detect, PCS block lock, and
 * XGXS lane alignment must all pass for the link to be up.  An
 * all-ones (0xffff) signal-detect read is returned to the caller
 * unchanged so the hotplug path can recognize a missing/back-to-back
 * XAUI PHY.  Sets *link_up_p; returns 0, 0xffff, or a negative error.
 */
static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
{
	int err, link_up;
	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0 || err == 0xffff)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;

	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_PATTEST |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		/* Lanes not aligned: report the negotiated state as
		 * invalid, unlike the bcom variant below.
		 */
		np->link_config.active_speed = SPEED_INVALID;
		np->link_config.active_duplex = DUPLEX_INVALID;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}
+
/* Poll link state on a BCM8704: signal detect, PCS block lock, and
 * XGXS lane alignment must all pass for the link to be up.  Sets
 * *link_up_p; returns 0 or a negative mdio error.
 */
static int link_status_10g_bcom(struct niu *np, int *link_up_p)
{
	int err, link_up;

	link_up = 0;

	err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
			BCM8704_PMD_RCV_SIGDET);
	if (err < 0)
		goto out;
	if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
			BCM8704_PCS_10G_R_STATUS);
	if (err < 0)
		goto out;
	if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
		err = 0;
		goto out;
	}

	err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
			BCM8704_PHYXS_XGXS_LANE_STAT);
	if (err < 0)
		goto out;

	/* Unlike the 8706 variant, PATTEST is not expected here. */
	if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
		    PHYXS_XGXS_LANE_STAT_MAGIC |
		    PHYXS_XGXS_LANE_STAT_LANE3 |
		    PHYXS_XGXS_LANE_STAT_LANE2 |
		    PHYXS_XGXS_LANE_STAT_LANE1 |
		    PHYXS_XGXS_LANE_STAT_LANE0)) {
		err = 0;
		goto out;
	}

	link_up = 1;
	np->link_config.active_speed = SPEED_10000;
	np->link_config.active_duplex = DUPLEX_FULL;
	err = 0;

out:
	*link_up_p = link_up;
	return err;
}
+
+static int link_status_10g(struct niu *np, int *link_up_p)
+{
+       unsigned long flags;
+       int err = -EINVAL;
+
+       spin_lock_irqsave(&np->lock, flags);
+
+       if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
+               int phy_id;
+
+               phy_id = phy_decode(np->parent->port_phy, np->port);
+               phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
+
+               /* handle different phy types */
+               switch (phy_id & NIU_PHY_ID_MASK) {
+               case NIU_PHY_ID_MRVL88X2011:
+                       err = link_status_10g_mrvl(np, link_up_p);
+                       break;
+
+               default: /* bcom 8704 */
+                       err = link_status_10g_bcom(np, link_up_p);
+                       break;
+               }
+       }
+
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       return err;
+}
+
+static int niu_10g_phy_present(struct niu *np)
+{
+       u64 sig, mask, val;
+
+       sig = nr64(ESR_INT_SIGNALS);
+       switch (np->port) {
+       case 0:
+               mask = ESR_INT_SIGNALS_P0_BITS;
+               val = (ESR_INT_SRDY0_P0 |
+                      ESR_INT_DET0_P0 |
+                      ESR_INT_XSRDY_P0 |
+                      ESR_INT_XDP_P0_CH3 |
+                      ESR_INT_XDP_P0_CH2 |
+                      ESR_INT_XDP_P0_CH1 |
+                      ESR_INT_XDP_P0_CH0);
+               break;
+
+       case 1:
+               mask = ESR_INT_SIGNALS_P1_BITS;
+               val = (ESR_INT_SRDY0_P1 |
+                      ESR_INT_DET0_P1 |
+                      ESR_INT_XSRDY_P1 |
+                      ESR_INT_XDP_P1_CH3 |
+                      ESR_INT_XDP_P1_CH2 |
+                      ESR_INT_XDP_P1_CH1 |
+                      ESR_INT_XDP_P1_CH0);
+               break;
+
+       default:
+               return 0;
+       }
+
+       if ((sig & mask) != val)
+               return 0;
+       return 1;
+}
+
/* Poll link status on a hot-pluggable 10G port.  Tracks module
 * insertion/removal via NIU_FLAGS_HOTPLUG_PHY_PRESENT: on insertion it
 * re-runs xcvr_init and debounces a failed init by probing MDIO (an
 * all-ones 0xffff read means no MDIO at all, i.e. back-to-back XAUI,
 * which is treated as present).  Always returns 0; link state is
 * reported only through *link_up_p.
 */
static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
{
	unsigned long flags;
	int err = 0;
	int phy_present;
	int phy_present_prev;

	spin_lock_irqsave(&np->lock, flags);

	if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
		phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
			1 : 0;
		phy_present = niu_10g_phy_present(np);
		if (phy_present != phy_present_prev) {
			/* state change */
			if (phy_present) {
				/* A NEM was just plugged in */
				np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				if (np->phy_ops->xcvr_init)
					err = np->phy_ops->xcvr_init(np);
				if (err) {
					err = mdio_read(np, np->phy_addr,
						BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
					if (err == 0xffff) {
						/* No mdio, back-to-back XAUI */
						goto out;
					}
					/* debounce */
					np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				}
			} else {
				np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
				*link_up_p = 0;
				netif_warn(np, link, np->dev,
					   "Hotplug PHY Removed\n");
			}
		}
out:
		if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
			/* 0xffff from the poller also means no MDIO
			 * (back-to-back XAUI); force the link up.
			 */
			err = link_status_10g_bcm8706(np, link_up_p);
			if (err == 0xffff) {
				/* No mdio, back-to-back XAUI: it is C10NEM */
				*link_up_p = 1;
				np->link_config.active_speed = SPEED_10000;
				np->link_config.active_duplex = DUPLEX_FULL;
			}
		}
	}

	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}
+
+static int niu_link_status(struct niu *np, int *link_up_p)
+{
+       const struct niu_phy_ops *ops = np->phy_ops;
+       int err;
+
+       err = 0;
+       if (ops->link_status)
+               err = ops->link_status(np, link_up_p);
+
+       return err;
+}
+
+static void niu_timer(unsigned long __opaque)
+{
+       struct niu *np = (struct niu *) __opaque;
+       unsigned long off;
+       int err, link_up;
+
+       err = niu_link_status(np, &link_up);
+       if (!err)
+               niu_link_status_common(np, link_up);
+
+       if (netif_carrier_ok(np->dev))
+               off = 5 * HZ;
+       else
+               off = 1 * HZ;
+       np->timer.expires = jiffies + off;
+
+       add_timer(&np->timer);
+}
+
/* PHY method tables.  Each board/PHY combination supplies only the
 * {serdes_init, xcvr_init, link_status} hooks it needs; absent hooks
 * are skipped by niu_serdes_init()/niu_xcvr_init()/niu_link_status().
 */
static const struct niu_phy_ops phy_ops_10g_serdes = {
	.serdes_init		= serdes_init_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
	.serdes_init		= serdes_init_niu_10g_serdes,
	.link_status		= link_status_10g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
	.serdes_init		= serdes_init_niu_1g_serdes,
	.link_status		= link_status_1g_serdes,
};

static const struct niu_phy_ops phy_ops_1g_rgmii = {
	.xcvr_init		= xcvr_init_1g_rgmii,
	.link_status		= link_status_1g_rgmii,
};

static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
	.serdes_init		= serdes_init_niu_10g_fiber,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g,
	.link_status		= link_status_10g,
};

static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
	.serdes_init		= serdes_init_10g,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};

static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
	.serdes_init		= serdes_init_niu_10g_fiber,
	.xcvr_init		= xcvr_init_10g_bcm8706,
	.link_status		= link_status_10g_hotplug,
};

static const struct niu_phy_ops phy_ops_10g_copper = {
	.serdes_init		= serdes_init_10g,
	.link_status		= link_status_10g, /* XXX */
};

static const struct niu_phy_ops phy_ops_1g_fiber = {
	.serdes_init		= serdes_init_1g,
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};

static const struct niu_phy_ops phy_ops_1g_copper = {
	.xcvr_init		= xcvr_init_1g,
	.link_status		= link_status_1g,
};
+
/* A PHY template pairs a method table with the base MDIO/MII address
 * at which that PHY family is expected to appear.
 */
struct niu_phy_template {
	const struct niu_phy_ops	*ops;
	u32				phy_addr_base;
};

static const struct niu_phy_template phy_template_niu_10g_fiber = {
	.ops		= &phy_ops_10g_fiber_niu,
	.phy_addr_base	= 16,
};

static const struct niu_phy_template phy_template_niu_10g_serdes = {
	.ops		= &phy_ops_10g_serdes_niu,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_niu_1g_serdes = {
	.ops		= &phy_ops_1g_serdes_niu,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_10g_fiber = {
	.ops		= &phy_ops_10g_fiber,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
	.ops		= &phy_ops_10g_fiber_hotplug,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_niu_10g_hotplug = {
	.ops		= &phy_ops_niu_10g_hotplug,
	.phy_addr_base	= 8,
};

static const struct niu_phy_template phy_template_10g_copper = {
	.ops		= &phy_ops_10g_copper,
	.phy_addr_base	= 10,
};

static const struct niu_phy_template phy_template_1g_fiber = {
	.ops		= &phy_ops_1g_fiber,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_copper = {
	.ops		= &phy_ops_1g_copper,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_1g_rgmii = {
	.ops		= &phy_ops_1g_rgmii,
	.phy_addr_base	= 0,
};

static const struct niu_phy_template phy_template_10g_serdes = {
	.ops		= &phy_ops_10g_serdes,
	.phy_addr_base	= 0,
};
+
/* NOTE(review): indexed by a 2-bit port value; entries 2 and 3 appear
 * to map to ATCA backplane ports 11 and 10 -- confirm against this
 * table's users (not visible in this chunk).
 */
static int niu_atca_port_num[4] = {
	0, 0,  11, 10
};
+
+/* Initialize the 10G SERDES for port 0 or 1.  If the expected SERDES
+ * ready/detect signals never appear, fall back to 1G SERDES mode by
+ * clearing NIU_FLAGS_10G and switching the MAC transceiver to PCS.
+ * Returns 0 on success, -EINVAL for an unsupported port, -ENODEV if
+ * neither 10G nor the 1G fallback comes up.
+ */
+static int serdes_init_10g_serdes(struct niu *np)
+{
+       struct niu_link_config *lp = &np->link_config;
+       unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
+       u64 ctrl_val, test_cfg_val, sig, mask, val;
+
+       /* Only ports 0 and 1 are handled by this routine. */
+       switch (np->port) {
+       case 0:
+               ctrl_reg = ENET_SERDES_0_CTRL_CFG;
+               test_cfg_reg = ENET_SERDES_0_TEST_CFG;
+               pll_cfg = ENET_SERDES_0_PLL_CFG;
+               break;
+       case 1:
+               ctrl_reg = ENET_SERDES_1_CTRL_CFG;
+               test_cfg_reg = ENET_SERDES_1_TEST_CFG;
+               pll_cfg = ENET_SERDES_1_PLL_CFG;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+       /* Signal-detect enable plus EMPH/LADJ tuning on all four lanes. */
+       ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
+                   ENET_SERDES_CTRL_SDET_1 |
+                   ENET_SERDES_CTRL_SDET_2 |
+                   ENET_SERDES_CTRL_SDET_3 |
+                   (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
+                   (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
+                   (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
+                   (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
+                   (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
+                   (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
+                   (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
+                   (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
+       test_cfg_val = 0;
+
+       /* PHY loopback requests pad loopback on all four lanes. */
+       if (lp->loopback_mode == LOOPBACK_PHY) {
+               test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
+                                 ENET_SERDES_TEST_MD_0_SHIFT) |
+                                (ENET_TEST_MD_PAD_LOOPBACK <<
+                                 ENET_SERDES_TEST_MD_1_SHIFT) |
+                                (ENET_TEST_MD_PAD_LOOPBACK <<
+                                 ENET_SERDES_TEST_MD_2_SHIFT) |
+                                (ENET_TEST_MD_PAD_LOOPBACK <<
+                                 ENET_SERDES_TEST_MD_3_SHIFT));
+       }
+
+       esr_reset(np);
+       nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
+       nw64(ctrl_reg, ctrl_val);
+       nw64(test_cfg_reg, test_cfg_val);
+
+       /* Initialize all 4 lanes of the SERDES.  */
+       for (i = 0; i < 4; i++) {
+               u32 rxtx_ctrl, glue0;
+               int err;
+
+               err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
+               if (err)
+                       return err;
+               err = esr_read_glue0(np, i, &glue0);
+               if (err)
+                       return err;
+
+               rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
+               rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
+                             (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
+
+               glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
+                          ESR_GLUE_CTRL0_THCNT |
+                          ESR_GLUE_CTRL0_BLTIME);
+               glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
+                         (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
+                         (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
+                         (BLTIME_300_CYCLES <<
+                          ESR_GLUE_CTRL0_BLTIME_SHIFT));
+
+               err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
+               if (err)
+                       return err;
+               err = esr_write_glue0(np, i, glue0);
+               if (err)
+                       return err;
+       }
+
+
+       /* Check that the SERDES came ready for this port. */
+       sig = nr64(ESR_INT_SIGNALS);
+       switch (np->port) {
+       case 0:
+               mask = ESR_INT_SIGNALS_P0_BITS;
+               val = (ESR_INT_SRDY0_P0 |
+                      ESR_INT_DET0_P0 |
+                      ESR_INT_XSRDY_P0 |
+                      ESR_INT_XDP_P0_CH3 |
+                      ESR_INT_XDP_P0_CH2 |
+                      ESR_INT_XDP_P0_CH1 |
+                      ESR_INT_XDP_P0_CH0);
+               break;
+
+       case 1:
+               mask = ESR_INT_SIGNALS_P1_BITS;
+               val = (ESR_INT_SRDY0_P1 |
+                      ESR_INT_DET0_P1 |
+                      ESR_INT_XSRDY_P1 |
+                      ESR_INT_XDP_P1_CH3 |
+                      ESR_INT_XDP_P1_CH2 |
+                      ESR_INT_XDP_P1_CH1 |
+                      ESR_INT_XDP_P1_CH0);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       /* 10G did not come up; try the 1G SERDES fallback. */
+       if ((sig & mask) != val) {
+               int err;
+               err = serdes_init_1g_serdes(np);
+               if (!err) {
+                       np->flags &= ~NIU_FLAGS_10G;
+                       np->mac_xcvr = MAC_XCVR_PCS;
+               }  else {
+                       netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
+                                  np->port);
+                       return -ENODEV;
+               }
+       }
+
+       return 0;
+}
+
+/* Pick the PHY ops template and compute the PHY address for this port,
+ * based on the platform type and the 10G/FIBER/XCVR_SERDES flag
+ * combination.  On success sets np->phy_ops and np->phy_addr and
+ * returns 0; returns -EINVAL for unsupported combinations.
+ */
+static int niu_determine_phy_disposition(struct niu *np)
+{
+       struct niu_parent *parent = np->parent;
+       u8 plat_type = parent->plat_type;
+       const struct niu_phy_template *tp;
+       u32 phy_addr_off = 0;
+
+       if (plat_type == PLAT_TYPE_NIU) {
+               switch (np->flags &
+                       (NIU_FLAGS_10G |
+                        NIU_FLAGS_FIBER |
+                        NIU_FLAGS_XCVR_SERDES)) {
+               case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
+                       /* 10G Serdes */
+                       tp = &phy_template_niu_10g_serdes;
+                       break;
+               case NIU_FLAGS_XCVR_SERDES:
+                       /* 1G Serdes */
+                       tp = &phy_template_niu_1g_serdes;
+                       break;
+               case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
+                       /* 10G Fiber */
+               default:
+                       /* Also the catch-all for unrecognized flag
+                        * combinations on NIU platforms.
+                        */
+                       if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
+                               tp = &phy_template_niu_10g_hotplug;
+                               if (np->port == 0)
+                                       phy_addr_off = 8;
+                               if (np->port == 1)
+                                       phy_addr_off = 12;
+                       } else {
+                               tp = &phy_template_niu_10g_fiber;
+                               phy_addr_off += np->port;
+                       }
+                       break;
+               }
+       } else {
+               switch (np->flags &
+                       (NIU_FLAGS_10G |
+                        NIU_FLAGS_FIBER |
+                        NIU_FLAGS_XCVR_SERDES)) {
+               case 0:
+                       /* 1G copper */
+                       tp = &phy_template_1g_copper;
+                       if (plat_type == PLAT_TYPE_VF_P0)
+                               phy_addr_off = 10;
+                       else if (plat_type == PLAT_TYPE_VF_P1)
+                               phy_addr_off = 26;
+
+                       /* Ports are addressed in reverse order here. */
+                       phy_addr_off += (np->port ^ 0x3);
+                       break;
+
+               case NIU_FLAGS_10G:
+                       /* 10G copper */
+                       tp = &phy_template_10g_copper;
+                       break;
+
+               case NIU_FLAGS_FIBER:
+                       /* 1G fiber */
+                       tp = &phy_template_1g_fiber;
+                       break;
+
+               case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
+                       /* 10G fiber */
+                       tp = &phy_template_10g_fiber;
+                       if (plat_type == PLAT_TYPE_VF_P0 ||
+                           plat_type == PLAT_TYPE_VF_P1)
+                               phy_addr_off = 8;
+                       phy_addr_off += np->port;
+                       /* Hotplug PHYs override the offset computed above. */
+                       if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
+                               tp = &phy_template_10g_fiber_hotplug;
+                               if (np->port == 0)
+                                       phy_addr_off = 8;
+                               if (np->port == 1)
+                                       phy_addr_off = 12;
+                       }
+                       break;
+
+               case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
+               case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
+               case NIU_FLAGS_XCVR_SERDES:
+                       /* Ports 0/1 run 10G serdes, ports 2/3 1G RGMII. */
+                       switch(np->port) {
+                       case 0:
+                       case 1:
+                               tp = &phy_template_10g_serdes;
+                               break;
+                       case 2:
+                       case 3:
+                               tp = &phy_template_1g_rgmii;
+                               break;
+                       default:
+                               return -EINVAL;
+                               break;
+                       }
+                       phy_addr_off = niu_atca_port_num[np->port];
+                       break;
+
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       np->phy_ops = tp->ops;
+       np->phy_addr = tp->phy_addr_base + phy_addr_off;
+
+       return 0;
+}
+
+/* Bring up the link: initialize the SERDES then the transceiver, and
+ * sample link status once.  Ports with a hot-pluggable PHY are allowed
+ * to fail SERDES/XCVR init without aborting (the PHY may not be
+ * inserted yet).  NIU platforms get an extra XCVR init pass up front.
+ */
+static int niu_init_link(struct niu *np)
+{
+       struct niu_parent *parent = np->parent;
+       int err, ignore;
+
+       if (parent->plat_type == PLAT_TYPE_NIU) {
+               err = niu_xcvr_init(np);
+               if (err)
+                       return err;
+               msleep(200);
+       }
+       err = niu_serdes_init(np);
+       if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
+               return err;
+       msleep(200);
+       err = niu_xcvr_init(np);
+       /* Link status result is intentionally discarded here. */
+       if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
+               niu_link_status(np, &ignore);
+       return 0;
+}
+
+/* Program the primary station MAC address into this port's MAC.  The
+ * six address bytes are packed big-endian into three 16-bit register
+ * values; the XMAC and BMAC use different register banks.
+ */
+static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
+{
+       u16 lo = (addr[4] << 8) | addr[5];
+       u16 mid = (addr[2] << 8) | addr[3];
+       u16 hi = (addr[0] << 8) | addr[1];
+
+       if (np->flags & NIU_FLAGS_XMAC) {
+               nw64_mac(XMAC_ADDR0, lo);
+               nw64_mac(XMAC_ADDR1, mid);
+               nw64_mac(XMAC_ADDR2, hi);
+       } else {
+               nw64_mac(BMAC_ADDR0, lo);
+               nw64_mac(BMAC_ADDR1, mid);
+               nw64_mac(BMAC_ADDR2, hi);
+       }
+}
+
+/* Number of alternate MAC address slots supported by this port's MAC. */
+static int niu_num_alt_addr(struct niu *np)
+{
+       return (np->flags & NIU_FLAGS_XMAC) ?
+               XMAC_NUM_ALT_ADDR : BMAC_NUM_ALT_ADDR;
+}
+
+/* Program alternate MAC address slot @index with @addr, packed
+ * big-endian into three 16-bit register values.
+ * Returns -EINVAL if @index is beyond the MAC's slot count.
+ */
+static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
+{
+       u16 lo = (addr[4] << 8) | addr[5];
+       u16 mid = (addr[2] << 8) | addr[3];
+       u16 hi = (addr[0] << 8) | addr[1];
+
+       if (index >= niu_num_alt_addr(np))
+               return -EINVAL;
+
+       if (np->flags & NIU_FLAGS_XMAC) {
+               nw64_mac(XMAC_ALT_ADDR0(index), lo);
+               nw64_mac(XMAC_ALT_ADDR1(index), mid);
+               nw64_mac(XMAC_ALT_ADDR2(index), hi);
+       } else {
+               nw64_mac(BMAC_ALT_ADDR0(index), lo);
+               nw64_mac(BMAC_ALT_ADDR1(index), mid);
+               nw64_mac(BMAC_ALT_ADDR2(index), hi);
+       }
+
+       return 0;
+}
+
+/* Enable or disable address comparison for alternate MAC slot @index.
+ * NOTE(review): on the BMAC the compare-enable bit is at index + 1 --
+ * presumably bit 0 belongs to the primary address; confirm against the
+ * BMAC register documentation.
+ */
+static int niu_enable_alt_mac(struct niu *np, int index, int on)
+{
+       unsigned long reg;
+       u64 bit, val;
+
+       if (index >= niu_num_alt_addr(np))
+               return -EINVAL;
+
+       if (np->flags & NIU_FLAGS_XMAC) {
+               reg = XMAC_ADDR_CMPEN;
+               bit = 1 << index;
+       } else {
+               reg = BMAC_ADDR_CMPEN;
+               bit = 1 << (index + 1);
+       }
+
+       val = nr64_mac(reg);
+       if (on)
+               val |= bit;
+       else
+               val &= ~bit;
+       nw64_mac(reg, val);
+
+       return 0;
+}
+
+/* Write the RDC table number (and optional MAC-preference bit) into a
+ * host-info register.
+ */
+static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
+                                  int num, int mac_pref)
+{
+       u64 val;
+
+       val = nr64_mac(reg) & ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
+       val |= num;
+       if (mac_pref)
+               val |= HOST_INFO_MPR;
+       nw64_mac(reg, val);
+}
+
+/* Select the XMAC or BMAC host-info slot and program its RDC table
+ * number.  Returns -EINVAL if @rdc_table_num does not fit the field.
+ */
+static int __set_rdc_table_num(struct niu *np,
+                              int xmac_index, int bmac_index,
+                              int rdc_table_num, int mac_pref)
+{
+       unsigned long reg;
+
+       if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
+               return -EINVAL;
+
+       reg = (np->flags & NIU_FLAGS_XMAC) ?
+               XMAC_HOST_INFO(xmac_index) : BMAC_HOST_INFO(bmac_index);
+       __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
+       return 0;
+}
+
+/* RDC table for the primary MAC address (host-info slot 17/XMAC, 0/BMAC). */
+static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
+                                        int mac_pref)
+{
+       return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
+}
+
+/* RDC table for multicast traffic (host-info slot 16/XMAC, 8/BMAC). */
+static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
+                                          int mac_pref)
+{
+       return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
+}
+
+/* RDC table for alternate MAC slot @idx; BMAC slots are offset by one. */
+static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
+                                    int table_num, int mac_pref)
+{
+       if (idx >= niu_num_alt_addr(np))
+               return -EINVAL;
+       return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
+}
+
+/* Recompute the two parity bits of a VLAN table entry: PARITY0 covers
+ * the low byte (ports 0/1), PARITY1 the high byte (ports 2/3).  Each
+ * parity bit is set when its byte has odd population count.
+ */
+static u64 vlan_entry_set_parity(u64 reg_val)
+{
+       const u64 port01_mask = 0x00ff;
+       const u64 port23_mask = 0xff00;
+
+       if (hweight64(reg_val & port01_mask) & 1)
+               reg_val |= ENET_VLAN_TBL_PARITY0;
+       else
+               reg_val &= ~ENET_VLAN_TBL_PARITY0;
+
+       if (hweight64(reg_val & port23_mask) & 1)
+               reg_val |= ENET_VLAN_TBL_PARITY1;
+       else
+               reg_val &= ~ENET_VLAN_TBL_PARITY1;
+
+       return reg_val;
+}
+
+/* Update VLAN table entry @index for @port: set/clear the VPR bit and
+ * install @rdc_table in the port's field, then recompute the entry's
+ * parity bits and write it back.
+ */
+static void vlan_tbl_write(struct niu *np, unsigned long index,
+                          int port, int vpr, int rdc_table)
+{
+       u64 reg_val = nr64(ENET_VLAN_TBL(index));
+
+       reg_val &= ~((ENET_VLAN_TBL_VPR |
+                     ENET_VLAN_TBL_VLANRDCTBLN) <<
+                    ENET_VLAN_TBL_SHIFT(port));
+       if (vpr)
+               reg_val |= (ENET_VLAN_TBL_VPR <<
+                           ENET_VLAN_TBL_SHIFT(port));
+       reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
+
+       reg_val = vlan_entry_set_parity(reg_val);
+
+       nw64(ENET_VLAN_TBL(index), reg_val);
+}
+
+/* Zero every entry of the VLAN table. */
+static void vlan_tbl_clear(struct niu *np)
+{
+       unsigned long idx;
+
+       for (idx = 0; idx < ENET_VLAN_TBL_NUM_ENTRIES; idx++)
+               nw64(ENET_VLAN_TBL(idx), 0);
+}
+
+/* Poll TCAM_CTL until @bit becomes set, delaying 1us between polls.
+ * Gives up after 999 attempts (~1ms).  Returns 0 on success, -ENODEV
+ * on timeout.
+ */
+static int tcam_wait_bit(struct niu *np, u64 bit)
+{
+       int tries;
+
+       for (tries = 999; tries > 0; tries--) {
+               if (nr64(TCAM_CTL) & bit)
+                       return 0;
+               udelay(1);
+       }
+       return -ENODEV;
+}
+
+/* Invalidate TCAM entry @index by writing a zero key with mask 0xff,
+ * then wait for the write to complete.
+ */
+static int tcam_flush(struct niu *np, int index)
+{
+       nw64(TCAM_KEY_0, 0x00);
+       nw64(TCAM_KEY_MASK_0, 0xff);
+       nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
+
+       return tcam_wait_bit(np, TCAM_CTL_STAT);
+}
+
+#if 0
+/* Read back the 4-word key and mask of TCAM entry @index.  Currently
+ * compiled out -- kept for debugging.
+ */
+static int tcam_read(struct niu *np, int index,
+                    u64 *key, u64 *mask)
+{
+       int err;
+
+       nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
+       err = tcam_wait_bit(np, TCAM_CTL_STAT);
+       if (!err) {
+               key[0] = nr64(TCAM_KEY_0);
+               key[1] = nr64(TCAM_KEY_1);
+               key[2] = nr64(TCAM_KEY_2);
+               key[3] = nr64(TCAM_KEY_3);
+               mask[0] = nr64(TCAM_KEY_MASK_0);
+               mask[1] = nr64(TCAM_KEY_MASK_1);
+               mask[2] = nr64(TCAM_KEY_MASK_2);
+               mask[3] = nr64(TCAM_KEY_MASK_3);
+       }
+       return err;
+}
+#endif
+
+/* Write the 4-word key and mask into TCAM entry @index and wait for
+ * the hardware to latch it.
+ */
+static int tcam_write(struct niu *np, int index,
+                     u64 *key, u64 *mask)
+{
+       nw64(TCAM_KEY_0, key[0]);
+       nw64(TCAM_KEY_1, key[1]);
+       nw64(TCAM_KEY_2, key[2]);
+       nw64(TCAM_KEY_3, key[3]);
+       nw64(TCAM_KEY_MASK_0, mask[0]);
+       nw64(TCAM_KEY_MASK_1, mask[1]);
+       nw64(TCAM_KEY_MASK_2, mask[2]);
+       nw64(TCAM_KEY_MASK_3, mask[3]);
+       nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
+
+       return tcam_wait_bit(np, TCAM_CTL_STAT);
+}
+
+#if 0
+/* Read the associated-data RAM entry for TCAM slot @index.  Currently
+ * compiled out -- kept for debugging.
+ */
+static int tcam_assoc_read(struct niu *np, int index, u64 *data)
+{
+       int err;
+
+       nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
+       err = tcam_wait_bit(np, TCAM_CTL_STAT);
+       if (!err)
+               *data = nr64(TCAM_KEY_1);
+
+       return err;
+}
+#endif
+
+/* Write the associated-data RAM entry for TCAM slot @index (the data
+ * is staged through the TCAM_KEY_1 register).
+ */
+static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
+{
+       nw64(TCAM_KEY_1, assoc_data);
+       nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
+
+       return tcam_wait_bit(np, TCAM_CTL_STAT);
+}
+
+/* Globally enable (@on != 0) or disable TCAM lookups by toggling the
+ * disable bit in FFLP_CFG_1.
+ */
+static void tcam_enable(struct niu *np, int on)
+{
+       u64 cfg = nr64(FFLP_CFG_1);
+
+       if (on)
+               cfg &= ~FFLP_CFG_1_TCAM_DIS;
+       else
+               cfg |= FFLP_CFG_1_TCAM_DIS;
+       nw64(FFLP_CFG_1, cfg);
+}
+
+/* Program the TCAM access latency and access ratio fields.  The first
+ * write also clears FFLPINITDONE; it is re-asserted with a second
+ * write once the new values are in place.
+ */
+static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
+{
+       u64 val = nr64(FFLP_CFG_1);
+
+       val &= ~(FFLP_CFG_1_FFLPINITDONE |
+                FFLP_CFG_1_CAMLAT |
+                FFLP_CFG_1_CAMRATIO);
+       val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
+       val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
+       nw64(FFLP_CFG_1, val);
+
+       val = nr64(FFLP_CFG_1);
+       val |= FFLP_CFG_1_FFLPINITDONE;
+       nw64(FFLP_CFG_1, val);
+}
+
+/* Set or clear the valid bit of a user-programmable ethertype class.
+ * Returns -EINVAL if @class is outside [ETHERTYPE1, ETHERTYPE2].
+ */
+static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
+                                     int on)
+{
+       unsigned long reg;
+       u64 val;
+
+       if (class < CLASS_CODE_ETHERTYPE1 || class > CLASS_CODE_ETHERTYPE2)
+               return -EINVAL;
+
+       reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
+       val = nr64(reg);
+       val = on ? (val | L2_CLS_VLD) : (val & ~L2_CLS_VLD);
+       nw64(reg, val);
+
+       return 0;
+}
+
+#if 0
+/* Program the ethertype matched by a user ethertype class.  Currently
+ * compiled out -- kept for future use.
+ */
+static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
+                                  u64 ether_type)
+{
+       unsigned long reg;
+       u64 val;
+
+       if (class < CLASS_CODE_ETHERTYPE1 ||
+           class > CLASS_CODE_ETHERTYPE2 ||
+           (ether_type & ~(u64)0xffff) != 0)
+               return -EINVAL;
+
+       reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
+       val = nr64(reg);
+       val &= ~L2_CLS_ETYPE;
+       val |= (ether_type << L2_CLS_ETYPE_SHIFT);
+       nw64(reg, val);
+
+       return 0;
+}
+#endif
+
+/* Set or clear the valid bit of a user-programmable IP class.
+ * Returns -EINVAL if @class is outside [USER_PROG1, USER_PROG4].
+ */
+static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
+                                    int on)
+{
+       unsigned long reg;
+       u64 val;
+
+       if (class < CLASS_CODE_USER_PROG1 || class > CLASS_CODE_USER_PROG4)
+               return -EINVAL;
+
+       reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
+       val = nr64(reg);
+       val = on ? (val | L3_CLS_VALID) : (val & ~L3_CLS_VALID);
+       nw64(reg, val);
+
+       return 0;
+}
+
+/* Program a user IP class: IP version (v6 when @ipv6), 8-bit protocol
+ * id, and 8-bit TOS mask/value.  Returns -EINVAL when @class is out of
+ * range or any 8-bit argument overflows its field.
+ */
+static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
+                                 int ipv6, u64 protocol_id,
+                                 u64 tos_mask, u64 tos_val)
+{
+       unsigned long reg;
+       u64 val;
+
+       if (class < CLASS_CODE_USER_PROG1 ||
+           class > CLASS_CODE_USER_PROG4 ||
+           (protocol_id & ~(u64)0xff) != 0 ||
+           (tos_mask & ~(u64)0xff) != 0 ||
+           (tos_val & ~(u64)0xff) != 0)
+               return -EINVAL;
+
+       reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
+       val = nr64(reg);
+       val &= ~(L3_CLS_IPVER | L3_CLS_PID |
+                L3_CLS_TOSMASK | L3_CLS_TOS);
+       if (ipv6)
+               val |= L3_CLS_IPVER;
+       val |= (protocol_id << L3_CLS_PID_SHIFT);
+       val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
+       val |= (tos_val << L3_CLS_TOS_SHIFT);
+       nw64(reg, val);
+
+       return 0;
+}
+
+/* Early TCAM setup: disable lookups, program default latency/ratio,
+ * and disable all user-programmable ethertype and IP classes.
+ */
+static int tcam_early_init(struct niu *np)
+{
+       unsigned long i;
+       int err;
+
+       tcam_enable(np, 0);
+       tcam_set_lat_and_ratio(np,
+                              DEFAULT_TCAM_LATENCY,
+                              DEFAULT_TCAM_ACCESS_RATIO);
+       for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
+               err = tcam_user_eth_class_enable(np, i, 0);
+               if (err)
+                       return err;
+       }
+       for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
+               err = tcam_user_ip_class_enable(np, i, 0);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+/* Invalidate every TCAM entry owned by this parent; stops and returns
+ * the first error encountered.
+ */
+static int tcam_flush_all(struct niu *np)
+{
+       unsigned long idx;
+       int err;
+
+       for (idx = 0; idx < np->parent->tcam_num_entries; idx++) {
+               err = tcam_flush(np, idx);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+/* Build a HASH_TBL_ADDR register value for @index; single-entry
+ * accesses additionally set the auto-increment bit.
+ */
+static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
+{
+       u64 val = index;
+
+       if (num_entries == 1)
+               val |= HASH_TBL_ADDR_AUTOINC;
+       return val;
+}
+
+#if 0
+/* Read @num_entries words from FCRAM hash @partition starting at
+ * @index.  Currently compiled out.
+ * NOTE(review): the bounds check here uses index + num_entries while
+ * hash_write() uses index + num_entries * 8 -- one of the two is
+ * likely wrong; confirm the FCRAM addressing unit before re-enabling.
+ */
+static int hash_read(struct niu *np, unsigned long partition,
+                    unsigned long index, unsigned long num_entries,
+                    u64 *data)
+{
+       u64 val = hash_addr_regval(index, num_entries);
+       unsigned long i;
+
+       if (partition >= FCRAM_NUM_PARTITIONS ||
+           index + num_entries > FCRAM_SIZE)
+               return -EINVAL;
+
+       nw64(HASH_TBL_ADDR(partition), val);
+       for (i = 0; i < num_entries; i++)
+               data[i] = nr64(HASH_TBL_DATA(partition));
+
+       return 0;
+}
+#endif
+
+/* Write @num_entries words into FCRAM hash @partition starting at
+ * @index.  The bounds check treats each entry as 8 bytes of FCRAM.
+ * Returns -EINVAL on a bad partition or an out-of-range span.
+ */
+static int hash_write(struct niu *np, unsigned long partition,
+                     unsigned long index, unsigned long num_entries,
+                     u64 *data)
+{
+       u64 val = hash_addr_regval(index, num_entries);
+       unsigned long i;
+
+       if (partition >= FCRAM_NUM_PARTITIONS ||
+           index + (num_entries * 8) > FCRAM_SIZE)
+               return -EINVAL;
+
+       nw64(HASH_TBL_ADDR(partition), val);
+       for (i = 0; i < num_entries; i++)
+               nw64(HASH_TBL_DATA(partition), data[i]);
+
+       return 0;
+}
+
+/* Pulse the FFLP PIO/FIO reset bit, then restore FFLP_CFG_1 with
+ * normal FCRAM output drive and the init-done bit set.
+ */
+static void fflp_reset(struct niu *np)
+{
+       u64 val;
+
+       nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
+       udelay(10);
+       nw64(FFLP_CFG_1, 0);
+
+       val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
+       nw64(FFLP_CFG_1, val);
+}
+
+/* Program the FCRAM access ratio (clearing FFLPINITDONE while doing
+ * so, then re-asserting it) and the FCRAM refresh min/max timers.
+ */
+static void fflp_set_timings(struct niu *np)
+{
+       u64 val = nr64(FFLP_CFG_1);
+
+       val &= ~FFLP_CFG_1_FFLPINITDONE;
+       val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
+       nw64(FFLP_CFG_1, val);
+
+       val = nr64(FFLP_CFG_1);
+       val |= FFLP_CFG_1_FFLPINITDONE;
+       nw64(FFLP_CFG_1, val);
+
+       val = nr64(FCRAM_REF_TMR);
+       val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
+       val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
+       val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
+       nw64(FCRAM_REF_TMR, val);
+}
+
+/* Configure FCRAM flow @partition with a 5-bit address @mask and
+ * @base, optionally enabling it.  Returns -EINVAL when the partition
+ * number or either 5-bit field is out of range.
+ */
+static int fflp_set_partition(struct niu *np, u64 partition,
+                             u64 mask, u64 base, int enable)
+{
+       unsigned long reg;
+       u64 val;
+
+       if (partition >= FCRAM_NUM_PARTITIONS ||
+           (mask & ~(u64)0x1f) != 0 ||
+           (base & ~(u64)0x1f) != 0)
+               return -EINVAL;
+
+       reg = FLW_PRT_SEL(partition);
+
+       val = nr64(reg);
+       val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
+       val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
+       val |= (base << FLW_PRT_SEL_BASE_SHIFT);
+       if (enable)
+               val |= FLW_PRT_SEL_EXT;
+       nw64(reg, val);
+
+       return 0;
+}
+
+/* Disable every FCRAM flow partition.
+ *
+ * Fix: the original passed a hard-coded partition number of 0 on every
+ * iteration, re-disabling partition 0 FCRAM_NUM_PARTITIONS times and
+ * never touching partitions 1..N-1.  Pass the loop index so each
+ * partition is actually disabled.
+ */
+static int fflp_disable_all_partitions(struct niu *np)
+{
+       unsigned long i;
+
+       for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
+               int err = fflp_set_partition(np, i, 0, 0, 0);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+/* Enable or disable LLC-SNAP handling in the FFLP. */
+static void fflp_llcsnap_enable(struct niu *np, int on)
+{
+       u64 cfg = nr64(FFLP_CFG_1);
+
+       if (on)
+               cfg |= FFLP_CFG_1_LLCSNAP;
+       else
+               cfg &= ~FFLP_CFG_1_LLCSNAP;
+       nw64(FFLP_CFG_1, cfg);
+}
+
+static void fflp_errors_enable(struct niu *np, int on)
+{
+       u64 val = nr64(FFLP_CFG_1);
+
+       if (on)
+               val &= ~FFLP_CFG_1_ERRORDIS;
+       else
+               val |= FFLP_CFG_1_ERRORDIS;
+       nw64(FFLP_CFG_1, val);
+}
+
+/* Clear the whole FCRAM hash table by writing an invalid IPv4 entry
+ * (valid bit clear) to every slot of partition 0.
+ */
+static int fflp_hash_clear(struct niu *np)
+{
+       struct fcram_hash_ipv4 ent;
+       unsigned long i;
+
+       /* IPV4 hash entry with valid bit clear, rest is don't care.  */
+       memset(&ent, 0, sizeof(ent));
+       ent.header = HASH_HEADER_EXT;
+
+       for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
+               int err = hash_write(np, 0, i, 1, (u64 *) &ent);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+/* One-time early init of the flow classifier (FFLP + TCAM), done once
+ * per parent under the parent lock and guarded by the
+ * PARENT_FLGS_CLS_HWINIT flag.  Non-NIU platforms additionally reset
+ * the FFLP, program its timings and clear the FCRAM hash table.
+ * Returns 0 or the first error from a sub-step.
+ */
+static int fflp_early_init(struct niu *np)
+{
+       struct niu_parent *parent;
+       unsigned long flags;
+       int err;
+
+       niu_lock_parent(np, flags);
+
+       parent = np->parent;
+       err = 0;
+       if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
+               if (np->parent->plat_type != PLAT_TYPE_NIU) {
+                       fflp_reset(np);
+                       fflp_set_timings(np);
+                       err = fflp_disable_all_partitions(np);
+                       if (err) {
+                               netif_printk(np, probe, KERN_DEBUG, np->dev,
+                                            "fflp_disable_all_partitions failed, err=%d\n",
+                                            err);
+                               goto out;
+                       }
+               }
+
+               err = tcam_early_init(np);
+               if (err) {
+                       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                                    "tcam_early_init failed, err=%d\n", err);
+                       goto out;
+               }
+               fflp_llcsnap_enable(np, 1);
+               fflp_errors_enable(np, 0);
+               /* Zero both hash polynomials. */
+               nw64(H1POLY, 0);
+               nw64(H2POLY, 0);
+
+               err = tcam_flush_all(np);
+               if (err) {
+                       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                                    "tcam_flush_all failed, err=%d\n", err);
+                       goto out;
+               }
+               if (np->parent->plat_type != PLAT_TYPE_NIU) {
+                       err = fflp_hash_clear(np);
+                       if (err) {
+                               netif_printk(np, probe, KERN_DEBUG, np->dev,
+                                            "fflp_hash_clear failed, err=%d\n",
+                                            err);
+                               goto out;
+                       }
+               }
+
+               vlan_tbl_clear(np);
+
+               parent->flags |= PARENT_FLGS_CLS_HWINIT;
+       }
+out:
+       niu_unlock_parent(np, flags);
+       return err;
+}
+
+/* Program the flow-key register for @class_code; -EINVAL if the class
+ * code is outside [USER_PROG1, SCTP_IPV6].
+ */
+static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
+{
+       if (class_code >= CLASS_CODE_USER_PROG1 &&
+           class_code <= CLASS_CODE_SCTP_IPV6) {
+               nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+/* Program the TCAM-key register for @class_code; -EINVAL if the class
+ * code is outside [USER_PROG1, SCTP_IPV6].
+ */
+static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
+{
+       if (class_code >= CLASS_CODE_USER_PROG1 &&
+           class_code <= CLASS_CODE_SCTP_IPV6) {
+               nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+/* Entries for the ports are interleaved in the TCAM */
+/* Map a logical flow index to the physical TCAM slot for this port.
+ * NOTE(review): an out-of-range @idx is silently remapped to 0 rather
+ * than rejected -- this can mask caller bugs; confirm intent.
+ */
+static u16 tcam_get_index(struct niu *np, u16 idx)
+{
+       /* One entry reserved for IP fragment rule */
+       if (idx >= (np->clas.tcam_sz - 1))
+               idx = 0;
+       return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
+}
+
+/* Number of TCAM slots usable for flow rules on this port. */
+static u16 tcam_get_size(struct niu *np)
+{
+       /* One entry reserved for IP fragment rule */
+       return np->clas.tcam_sz - 1;
+}
+
+/* Number of currently-valid user flow rules on this port. */
+static u16 tcam_get_valid_entry_cnt(struct niu *np)
+{
+       /* One entry reserved for IP fragment rule */
+       return np->clas.tcam_valid_entries - 1;
+}
+
+/* Append @size bytes of @page (at @offset) as a new paged fragment of
+ * @skb, updating len/data_len/truesize.  Fills the skb_frag_t fields
+ * directly rather than via skb_fill_page_desc(); the caller is
+ * responsible for not exceeding MAX_SKB_FRAGS.
+ */
+static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
+                             u32 offset, u32 size)
+{
+       int i = skb_shinfo(skb)->nr_frags;
+       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+       frag->page = page;
+       frag->page_offset = offset;
+       frag->size = size;
+
+       skb->len += size;
+       skb->data_len += size;
+       skb->truesize += size;
+
+       skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+/* Hash a DMA address down to an rxhash bucket index (page-granular:
+ * the low PAGE_SHIFT bits are discarded first).
+ */
+static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
+{
+       u64 v = a >> PAGE_SHIFT;
+
+       v ^= v >> ilog2(MAX_RBR_RING_SIZE);
+       return v & (MAX_RBR_RING_SIZE - 1);
+}
+
+/* Find the page backing DMA address @addr in the ring's rxhash.  The
+ * chain link is stored in page->mapping (repurposed as a next
+ * pointer); *@link is set to the pointer that references the found
+ * page so the caller can unlink it.  BUG()s if the page is absent,
+ * since every posted buffer must have been hashed.
+ */
+static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
+                                   struct page ***link)
+{
+       unsigned int h = niu_hash_rxaddr(rp, addr);
+       struct page *p, **pp;
+
+       addr &= PAGE_MASK;
+       pp = &rp->rxhash[h];
+       for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
+               if (p->index == addr) {
+                       *link = pp;
+                       goto found;
+               }
+       }
+       BUG();
+
+found:
+       return p;
+}
+
+/* Insert @page at the head of its rxhash bucket.  page->index holds
+ * the page's base DMA address and page->mapping is repurposed as the
+ * chain's next pointer (see niu_find_rxpage()).
+ */
+static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
+{
+       unsigned int h = niu_hash_rxaddr(rp, base);
+
+       page->index = base;
+       page->mapping = (struct address_space *) rp->rxhash[h];
+       rp->rxhash[h] = page;
+}
+
+/* Allocate one page, DMA-map it, hash it, and post it to the RBR as
+ * rbr_blocks_per_page descriptors starting at @start_index.  The page
+ * refcount is bumped once per extra block so each consumed block can
+ * drop a reference independently.  Returns 0 or -ENOMEM.
+ */
+static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
+                           gfp_t mask, int start_index)
+{
+       struct page *page;
+       u64 addr;
+       int i;
+
+       page = alloc_page(mask);
+       if (!page)
+               return -ENOMEM;
+
+       addr = np->ops->map_page(np->device, page, 0,
+                                PAGE_SIZE, DMA_FROM_DEVICE);
+
+       niu_hash_page(rp, page, addr);
+       if (rp->rbr_blocks_per_page > 1)
+               atomic_add(rp->rbr_blocks_per_page - 1,
+                          &compound_head(page)->_count);
+
+       for (i = 0; i < rp->rbr_blocks_per_page; i++) {
+               __le32 *rbr = &rp->rbr[start_index + i];
+
+               *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
+               addr += rp->rbr_block_size;
+       }
+
+       return 0;
+}
+
+/* Account one consumed RX buffer and, once a full page's worth of
+ * blocks is pending, post a fresh page to the RBR.  The kick register
+ * is only written after rbr_kick_thresh buffers accumulate, to batch
+ * doorbells.  Allocation failure just leaves the buffer un-refilled.
+ */
+static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
+{
+       int index = rp->rbr_index;
+
+       rp->rbr_pending++;
+       if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
+               int err = niu_rbr_add_page(np, rp, mask, index);
+
+               if (unlikely(err)) {
+                       rp->rbr_pending--;
+                       return;
+               }
+
+               rp->rbr_index += rp->rbr_blocks_per_page;
+               BUG_ON(rp->rbr_index > rp->rbr_table_size);
+               if (rp->rbr_index == rp->rbr_table_size)
+                       rp->rbr_index = 0;
+
+               if (rp->rbr_pending >= rp->rbr_kick_thresh) {
+                       nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
+                       rp->rbr_pending = 0;
+               }
+       }
+}
+
+/* Consume and discard all RCR entries belonging to one packet (used
+ * when skb allocation fails).  Pages whose last block was consumed are
+ * unmapped, unhashed and freed, and a refill is scheduled.  Returns
+ * the number of RCR entries consumed; bumps rx_dropped.
+ */
+static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
+{
+       unsigned int index = rp->rcr_index;
+       int num_rcr = 0;
+
+       rp->rx_dropped++;
+       while (1) {
+               struct page *page, **link;
+               u64 addr, val;
+               u32 rcr_size;
+
+               num_rcr++;
+
+               val = le64_to_cpup(&rp->rcr[index]);
+               addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
+                       RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
+               page = niu_find_rxpage(rp, addr, &link);
+
+               rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
+                                        RCR_ENTRY_PKTBUFSZ_SHIFT];
+               /* Last block of the page consumed: release the page. */
+               if ((page->index + PAGE_SIZE) - rcr_size == addr) {
+                       *link = (struct page *) page->mapping;
+                       np->ops->unmap_page(np->device, page->index,
+                                           PAGE_SIZE, DMA_FROM_DEVICE);
+                       page->index = 0;
+                       page->mapping = NULL;
+                       __free_page(page);
+                       rp->rbr_refill_pending++;
+               }
+
+               index = NEXT_RCR(rp, index);
+               if (!(val & RCR_ENTRY_MULTI))
+                       break;
+
+       }
+       rp->rcr_index = index;
+
+       return num_rcr;
+}
+
+/* Receive one packet from the RCR: gather its (possibly multiple)
+ * buffer fragments into an skb, recycle or pin the backing pages,
+ * strip the NIU-prepended packet header, and hand the skb to GRO.
+ * Returns the number of RCR entries consumed.
+ */
+static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
+                             struct rx_ring_info *rp)
+{
+       unsigned int index = rp->rcr_index;
+       struct rx_pkt_hdr1 *rh;
+       struct sk_buff *skb;
+       int len, num_rcr;
+
+       skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
+       if (unlikely(!skb))
+               /* No skb: still must consume the RCR entries and
+                * recycle buffers, so fall back to the drop path.
+                */
+               return niu_rx_pkt_ignore(np, rp);
+
+       num_rcr = 0;
+       while (1) {
+               struct page *page, **link;
+               u32 rcr_size, append_size;
+               u64 addr, val, off;
+
+               num_rcr++;
+
+               val = le64_to_cpup(&rp->rcr[index]);
+
+               /* L2 length includes the FCS, which the chip leaves in
+                * the buffer but we do not pass up the stack.
+                */
+               len = (val & RCR_ENTRY_L2_LEN) >>
+                       RCR_ENTRY_L2_LEN_SHIFT;
+               len -= ETH_FCS_LEN;
+
+               addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
+                       RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
+               page = niu_find_rxpage(rp, addr, &link);
+
+               rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
+                                        RCR_ENTRY_PKTBUFSZ_SHIFT];
+
+               off = addr & ~PAGE_MASK;
+               append_size = rcr_size;
+               if (num_rcr == 1) {
+                       int ptype;
+
+                       /* Only the first entry carries the parsed
+                        * packet type / checksum verdict.
+                        */
+                       ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
+                       if ((ptype == RCR_PKT_TYPE_TCP ||
+                            ptype == RCR_PKT_TYPE_UDP) &&
+                           !(val & (RCR_ENTRY_NOPORT |
+                                    RCR_ENTRY_ERROR)))
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       else
+                               skb_checksum_none_assert(skb);
+               } else if (!(val & RCR_ENTRY_MULTI))
+                       /* Final fragment: only append what remains of
+                        * the L2 payload, not the whole buffer.
+                        */
+                       append_size = len - skb->len;
+
+               niu_rx_skb_append(skb, page, off, append_size);
+               if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
+                       /* Last buffer of this page consumed: unhash and
+                        * unmap it; the skb frag now owns the final ref.
+                        */
+                       *link = (struct page *) page->mapping;
+                       np->ops->unmap_page(np->device, page->index,
+                                           PAGE_SIZE, DMA_FROM_DEVICE);
+                       page->index = 0;
+                       page->mapping = NULL;
+                       rp->rbr_refill_pending++;
+               } else
+                       get_page(page);
+
+               index = NEXT_RCR(rp, index);
+               if (!(val & RCR_ENTRY_MULTI))
+                       break;
+
+       }
+       rp->rcr_index = index;
+
+       /* Pull the NIU packet header plus up to a VLAN ethernet header
+        * into the linear area so the header fields can be read.
+        */
+       len += sizeof(*rh);
+       len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
+       __pskb_pull_tail(skb, len);
+
+       rh = (struct rx_pkt_hdr1 *) skb->data;
+       if (np->dev->features & NETIF_F_RXHASH)
+               skb->rxhash = ((u32)rh->hashval2_0 << 24 |
+                              (u32)rh->hashval2_1 << 16 |
+                              (u32)rh->hashval1_1 << 8 |
+                              (u32)rh->hashval1_2 << 0);
+       skb_pull(skb, sizeof(*rh));
+
+       rp->rx_packets++;
+       rp->rx_bytes += skb->len;
+
+       skb->protocol = eth_type_trans(skb, np->dev);
+       skb_record_rx_queue(skb, rp->rx_channel);
+       napi_gro_receive(napi, skb);
+
+       return num_rcr;
+}
+
+/* Populate the RBR with fresh pages until fewer than one page worth
+ * of slots remain.  Returns 0 on success or the first allocation
+ * error; rp->rbr_index is left at the first unfilled slot either way.
+ */
+static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
+{
+       int per_page = rp->rbr_blocks_per_page;
+       int idx, rc = 0;
+
+       for (idx = rp->rbr_index;
+            idx < (rp->rbr_table_size - per_page);
+            idx += per_page) {
+               rc = niu_rbr_add_page(np, rp, mask, idx);
+               if (rc)
+                       break;
+       }
+
+       rp->rbr_index = idx;
+       return rc;
+}
+
+/* Tear down all RX buffer pages: walk every hash chain unmapping and
+ * freeing each page, then zero the RBR table entries and reset the
+ * ring index.
+ */
+static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
+{
+       int i;
+
+       for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
+               struct page *page;
+
+               page = rp->rxhash[i];
+               while (page) {
+                       /* page->mapping doubles as the hash-chain next
+                        * pointer and page->index as the DMA address.
+                        */
+                       struct page *next = (struct page *) page->mapping;
+                       u64 base = page->index;
+
+                       np->ops->unmap_page(np->device, base, PAGE_SIZE,
+                                           DMA_FROM_DEVICE);
+                       page->index = 0;
+                       page->mapping = NULL;
+
+                       __free_page(page);
+
+                       page = next;
+               }
+       }
+
+       for (i = 0; i < rp->rbr_table_size; i++)
+               rp->rbr[i] = cpu_to_le32(0);
+       rp->rbr_index = 0;
+}
+
+/* Reclaim a completed TX packet at ring slot @idx: update byte/packet
+ * stats from the TX header, unmap the head and all fragment DMA
+ * mappings, free the skb, and return the index of the next slot.
+ */
+static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
+{
+       struct tx_buff_info *tb = &rp->tx_buffs[idx];
+       struct sk_buff *skb = tb->skb;
+       struct tx_pkt_hdr *tp;
+       u64 tx_flags;
+       int i, len;
+
+       tp = (struct tx_pkt_hdr *) skb->data;
+       tx_flags = le64_to_cpup(&tp->flags);
+
+       rp->tx_packets++;
+       /* TXHDR_PAD counts in half-byte units, hence the divide by 2. */
+       rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
+                        ((tx_flags & TXHDR_PAD) / 2));
+
+       len = skb_headlen(skb);
+       np->ops->unmap_single(np->device, tb->mapping,
+                             len, DMA_TO_DEVICE);
+
+       if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
+               rp->mark_pending--;
+
+       tb->skb = NULL;
+       /* The linear head may span several descriptors of up to
+        * MAX_TX_DESC_LEN bytes each; skip past all of them.
+        */
+       do {
+               idx = NEXT_TX(rp, idx);
+               len -= MAX_TX_DESC_LEN;
+       } while (len > 0);
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               tb = &rp->tx_buffs[idx];
+               BUG_ON(tb->skb != NULL);
+               np->ops->unmap_page(np->device, tb->mapping,
+                                   skb_shinfo(skb)->frags[i].size,
+                                   DMA_TO_DEVICE);
+               idx = NEXT_TX(rp, idx);
+       }
+
+       dev_kfree_skb(skb);
+
+       return idx;
+}
+
+/* Wake the queue once at least a quarter of the ring is free. */
+#define NIU_TX_WAKEUP_THRESH(rp)               ((rp)->pending / 4)
+
+/* TX completion processing for one ring: read the hardware packet
+ * counter delta since the last pass, release that many packets, and
+ * wake the netdev queue if enough space has been freed.
+ */
+static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
+{
+       struct netdev_queue *txq;
+       u16 pkt_cnt, tmp;
+       int cons, index;
+       u64 cs;
+
+       index = (rp - np->tx_rings);
+       txq = netdev_get_tx_queue(np->dev, index);
+
+       cs = rp->tx_cs;
+       if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
+               goto out;
+
+       /* The hardware counter is a free-running 16-bit-field value;
+        * compute the wrapped delta against the last snapshot.
+        */
+       tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
+       pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
+               (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
+
+       rp->last_pkt_cnt = tmp;
+
+       cons = rp->cons;
+
+       netif_printk(np, tx_done, KERN_DEBUG, np->dev,
+                    "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
+
+       while (pkt_cnt--)
+               cons = release_tx_packet(np, rp, cons);
+
+       rp->cons = cons;
+       /* Publish the new consumer index before testing the stopped
+        * state below; pairs with the producer side in the xmit path.
+        */
+       smp_mb();
+
+out:
+       if (unlikely(netif_tx_queue_stopped(txq) &&
+                    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
+               /* Re-check under the tx lock to avoid waking a queue
+                * the xmit path is concurrently stopping.
+                */
+               __netif_tx_lock(txq, smp_processor_id());
+               if (netif_tx_queue_stopped(txq) &&
+                   (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
+                       netif_tx_wake_queue(txq);
+               __netif_tx_unlock(txq);
+       }
+}
+
+static inline void niu_sync_rx_discard_stats(struct niu *np,
+                                            struct rx_ring_info *rp,
+                                            const int limit)
+{
+       /* This elaborate scheme is needed for reading the RX discard
+        * counters, as they are only 16-bit and can overflow quickly,
+        * and because the overflow indication bit is not usable as
+        * the counter value does not wrap, but remains at max value
+        * 0xFFFF.
+        *
+        * In theory and in practice counters can be lost in between
+        * reading nr64() and clearing the counter nw64().  For this
+        * reason, the number of counter clearings nw64() is
+        * limited/reduced though the limit parameter.
+        */
+       int rx_channel = rp->rx_channel;
+       u32 misc, wred;
+
+       /* RXMISC (Receive Miscellaneous Discard Count), covers the
+        * following discard events: IPP (Input Port Process),
+        * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive
+        * Block Ring) prefetch buffer is empty.
+        */
+       misc = nr64(RXMISC(rx_channel));
+       /* Only clear (and thus risk losing events) once the counter
+        * exceeds @limit.
+        */
+       if (unlikely((misc & RXMISC_COUNT) > limit)) {
+               nw64(RXMISC(rx_channel), 0);
+               rp->rx_errors += misc & RXMISC_COUNT;
+
+               if (unlikely(misc & RXMISC_OFLOW))
+                       dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
+                               rx_channel);
+
+               netif_printk(np, rx_err, KERN_DEBUG, np->dev,
+                            "rx-%d: MISC drop=%u over=%u\n",
+                            rx_channel, misc, misc-limit);
+       }
+
+       /* WRED (Weighted Random Early Discard) by hardware */
+       wred = nr64(RED_DIS_CNT(rx_channel));
+       if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
+               nw64(RED_DIS_CNT(rx_channel), 0);
+               rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
+
+               if (unlikely(wred & RED_DIS_CNT_OFLOW))
+                       dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
+
+               netif_printk(np, rx_err, KERN_DEBUG, np->dev,
+                            "rx-%d: WRED drop=%u over=%u\n",
+                            rx_channel, wred, wred-limit);
+       }
+}
+
+/* NAPI RX processing for one channel: consume up to @budget packets
+ * from the RCR, batch-refill the RBR, acknowledge progress to the
+ * hardware, and sync discard counters when drops look likely.
+ * Returns the number of packets processed.
+ */
+static int niu_rx_work(struct napi_struct *napi, struct niu *np,
+                      struct rx_ring_info *rp, int budget)
+{
+       int qlen, rcr_done = 0, work_done = 0;
+       struct rxdma_mailbox *mbox = rp->mbox;
+       u64 stat;
+
+#if 1
+       /* Read status/qlen directly from registers; the #else branch
+        * is the (currently disabled) DMA-mailbox alternative.
+        */
+       stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
+       qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
+#else
+       stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
+       qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
+#endif
+       mbox->rx_dma_ctl_stat = 0;
+       mbox->rcrstat_a = 0;
+
+       netif_printk(np, rx_status, KERN_DEBUG, np->dev,
+                    "%s(chan[%d]), stat[%llx] qlen=%d\n",
+                    __func__, rp->rx_channel, (unsigned long long)stat, qlen);
+
+       rcr_done = work_done = 0;
+       qlen = min(qlen, budget);
+       while (work_done < qlen) {
+               rcr_done += niu_process_rx_pkt(napi, np, rp);
+               work_done++;
+       }
+
+       if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
+               unsigned int i;
+
+               for (i = 0; i < rp->rbr_refill_pending; i++)
+                       niu_rbr_refill(np, rp, GFP_ATOMIC);
+               rp->rbr_refill_pending = 0;
+       }
+
+       /* Report packets and RCR entries consumed back to the chip
+        * and re-enable mailbox updates (MEX).
+        */
+       stat = (RX_DMA_CTL_STAT_MEX |
+               ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
+               ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
+
+       nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
+
+       /* Only sync discards stats when qlen indicate potential for drops */
+       if (qlen > 10)
+               niu_sync_rx_discard_stats(np, rp, 0x7FFF);
+
+       return work_done;
+}
+
+/* Core of the NAPI poll: the cached interrupt status v0 carries TX
+ * channel bits in its upper 32 bits and RX channel bits in the lower
+ * 32.  Service the flagged rings and clear each ring's interrupt
+ * mask (LD_IM0) so it can fire again.  Returns RX work done.
+ */
+static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
+{
+       u64 v0 = lp->v0;
+       u32 tx_vec = (v0 >> 32);
+       u32 rx_vec = (v0 & 0xffffffff);
+       int i, work_done = 0;
+
+       netif_printk(np, intr, KERN_DEBUG, np->dev,
+                    "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
+
+       for (i = 0; i < np->num_tx_rings; i++) {
+               struct tx_ring_info *rp = &np->tx_rings[i];
+               if (tx_vec & (1 << rp->tx_channel))
+                       niu_tx_work(np, rp);
+               nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
+       }
+
+       for (i = 0; i < np->num_rx_rings; i++) {
+               struct rx_ring_info *rp = &np->rx_rings[i];
+
+               if (rx_vec & (1 << rp->rx_channel)) {
+                       int this_work_done;
+
+                       this_work_done = niu_rx_work(&lp->napi, np, rp,
+                                                    budget);
+
+                       /* Remaining budget is shared across channels. */
+                       budget -= this_work_done;
+                       work_done += this_work_done;
+               }
+               nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
+       }
+
+       return work_done;
+}
+
+/* NAPI poll callback: run the per-LDG work loop and, if the budget
+ * was not exhausted, complete NAPI and re-arm the group's interrupt.
+ */
+static int niu_poll(struct napi_struct *napi, int budget)
+{
+       struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
+       struct niu *np = lp->np;
+       int done = niu_poll_core(np, lp, budget);
+
+       if (done >= budget)
+               return done;
+
+       napi_complete(napi);
+       niu_ldg_rearm(np, lp, 1);
+       return done;
+}
+
+static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
+                                 u64 stat)
+{
+       netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
+
+       if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
+               pr_cont("RBR_TMOUT ");
+       if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
+               pr_cont("RSP_CNT ");
+       if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
+               pr_cont("BYTE_EN_BUS ");
+       if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
+               pr_cont("RSP_DAT ");
+       if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
+               pr_cont("RCR_ACK ");
+       if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
+               pr_cont("RCR_SHA_PAR ");
+       if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
+               pr_cont("RBR_PRE_PAR ");
+       if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
+               pr_cont("CONFIG ");
+       if (stat & RX_DMA_CTL_STAT_RCRINCON)
+               pr_cont("RCRINCON ");
+       if (stat & RX_DMA_CTL_STAT_RCRFULL)
+               pr_cont("RCRFULL ");
+       if (stat & RX_DMA_CTL_STAT_RBRFULL)
+               pr_cont("RBRFULL ");
+       if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
+               pr_cont("RBRLOGPAGE ");
+       if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
+               pr_cont("CFIGLOGPAGE ");
+       if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
+               pr_cont("DC_FIDO ");
+
+       pr_cont(")\n");
+}
+
+/* Handle an RX channel error interrupt.  Logs the error bits, then
+ * write-1-clears the error status.  Returns -EINVAL only for fatal
+ * channel/port errors; other error bits are cleared silently.
+ */
+static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
+{
+       u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
+       int err = 0;
+
+
+       if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
+                   RX_DMA_CTL_STAT_PORT_FATAL))
+               err = -EINVAL;
+
+       if (err) {
+               netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
+                          rp->rx_channel,
+                          (unsigned long long) stat);
+
+               niu_log_rxchan_errors(np, rp, stat);
+       }
+
+       /* Acknowledge the error bits regardless of severity. */
+       nw64(RX_DMA_CTL_STAT(rp->rx_channel),
+            stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
+
+       return err;
+}
+
+static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
+                                 u64 cs)
+{
+       netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
+
+       if (cs & TX_CS_MBOX_ERR)
+               pr_cont("MBOX ");
+       if (cs & TX_CS_PKT_SIZE_ERR)
+               pr_cont("PKT_SIZE ");
+       if (cs & TX_CS_TX_RING_OFLOW)
+               pr_cont("TX_RING_OFLOW ");
+       if (cs & TX_CS_PREF_BUF_PAR_ERR)
+               pr_cont("PREF_BUF_PAR ");
+       if (cs & TX_CS_NACK_PREF)
+               pr_cont("NACK_PREF ");
+       if (cs & TX_CS_NACK_PKT_RD)
+               pr_cont("NACK_PKT_RD ");
+       if (cs & TX_CS_CONF_PART_ERR)
+               pr_cont("CONF_PART ");
+       if (cs & TX_CS_PKT_PRT_ERR)
+               pr_cont("PKT_PTR ");
+
+       pr_cont(")\n");
+}
+
+/* Handle a TX channel error interrupt: dump the control/status and
+ * ring-error log registers, then report the channel as dead.
+ * Always returns -ENODEV.
+ */
+static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
+{
+       u64 cs, logh, logl;
+
+       cs = nr64(TX_CS(rp->tx_channel));
+       logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
+       logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
+
+       netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
+                  rp->tx_channel,
+                  (unsigned long long)cs,
+                  (unsigned long long)logh,
+                  (unsigned long long)logl);
+
+       niu_log_txchan_errors(np, rp, cs);
+
+       return -ENODEV;
+}
+
+/* Handle a MIF (MDIO interface) interrupt: log the MIF status and,
+ * on XMAC hardware, whether the PHY raised a management interrupt.
+ * Always returns -ENODEV (treated as fatal by the caller).
+ */
+static int niu_mif_interrupt(struct niu *np)
+{
+       u64 mif_status = nr64(MIF_STATUS);
+       int phy_mdint = 0;
+
+       if (np->flags & NIU_FLAGS_XMAC) {
+               u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
+
+               if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
+                       phy_mdint = 1;
+       }
+
+       netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
+                  (unsigned long long)mif_status, phy_mdint);
+
+       return -ENODEV;
+}
+
+/* Fold the XMAC's expired hardware counters and status events into
+ * the software MAC statistics.  Reading each *_STATUS register is
+ * what surfaces the counter-expiry bits; every set bit means the
+ * corresponding 'hardware counter reached its maximum, so its full
+ * span is added to the software total.
+ *
+ * Fix: the original code tested XRXMAC_STATUS_RXBCAST_CNT_EXP twice
+ * in a row with identical bodies, double-counting rx_bcasts on every
+ * broadcast-counter expiry; the duplicate test is removed.
+ */
+static void niu_xmac_interrupt(struct niu *np)
+{
+       struct niu_xmac_stats *mp = &np->mac_stats.xmac;
+       u64 val;
+
+       val = nr64_mac(XTXMAC_STATUS);
+       if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
+               mp->tx_frames += TXMAC_FRM_CNT_COUNT;
+       if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
+               mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
+       if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
+               mp->tx_fifo_errors++;
+       if (val & XTXMAC_STATUS_TXMAC_OFLOW)
+               mp->tx_overflow_errors++;
+       if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
+               mp->tx_max_pkt_size_errors++;
+       if (val & XTXMAC_STATUS_TXMAC_UFLOW)
+               mp->tx_underflow_errors++;
+
+       val = nr64_mac(XRXMAC_STATUS);
+       if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
+               mp->rx_local_faults++;
+       if (val & XRXMAC_STATUS_RFLT_DET)
+               mp->rx_remote_faults++;
+       if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
+               mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
+       if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
+               mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
+       if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
+               mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
+       if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
+               mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
+       if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
+               mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
+       if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
+               mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
+       if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
+               mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
+       if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
+               mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
+       if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
+               mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
+       if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
+               mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
+       if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
+               mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
+       if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
+               mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
+       if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
+               mp->rx_octets += RXMAC_BT_CNT_COUNT;
+       if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
+               mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
+       if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
+               mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
+       if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
+               mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
+       if (val & XRXMAC_STATUS_RXUFLOW)
+               mp->rx_underflows++;
+       if (val & XRXMAC_STATUS_RXOFLOW)
+               mp->rx_overflows++;
+
+       val = nr64_mac(XMAC_FC_STAT);
+       if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
+               mp->pause_off_state++;
+       if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
+               mp->pause_on_state++;
+       if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
+               mp->pause_received++;
+}
+
+/* Fold the BMAC's expired hardware counters and status events into
+ * the software MAC statistics.
+ */
+static void niu_bmac_interrupt(struct niu *np)
+{
+       struct niu_bmac_stats *mp = &np->mac_stats.bmac;
+       u64 val;
+
+       val = nr64_mac(BTXMAC_STATUS);
+       if (val & BTXMAC_STATUS_UNDERRUN)
+               mp->tx_underflow_errors++;
+       if (val & BTXMAC_STATUS_MAX_PKT_ERR)
+               mp->tx_max_pkt_size_errors++;
+       if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
+               mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
+       if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
+               mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
+
+       val = nr64_mac(BRXMAC_STATUS);
+       if (val & BRXMAC_STATUS_OVERFLOW)
+               mp->rx_overflows++;
+       if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
+               mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
+       if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
+               mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
+       if (val & BRXMAC_STATUS_CRC_ERR_EXP)
+               /* NOTE(review): the ALIGN_ERR counter span is added to
+                * rx_crc_errors here, and the CODE_VIOL span to
+                * rx_len_errors below — possibly intentional (shared
+                * counter widths), but verify against the BMAC spec.
+                */
+               mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
+       if (val & BRXMAC_STATUS_LEN_ERR_EXP)
+               mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
+
+       val = nr64_mac(BMAC_CTRL_STATUS);
+       if (val & BMAC_CTRL_STATUS_NOPAUSE)
+               mp->pause_off_state++;
+       if (val & BMAC_CTRL_STATUS_PAUSE)
+               mp->pause_on_state++;
+       if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
+               mp->pause_received++;
+}
+
+/* Dispatch a MAC interrupt to the XMAC or BMAC stats handler,
+ * depending on which MAC variant this port carries.  Always 0.
+ */
+static int niu_mac_interrupt(struct niu *np)
+{
+       void (*handler)(struct niu *) = (np->flags & NIU_FLAGS_XMAC) ?
+               niu_xmac_interrupt : niu_bmac_interrupt;
+
+       handler(np);
+       return 0;
+}
+
+/* Pretty-print the names of all subsystem bits set in the core
+ * device error status word.
+ */
+static void niu_log_device_error(struct niu *np, u64 stat)
+{
+       static const struct {
+               u64 bit;
+               const char *name;
+       } errs[] = {
+               { SYS_ERR_MASK_META2,   "META2" },
+               { SYS_ERR_MASK_META1,   "META1" },
+               { SYS_ERR_MASK_PEU,     "PEU" },
+               { SYS_ERR_MASK_TXC,     "TXC" },
+               { SYS_ERR_MASK_RDMC,    "RDMC" },
+               { SYS_ERR_MASK_TDMC,    "TDMC" },
+               { SYS_ERR_MASK_ZCP,     "ZCP" },
+               { SYS_ERR_MASK_FFLP,    "FFLP" },
+               { SYS_ERR_MASK_IPP,     "IPP" },
+               { SYS_ERR_MASK_MAC,     "MAC" },
+               { SYS_ERR_MASK_SMX,     "SMX" },
+       };
+       int i;
+
+       netdev_err(np->dev, "Core device errors ( ");
+
+       for (i = 0; i < ARRAY_SIZE(errs); i++)
+               if (stat & errs[i].bit)
+                       pr_cont("%s ", errs[i].name);
+
+       pr_cont(")\n");
+}
+
+static int niu_device_error(struct niu *np)
+{
+       u64 stat = nr64(SYS_ERR_STAT);
+
+       netdev_err(np->dev, "Core device error, stat[%llx]\n",
+                  (unsigned long long)stat);
+
+       niu_log_device_error(np, stat);
+
+       return -ENODEV;
+}
+
+/* Slow-path handler for error/MAC/device interrupt sources.  v1's
+ * low 32 bits flag RX channel errors, its high 31 bits TX channel
+ * errors; bit 63 of v0|v1 flags a MIF event; v2 carries MAC (0x01ef)
+ * and core device (0x0210) events.  On any unrecoverable error all
+ * interrupts are disabled.  Returns 0 or the last error seen.
+ */
+static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
+                             u64 v0, u64 v1, u64 v2)
+{
+
+       int i, err = 0;
+
+       /* Cache the status words for the NAPI poll path. */
+       lp->v0 = v0;
+       lp->v1 = v1;
+       lp->v2 = v2;
+
+       if (v1 & 0x00000000ffffffffULL) {
+               u32 rx_vec = (v1 & 0xffffffff);
+
+               for (i = 0; i < np->num_rx_rings; i++) {
+                       struct rx_ring_info *rp = &np->rx_rings[i];
+
+                       if (rx_vec & (1 << rp->rx_channel)) {
+                               int r = niu_rx_error(np, rp);
+                               if (r) {
+                                       err = r;
+                               } else {
+                                       /* Non-fatal and no fast-path
+                                        * work pending: re-enable
+                                        * mailbox updates ourselves.
+                                        */
+                                       if (!v0)
+                                               nw64(RX_DMA_CTL_STAT(rp->rx_channel),
+                                                    RX_DMA_CTL_STAT_MEX);
+                               }
+                       }
+               }
+       }
+       if (v1 & 0x7fffffff00000000ULL) {
+               u32 tx_vec = (v1 >> 32) & 0x7fffffff;
+
+               for (i = 0; i < np->num_tx_rings; i++) {
+                       struct tx_ring_info *rp = &np->tx_rings[i];
+
+                       if (tx_vec & (1 << rp->tx_channel)) {
+                               int r = niu_tx_error(np, rp);
+                               if (r)
+                                       err = r;
+                       }
+               }
+       }
+       if ((v0 | v1) & 0x8000000000000000ULL) {
+               int r = niu_mif_interrupt(np);
+               if (r)
+                       err = r;
+       }
+       if (v2) {
+               if (v2 & 0x01ef) {
+                       int r = niu_mac_interrupt(np);
+                       if (r)
+                               err = r;
+               }
+               if (v2 & 0x0210) {
+                       int r = niu_device_error(np);
+                       if (r)
+                               err = r;
+               }
+       }
+
+       if (err)
+               niu_enable_interrupts(np, 0);
+
+       return err;
+}
+
+/* Fast-path RX channel interrupt ack: clear the threshold/timeout
+ * bits so the channel can interrupt again after NAPI completes; the
+ * actual packet work happens later in niu_rx_work().
+ */
+static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
+                           int ldn)
+{
+       struct rxdma_mailbox *mbox = rp->mbox;
+       u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
+
+       stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
+                     RX_DMA_CTL_STAT_RCRTO);
+       nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
+
+       netif_printk(np, intr, KERN_DEBUG, np->dev,
+                    "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
+}
+
+/* Fast-path TX channel interrupt ack: snapshot the channel's
+ * control/status word for niu_tx_work() to consume during NAPI poll.
+ */
+static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
+                           int ldn)
+{
+       rp->tx_cs = nr64(TX_CS(rp->tx_channel));
+
+       netif_printk(np, intr, KERN_DEBUG, np->dev,
+                    "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
+}
+
+/* Fast-path interrupt pre-processing for one logical device group:
+ * for every RX/TX ring mapped to @ldg, mask its interrupt (LD_IM0)
+ * and, if its bit is set in @v0, ack/snapshot its state so the NAPI
+ * poll can process it with interrupts masked.
+ */
+static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
+{
+       struct niu_parent *parent = np->parent;
+       u32 rx_vec, tx_vec;
+       int i;
+
+       tx_vec = (v0 >> 32);
+       rx_vec = (v0 & 0xffffffff);
+
+       for (i = 0; i < np->num_rx_rings; i++) {
+               struct rx_ring_info *rp = &np->rx_rings[i];
+               int ldn = LDN_RXDMA(rp->rx_channel);
+
+               /* Skip rings owned by a different device group. */
+               if (parent->ldg_map[ldn] != ldg)
+                       continue;
+
+               nw64(LD_IM0(ldn), LD_IM0_MASK);
+               if (rx_vec & (1 << rp->rx_channel))
+                       niu_rxchan_intr(np, rp, ldn);
+       }
+
+       for (i = 0; i < np->num_tx_rings; i++) {
+               struct tx_ring_info *rp = &np->tx_rings[i];
+               int ldn = LDN_TXDMA(rp->tx_channel);
+
+               if (parent->ldg_map[ldn] != ldg)
+                       continue;
+
+               nw64(LD_IM0(ldn), LD_IM0_MASK);
+               if (tx_vec & (1 << rp->tx_channel))
+                       niu_txchan_intr(np, rp, ldn);
+       }
+}
+
+/* If NAPI is not already scheduled for this group, stash the status
+ * words, mask/ack the flagged channels, and schedule the poll.
+ */
+static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
+                             u64 v0, u64 v1, u64 v2)
+{
+       if (likely(napi_schedule_prep(&lp->napi))) {
+               lp->v0 = v0;
+               lp->v1 = v1;
+               lp->v2 = v2;
+               __niu_fastpath_interrupt(np, lp->ldg_num, v0);
+               __napi_schedule(&lp->napi);
+       }
+}
+
+/* Top-level IRQ handler for one logical device group.  Reads the
+ * three status vectors; error/MAC sources go through the slow path,
+ * ordinary RX/TX work is deferred to NAPI, otherwise the group is
+ * simply re-armed.
+ */
+static irqreturn_t niu_interrupt(int irq, void *dev_id)
+{
+       struct niu_ldg *lp = dev_id;
+       struct niu *np = lp->np;
+       int ldg = lp->ldg_num;
+       unsigned long flags;
+       u64 v0, v1, v2;
+
+       if (netif_msg_intr(np))
+               printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
+                      __func__, lp, ldg);
+
+       spin_lock_irqsave(&np->lock, flags);
+
+       v0 = nr64(LDSV0(ldg));
+       v1 = nr64(LDSV1(ldg));
+       v2 = nr64(LDSV2(ldg));
+
+       if (netif_msg_intr(np))
+               pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
+                      (unsigned long long) v0,
+                      (unsigned long long) v1,
+                      (unsigned long long) v2);
+
+       /* Nothing pending: the interrupt was not ours. */
+       if (unlikely(!v0 && !v1 && !v2)) {
+               spin_unlock_irqrestore(&np->lock, flags);
+               return IRQ_NONE;
+       }
+
+       /* MIF bit in v0, or anything in v1/v2, needs slow-path error
+        * handling; a fatal error skips NAPI scheduling entirely.
+        */
+       if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
+               int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
+               if (err)
+                       goto out;
+       }
+       if (likely(v0 & ~((u64)1 << LDN_MIF)))
+               niu_schedule_napi(np, lp, v0, v1, v2);
+       else
+               niu_ldg_rearm(np, lp, 1);
+out:
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+/* Free everything an RX ring owns: mailbox, RCR table, RBR table
+ * (including all posted buffer pages), and the page hash array.
+ * Safe to call on a partially initialized ring.
+ */
+static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
+{
+       if (rp->mbox) {
+               np->ops->free_coherent(np->device,
+                                      sizeof(struct rxdma_mailbox),
+                                      rp->mbox, rp->mbox_dma);
+               rp->mbox = NULL;
+       }
+       if (rp->rcr) {
+               np->ops->free_coherent(np->device,
+                                      MAX_RCR_RING_SIZE * sizeof(__le64),
+                                      rp->rcr, rp->rcr_dma);
+               rp->rcr = NULL;
+               rp->rcr_table_size = 0;
+               rp->rcr_index = 0;
+       }
+       if (rp->rbr) {
+               /* Release the buffer pages before the table itself. */
+               niu_rbr_free(np, rp);
+
+               np->ops->free_coherent(np->device,
+                                      MAX_RBR_RING_SIZE * sizeof(__le32),
+                                      rp->rbr, rp->rbr_dma);
+               rp->rbr = NULL;
+               rp->rbr_table_size = 0;
+               rp->rbr_index = 0;
+       }
+       kfree(rp->rxhash);
+       rp->rxhash = NULL;
+}
+
+/* Free everything a TX ring owns: mailbox, any still-pending skbs
+ * (released via release_tx_packet so DMA mappings are undone), and
+ * the descriptor table.  Safe on a partially initialized ring.
+ */
+static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
+{
+       if (rp->mbox) {
+               np->ops->free_coherent(np->device,
+                                      sizeof(struct txdma_mailbox),
+                                      rp->mbox, rp->mbox_dma);
+               rp->mbox = NULL;
+       }
+       if (rp->descr) {
+               int i;
+
+               for (i = 0; i < MAX_TX_RING_SIZE; i++) {
+                       if (rp->tx_buffs[i].skb)
+                               (void) release_tx_packet(np, rp, i);
+               }
+
+               np->ops->free_coherent(np->device,
+                                      MAX_TX_RING_SIZE * sizeof(__le64),
+                                      rp->descr, rp->descr_dma);
+               rp->descr = NULL;
+               rp->pending = 0;
+               rp->prod = 0;
+               rp->cons = 0;
+               rp->wrap_bit = 0;
+       }
+}
+
+/* Release all RX and TX rings (descriptors, mailboxes, buffers) and
+ * the ring arrays themselves, resetting the counts to zero.
+ */
+static void niu_free_channels(struct niu *np)
+{
+       int i;
+
+       if (np->rx_rings) {
+               for (i = 0; i < np->num_rx_rings; i++)
+                       niu_free_rx_ring_info(np, &np->rx_rings[i]);
+
+               kfree(np->rx_rings);
+               np->rx_rings = NULL;
+               np->num_rx_rings = 0;
+       }
+
+       if (np->tx_rings) {
+               for (i = 0; i < np->num_tx_rings; i++)
+                       niu_free_tx_ring_info(np, &np->tx_rings[i]);
+
+               kfree(np->tx_rings);
+               np->tx_rings = NULL;
+               np->num_tx_rings = 0;
+       }
+}
+
+/* Allocate an RX ring's page hash, mailbox, RCR table and RBR table.
+ * The coherent allocations must be 64-byte aligned for the hardware.
+ * On failure, partially allocated resources are left for the caller
+ * to release via niu_free_rx_ring_info().  Returns 0 or -errno.
+ */
+static int niu_alloc_rx_ring_info(struct niu *np,
+                                 struct rx_ring_info *rp)
+{
+       BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
+
+       rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
+                            GFP_KERNEL);
+       if (!rp->rxhash)
+               return -ENOMEM;
+
+       rp->mbox = np->ops->alloc_coherent(np->device,
+                                          sizeof(struct rxdma_mailbox),
+                                          &rp->mbox_dma, GFP_KERNEL);
+       if (!rp->mbox)
+               return -ENOMEM;
+       if ((unsigned long)rp->mbox & (64UL - 1)) {
+               netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
+                          rp->mbox);
+               return -EINVAL;
+       }
+
+       rp->rcr = np->ops->alloc_coherent(np->device,
+                                         MAX_RCR_RING_SIZE * sizeof(__le64),
+                                         &rp->rcr_dma, GFP_KERNEL);
+       if (!rp->rcr)
+               return -ENOMEM;
+       if ((unsigned long)rp->rcr & (64UL - 1)) {
+               netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
+                          rp->rcr);
+               return -EINVAL;
+       }
+       rp->rcr_table_size = MAX_RCR_RING_SIZE;
+       rp->rcr_index = 0;
+
+       rp->rbr = np->ops->alloc_coherent(np->device,
+                                         MAX_RBR_RING_SIZE * sizeof(__le32),
+                                         &rp->rbr_dma, GFP_KERNEL);
+       if (!rp->rbr)
+               return -ENOMEM;
+       if ((unsigned long)rp->rbr & (64UL - 1)) {
+               netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
+                          rp->rbr);
+               return -EINVAL;
+       }
+       rp->rbr_table_size = MAX_RBR_RING_SIZE;
+       rp->rbr_index = 0;
+       rp->rbr_pending = 0;
+
+       return 0;
+}
+
+/* Program the per-ring DRR burst limit: MTU plus a small slack,
+ * capped at 4096 bytes.  These values are recommended by the HW
+ * designers for fair DRR utilization amongst the rings.
+ */
+static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
+{
+       int burst = np->dev->mtu + 32;
+
+       rp->max_burst = (burst > 4096) ? 4096 : burst;
+}
+
+/* Allocate the per-ring TX resources: the TXDMA mailbox and the
+ * descriptor ring.  Both coherent areas must be 64-byte aligned for
+ * the hardware; misalignment is reported and rejected with -EINVAL.
+ *
+ * On failure partially-allocated resources are left in place for the
+ * caller (niu_alloc_channels) to reclaim via niu_free_channels().
+ */
+static int niu_alloc_tx_ring_info(struct niu *np,
+                                 struct tx_ring_info *rp)
+{
+       BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
+
+       rp->mbox = np->ops->alloc_coherent(np->device,
+                                          sizeof(struct txdma_mailbox),
+                                          &rp->mbox_dma, GFP_KERNEL);
+       if (!rp->mbox)
+               return -ENOMEM;
+       if ((unsigned long)rp->mbox & (64UL - 1)) {
+               netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
+                          rp->mbox);
+               return -EINVAL;
+       }
+
+       rp->descr = np->ops->alloc_coherent(np->device,
+                                           MAX_TX_RING_SIZE * sizeof(__le64),
+                                           &rp->descr_dma, GFP_KERNEL);
+       if (!rp->descr)
+               return -ENOMEM;
+       if ((unsigned long)rp->descr & (64UL - 1)) {
+               netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
+                          rp->descr);
+               return -EINVAL;
+       }
+
+       /* Start with an empty, full-size ring. */
+       rp->pending = MAX_TX_RING_SIZE;
+       rp->prod = 0;
+       rp->cons = 0;
+       rp->wrap_bit = 0;
+
+       /* XXX make these configurable... XXX */
+       /* Presumably how often TX "mark" events fire: every quarter of
+        * the ring — TODO confirm against the TX completion path.
+        */
+       rp->mark_freq = rp->pending / 4;
+
+       niu_set_max_burst(np, rp);
+
+       return 0;
+}
+
+/* Choose the RBR block size and the four RX buffer sizes for this
+ * ring, based on the page size and the device MTU.  Blocks are one
+ * page (capped at 32K); buffer size 2 grows for jumbo MTUs and size 3
+ * always equals the block size.
+ */
+static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
+{
+       u16 bss = min(PAGE_SHIFT, 15);
+
+       rp->rbr_block_size = 1 << bss;
+       rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
+
+       rp->rbr_sizes[0] = 256;
+       rp->rbr_sizes[1] = 1024;
+       if (np->dev->mtu > ETH_DATA_LEN)
+               rp->rbr_sizes[2] = (PAGE_SIZE == 4 * 1024) ? 4096 : 8192;
+       else
+               rp->rbr_sizes[2] = 2048;
+
+       rp->rbr_sizes[3] = rp->rbr_block_size;
+}
+
+/* Allocate and initialize this port's RX and TX DMA rings.
+ *
+ * The parent device partitions its RX/TX channels across the ports;
+ * this port's first channel index is the sum of the channel counts of
+ * all lower-numbered ports.
+ *
+ * Returns 0 on success.  On any failure all partially-allocated rings
+ * are released via niu_free_channels() before returning.
+ */
+static int niu_alloc_channels(struct niu *np)
+{
+       struct niu_parent *parent = np->parent;
+       int first_rx_channel, first_tx_channel;
+       int num_rx_rings, num_tx_rings;
+       struct rx_ring_info *rx_rings;
+       struct tx_ring_info *tx_rings;
+       int i, port, err;
+
+       port = np->port;
+       first_rx_channel = first_tx_channel = 0;
+       for (i = 0; i < port; i++) {
+               first_rx_channel += parent->rxchan_per_port[i];
+               first_tx_channel += parent->txchan_per_port[i];
+       }
+
+       num_rx_rings = parent->rxchan_per_port[port];
+       num_tx_rings = parent->txchan_per_port[port];
+
+       rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
+                          GFP_KERNEL);
+       err = -ENOMEM;
+       if (!rx_rings)
+               goto out_err;
+
+       /* Publish the count before the pointer so lockless readers
+        * never see a non-NULL ring array with a stale count.
+        */
+       np->num_rx_rings = num_rx_rings;
+       smp_wmb();
+       np->rx_rings = rx_rings;
+
+       netif_set_real_num_rx_queues(np->dev, num_rx_rings);
+
+       for (i = 0; i < np->num_rx_rings; i++) {
+               struct rx_ring_info *rp = &np->rx_rings[i];
+
+               rp->np = np;
+               rp->rx_channel = first_rx_channel + i;
+
+               err = niu_alloc_rx_ring_info(np, rp);
+               if (err)
+                       goto out_err;
+
+               niu_size_rbr(np, rp);
+
+               /* XXX better defaults, configurable, etc... XXX */
+               rp->nonsyn_window = 64;
+               rp->nonsyn_threshold = rp->rcr_table_size - 64;
+               rp->syn_window = 64;
+               rp->syn_threshold = rp->rcr_table_size - 64;
+               rp->rcr_pkt_threshold = 16;
+               rp->rcr_timeout = 8;
+               rp->rbr_kick_thresh = RBR_REFILL_MIN;
+               if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
+                       rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
+
+               err = niu_rbr_fill(np, rp, GFP_KERNEL);
+               if (err)
+                       goto out_err;   /* was "return err", leaking all rings */
+       }
+
+       tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
+                          GFP_KERNEL);
+       err = -ENOMEM;
+       if (!tx_rings)
+               goto out_err;
+
+       np->num_tx_rings = num_tx_rings;
+       smp_wmb();
+       np->tx_rings = tx_rings;
+
+       netif_set_real_num_tx_queues(np->dev, num_tx_rings);
+
+       for (i = 0; i < np->num_tx_rings; i++) {
+               struct tx_ring_info *rp = &np->tx_rings[i];
+
+               rp->np = np;
+               rp->tx_channel = first_tx_channel + i;
+
+               err = niu_alloc_tx_ring_info(np, rp);
+               if (err)
+                       goto out_err;
+       }
+
+       return 0;
+
+out_err:
+       niu_free_channels(np);
+       return err;
+}
+
+/* Poll TX_CS until the channel reports TX_CS_SNG_STATE (stopped after
+ * a stop-and-go request).  Pure busy-wait, bounded by 1000 register
+ * reads with no delay between them.
+ * NOTE(review): unlike niu_enable_rx_channel() there is no udelay()
+ * here, so the total wait is just ~1000 PIO reads — confirm this is
+ * long enough on all platforms.
+ * Returns 0 once the bit is seen, -ENODEV on timeout.
+ */
+static int niu_tx_cs_sng_poll(struct niu *np, int channel)
+{
+       int limit = 1000;
+
+       while (--limit > 0) {
+               u64 val = nr64(TX_CS(channel));
+               if (val & TX_CS_SNG_STATE)
+                       return 0;
+       }
+       return -ENODEV;
+}
+
+/* Request a stop-and-go on a TX channel and wait for it to reach the
+ * stopped state.  Returns 0 on success, -ENODEV if the state bit
+ * never appears.
+ */
+static int niu_tx_channel_stop(struct niu *np, int channel)
+{
+       u64 val = nr64(TX_CS(channel));
+
+       val |= TX_CS_STOP_N_GO;
+       nw64(TX_CS(channel), val);
+
+       return niu_tx_cs_sng_poll(np, channel);
+}
+
+/* Wait for the self-clearing TX_CS_RST bit to drop after a channel
+ * reset.  Busy-wait bounded by 1000 register reads (no delay, as in
+ * niu_tx_cs_sng_poll()).  Returns 0 when the bit clears, -ENODEV on
+ * timeout.
+ */
+static int niu_tx_cs_reset_poll(struct niu *np, int channel)
+{
+       int limit = 1000;
+
+       while (--limit > 0) {
+               u64 val = nr64(TX_CS(channel));
+               if (!(val & TX_CS_RST))
+                       return 0;
+       }
+       return -ENODEV;
+}
+
+/* Reset a TX DMA channel: set TX_CS_RST, wait for the hardware to
+ * clear it, then zero the ring kick register so the producer index
+ * starts from a clean slate.  Returns 0 on success or -ENODEV if the
+ * reset bit never clears.
+ */
+static int niu_tx_channel_reset(struct niu *np, int channel)
+{
+       u64 val = nr64(TX_CS(channel));
+       int err;
+
+       val |= TX_CS_RST;
+       nw64(TX_CS(channel), val);
+
+       err = niu_tx_cs_reset_poll(np, channel);
+       if (!err)
+               nw64(TX_RING_KICK(channel), 0);
+
+       return err;
+}
+
+/* Neutralize the TX logical-page translation for this channel: all
+ * masks, match values and relocations are zeroed, then both pages are
+ * marked valid for this function (port) so DMA addresses pass through
+ * untranslated.  Always returns 0.
+ */
+static int niu_tx_channel_lpage_init(struct niu *np, int channel)
+{
+       u64 val;
+
+       nw64(TX_LOG_MASK1(channel), 0);
+       nw64(TX_LOG_VAL1(channel), 0);
+       nw64(TX_LOG_MASK2(channel), 0);
+       nw64(TX_LOG_VAL2(channel), 0);
+       nw64(TX_LOG_PAGE_RELO1(channel), 0);
+       nw64(TX_LOG_PAGE_RELO2(channel), 0);
+       nw64(TX_LOG_PAGE_HDL(channel), 0);
+
+       val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
+       val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
+       nw64(TX_LOG_PAGE_VLD(channel), val);
+
+       /* XXX TXDMA 32bit mode? XXX */
+
+       return 0;
+}
+
+/* Enable or disable this port's bit in the shared TXC_CONTROL
+ * register.  The register carries bits for all ports, hence the
+ * parent lock around the read-modify-write.
+ */
+static void niu_txc_enable_port(struct niu *np, int on)
+{
+       unsigned long flags;
+       u64 val, mask;
+
+       niu_lock_parent(np, flags);
+       val = nr64(TXC_CONTROL);
+       mask = (u64)1 << np->port;
+       if (on) {
+               val |= TXC_CONTROL_ENABLE | mask;
+       } else {
+               val &= ~mask;
+               /* If this was the last enabled port, drop the global
+                * enable bit as well.
+                */
+               if ((val & ~TXC_CONTROL_ENABLE) == 0)
+                       val &= ~TXC_CONTROL_ENABLE;
+       }
+       nw64(TXC_CONTROL, val);
+       niu_unlock_parent(np, flags);
+}
+
+/* Update this port's interrupt-mask field in the shared TXC_INT_MASK
+ * register under the parent lock (the register carries the mask
+ * fields of all ports).
+ */
+static void niu_txc_set_imask(struct niu *np, u64 imask)
+{
+       unsigned long flags;
+       u64 val;
+
+       niu_lock_parent(np, flags);
+       val = nr64(TXC_INT_MASK);
+       val &= ~TXC_INT_MASK_VAL(np->port);
+       val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
+       /* Bug fix: the computed mask was never written back, which
+        * made this read-modify-write a complete no-op.
+        */
+       nw64(TXC_INT_MASK, val);
+       niu_unlock_parent(np, flags);
+}
+
+/* Enable (or, with @on == 0, disable) TXC DMA for every TX channel
+ * owned by this port by writing the per-channel bitmask into
+ * TXC_PORT_DMA.
+ */
+static void niu_txc_port_dma_enable(struct niu *np, int on)
+{
+       u64 enable_mask = 0;
+       int i;
+
+       if (on) {
+               for (i = 0; i < np->num_tx_rings; i++)
+                       enable_mask |= 1 << np->tx_rings[i].tx_channel;
+       }
+
+       nw64(TXC_PORT_DMA(np->port), enable_mask);
+}
+
+/* Bring one TX DMA channel up: stop and reset it, disable logical
+ * pages, program the burst limit, ring base/length and mailbox
+ * address, then clear the channel status.  Returns 0 on success, or a
+ * negative errno on reset timeout or misaligned DMA addresses.
+ */
+static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
+{
+       int err, channel = rp->tx_channel;
+       u64 val, ring_len;
+
+       err = niu_tx_channel_stop(np, channel);
+       if (err)
+               return err;
+
+       err = niu_tx_channel_reset(np, channel);
+       if (err)
+               return err;
+
+       err = niu_tx_channel_lpage_init(np, channel);
+       if (err)
+               return err;
+
+       nw64(TXC_DMA_MAX(channel), rp->max_burst);
+       /* No TX error events masked. */
+       nw64(TX_ENT_MSK(channel), 0);
+
+       if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
+                             TX_RNG_CFIG_STADDR)) {
+               netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
+                          channel, (unsigned long long)rp->descr_dma);
+               return -EINVAL;
+       }
+
+       /* The length field in TX_RNG_CFIG is measured in 64-byte
+        * blocks.  rp->pending is the number of TX descriptors in
+        * our ring, 8 bytes each, thus we divide by 8 bytes more
+        * to get the proper value the chip wants.
+        */
+       ring_len = (rp->pending / 8);
+
+       val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
+              rp->descr_dma);
+       nw64(TX_RNG_CFIG(channel), val);
+
+       if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
+           ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
+               netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
+                           channel, (unsigned long long)rp->mbox_dma);
+               return -EINVAL;
+       }
+       nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
+       nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
+
+       nw64(TX_CS(channel), 0);
+
+       rp->last_pkt_cnt = 0;
+
+       return 0;
+}
+
+/* Load this port's RDC group tables into the hardware: every slot of
+ * each configured table receives its RX DMA channel number, then the
+ * port's default RDC is programmed.
+ */
+static void niu_init_rdc_groups(struct niu *np)
+{
+       struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
+       int i, first_table_num = tp->first_table_num;
+
+       for (i = 0; i < tp->num_tables; i++) {
+               struct rdc_table *tbl = &tp->tables[i];
+               int this_table = first_table_num + i;
+               int slot;
+
+               for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
+                       nw64(RDC_TBL(this_table, slot),
+                            tbl->rxdma_channel[slot]);
+       }
+
+       nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
+}
+
+/* Program the port's DRR arbitration weight based on its PHY-decoded
+ * link type: 10G ports get the 10G default, everything else (1G and
+ * unknown types) gets the 1G default.
+ */
+static void niu_init_drr_weight(struct niu *np)
+{
+       int type = phy_decode(np->parent->port_phy, np->port);
+       u64 weight;
+
+       if (type == PORT_TYPE_10G)
+               weight = PT_DRR_WEIGHT_DEFAULT_10G;
+       else
+               weight = PT_DRR_WEIGHT_DEFAULT_1G;
+
+       nw64(PT_DRR_WT(np->port), weight);
+}
+
+/* Populate the host-info routing for this port: the primary MAC, the
+ * multicast MAC and every alternate MAC address slot are all pointed
+ * at the port's first RDC table, with the MAC-preference flag set.
+ * Returns 0 on success or the first error encountered.
+ */
+static int niu_init_hostinfo(struct niu *np)
+{
+       struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
+       int first_rdc_table = tp->first_table_num;
+       int num_alt = niu_num_alt_addr(np);
+       int i, err;
+
+       err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
+       if (!err)
+               err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
+
+       for (i = 0; !err && i < num_alt; i++)
+               err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
+
+       return err;
+}
+
+/* Reset one RX DMA channel: set the self-clearing RXDMA_CFIG1_RST bit
+ * and wait for the hardware to clear it (limit/delay arguments 1000
+ * and 10 passed to niu_set_and_wait_clear()).
+ */
+static int niu_rx_channel_reset(struct niu *np, int channel)
+{
+       return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
+                                     RXDMA_CFIG1_RST, 1000, 10,
+                                     "RXDMA_CFIG1");
+}
+
+/* Neutralize the RX logical-page translation for this channel, in the
+ * same manner as niu_tx_channel_lpage_init(): zero all masks, match
+ * values and relocations, then mark both pages valid for this
+ * function (port).  Always returns 0.
+ */
+static int niu_rx_channel_lpage_init(struct niu *np, int channel)
+{
+       u64 val;
+
+       nw64(RX_LOG_MASK1(channel), 0);
+       nw64(RX_LOG_VAL1(channel), 0);
+       nw64(RX_LOG_MASK2(channel), 0);
+       nw64(RX_LOG_VAL2(channel), 0);
+       nw64(RX_LOG_PAGE_RELO1(channel), 0);
+       nw64(RX_LOG_PAGE_RELO2(channel), 0);
+       nw64(RX_LOG_PAGE_HDL(channel), 0);
+
+       val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
+       val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
+       nw64(RX_LOG_PAGE_VLD(channel), val);
+
+       return 0;
+}
+
+/* Program the RED (random early discard) window/threshold parameters
+ * for one RX DMA channel from the values chosen in
+ * niu_alloc_channels().
+ */
+static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
+{
+       u64 red;
+
+       red  = (u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT;
+       red |= (u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT;
+       red |= (u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT;
+       red |= (u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT;
+
+       nw64(RDC_RED_PARA(rp->rx_channel), red);
+}
+
+/* Encode the ring's block size and the three smaller buffer sizes
+ * into the RBR_CFIG_B register layout.  On success *ret holds the
+ * encoding and 0 is returned; an unsupported size leaves *ret at zero
+ * and returns -EINVAL.  (rbr_sizes[3] is implicitly the block size
+ * and needs no separate field.)
+ */
+static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
+{
+       u64 cfg;
+
+       *ret = 0;
+
+       switch (rp->rbr_block_size) {
+       case 4 * 1024:
+               cfg = RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT;
+               break;
+       case 8 * 1024:
+               cfg = RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT;
+               break;
+       case 16 * 1024:
+               cfg = RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT;
+               break;
+       case 32 * 1024:
+               cfg = RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       cfg |= RBR_CFIG_B_VLD2;
+       switch (rp->rbr_sizes[2]) {
+       case 2 * 1024:
+               cfg |= RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT;
+               break;
+       case 4 * 1024:
+               cfg |= RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT;
+               break;
+       case 8 * 1024:
+               cfg |= RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT;
+               break;
+       case 16 * 1024:
+               cfg |= RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       cfg |= RBR_CFIG_B_VLD1;
+       switch (rp->rbr_sizes[1]) {
+       case 1 * 1024:
+               cfg |= RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT;
+               break;
+       case 2 * 1024:
+               cfg |= RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT;
+               break;
+       case 4 * 1024:
+               cfg |= RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT;
+               break;
+       case 8 * 1024:
+               cfg |= RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       cfg |= RBR_CFIG_B_VLD0;
+       switch (rp->rbr_sizes[0]) {
+       case 256:
+               cfg |= RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT;
+               break;
+       case 512:
+               cfg |= RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT;
+               break;
+       case 1 * 1024:
+               cfg |= RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT;
+               break;
+       case 2 * 1024:
+               cfg |= RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       *ret = cfg;
+       return 0;
+}
+
+/* Set or clear RXDMA_CFIG1_EN for a channel, then poll (up to 1000
+ * iterations, 10us apart) for the channel to report RXDMA_CFIG1_QST.
+ * NOTE(review): the QST bit is awaited for both enable and disable —
+ * presumably the hardware raises it once the state transition
+ * completes; confirm against the Neptune PRM.
+ * Returns 0 on success, -ENODEV on timeout.
+ */
+static int niu_enable_rx_channel(struct niu *np, int channel, int on)
+{
+       u64 val = nr64(RXDMA_CFIG1(channel));
+       int limit;
+
+       if (on)
+               val |= RXDMA_CFIG1_EN;
+       else
+               val &= ~RXDMA_CFIG1_EN;
+       nw64(RXDMA_CFIG1(channel), val);
+
+       limit = 1000;
+       while (--limit > 0) {
+               if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
+                       break;
+               udelay(10);
+       }
+       if (limit <= 0)
+               return -ENODEV;
+       return 0;
+}
+
+/* Bring one RX DMA channel up: reset it, disable logical pages,
+ * program the RED parameters, interrupt masks, mailbox address, RBR
+ * and RCR base/length/thresholds, enable the channel, then kick the
+ * RBR with the buffers niu_rbr_fill() already posted.  Returns 0 on
+ * success or a negative errno.
+ */
+static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
+{
+       int err, channel = rp->rx_channel;
+       u64 val;
+
+       err = niu_rx_channel_reset(np, channel);
+       if (err)
+               return err;
+
+       err = niu_rx_channel_lpage_init(np, channel);
+       if (err)
+               return err;
+
+       niu_rx_channel_wred_init(np, rp);
+
+       nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
+       nw64(RX_DMA_CTL_STAT(channel),
+            (RX_DMA_CTL_STAT_MEX |
+             RX_DMA_CTL_STAT_RCRTHRES |
+             RX_DMA_CTL_STAT_RCRTO |
+             RX_DMA_CTL_STAT_RBR_EMPTY));
+       /* Mailbox address: high 32 bits share RXDMA_CFIG1 with the
+        * control bits; low bits go in RXDMA_CFIG2.
+        */
+       nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
+       nw64(RXDMA_CFIG2(channel),
+            ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
+             RXDMA_CFIG2_FULL_HDR));
+       nw64(RBR_CFIG_A(channel),
+            ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
+            (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
+       err = niu_compute_rbr_cfig_b(rp, &val);
+       if (err)
+               return err;
+       nw64(RBR_CFIG_B(channel), val);
+       nw64(RCRCFIG_A(channel),
+            ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
+            (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
+       nw64(RCRCFIG_B(channel),
+            ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
+            RCRCFIG_B_ENTOUT |
+            ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
+
+       err = niu_enable_rx_channel(np, channel, 1);
+       if (err)
+               return err;
+
+       nw64(RBR_KICK(channel), rp->rbr_index);
+
+       /* Re-arm the RBR-empty event after the kick. */
+       val = nr64(RX_DMA_CTL_STAT(channel));
+       val |= RX_DMA_CTL_STAT_RBR_EMPTY;
+       nw64(RX_DMA_CTL_STAT(channel), val);
+
+       return 0;
+}
+
+/* Global RX init for this port: program the RXDMA clock divider and
+ * seed the RED random generator (both shared registers, hence the
+ * parent lock), set up RDC groups, DRR weights and host-info routing,
+ * then initialize every RX ring in turn.
+ */
+static int niu_init_rx_channels(struct niu *np)
+{
+       unsigned long flags;
+       u64 seed = jiffies_64;
+       int err, i;
+
+       niu_lock_parent(np, flags);
+       nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
+       nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
+       niu_unlock_parent(np, flags);
+
+       /* XXX RXDMA 32bit mode? XXX */
+
+       niu_init_rdc_groups(np);
+       niu_init_drr_weight(np);
+
+       err = niu_init_hostinfo(np);
+       if (err)
+               return err;
+
+       for (i = 0; i < np->num_rx_rings; i++) {
+               struct rx_ring_info *rp = &np->rx_rings[i];
+
+               err = niu_init_one_rx_channel(np, rp);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+/* Install a catch-all TCAM rule for IP fragments: a "no port" match
+ * whose association data routes with a zero offset.  The entry is
+ * written to the slot at cp->tcam_top and marked valid on success.
+ */
+static int niu_set_ip_frag_rule(struct niu *np)
+{
+       struct niu_parent *parent = np->parent;
+       struct niu_classifier *cp = &np->clas;
+       struct niu_tcam_entry *tp;
+       int index, err;
+
+       index = cp->tcam_top;
+       tp = &parent->tcam[index];
+
+       /* Note that the noport bit is the same in both ipv4 and
+        * ipv6 format TCAM entries.
+        */
+       memset(tp, 0, sizeof(*tp));
+       tp->key[1] = TCAM_V4KEY1_NOPORT;
+       tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
+       tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
+                         ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
+       err = tcam_write(np, index, tp->key, tp->key_mask);
+       if (err)
+               return err;
+       err = tcam_assoc_write(np, index, tp->assoc_data);
+       if (err)
+               return err;
+       tp->valid = 1;
+       cp->tcam_valid_entries++;
+
+       return 0;
+}
+
+/* Program the flow-classifier hardware: hash polynomials, host-info
+ * routing, VLAN-to-RDC and alternate-MAC-to-RDC mappings, the TCAM
+ * and flow keys for every class code, the IP-fragment catch-all rule,
+ * and finally enable the TCAM.  Returns 0 or the first error.
+ */
+static int niu_init_classifier_hw(struct niu *np)
+{
+       struct niu_parent *parent = np->parent;
+       struct niu_classifier *cp = &np->clas;
+       int i, err;
+
+       nw64(H1POLY, cp->h1_init);
+       nw64(H2POLY, cp->h2_init);
+
+       err = niu_init_hostinfo(np);
+       if (err)
+               return err;
+
+       for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
+               struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
+
+               vlan_tbl_write(np, i, np->port,
+                              vp->vlan_pref, vp->rdc_num);
+       }
+
+       for (i = 0; i < cp->num_alt_mac_mappings; i++) {
+               struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
+
+               err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
+                                               ap->rdc_num, ap->mac_pref);
+               if (err)
+                       return err;
+       }
+
+       /* Program keys for every class code in the supported range. */
+       for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
+               int index = i - CLASS_CODE_USER_PROG1;
+
+               err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
+               if (err)
+                       return err;
+               err = niu_set_flow_key(np, i, parent->flow_key[index]);
+               if (err)
+                       return err;
+       }
+
+       err = niu_set_ip_frag_rule(np);
+       if (err)
+               return err;
+
+       tcam_enable(np, 1);
+
+       return 0;
+}
+
+/* Write one 5-word entry to the port's ZCP CFIFO RAM and wait for the
+ * access-busy bit to clear.
+ * NOTE(review): the @index parameter is unused — the ZFCID field is
+ * always written as 0, so every call targets the same RAM word.
+ * Confirm whether the hardware auto-increments or whether @index was
+ * meant to go into ZCP_RAM_ACC_ZFCID.
+ */
+static int niu_zcp_write(struct niu *np, int index, u64 *data)
+{
+       nw64(ZCP_RAM_DATA0, data[0]);
+       nw64(ZCP_RAM_DATA1, data[1]);
+       nw64(ZCP_RAM_DATA2, data[2]);
+       nw64(ZCP_RAM_DATA3, data[3]);
+       nw64(ZCP_RAM_DATA4, data[4]);
+       nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
+       nw64(ZCP_RAM_ACC,
+            (ZCP_RAM_ACC_WRITE |
+             (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
+             (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
+
+       return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
+                                  1000, 100);
+}
+
+/* Read one 5-word entry from the port's ZCP CFIFO RAM: wait for any
+ * prior access to finish, issue the read, wait for it to complete,
+ * then latch the five data words into @data.
+ * NOTE(review): as in niu_zcp_write(), @index is unused (ZFCID is
+ * written as 0) — confirm the intended addressing.
+ */
+static int niu_zcp_read(struct niu *np, int index, u64 *data)
+{
+       int err;
+
+       err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
+                                 1000, 100);
+       if (err) {
+               netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
+                          (unsigned long long)nr64(ZCP_RAM_ACC));
+               return err;
+       }
+
+       nw64(ZCP_RAM_ACC,
+            (ZCP_RAM_ACC_READ |
+             (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
+             (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
+
+       err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
+                                 1000, 100);
+       if (err) {
+               netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
+                          (unsigned long long)nr64(ZCP_RAM_ACC));
+               return err;
+       }
+
+       data[0] = nr64(ZCP_RAM_DATA0);
+       data[1] = nr64(ZCP_RAM_DATA1);
+       data[2] = nr64(ZCP_RAM_DATA2);
+       data[3] = nr64(ZCP_RAM_DATA3);
+       data[4] = nr64(ZCP_RAM_DATA4);
+
+       return 0;
+}
+
+/* Pulse this port's CFIFO reset bit: assert it, hold for 10us, then
+ * deassert, restoring the other ports' bits untouched.
+ */
+static void niu_zcp_cfifo_reset(struct niu *np)
+{
+       u64 ctl = nr64(RESET_CFIFO);
+
+       nw64(RESET_CFIFO, ctl | RESET_CFIFO_RST(np->port));
+       udelay(10);
+       nw64(RESET_CFIFO, ctl & ~RESET_CFIFO_RST(np->port));
+}
+
+/* Scrub the port's ZCP CFIFO RAM by writing zeros to (and reading
+ * back) every entry, then reset the CFIFO, clear the ECC and
+ * interrupt-status registers, and program the interrupt mask.  The
+ * entry count depends on the platform (NIU vs Atlas port pair).
+ * Returns 0 on success or the first ZCP access error.
+ */
+static int niu_init_zcp(struct niu *np)
+{
+       u64 data[5] = { 0, 0, 0, 0, 0 };
+       u64 rbuf[5];
+       int i, max, err;
+
+       if (np->parent->plat_type == PLAT_TYPE_NIU)
+               max = NIU_CFIFO_ENTRIES;
+       else if (np->port == 0 || np->port == 1)
+               max = ATLAS_P0_P1_CFIFO_ENTRIES;
+       else
+               max = ATLAS_P2_P3_CFIFO_ENTRIES;
+
+       for (i = 0; i < max; i++) {
+               err = niu_zcp_write(np, i, data);
+               if (err)
+                       return err;
+               err = niu_zcp_read(np, i, rbuf);
+               if (err)
+                       return err;
+       }
+
+       niu_zcp_cfifo_reset(np);
+       nw64(CFIFO_ECC(np->port), 0);
+       nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
+       (void) nr64(ZCP_INT_STAT);
+       nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
+
+       return 0;
+}
+
+/* Write one 5-word entry into the IPP DFIFO at @index: enable PIO
+ * write mode in IPP_CFIG, set the write pointer, store the five data
+ * words, then restore the original mode bits.
+ */
+static void niu_ipp_write(struct niu *np, int index, u64 *data)
+{
+       u64 val = nr64_ipp(IPP_CFIG);
+
+       nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
+       nw64_ipp(IPP_DFIFO_WR_PTR, index);
+       nw64_ipp(IPP_DFIFO_WR0, data[0]);
+       nw64_ipp(IPP_DFIFO_WR1, data[1]);
+       nw64_ipp(IPP_DFIFO_WR2, data[2]);
+       nw64_ipp(IPP_DFIFO_WR3, data[3]);
+       nw64_ipp(IPP_DFIFO_WR4, data[4]);
+       nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
+}
+
+/* Read one 5-word entry from the IPP DFIFO at @index: set the read
+ * pointer, then latch the five data words into @data.
+ */
+static void niu_ipp_read(struct niu *np, int index, u64 *data)
+{
+       nw64_ipp(IPP_DFIFO_RD_PTR, index);
+       data[0] = nr64_ipp(IPP_DFIFO_RD0);
+       data[1] = nr64_ipp(IPP_DFIFO_RD1);
+       data[2] = nr64_ipp(IPP_DFIFO_RD2);
+       data[3] = nr64_ipp(IPP_DFIFO_RD3);
+       data[4] = nr64_ipp(IPP_DFIFO_RD4);
+}
+
+/* Soft-reset the IPP block: set the self-clearing IPP_CFIG_SOFT_RST
+ * bit and wait for the hardware to drop it (limit/delay arguments
+ * 1000 and 100 passed to niu_set_and_wait_clear_ipp()).
+ */
+static int niu_ipp_reset(struct niu *np)
+{
+       return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
+                                         1000, 100, "IPP_CFIG");
+}
+
+/* Initialize the IPP (input packet processor): scrub the DFIFO with
+ * zero writes and read-backs, flush latched status, soft-reset, then
+ * enable IPP with ECC, bad-CRC drop and checksumming, and the maximum
+ * packet size field set to 0x1ffff.  The DFIFO size depends on the
+ * platform (NIU vs Atlas port pair).
+ */
+static int niu_init_ipp(struct niu *np)
+{
+       u64 data[5], rbuf[5], val;
+       int i, max, err;
+
+       if (np->parent->plat_type != PLAT_TYPE_NIU) {
+               if (np->port == 0 || np->port == 1)
+                       max = ATLAS_P0_P1_DFIFO_ENTRIES;
+               else
+                       max = ATLAS_P2_P3_DFIFO_ENTRIES;
+       } else
+               max = NIU_DFIFO_ENTRIES;
+
+       data[0] = 0;
+       data[1] = 0;
+       data[2] = 0;
+       data[3] = 0;
+       data[4] = 0;
+
+       for (i = 0; i < max; i++) {
+               niu_ipp_write(np, i, data);
+               niu_ipp_read(np, i, rbuf);
+       }
+
+       /* Reads discard latched status; done twice here — presumably
+        * to fully flush it (TODO confirm against the PRM).
+        */
+       (void) nr64_ipp(IPP_INT_STAT);
+       (void) nr64_ipp(IPP_INT_STAT);
+
+       err = niu_ipp_reset(np);
+       if (err)
+               return err;
+
+       /* Clear-on-read counters: drain them post-reset. */
+       (void) nr64_ipp(IPP_PKT_DIS);
+       (void) nr64_ipp(IPP_BAD_CS_CNT);
+       (void) nr64_ipp(IPP_ECC);
+
+       (void) nr64_ipp(IPP_INT_STAT);
+
+       nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
+
+       val = nr64_ipp(IPP_CFIG);
+       val &= ~IPP_CFIG_IP_MAX_PKT;
+       val |= (IPP_CFIG_IPP_ENABLE |
+               IPP_CFIG_DFIFO_ECC_EN |
+               IPP_CFIG_DROP_BAD_CRC |
+               IPP_CFIG_CKSUM_EN |
+               (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
+       nw64_ipp(IPP_CFIG, val);
+
+       return 0;
+}
+
+/* Drive the link LED.  For 10G fiber ports the LED is controlled via
+ * the polarity/force bits of XMAC_CONFIG; @status non-zero means link
+ * up.
+ * NOTE(review): XMAC_CONFIG is read and written back even when no
+ * bits change and without checking NIU_FLAGS_XMAC — presumably this
+ * is only invoked for XMAC ports; confirm at the call sites.
+ */
+static void niu_handle_led(struct niu *np, int status)
+{
+       u64 val;
+       val = nr64_mac(XMAC_CONFIG);
+
+       if ((np->flags & NIU_FLAGS_10G) != 0 &&
+           (np->flags & NIU_FLAGS_FIBER) != 0) {
+               if (status) {
+                       val |= XMAC_CONFIG_LED_POLARITY;
+                       val &= ~XMAC_CONFIG_FORCE_LED_ON;
+               } else {
+                       val |= XMAC_CONFIG_FORCE_LED_ON;
+                       val &= ~XMAC_CONFIG_LED_POLARITY;
+               }
+       }
+
+       nw64_mac(XMAC_CONFIG, val);
+}
+
+/* Configure the XMAC XIF: clock source, TX output enable, MAC
+ * loopback, link-fault signalling and PCS bypass according to the
+ * 10G/fiber/serdes flags, the 25MHz clock select for 100Mbit, and
+ * finally the XGMII/GMII/MII interface mode.
+ */
+static void niu_init_xif_xmac(struct niu *np)
+{
+       struct niu_link_config *lp = &np->link_config;
+       u64 val;
+
+       if (np->flags & NIU_FLAGS_XCVR_SERDES) {
+               val = nr64(MIF_CONFIG);
+               val |= MIF_CONFIG_ATCA_GE;
+               nw64(MIF_CONFIG, val);
+       }
+
+       val = nr64_mac(XMAC_CONFIG);
+       val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
+
+       val |= XMAC_CONFIG_TX_OUTPUT_EN;
+
+       if (lp->loopback_mode == LOOPBACK_MAC) {
+               /* NOTE(review): SEL_POR_CLK_SRC was already cleared
+                * just above; this repeat is harmless.
+                */
+               val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
+               val |= XMAC_CONFIG_LOOPBACK;
+       } else {
+               val &= ~XMAC_CONFIG_LOOPBACK;
+       }
+
+       if (np->flags & NIU_FLAGS_10G) {
+               val &= ~XMAC_CONFIG_LFS_DISABLE;
+       } else {
+               val |= XMAC_CONFIG_LFS_DISABLE;
+               /* 1G copper bypasses the PCS; fiber/serdes use it. */
+               if (!(np->flags & NIU_FLAGS_FIBER) &&
+                   !(np->flags & NIU_FLAGS_XCVR_SERDES))
+                       val |= XMAC_CONFIG_1G_PCS_BYPASS;
+               else
+                       val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
+       }
+
+       val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
+
+       if (lp->active_speed == SPEED_100)
+               val |= XMAC_CONFIG_SEL_CLK_25MHZ;
+       else
+               val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
+
+       nw64_mac(XMAC_CONFIG, val);
+
+       /* Interface mode is programmed in a second write. */
+       val = nr64_mac(XMAC_CONFIG);
+       val &= ~XMAC_CONFIG_MODE_MASK;
+       if (np->flags & NIU_FLAGS_10G) {
+               val |= XMAC_CONFIG_MODE_XGMII;
+       } else {
+               if (lp->active_speed == SPEED_1000)
+                       val |= XMAC_CONFIG_MODE_GMII;
+               else
+                       val |= XMAC_CONFIG_MODE_MII;
+       }
+
+       nw64_mac(XMAC_CONFIG, val);
+}
+
+/* Configure the BMAC XIF: TX output enable, MII loopback, GMII mode
+ * for gigabit, LED bits cleared, and the 25MHz clock select for
+ * 100Mbit copper.
+ */
+static void niu_init_xif_bmac(struct niu *np)
+{
+       struct niu_link_config *lp = &np->link_config;
+       u64 val;
+
+       val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
+
+       if (lp->loopback_mode == LOOPBACK_MAC)
+               val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
+       else
+               val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
+
+       if (lp->active_speed == SPEED_1000)
+               val |= BMAC_XIF_CONFIG_GMII_MODE;
+       else
+               val &= ~BMAC_XIF_CONFIG_GMII_MODE;
+
+       val &= ~(BMAC_XIF_CONFIG_LINK_LED |
+                BMAC_XIF_CONFIG_LED_POLARITY);
+
+       if (!(np->flags & NIU_FLAGS_10G) &&
+           !(np->flags & NIU_FLAGS_FIBER) &&
+           lp->active_speed == SPEED_100)
+               val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
+       else
+               val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
+
+       nw64_mac(BMAC_XIF_CONFIG, val);
+}
+
+/* Dispatch XIF initialisation to the XMAC or BMAC variant according
+ * to the port's MAC type flag.
+ */
+static void niu_init_xif(struct niu *np)
+{
+       if (!(np->flags & NIU_FLAGS_XMAC))
+               niu_init_xif_bmac(np);
+       else
+               niu_init_xif_xmac(np);
+}
+
+/* Set the PCS MII reset bit and poll (100us per iteration, up to
+ * 1000 iterations) until the hardware clears it.  A timeout is not
+ * reported to the caller.
+ */
+static void niu_pcs_mii_reset(struct niu *np)
+{
+       int limit = 1000;
+       u64 val = nr64_pcs(PCS_MII_CTL);
+       val |= PCS_MII_CTL_RST;
+       nw64_pcs(PCS_MII_CTL, val);
+       while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
+               udelay(100);
+               val = nr64_pcs(PCS_MII_CTL);
+       }
+}
+
+/* Set the XPCS reset bit and poll (100us per iteration, up to 1000
+ * iterations) until the hardware clears it.  A timeout is not
+ * reported to the caller, mirroring niu_pcs_mii_reset().
+ */
+static void niu_xpcs_reset(struct niu *np)
+{
+       int limit = 1000;
+       u64 val = nr64_xpcs(XPCS_CONTROL1);
+       val |= XPCS_CONTROL1_RESET;
+       nw64_xpcs(XPCS_CONTROL1, val);
+       while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
+               udelay(100);
+               val = nr64_xpcs(XPCS_CONTROL1);
+       }
+}
+
+/* Configure the port's PCS/XPCS according to its media type, derived
+ * from the 10G/FIBER/XCVR_SERDES flag combination.  Returns 0 on
+ * success or -EINVAL for an unsupported combination (including 10G
+ * on a non-XMAC port).
+ */
+static int niu_init_pcs(struct niu *np)
+{
+       struct niu_link_config *lp = &np->link_config;
+       u64 val;
+
+       switch (np->flags & (NIU_FLAGS_10G |
+                            NIU_FLAGS_FIBER |
+                            NIU_FLAGS_XCVR_SERDES)) {
+       case NIU_FLAGS_FIBER:
+               /* 1G fiber */
+               nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
+               nw64_pcs(PCS_DPATH_MODE, 0);
+               niu_pcs_mii_reset(np);
+               break;
+
+       case NIU_FLAGS_10G:
+       case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
+       case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
+               /* 10G SERDES */
+               if (!(np->flags & NIU_FLAGS_XMAC))
+                       return -EINVAL;
+
+               /* 10G copper or fiber */
+               val = nr64_mac(XMAC_CONFIG);
+               val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
+               nw64_mac(XMAC_CONFIG, val);
+
+               niu_xpcs_reset(np);
+
+               val = nr64_xpcs(XPCS_CONTROL1);
+               if (lp->loopback_mode == LOOPBACK_PHY)
+                       val |= XPCS_CONTROL1_LOOPBACK;
+               else
+                       val &= ~XPCS_CONTROL1_LOOPBACK;
+               nw64_xpcs(XPCS_CONTROL1, val);
+
+               /* Clear the deskew counter and drain the symbol-error
+                * counters (they are clear-on-read).
+                */
+               nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
+               (void) nr64_xpcs(XPCS_SYMERR_CNT01);
+               (void) nr64_xpcs(XPCS_SYMERR_CNT23);
+               break;
+
+
+       case NIU_FLAGS_XCVR_SERDES:
+               /* 1G SERDES */
+               niu_pcs_mii_reset(np);
+               nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
+               nw64_pcs(PCS_DPATH_MODE, 0);
+               break;
+
+       case 0:
+               /* 1G copper */
+       case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
+               /* 1G RGMII FIBER */
+               nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
+               niu_pcs_mii_reset(np);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Reset the XMAC transmitter via the generic set-and-wait-clear
+ * helper (1000 polls, 100us apart).  Returns 0 or -ENODEV on timeout.
+ */
+static int niu_reset_tx_xmac(struct niu *np)
+{
+       return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
+                                         (XTXMAC_SW_RST_REG_RS |
+                                          XTXMAC_SW_RST_SOFT_RST),
+                                         1000, 100, "XTXMAC_SW_RST");
+}
+
+/* Reset the BMAC transmitter: write the self-clearing reset bit and
+ * poll until the hardware clears it.  Returns 0 or -ENODEV on timeout.
+ */
+static int niu_reset_tx_bmac(struct niu *np)
+{
+       int limit;
+
+       nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
+       limit = 1000;
+       while (--limit >= 0) {
+               if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
+                       break;
+               udelay(100);
+       }
+       if (limit < 0) {
+               dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
+                       np->port,
+                       (unsigned long long) nr64_mac(BTXMAC_SW_RST));
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/* Reset the TX MAC appropriate for this port (XMAC or BMAC). */
+static int niu_reset_tx_mac(struct niu *np)
+{
+       if (np->flags & NIU_FLAGS_XMAC)
+               return niu_reset_tx_xmac(np);
+       else
+               return niu_reset_tx_bmac(np);
+}
+
+/* Program XMAC TX parameters: min/max frame sizes, mask all TX stat
+ * interrupts, set the inter-packet gap for XGMII or MII/GMII, clear
+ * the CRC/stretch/var-IPG/TX-enable config bits and zero the TX
+ * frame/byte counters.  TX stays disabled until niu_enable_tx_mac().
+ */
+static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
+{
+       u64 val;
+
+       val = nr64_mac(XMAC_MIN);
+       val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
+                XMAC_MIN_RX_MIN_PKT_SIZE);
+       val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
+       val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
+       nw64_mac(XMAC_MIN, val);
+
+       nw64_mac(XMAC_MAX, max);
+
+       /* Mask all TX MAC status interrupts. */
+       nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
+
+       val = nr64_mac(XMAC_IPG);
+       if (np->flags & NIU_FLAGS_10G) {
+               val &= ~XMAC_IPG_IPG_XGMII;
+               val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
+       } else {
+               val &= ~XMAC_IPG_IPG_MII_GMII;
+               val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
+       }
+       nw64_mac(XMAC_IPG, val);
+
+       val = nr64_mac(XMAC_CONFIG);
+       val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
+                XMAC_CONFIG_STRETCH_MODE |
+                XMAC_CONFIG_VAR_MIN_IPG_EN |
+                XMAC_CONFIG_TX_ENABLE);
+       nw64_mac(XMAC_CONFIG, val);
+
+       nw64_mac(TXMAC_FRM_CNT, 0);
+       nw64_mac(TXMAC_BYTE_CNT, 0);
+}
+
+/* Program BMAC TX parameters: min/max frame sizes, mask TX status
+ * interrupts, set control frame type (0x8808) and preamble size, and
+ * leave the transmitter disabled with FCS generation on.
+ */
+static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
+{
+       u64 val;
+
+       nw64_mac(BMAC_MIN_FRAME, min);
+       nw64_mac(BMAC_MAX_FRAME, max);
+
+       nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
+       /* 0x8808 is the IEEE 802.3 MAC control frame EtherType. */
+       nw64_mac(BMAC_CTRL_TYPE, 0x8808);
+       nw64_mac(BMAC_PREAMBLE_SIZE, 7);
+
+       val = nr64_mac(BTXMAC_CONFIG);
+       val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
+                BTXMAC_CONFIG_ENABLE);
+       nw64_mac(BTXMAC_CONFIG, val);
+}
+
+/* Choose min/max frame sizes based on MTU (jumbo = 9216, else 1522)
+ * and initialize whichever TX MAC this port has.
+ */
+static void niu_init_tx_mac(struct niu *np)
+{
+       u64 min, max;
+
+       min = 64;
+       if (np->dev->mtu > ETH_DATA_LEN)
+               max = 9216;
+       else
+               max = 1522;
+
+       /* The XMAC_MIN register only accepts values for TX min which
+        * have the low 3 bits cleared.
+        */
+       BUG_ON(min & 0x7);
+
+       if (np->flags & NIU_FLAGS_XMAC)
+               niu_init_tx_xmac(np, min, max);
+       else
+               niu_init_tx_bmac(np, min, max);
+}
+
+/* Reset the XMAC receiver: assert both reset bits and poll until the
+ * hardware clears them.  Returns 0 or -ENODEV on timeout.
+ */
+static int niu_reset_rx_xmac(struct niu *np)
+{
+       int limit;
+
+       nw64_mac(XRXMAC_SW_RST,
+                XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
+       limit = 1000;
+       while (--limit >= 0) {
+               if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
+                                                XRXMAC_SW_RST_SOFT_RST)))
+                       break;
+               udelay(100);
+       }
+       if (limit < 0) {
+               dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
+                       np->port,
+                       (unsigned long long) nr64_mac(XRXMAC_SW_RST));
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/* Reset the BMAC receiver; same poll-until-clear pattern as above.
+ * Returns 0 or -ENODEV on timeout.
+ */
+static int niu_reset_rx_bmac(struct niu *np)
+{
+       int limit;
+
+       nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
+       limit = 1000;
+       while (--limit >= 0) {
+               if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
+                       break;
+               udelay(100);
+       }
+       if (limit < 0) {
+               dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
+                       np->port,
+                       (unsigned long long) nr64_mac(BRXMAC_SW_RST));
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/* Reset the RX MAC appropriate for this port (XMAC or BMAC). */
+static int niu_reset_rx_mac(struct niu *np)
+{
+       if (np->flags & NIU_FLAGS_XMAC)
+               return niu_reset_rx_xmac(np);
+       else
+               return niu_reset_rx_bmac(np);
+}
+
+/* Initialize the XMAC receiver: clear address filters and hash table,
+ * mask RX stat interrupts, bind the primary/multicast MACs to this
+ * port's RDC table, program the RX config (hash filtering on, RX
+ * itself left disabled) and zero all RX counters.
+ */
+static void niu_init_rx_xmac(struct niu *np)
+{
+       struct niu_parent *parent = np->parent;
+       struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
+       int first_rdc_table = tp->first_table_num;
+       unsigned long i;
+       u64 val;
+
+       nw64_mac(XMAC_ADD_FILT0, 0);
+       nw64_mac(XMAC_ADD_FILT1, 0);
+       nw64_mac(XMAC_ADD_FILT2, 0);
+       nw64_mac(XMAC_ADD_FILT12_MASK, 0);
+       nw64_mac(XMAC_ADD_FILT00_MASK, 0);
+       for (i = 0; i < MAC_NUM_HASH; i++)
+               nw64_mac(XMAC_HASH_TBL(i), 0);
+       nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
+       niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
+       niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
+
+       val = nr64_mac(XMAC_CONFIG);
+       val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
+                XMAC_CONFIG_PROMISCUOUS |
+                XMAC_CONFIG_PROMISC_GROUP |
+                XMAC_CONFIG_ERR_CHK_DIS |
+                XMAC_CONFIG_RX_CRC_CHK_DIS |
+                XMAC_CONFIG_RESERVED_MULTICAST |
+                XMAC_CONFIG_RX_CODEV_CHK_DIS |
+                XMAC_CONFIG_ADDR_FILTER_EN |
+                XMAC_CONFIG_RCV_PAUSE_ENABLE |
+                XMAC_CONFIG_STRIP_CRC |
+                XMAC_CONFIG_PASS_FLOW_CTRL |
+                XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
+       val |= (XMAC_CONFIG_HASH_FILTER_EN);
+       nw64_mac(XMAC_CONFIG, val);
+
+       nw64_mac(RXMAC_BT_CNT, 0);
+       nw64_mac(RXMAC_BC_FRM_CNT, 0);
+       nw64_mac(RXMAC_MC_FRM_CNT, 0);
+       nw64_mac(RXMAC_FRAG_CNT, 0);
+       nw64_mac(RXMAC_HIST_CNT1, 0);
+       nw64_mac(RXMAC_HIST_CNT2, 0);
+       nw64_mac(RXMAC_HIST_CNT3, 0);
+       nw64_mac(RXMAC_HIST_CNT4, 0);
+       nw64_mac(RXMAC_HIST_CNT5, 0);
+       nw64_mac(RXMAC_HIST_CNT6, 0);
+       nw64_mac(RXMAC_HIST_CNT7, 0);
+       nw64_mac(RXMAC_MPSZER_CNT, 0);
+       nw64_mac(RXMAC_CRC_ER_CNT, 0);
+       nw64_mac(RXMAC_CD_VIO_CNT, 0);
+       nw64_mac(LINK_FAULT_CNT, 0);
+}
+
+/* Initialize the BMAC receiver: same filter/hash/RDC-table setup as
+ * the XMAC path, then program RX config (hash filtering on, RX
+ * disabled) and enable compare slot 0 for the primary MAC address.
+ */
+static void niu_init_rx_bmac(struct niu *np)
+{
+       struct niu_parent *parent = np->parent;
+       struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
+       int first_rdc_table = tp->first_table_num;
+       unsigned long i;
+       u64 val;
+
+       nw64_mac(BMAC_ADD_FILT0, 0);
+       nw64_mac(BMAC_ADD_FILT1, 0);
+       nw64_mac(BMAC_ADD_FILT2, 0);
+       nw64_mac(BMAC_ADD_FILT12_MASK, 0);
+       nw64_mac(BMAC_ADD_FILT00_MASK, 0);
+       for (i = 0; i < MAC_NUM_HASH; i++)
+               nw64_mac(BMAC_HASH_TBL(i), 0);
+       niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
+       niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
+       nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
+
+       val = nr64_mac(BRXMAC_CONFIG);
+       val &= ~(BRXMAC_CONFIG_ENABLE |
+                BRXMAC_CONFIG_STRIP_PAD |
+                BRXMAC_CONFIG_STRIP_FCS |
+                BRXMAC_CONFIG_PROMISC |
+                BRXMAC_CONFIG_PROMISC_GRP |
+                BRXMAC_CONFIG_ADDR_FILT_EN |
+                BRXMAC_CONFIG_DISCARD_DIS);
+       val |= (BRXMAC_CONFIG_HASH_FILT_EN);
+       nw64_mac(BRXMAC_CONFIG, val);
+
+       val = nr64_mac(BMAC_ADDR_CMPEN);
+       val |= BMAC_ADDR_CMPEN_EN0;
+       nw64_mac(BMAC_ADDR_CMPEN, val);
+}
+
+/* Load the device's primary MAC address into the hardware, then
+ * initialize the RX side of whichever MAC this port has.
+ */
+static void niu_init_rx_mac(struct niu *np)
+{
+       niu_set_primary_mac(np, np->dev->dev_addr);
+
+       if (np->flags & NIU_FLAGS_XMAC)
+               niu_init_rx_xmac(np);
+       else
+               niu_init_rx_bmac(np);
+}
+
+/* Set or clear the XMAC transmit-enable bit.  @on: nonzero enables. */
+static void niu_enable_tx_xmac(struct niu *np, int on)
+{
+       u64 val = nr64_mac(XMAC_CONFIG);
+
+       if (on)
+               val |= XMAC_CONFIG_TX_ENABLE;
+       else
+               val &= ~XMAC_CONFIG_TX_ENABLE;
+       nw64_mac(XMAC_CONFIG, val);
+}
+
+/* Set or clear the BMAC transmit-enable bit.  @on: nonzero enables. */
+static void niu_enable_tx_bmac(struct niu *np, int on)
+{
+       u64 val = nr64_mac(BTXMAC_CONFIG);
+
+       if (on)
+               val |= BTXMAC_CONFIG_ENABLE;
+       else
+               val &= ~BTXMAC_CONFIG_ENABLE;
+       nw64_mac(BTXMAC_CONFIG, val);
+}
+
+/* Enable/disable the TX MAC appropriate for this port. */
+static void niu_enable_tx_mac(struct niu *np, int on)
+{
+       if (np->flags & NIU_FLAGS_XMAC)
+               niu_enable_tx_xmac(np, on);
+       else
+               niu_enable_tx_bmac(np, on);
+}
+
+/* Enable/disable XMAC receive, also re-syncing the hash-filter and
+ * promiscuous config bits from np->flags (set by niu_set_rx_mode()).
+ */
+static void niu_enable_rx_xmac(struct niu *np, int on)
+{
+       u64 val = nr64_mac(XMAC_CONFIG);
+
+       val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
+                XMAC_CONFIG_PROMISCUOUS);
+
+       if (np->flags & NIU_FLAGS_MCAST)
+               val |= XMAC_CONFIG_HASH_FILTER_EN;
+       if (np->flags & NIU_FLAGS_PROMISC)
+               val |= XMAC_CONFIG_PROMISCUOUS;
+
+       if (on)
+               val |= XMAC_CONFIG_RX_MAC_ENABLE;
+       else
+               val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
+       nw64_mac(XMAC_CONFIG, val);
+}
+
+/* BMAC counterpart of niu_enable_rx_xmac(); same flag handling. */
+static void niu_enable_rx_bmac(struct niu *np, int on)
+{
+       u64 val = nr64_mac(BRXMAC_CONFIG);
+
+       val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
+                BRXMAC_CONFIG_PROMISC);
+
+       if (np->flags & NIU_FLAGS_MCAST)
+               val |= BRXMAC_CONFIG_HASH_FILT_EN;
+       if (np->flags & NIU_FLAGS_PROMISC)
+               val |= BRXMAC_CONFIG_PROMISC;
+
+       if (on)
+               val |= BRXMAC_CONFIG_ENABLE;
+       else
+               val &= ~BRXMAC_CONFIG_ENABLE;
+       nw64_mac(BRXMAC_CONFIG, val);
+}
+
+/* Enable/disable the RX MAC appropriate for this port. */
+static void niu_enable_rx_mac(struct niu *np, int on)
+{
+       if (np->flags & NIU_FLAGS_XMAC)
+               niu_enable_rx_xmac(np, on);
+       else
+               niu_enable_rx_bmac(np, on);
+}
+
+/* Bring up the whole MAC: XIF, PCS, then reset+init of TX and RX
+ * sides, finally enabling both.  Returns 0 or the first error from a
+ * PCS init or MAC reset step.
+ */
+static int niu_init_mac(struct niu *np)
+{
+       int err;
+
+       niu_init_xif(np);
+       err = niu_init_pcs(np);
+       if (err)
+               return err;
+
+       err = niu_reset_tx_mac(np);
+       if (err)
+               return err;
+       niu_init_tx_mac(np);
+       err = niu_reset_rx_mac(np);
+       if (err)
+               return err;
+       niu_init_rx_mac(np);
+
+       /* This looks hookey but the RX MAC reset we just did will
+        * undo some of the state we setup in niu_init_tx_mac() so we
+        * have to call it again.  In particular, the RX MAC reset will
+        * set the XMAC_MAX register back to it's default value.
+        */
+       niu_init_tx_mac(np);
+       niu_enable_tx_mac(np, 1);
+
+       niu_enable_rx_mac(np, 1);
+
+       return 0;
+}
+
+/* Stop one TX DMA channel; errors are deliberately ignored. */
+static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
+{
+       (void) niu_tx_channel_stop(np, rp->tx_channel);
+}
+
+/* Stop every TX DMA channel on this port. */
+static void niu_stop_tx_channels(struct niu *np)
+{
+       int i;
+
+       for (i = 0; i < np->num_tx_rings; i++) {
+               struct tx_ring_info *rp = &np->tx_rings[i];
+
+               niu_stop_one_tx_channel(np, rp);
+       }
+}
+
+/* Reset one TX DMA channel; errors are deliberately ignored. */
+static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
+{
+       (void) niu_tx_channel_reset(np, rp->tx_channel);
+}
+
+/* Reset every TX DMA channel on this port. */
+static void niu_reset_tx_channels(struct niu *np)
+{
+       int i;
+
+       for (i = 0; i < np->num_tx_rings; i++) {
+               struct tx_ring_info *rp = &np->tx_rings[i];
+
+               niu_reset_one_tx_channel(np, rp);
+       }
+}
+
+/* Disable one RX DMA channel; errors are deliberately ignored. */
+static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
+{
+       (void) niu_enable_rx_channel(np, rp->rx_channel, 0);
+}
+
+/* Disable every RX DMA channel on this port. */
+static void niu_stop_rx_channels(struct niu *np)
+{
+       int i;
+
+       for (i = 0; i < np->num_rx_rings; i++) {
+               struct rx_ring_info *rp = &np->rx_rings[i];
+
+               niu_stop_one_rx_channel(np, rp);
+       }
+}
+
+/* Reset one RX DMA channel: hardware reset, mask all its event
+ * sources, clear its control/status register and leave it disabled.
+ */
+static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
+{
+       int channel = rp->rx_channel;
+
+       (void) niu_rx_channel_reset(np, channel);
+       nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
+       nw64(RX_DMA_CTL_STAT(channel), 0);
+       (void) niu_enable_rx_channel(np, channel, 0);
+}
+
+/* Reset every RX DMA channel on this port. */
+static void niu_reset_rx_channels(struct niu *np)
+{
+       int i;
+
+       for (i = 0; i < np->num_rx_rings; i++) {
+               struct rx_ring_info *rp = &np->rx_rings[i];
+
+               niu_reset_one_rx_channel(np, rp);
+       }
+}
+
+/* Quiesce and disable the IPP (input packet processor): wait for its
+ * data FIFO read/write pointers to converge, clear the enable and
+ * checksum/CRC config bits, then reset the block.
+ */
+static void niu_disable_ipp(struct niu *np)
+{
+       u64 rd, wr, val;
+       int limit;
+
+       rd = nr64_ipp(IPP_DFIFO_RD_PTR);
+       wr = nr64_ipp(IPP_DFIFO_WR_PTR);
+       limit = 100;
+       while (--limit >= 0 && (rd != wr)) {
+               rd = nr64_ipp(IPP_DFIFO_RD_PTR);
+               wr = nr64_ipp(IPP_DFIFO_WR_PTR);
+       }
+       /* NOTE(review): the `(rd != 0 && wr != 1)` test suppresses the
+        * warning only for the exact rd==0/wr==1 residual state; unclear
+        * from here whether other benign states exist — confirm against
+        * the Neptune IPP documentation before changing.
+        */
+       if (limit < 0 &&
+           (rd != 0 && wr != 1)) {
+               netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
+                          (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
+                          (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
+       }
+
+       val = nr64_ipp(IPP_CFIG);
+       val &= ~(IPP_CFIG_IPP_ENABLE |
+                IPP_CFIG_DFIFO_ECC_EN |
+                IPP_CFIG_DROP_BAD_CRC |
+                IPP_CFIG_CKSUM_EN);
+       nw64_ipp(IPP_CFIG, val);
+
+       (void) niu_ipp_reset(np);
+}
+
+/* Full hardware bring-up in dependency order: TXC, TX channels, RX
+ * channels, classifier, ZCP, IPP, MAC.  On failure, unwinds only the
+ * stages already brought up (goto-cleanup ladder) and returns the
+ * failing stage's error code.  Caller holds np->lock.
+ */
+static int niu_init_hw(struct niu *np)
+{
+       int i, err;
+
+       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
+       niu_txc_enable_port(np, 1);
+       niu_txc_port_dma_enable(np, 1);
+       niu_txc_set_imask(np, 0);
+
+       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
+       for (i = 0; i < np->num_tx_rings; i++) {
+               struct tx_ring_info *rp = &np->tx_rings[i];
+
+               err = niu_init_one_tx_channel(np, rp);
+               if (err)
+                       return err;
+       }
+
+       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
+       err = niu_init_rx_channels(np);
+       if (err)
+               goto out_uninit_tx_channels;
+
+       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
+       err = niu_init_classifier_hw(np);
+       if (err)
+               goto out_uninit_rx_channels;
+
+       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
+       err = niu_init_zcp(np);
+       if (err)
+               goto out_uninit_rx_channels;
+
+       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
+       err = niu_init_ipp(np);
+       if (err)
+               goto out_uninit_rx_channels;
+
+       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
+       err = niu_init_mac(np);
+       if (err)
+               goto out_uninit_ipp;
+
+       return 0;
+
+out_uninit_ipp:
+       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
+       niu_disable_ipp(np);
+
+out_uninit_rx_channels:
+       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
+       niu_stop_rx_channels(np);
+       niu_reset_rx_channels(np);
+
+out_uninit_tx_channels:
+       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
+       niu_stop_tx_channels(np);
+       niu_reset_tx_channels(np);
+
+       return err;
+}
+
+/* Quiesce the hardware: interrupts off, RX MAC off, IPP off, then
+ * stop and reset all TX and RX DMA channels.  Caller holds np->lock.
+ */
+static void niu_stop_hw(struct niu *np)
+{
+       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
+       niu_enable_interrupts(np, 0);
+
+       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
+       niu_enable_rx_mac(np, 0);
+
+       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
+       niu_disable_ipp(np);
+
+       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
+       niu_stop_tx_channels(np);
+
+       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
+       niu_stop_rx_channels(np);
+
+       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
+       niu_reset_tx_channels(np);
+
+       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
+       niu_reset_rx_channels(np);
+}
+
+/* Build the per-LDG irq_name[] strings.  Slot 0 is always the MAC;
+ * port 0 additionally owns the MIF and SYSERR interrupts (slots 1-2),
+ * so its RX/TX names start at slot 3 (j).  Remaining slots are named
+ * "<dev>-rx-N" then "<dev>-tx-N".
+ */
+static void niu_set_irq_name(struct niu *np)
+{
+       int port = np->port;
+       int i, j = 1;
+
+       sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
+
+       if (port == 0) {
+               sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
+               sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
+               j = 3;
+       }
+
+       for (i = 0; i < np->num_ldg - j; i++) {
+               if (i < np->num_rx_rings)
+                       sprintf(np->irq_name[i+j], "%s-rx-%d",
+                               np->dev->name, i);
+               else if (i < np->num_tx_rings + np->num_rx_rings)
+                       sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
+                               i - np->num_rx_rings);
+       }
+}
+
+/* Request a shared IRQ for every LDG (logical device group).  On
+ * failure, frees the IRQs already requested and returns the error.
+ */
+static int niu_request_irq(struct niu *np)
+{
+       int i, j, err;
+
+       niu_set_irq_name(np);
+
+       err = 0;
+       for (i = 0; i < np->num_ldg; i++) {
+               struct niu_ldg *lp = &np->ldg[i];
+
+               err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
+                                 np->irq_name[i], lp);
+               if (err)
+                       goto out_free_irqs;
+
+       }
+
+       return 0;
+
+out_free_irqs:
+       for (j = 0; j < i; j++) {
+               struct niu_ldg *lp = &np->ldg[j];
+
+               free_irq(lp->irq, lp);
+       }
+       return err;
+}
+
+/* Free every LDG's IRQ; mirror of niu_request_irq(). */
+static void niu_free_irq(struct niu *np)
+{
+       int i;
+
+       for (i = 0; i < np->num_ldg; i++) {
+               struct niu_ldg *lp = &np->ldg[i];
+
+               free_irq(lp->irq, lp);
+       }
+}
+
+/* Enable the NAPI context of every LDG. */
+static void niu_enable_napi(struct niu *np)
+{
+       int i;
+
+       for (i = 0; i < np->num_ldg; i++)
+               napi_enable(&np->ldg[i].napi);
+}
+
+/* Disable the NAPI context of every LDG (waits for in-flight polls). */
+static void niu_disable_napi(struct niu *np)
+{
+       int i;
+
+       for (i = 0; i < np->num_ldg; i++)
+               napi_disable(&np->ldg[i].napi);
+}
+
+/* ndo_open: allocate channels, mask interrupts, request IRQs, enable
+ * NAPI, then bring up the hardware under np->lock and arm the link
+ * timer.  Unwinds in reverse order on any failure.
+ */
+static int niu_open(struct net_device *dev)
+{
+       struct niu *np = netdev_priv(dev);
+       int err;
+
+       netif_carrier_off(dev);
+
+       err = niu_alloc_channels(np);
+       if (err)
+               goto out_err;
+
+       /* Keep interrupts masked until init completes. */
+       err = niu_enable_interrupts(np, 0);
+       if (err)
+               goto out_free_channels;
+
+       err = niu_request_irq(np);
+       if (err)
+               goto out_free_channels;
+
+       niu_enable_napi(np);
+
+       spin_lock_irq(&np->lock);
+
+       err = niu_init_hw(np);
+       if (!err) {
+               init_timer(&np->timer);
+               np->timer.expires = jiffies + HZ;
+               np->timer.data = (unsigned long) np;
+               np->timer.function = niu_timer;
+
+               err = niu_enable_interrupts(np, 1);
+               if (err)
+                       niu_stop_hw(np);
+       }
+
+       spin_unlock_irq(&np->lock);
+
+       if (err) {
+               niu_disable_napi(np);
+               goto out_free_irq;
+       }
+
+       netif_tx_start_all_queues(dev);
+
+       /* In loopback there is no external link; report carrier now. */
+       if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
+               netif_carrier_on(dev);
+
+       add_timer(&np->timer);
+
+       return 0;
+
+out_free_irq:
+       niu_free_irq(np);
+
+out_free_channels:
+       niu_free_channels(np);
+
+out_err:
+       return err;
+}
+
+/* Orderly teardown shared by close and (elsewhere) suspend: cancel
+ * the reset work, quiesce NAPI and the TX queues, kill the link
+ * timer, then stop the hardware under np->lock.
+ */
+static void niu_full_shutdown(struct niu *np, struct net_device *dev)
+{
+       cancel_work_sync(&np->reset_task);
+
+       niu_disable_napi(np);
+       netif_tx_stop_all_queues(dev);
+
+       del_timer_sync(&np->timer);
+
+       spin_lock_irq(&np->lock);
+
+       niu_stop_hw(np);
+
+       spin_unlock_irq(&np->lock);
+}
+
+/* ndo_stop: full shutdown, release IRQs and channel memory, LEDs off. */
+static int niu_close(struct net_device *dev)
+{
+       struct niu *np = netdev_priv(dev);
+
+       niu_full_shutdown(np, dev);
+
+       niu_free_irq(np);
+
+       niu_free_channels(np);
+
+       niu_handle_led(np, 0);
+
+       return 0;
+}
+
+/* Fold the XMAC hardware counters into the software mac_stats
+ * accumulators.  Counter registers are presumably clear-on-read —
+ * TODO confirm against the Neptune register spec; the += pattern
+ * depends on it.
+ */
+static void niu_sync_xmac_stats(struct niu *np)
+{
+       struct niu_xmac_stats *mp = &np->mac_stats.xmac;
+
+       mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
+       mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);
+
+       mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
+       mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
+       mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
+       mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
+       mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
+       mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
+       mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
+       mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
+       mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
+       mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
+       mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
+       mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
+       mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
+       mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
+       mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
+       mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
+}
+
+/* Fold the BMAC hardware counters into the software accumulators. */
+static void niu_sync_bmac_stats(struct niu *np)
+{
+       struct niu_bmac_stats *mp = &np->mac_stats.bmac;
+
+       mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
+       mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
+
+       mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
+       mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
+       /* NOTE(review): rx_crc_errors reads BRXMAC_ALIGN_ERR_CNT — looks
+        * like a copy/paste of the line above; verify whether
+        * BRXMAC_CRC_ERR_CNT was intended before changing.
+        */
+       mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
+       mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
+}
+
+/* Sync the MAC stats appropriate for this port (XMAC or BMAC). */
+static void niu_sync_mac_stats(struct niu *np)
+{
+       if (np->flags & NIU_FLAGS_XMAC)
+               niu_sync_xmac_stats(np);
+       else
+               niu_sync_bmac_stats(np);
+}
+
+/* Sum RX packet/byte/drop/error counts across all RX rings into
+ * @stats.  rx_rings is read once (ACCESS_ONCE) so a concurrent
+ * teardown that NULLs it is handled by reporting zeros.
+ */
+static void niu_get_rx_stats(struct niu *np,
+                            struct rtnl_link_stats64 *stats)
+{
+       u64 pkts, dropped, errors, bytes;
+       struct rx_ring_info *rx_rings;
+       int i;
+
+       pkts = dropped = errors = bytes = 0;
+
+       rx_rings = ACCESS_ONCE(np->rx_rings);
+       if (!rx_rings)
+               goto no_rings;
+
+       for (i = 0; i < np->num_rx_rings; i++) {
+               struct rx_ring_info *rp = &rx_rings[i];
+
+               niu_sync_rx_discard_stats(np, rp, 0);
+
+               pkts += rp->rx_packets;
+               bytes += rp->rx_bytes;
+               dropped += rp->rx_dropped;
+               errors += rp->rx_errors;
+       }
+
+no_rings:
+       stats->rx_packets = pkts;
+       stats->rx_bytes = bytes;
+       stats->rx_dropped = dropped;
+       stats->rx_errors = errors;
+}
+
+/* Sum TX packet/byte/error counts across all TX rings into @stats;
+ * same ACCESS_ONCE guard against concurrent ring teardown.
+ */
+static void niu_get_tx_stats(struct niu *np,
+                            struct rtnl_link_stats64 *stats)
+{
+       u64 pkts, errors, bytes;
+       struct tx_ring_info *tx_rings;
+       int i;
+
+       pkts = errors = bytes = 0;
+
+       tx_rings = ACCESS_ONCE(np->tx_rings);
+       if (!tx_rings)
+               goto no_rings;
+
+       for (i = 0; i < np->num_tx_rings; i++) {
+               struct tx_ring_info *rp = &tx_rings[i];
+
+               pkts += rp->tx_packets;
+               bytes += rp->tx_bytes;
+               errors += rp->tx_errors;
+       }
+
+no_rings:
+       stats->tx_packets = pkts;
+       stats->tx_bytes = bytes;
+       stats->tx_errors = errors;
+}
+
+/* ndo_get_stats64: fill @stats only while the device is running;
+ * otherwise @stats is returned as provided by the caller.
+ */
+static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
+                                              struct rtnl_link_stats64 *stats)
+{
+       struct niu *np = netdev_priv(dev);
+
+       if (netif_running(dev)) {
+               niu_get_rx_stats(np, stats);
+               niu_get_tx_stats(np, stats);
+       }
+
+       return stats;
+}
+
+/* Write the 16-entry multicast hash table into the XMAC. */
+static void niu_load_hash_xmac(struct niu *np, u16 *hash)
+{
+       int i;
+
+       for (i = 0; i < 16; i++)
+               nw64_mac(XMAC_HASH_TBL(i), hash[i]);
+}
+
+/* Write the 16-entry multicast hash table into the BMAC. */
+static void niu_load_hash_bmac(struct niu *np, u16 *hash)
+{
+       int i;
+
+       for (i = 0; i < 16; i++)
+               nw64_mac(BMAC_HASH_TBL(i), hash[i]);
+}
+
+/* Load the multicast hash into whichever MAC this port has. */
+static void niu_load_hash(struct niu *np, u16 *hash)
+{
+       if (np->flags & NIU_FLAGS_XMAC)
+               niu_load_hash_xmac(np, hash);
+       else
+               niu_load_hash_bmac(np, hash);
+}
+
+/* ndo_set_rx_mode: with RX disabled, translate the device's
+ * promisc/allmulti/unicast/multicast lists into NIU flag bits,
+ * alternate-MAC slots and the 256-bit hash table, then re-enable RX
+ * (which pushes the flag bits into the MAC config).
+ */
+static void niu_set_rx_mode(struct net_device *dev)
+{
+       struct niu *np = netdev_priv(dev);
+       int i, alt_cnt, err;
+       struct netdev_hw_addr *ha;
+       unsigned long flags;
+       u16 hash[16] = { 0, };
+
+       spin_lock_irqsave(&np->lock, flags);
+       niu_enable_rx_mac(np, 0);
+
+       np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
+       if (dev->flags & IFF_PROMISC)
+               np->flags |= NIU_FLAGS_PROMISC;
+       if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
+               np->flags |= NIU_FLAGS_MCAST;
+
+       /* More secondary unicasts than alt-MAC slots: fall back to
+        * promiscuous mode.
+        */
+       alt_cnt = netdev_uc_count(dev);
+       if (alt_cnt > niu_num_alt_addr(np)) {
+               alt_cnt = 0;
+               np->flags |= NIU_FLAGS_PROMISC;
+       }
+
+       if (alt_cnt) {
+               int index = 0;
+
+               netdev_for_each_uc_addr(ha, dev) {
+                       err = niu_set_alt_mac(np, index, ha->addr);
+                       if (err)
+                               netdev_warn(dev, "Error %d adding alt mac %d\n",
+                                           err, index);
+                       err = niu_enable_alt_mac(np, index, 1);
+                       if (err)
+                               netdev_warn(dev, "Error %d enabling alt mac %d\n",
+                                           err, index);
+
+                       index++;
+               }
+       } else {
+               int alt_start;
+               if (np->flags & NIU_FLAGS_XMAC)
+                       alt_start = 0;
+               else
+                       alt_start = 1;
+               for (i = alt_start; i < niu_num_alt_addr(np); i++) {
+                       err = niu_enable_alt_mac(np, i, 0);
+                       if (err)
+                               netdev_warn(dev, "Error %d disabling alt mac %d\n",
+                                           err, i);
+               }
+       }
+       if (dev->flags & IFF_ALLMULTI) {
+               for (i = 0; i < 16; i++)
+                       hash[i] = 0xffff;
+       } else if (!netdev_mc_empty(dev)) {
+               netdev_for_each_mc_addr(ha, dev) {
+                       u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
+
+                       /* Top 8 CRC bits index one of 256 hash bits:
+                        * high nibble picks the word, low nibble the bit.
+                        */
+                       crc >>= 24;
+                       hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
+               }
+       }
+
+       if (np->flags & NIU_FLAGS_MCAST)
+               niu_load_hash(np, hash);
+
+       niu_enable_rx_mac(np, 1);
+       spin_unlock_irqrestore(&np->lock, flags);
+}
+
+/* ndo_set_mac_address: validate and copy the new address; if the
+ * device is running, push it into the hardware with RX briefly
+ * disabled.  Returns 0 or -EINVAL for an invalid address.
+ */
+static int niu_set_mac_addr(struct net_device *dev, void *p)
+{
+       struct niu *np = netdev_priv(dev);
+       struct sockaddr *addr = p;
+       unsigned long flags;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EINVAL;
+
+       memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+
+       if (!netif_running(dev))
+               return 0;
+
+       spin_lock_irqsave(&np->lock, flags);
+       niu_enable_rx_mac(np, 0);
+       niu_set_primary_mac(np, dev->dev_addr);
+       niu_enable_rx_mac(np, 1);
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       return 0;
+}
+
+/* ndo_do_ioctl: no private ioctls are supported. */
+static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+       return -EOPNOTSUPP;
+}
+
+/* Pause the network interface for a reset: refresh trans_start so the
+ * watchdog doesn't fire, disable NAPI, then stop the TX queues.
+ */
+static void niu_netif_stop(struct niu *np)
+{
+       np->dev->trans_start = jiffies; /* prevent tx timeout */
+
+       niu_disable_napi(np);
+
+       netif_tx_disable(np->dev);
+}
+
+/* Resume the interface after a reset: wake TX queues, re-enable NAPI
+ * and unmask interrupts.
+ */
+static void niu_netif_start(struct niu *np)
+{
+       /* NOTE: unconditional netif_wake_queue is only appropriate
+        * so long as all callers are assured to have free tx slots
+        * (such as after niu_init_hw).
+        */
+       netif_tx_wake_all_queues(np->dev);
+
+       niu_enable_napi(np);
+
+       niu_enable_interrupts(np, 1);
+}
+
+/* Rebuild all ring state after a hardware reset without freeing
+ * buffers: re-post the RX pages still chained on each ring's rxhash
+ * into the RBR, allocate fresh pages for any remaining slots, release
+ * every pending TX packet, and reset all producer/consumer indices.
+ */
+static void niu_reset_buffers(struct niu *np)
+{
+       int i, j, k, err;
+
+       if (np->rx_rings) {
+               for (i = 0; i < np->num_rx_rings; i++) {
+                       struct rx_ring_info *rp = &np->rx_rings[i];
+
+                       for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
+                               struct page *page;
+
+                               /* rxhash chains pages via page->mapping;
+                                * page->index holds the DMA address.
+                                */
+                               page = rp->rxhash[j];
+                               while (page) {
+                                       struct page *next =
+                                               (struct page *) page->mapping;
+                                       u64 base = page->index;
+                                       base = base >> RBR_DESCR_ADDR_SHIFT;
+                                       rp->rbr[k++] = cpu_to_le32(base);
+                                       page = next;
+                               }
+                       }
+                       for (; k < MAX_RBR_RING_SIZE; k++) {
+                               err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
+                               if (unlikely(err))
+                                       break;
+                       }
+
+                       rp->rbr_index = rp->rbr_table_size - 1;
+                       rp->rcr_index = 0;
+                       rp->rbr_pending = 0;
+                       rp->rbr_refill_pending = 0;
+               }
+       }
+       if (np->tx_rings) {
+               for (i = 0; i < np->num_tx_rings; i++) {
+                       struct tx_ring_info *rp = &np->tx_rings[i];
+
+                       for (j = 0; j < MAX_TX_RING_SIZE; j++) {
+                               if (rp->tx_buffs[j].skb)
+                                       (void) release_tx_packet(np, rp, j);
+                       }
+
+                       rp->pending = MAX_TX_RING_SIZE;
+                       rp->prod = 0;
+                       rp->cons = 0;
+                       rp->wrap_bit = 0;
+               }
+       }
+}
+
+/* Workqueue handler scheduled from niu_tx_timeout(): fully stop the
+ * device, rebuild the rings, and bring the hardware back up.  Runs in
+ * process context so it may sleep (del_timer_sync, niu_netif_stop).
+ */
+static void niu_reset_task(struct work_struct *work)
+{
+       struct niu *np = container_of(work, struct niu, reset_task);
+       unsigned long flags;
+       int err;
+
+       /* Bail out if the interface was brought down before the work ran. */
+       spin_lock_irqsave(&np->lock, flags);
+       if (!netif_running(np->dev)) {
+               spin_unlock_irqrestore(&np->lock, flags);
+               return;
+       }
+
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       del_timer_sync(&np->timer);
+
+       niu_netif_stop(np);
+
+       spin_lock_irqsave(&np->lock, flags);
+
+       niu_stop_hw(np);
+
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       /* Ring rebuild may allocate pages, so do it outside np->lock. */
+       niu_reset_buffers(np);
+
+       spin_lock_irqsave(&np->lock, flags);
+
+       err = niu_init_hw(np);
+       if (!err) {
+               np->timer.expires = jiffies + HZ;
+               add_timer(&np->timer);
+               niu_netif_start(np);
+       }
+
+       spin_unlock_irqrestore(&np->lock, flags);
+}
+
+/* ndo_tx_timeout hook: defer the heavyweight reset to process context
+ * via the reset_task workqueue item.
+ */
+static void niu_tx_timeout(struct net_device *dev)
+{
+       struct niu *np = netdev_priv(dev);
+
+       dev_err(np->device, "%s: Transmit timed out, resetting\n",
+               dev->name);
+
+       schedule_work(&np->reset_task);
+}
+
+/* Pack one TX descriptor: mark/SOP bits, fragment count, transfer
+ * length, and the DMA address, all in little-endian as the chip expects.
+ */
+static void niu_set_txd(struct tx_ring_info *rp, int index,
+                       u64 mapping, u64 len, u64 mark,
+                       u64 n_frags)
+{
+       __le64 *desc = &rp->descr[index];
+
+       *desc = cpu_to_le64(mark |
+                           (n_frags << TX_DESC_NUM_PTR_SHIFT) |
+                           (len << TX_DESC_TR_LEN_SHIFT) |
+                           (mapping & TX_DESC_SAD));
+}
+
+/* Build the 64-bit TX packet-header flags word: pad/length fields, L3
+ * start offset and IHL, VLAN/LLC indicators, and (for CHECKSUM_PARTIAL
+ * skbs) the hardware checksum start/stuff offsets.  All offsets are
+ * relative to the start of the prepended struct tx_pkt_hdr, in 16-bit
+ * units as required by the TXHDR_* field encodings.
+ */
+static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
+                               u64 pad_bytes, u64 len)
+{
+       u16 eth_proto, eth_proto_inner;
+       u64 csum_bits, l3off, ihl, ret;
+       u8 ip_proto;
+       int ipv6;
+
+       eth_proto = be16_to_cpu(ehdr->h_proto);
+       eth_proto_inner = eth_proto;
+       /* For VLAN-tagged frames classify on the encapsulated protocol. */
+       if (eth_proto == ETH_P_8021Q) {
+               struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
+               __be16 val = vp->h_vlan_encapsulated_proto;
+
+               eth_proto_inner = be16_to_cpu(val);
+       }
+
+       ipv6 = ihl = 0;
+       switch (skb->protocol) {
+       case cpu_to_be16(ETH_P_IP):
+               ip_proto = ip_hdr(skb)->protocol;
+               ihl = ip_hdr(skb)->ihl;
+               break;
+       case cpu_to_be16(ETH_P_IPV6):
+               ip_proto = ipv6_hdr(skb)->nexthdr;
+               ihl = (40 >> 2);        /* fixed IPv6 header, in 32-bit words */
+               ipv6 = 1;
+               break;
+       default:
+               ip_proto = ihl = 0;
+               break;
+       }
+
+       csum_bits = TXHDR_CSUM_NONE;
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               u64 start, stuff;
+
+               csum_bits = (ip_proto == IPPROTO_TCP ?
+                            TXHDR_CSUM_TCP :
+                            (ip_proto == IPPROTO_UDP ?
+                             TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
+
+               /* Rebase the checksum offsets onto the tx_pkt_hdr that
+                * niu_start_xmit() pushed in front of the frame.
+                */
+               start = skb_checksum_start_offset(skb) -
+                       (pad_bytes + sizeof(struct tx_pkt_hdr));
+               stuff = start + skb->csum_offset;
+
+               csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
+               csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
+       }
+
+       l3off = skb_network_offset(skb) -
+               (pad_bytes + sizeof(struct tx_pkt_hdr));
+
+       ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
+              (len << TXHDR_LEN_SHIFT) |
+              ((l3off / 2) << TXHDR_L3START_SHIFT) |
+              (ihl << TXHDR_IHL_SHIFT) |
+              ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
+              ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
+              (ipv6 ? TXHDR_IP_VER : 0) |
+              csum_bits);
+
+       return ret;
+}
+
+/* ndo_start_xmit hook.  Prepends a struct tx_pkt_hdr (plus alignment
+ * pad) to the frame, maps head and fragments for DMA, fills the TX
+ * descriptors and kicks the ring.  Returns NETDEV_TX_OK or, on the
+ * should-not-happen full-ring case, NETDEV_TX_BUSY.
+ */
+static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
+                                 struct net_device *dev)
+{
+       struct niu *np = netdev_priv(dev);
+       unsigned long align, headroom;
+       struct netdev_queue *txq;
+       struct tx_ring_info *rp;
+       struct tx_pkt_hdr *tp;
+       unsigned int len, nfg;
+       struct ethhdr *ehdr;
+       int prod, i, tlen;
+       u64 mapping, mrk;
+
+       i = skb_get_queue_mapping(skb);
+       rp = &np->tx_rings[i];
+       txq = netdev_get_tx_queue(dev, i);
+
+       /* The queue should have been stopped before it got this full. */
+       if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
+               netif_tx_stop_queue(txq);
+               dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
+               rp->tx_errors++;
+               return NETDEV_TX_BUSY;
+       }
+
+       /* Hardware requires minimum-size (ETH_ZLEN) frames. */
+       if (skb->len < ETH_ZLEN) {
+               unsigned int pad_bytes = ETH_ZLEN - skb->len;
+
+               if (skb_pad(skb, pad_bytes))
+                       goto out;
+               skb_put(skb, pad_bytes);
+       }
+
+       /* Make room for the packet header plus up to 15 alignment bytes. */
+       len = sizeof(struct tx_pkt_hdr) + 15;
+       if (skb_headroom(skb) < len) {
+               struct sk_buff *skb_new;
+
+               skb_new = skb_realloc_headroom(skb, len);
+               if (!skb_new) {
+                       rp->tx_errors++;
+                       goto out_drop;
+               }
+               kfree_skb(skb);
+               skb = skb_new;
+       } else
+               skb_orphan(skb);
+
+       /* Pad so the tx_pkt_hdr lands the data on a 16-byte boundary. */
+       align = ((unsigned long) skb->data & (16 - 1));
+       headroom = align + sizeof(struct tx_pkt_hdr);
+
+       ehdr = (struct ethhdr *) skb->data;
+       tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);
+
+       len = skb->len - sizeof(struct tx_pkt_hdr);
+       tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
+       tp->resv = 0;
+
+       len = skb_headlen(skb);
+       mapping = np->ops->map_single(np->device, skb->data,
+                                     len, DMA_TO_DEVICE);
+
+       prod = rp->prod;
+
+       /* Only the first descriptor's slot records the skb pointer, so
+        * completion knows where the packet starts.
+        */
+       rp->tx_buffs[prod].skb = skb;
+       rp->tx_buffs[prod].mapping = mapping;
+
+       mrk = TX_DESC_SOP;
+       if (++rp->mark_counter == rp->mark_freq) {
+               rp->mark_counter = 0;
+               mrk |= TX_DESC_MARK;
+               rp->mark_pending++;
+       }
+
+       /* Descriptor count: one per MAX_TX_DESC_LEN chunk of the linear
+        * area plus one per page fragment.
+        */
+       tlen = len;
+       nfg = skb_shinfo(skb)->nr_frags;
+       while (tlen > 0) {
+               tlen -= MAX_TX_DESC_LEN;
+               nfg++;
+       }
+
+       /* Emit descriptors for the linear part; SOP/mark and the
+        * fragment count go only in the first one.
+        */
+       while (len > 0) {
+               unsigned int this_len = len;
+
+               if (this_len > MAX_TX_DESC_LEN)
+                       this_len = MAX_TX_DESC_LEN;
+
+               niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
+               mrk = nfg = 0;
+
+               prod = NEXT_TX(rp, prod);
+               mapping += this_len;
+               len -= this_len;
+       }
+
+       for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               len = frag->size;
+               mapping = np->ops->map_page(np->device, frag->page,
+                                           frag->page_offset, len,
+                                           DMA_TO_DEVICE);
+
+               rp->tx_buffs[prod].skb = NULL;
+               rp->tx_buffs[prod].mapping = mapping;
+
+               niu_set_txd(rp, prod, mapping, len, 0, 0);
+
+               prod = NEXT_TX(rp, prod);
+       }
+
+       /* The kick register's wrap bit must toggle each time the
+        * producer index wraps around the ring.
+        */
+       if (prod < rp->prod)
+               rp->wrap_bit ^= TX_RING_KICK_WRAP;
+       rp->prod = prod;
+
+       nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
+
+       /* Stop the queue when a worst-case packet no longer fits;
+        * re-check in case completion freed slots meanwhile.
+        */
+       if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
+               netif_tx_stop_queue(txq);
+               if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
+                       netif_tx_wake_queue(txq);
+       }
+
+out:
+       return NETDEV_TX_OK;
+
+out_drop:
+       rp->tx_errors++;
+       kfree_skb(skb);
+       goto out;
+}
+
+/* ndo_change_mtu hook.  A running device only needs a full teardown
+ * and re-init when crossing the standard/jumbo boundary, since that
+ * changes the required RX buffer geometry.
+ */
+static int niu_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct niu *np = netdev_priv(dev);
+       int err, orig_jumbo, new_jumbo;
+
+       if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
+               return -EINVAL;
+
+       orig_jumbo = (dev->mtu > ETH_DATA_LEN);
+       new_jumbo = (new_mtu > ETH_DATA_LEN);
+
+       dev->mtu = new_mtu;
+
+       /* Nothing more to do if the device is down or the buffer
+        * class (jumbo vs. standard) did not change.
+        */
+       if (!netif_running(dev) ||
+           (orig_jumbo == new_jumbo))
+               return 0;
+
+       niu_full_shutdown(np, dev);
+
+       niu_free_channels(np);
+
+       niu_enable_napi(np);
+
+       err = niu_alloc_channels(np);
+       if (err)
+               return err;
+
+       spin_lock_irq(&np->lock);
+
+       err = niu_init_hw(np);
+       if (!err) {
+               init_timer(&np->timer);
+               np->timer.expires = jiffies + HZ;
+               np->timer.data = (unsigned long) np;
+               np->timer.function = niu_timer;
+
+               err = niu_enable_interrupts(np, 1);
+               if (err)
+                       niu_stop_hw(np);
+       }
+
+       spin_unlock_irq(&np->lock);
+
+       if (!err) {
+               netif_tx_start_all_queues(dev);
+               if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
+                       netif_carrier_on(dev);
+
+               add_timer(&np->timer);
+       }
+
+       return err;
+}
+
+/* ethtool get_drvinfo: report driver name/version, VPD firmware
+ * revision, and (for PCI devices) the bus address.
+ * NOTE(review): strcpy/sprintf into the fixed-size ethtool_drvinfo
+ * fields is unbounded; the sources here are driver constants and VPD
+ * numbers, presumably short enough — consider strlcpy/snprintf.
+ */
+static void niu_get_drvinfo(struct net_device *dev,
+                           struct ethtool_drvinfo *info)
+{
+       struct niu *np = netdev_priv(dev);
+       struct niu_vpd *vpd = &np->vpd;
+
+       strcpy(info->driver, DRV_MODULE_NAME);
+       strcpy(info->version, DRV_MODULE_VERSION);
+       sprintf(info->fw_version, "%d.%d",
+               vpd->fcode_major, vpd->fcode_minor);
+       /* PLAT_TYPE_NIU devices are not on PCI, so no bus_info for them. */
+       if (np->parent->plat_type != PLAT_TYPE_NIU)
+               strcpy(info->bus_info, pci_name(np->pdev));
+}
+
+/* ethtool get_settings: report the currently active link parameters
+ * from the driver's link_config state.
+ */
+static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct niu *np = netdev_priv(dev);
+       struct niu_link_config *lp;
+
+       lp = &np->link_config;
+
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->phy_address = np->phy_addr;
+       cmd->supported = lp->supported;
+       cmd->advertising = lp->active_advertising;
+       cmd->autoneg = lp->active_autoneg;
+       ethtool_cmd_speed_set(cmd, lp->active_speed);
+       cmd->duplex = lp->active_duplex;
+       cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
+       cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
+               XCVR_EXTERNAL : XCVR_INTERNAL;
+
+       return 0;
+}
+
+/* ethtool set_settings: store the requested link parameters and
+ * re-run link setup.
+ * NOTE(review): the requested speed/duplex/autoneg values are applied
+ * without validation against lp->supported — confirm whether bad
+ * values are rejected further down in niu_init_link().
+ */
+static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct niu *np = netdev_priv(dev);
+       struct niu_link_config *lp = &np->link_config;
+
+       lp->advertising = cmd->advertising;
+       lp->speed = ethtool_cmd_speed(cmd);
+       lp->duplex = cmd->duplex;
+       lp->autoneg = cmd->autoneg;
+       return niu_init_link(np);
+}
+
+/* ethtool get_msglevel: return the driver's message-enable bitmask. */
+static u32 niu_get_msglevel(struct net_device *dev)
+{
+       struct niu *np = netdev_priv(dev);
+       return np->msg_enable;
+}
+
+/* ethtool set_msglevel: set the driver's message-enable bitmask. */
+static void niu_set_msglevel(struct net_device *dev, u32 value)
+{
+       struct niu *np = netdev_priv(dev);
+       np->msg_enable = value;
+}
+
+/* ethtool nway_reset: restart autonegotiation by re-running link
+ * setup, but only if autoneg is actually enabled.
+ */
+static int niu_nway_reset(struct net_device *dev)
+{
+       struct niu *np = netdev_priv(dev);
+
+       if (np->link_config.autoneg)
+               return niu_init_link(np);
+
+       return 0;
+}
+
+/* ethtool get_eeprom_len: size in bytes of the SPROM/EEPROM. */
+static int niu_get_eeprom_len(struct net_device *dev)
+{
+       struct niu *np = netdev_priv(dev);
+
+       return np->eeprom_len;
+}
+
+/* ethtool get_eeprom: read [offset, offset+len) out of the EEPROM via
+ * the 32-bit ESPC_NCR window registers.  Handles an unaligned head,
+ * whole words, then a partial tail.
+ */
+static int niu_get_eeprom(struct net_device *dev,
+                         struct ethtool_eeprom *eeprom, u8 *data)
+{
+       struct niu *np = netdev_priv(dev);
+       u32 offset, len, val;
+
+       offset = eeprom->offset;
+       len = eeprom->len;
+
+       /* Reject wrap-around and out-of-range requests; clamp requests
+        * that run past the end of the EEPROM.
+        */
+       if (offset + len < offset)
+               return -EINVAL;
+       if (offset >= np->eeprom_len)
+               return -EINVAL;
+       if (offset + len > np->eeprom_len)
+               len = eeprom->len = np->eeprom_len - offset;
+
+       /* Unaligned head: copy the tail bytes of the containing word. */
+       if (offset & 3) {
+               u32 b_offset, b_count;
+
+               b_offset = offset & 3;
+               b_count = 4 - b_offset;
+               if (b_count > len)
+                       b_count = len;
+
+               val = nr64(ESPC_NCR((offset - b_offset) / 4));
+               memcpy(data, ((char *)&val) + b_offset, b_count);
+               data += b_count;
+               len -= b_count;
+               offset += b_count;
+       }
+       /* Aligned middle: whole 32-bit words. */
+       while (len >= 4) {
+               val = nr64(ESPC_NCR(offset / 4));
+               memcpy(data, &val, 4);
+               data += 4;
+               len -= 4;
+               offset += 4;
+       }
+       /* Partial tail word. */
+       if (len) {
+               val = nr64(ESPC_NCR(offset / 4));
+               memcpy(data, &val, len);
+       }
+       return 0;
+}
+
+/* Translate an ethtool flow type into the IP protocol number that the
+ * TCAM should match on.  Unrecognized flow types map to protocol 0
+ * (i.e. no specific L4 protocol).
+ */
+static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
+{
+       if (flow_type == TCP_V4_FLOW || flow_type == TCP_V6_FLOW)
+               *pid = IPPROTO_TCP;
+       else if (flow_type == UDP_V4_FLOW || flow_type == UDP_V6_FLOW)
+               *pid = IPPROTO_UDP;
+       else if (flow_type == SCTP_V4_FLOW || flow_type == SCTP_V6_FLOW)
+               *pid = IPPROTO_SCTP;
+       else if (flow_type == AH_V4_FLOW || flow_type == AH_V6_FLOW)
+               *pid = IPPROTO_AH;
+       else if (flow_type == ESP_V4_FLOW || flow_type == ESP_V6_FLOW)
+               *pid = IPPROTO_ESP;
+       else
+               *pid = 0;
+}
+
+/* Translate a NIU hardware class code into an ethtool flow type.
+ * Returns 1 and stores the flow type on success, 0 if the class code
+ * is not recognized.  All four user-programmable classes report as
+ * IP_USER_FLOW.
+ */
+static int niu_class_to_ethflow(u64 class, int *flow_type)
+{
+       static const struct {
+               u64 class;
+               int flow;
+       } map[] = {
+               { CLASS_CODE_TCP_IPV4,          TCP_V4_FLOW },
+               { CLASS_CODE_UDP_IPV4,          UDP_V4_FLOW },
+               { CLASS_CODE_AH_ESP_IPV4,       AH_V4_FLOW },
+               { CLASS_CODE_SCTP_IPV4,         SCTP_V4_FLOW },
+               { CLASS_CODE_TCP_IPV6,          TCP_V6_FLOW },
+               { CLASS_CODE_UDP_IPV6,          UDP_V6_FLOW },
+               { CLASS_CODE_AH_ESP_IPV6,       AH_V6_FLOW },
+               { CLASS_CODE_SCTP_IPV6,         SCTP_V6_FLOW },
+               { CLASS_CODE_USER_PROG1,        IP_USER_FLOW },
+               { CLASS_CODE_USER_PROG2,        IP_USER_FLOW },
+               { CLASS_CODE_USER_PROG3,        IP_USER_FLOW },
+               { CLASS_CODE_USER_PROG4,        IP_USER_FLOW },
+       };
+       int i;
+
+       for (i = 0; i < (int) (sizeof(map) / sizeof(map[0])); i++) {
+               if (map[i].class == class) {
+                       *flow_type = map[i].flow;
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+/* Translate an ethtool flow type into a NIU hardware class code.
+ * Returns 1 and stores the class on success, 0 if the flow type has
+ * no corresponding class.  AH, ESP and the combined AH_ESP flow types
+ * all share one class per IP version.
+ */
+static int niu_ethflow_to_class(int flow_type, u64 *class)
+{
+       static const struct {
+               int flow;
+               u64 class;
+       } map[] = {
+               { TCP_V4_FLOW,          CLASS_CODE_TCP_IPV4 },
+               { UDP_V4_FLOW,          CLASS_CODE_UDP_IPV4 },
+               { AH_ESP_V4_FLOW,       CLASS_CODE_AH_ESP_IPV4 },
+               { AH_V4_FLOW,           CLASS_CODE_AH_ESP_IPV4 },
+               { ESP_V4_FLOW,          CLASS_CODE_AH_ESP_IPV4 },
+               { SCTP_V4_FLOW,         CLASS_CODE_SCTP_IPV4 },
+               { TCP_V6_FLOW,          CLASS_CODE_TCP_IPV6 },
+               { UDP_V6_FLOW,          CLASS_CODE_UDP_IPV6 },
+               { AH_ESP_V6_FLOW,       CLASS_CODE_AH_ESP_IPV6 },
+               { AH_V6_FLOW,           CLASS_CODE_AH_ESP_IPV6 },
+               { ESP_V6_FLOW,          CLASS_CODE_AH_ESP_IPV6 },
+               { SCTP_V6_FLOW,         CLASS_CODE_SCTP_IPV6 },
+       };
+       int i;
+
+       for (i = 0; i < (int) (sizeof(map) / sizeof(map[0])); i++) {
+               if (map[i].flow == flow_type) {
+                       *class = map[i].class;
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+/* Convert a NIU FLOW_KEY_* bitmask into the equivalent ethtool RXH_*
+ * hash-field bitmask.
+ */
+static u64 niu_flowkey_to_ethflow(u64 flow_key)
+{
+       static const struct {
+               u64 fk_bit;
+               u64 eth_bit;
+       } bits[] = {
+               { FLOW_KEY_L2DA,        RXH_L2DA },
+               { FLOW_KEY_VLAN,        RXH_VLAN },
+               { FLOW_KEY_IPSA,        RXH_IP_SRC },
+               { FLOW_KEY_IPDA,        RXH_IP_DST },
+               { FLOW_KEY_PROTO,       RXH_L3_PROTO },
+               { FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT, RXH_L4_B_0_1 },
+               { FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT, RXH_L4_B_2_3 },
+       };
+       u64 ethflow = 0;
+       int i;
+
+       for (i = 0; i < (int) (sizeof(bits) / sizeof(bits[0])); i++)
+               if (flow_key & bits[i].fk_bit)
+                       ethflow |= bits[i].eth_bit;
+
+       return ethflow;
+}
+
+/* Convert an ethtool RXH_* hash-field bitmask into the equivalent NIU
+ * FLOW_KEY_* bitmask.  Always succeeds (returns 1); unknown RXH bits
+ * are simply ignored.
+ */
+static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
+{
+       static const struct {
+               u64 eth_bit;
+               u64 fk_bit;
+       } bits[] = {
+               { RXH_L2DA,     FLOW_KEY_L2DA },
+               { RXH_VLAN,     FLOW_KEY_VLAN },
+               { RXH_IP_SRC,   FLOW_KEY_IPSA },
+               { RXH_IP_DST,   FLOW_KEY_IPDA },
+               { RXH_L3_PROTO, FLOW_KEY_PROTO },
+               { RXH_L4_B_0_1, FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT },
+               { RXH_L4_B_2_3, FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT },
+       };
+       u64 key = 0;
+       int i;
+
+       for (i = 0; i < (int) (sizeof(bits) / sizeof(bits[0])); i++)
+               if (ethflow & bits[i].eth_bit)
+                       key |= bits[i].fk_bit;
+
+       *flow_key = key;
+
+       return 1;
+}
+
+/* ETHTOOL_GRXFH: report the RX hash fields configured for the class
+ * corresponding to nfc->flow_type, or RXH_DISCARD if that class'
+ * TCAM key has the discard bit set.
+ */
+static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+{
+       u64 class;
+
+       nfc->data = 0;
+
+       if (!niu_ethflow_to_class(nfc->flow_type, &class))
+               return -EINVAL;
+
+       if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
+           TCAM_KEY_DISC)
+               nfc->data = RXH_DISCARD;
+       else
+               nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
+                                                     CLASS_CODE_USER_PROG1]);
+       return 0;
+}
+
+/* Decode an IPv4 TCAM entry (keys and masks) into the ethtool flow
+ * spec format: addresses and TOS always, then ports, SPI, or raw
+ * l4_4_bytes/proto depending on fsp->flow_type.
+ */
+static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
+                                       struct ethtool_rx_flow_spec *fsp)
+{
+       u32 tmp;
+       u16 prt;
+
+       tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
+       fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
+
+       tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
+       fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
+
+       tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
+       fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
+
+       tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
+       fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
+
+       fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
+               TCAM_V4KEY2_TOS_SHIFT;
+       fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
+               TCAM_V4KEY2_TOS_SHIFT;
+
+       switch (fsp->flow_type) {
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+       case SCTP_V4_FLOW:
+               /* PORT_SPI packs source port in the high 16 bits and
+                * destination port in the low 16 bits.
+                */
+               prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+                       TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
+               fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
+
+               prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+                       TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
+               fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
+
+               prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+                       TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
+               fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
+
+               prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+                        TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
+               fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
+               break;
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+               /* For AH/ESP the PORT_SPI field holds the 32-bit SPI. */
+               tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+                       TCAM_V4KEY2_PORT_SPI_SHIFT;
+               fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
+
+               tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+                       TCAM_V4KEY2_PORT_SPI_SHIFT;
+               fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
+               break;
+       case IP_USER_FLOW:
+               /* User classes expose the raw 4 bytes plus the protocol. */
+               tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
+                       TCAM_V4KEY2_PORT_SPI_SHIFT;
+               fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
+
+               tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
+                       TCAM_V4KEY2_PORT_SPI_SHIFT;
+               fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
+
+               fsp->h_u.usr_ip4_spec.proto =
+                       (tp->key[2] & TCAM_V4KEY2_PROTO) >>
+                       TCAM_V4KEY2_PROTO_SHIFT;
+               fsp->m_u.usr_ip4_spec.proto =
+                       (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
+                       TCAM_V4KEY2_PROTO_SHIFT;
+
+               fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+               break;
+       default:
+               break;
+       }
+}
+
+/* ETHTOOL_GRXCLSRULE: translate the TCAM entry at nfc->fs.location
+ * into an ethtool_rx_flow_spec.  Returns 0 on success or a negative
+ * errno (0 is what the ethtool core expects; positive values were
+ * never meaningful to it).
+ */
+static int niu_get_ethtool_tcam_entry(struct niu *np,
+                                     struct ethtool_rxnfc *nfc)
+{
+       struct niu_parent *parent = np->parent;
+       struct niu_tcam_entry *tp;
+       struct ethtool_rx_flow_spec *fsp = &nfc->fs;
+       u16 idx;
+       u64 class;
+       int ret = 0;
+
+       idx = tcam_get_index(np, (u16)nfc->fs.location);
+
+       tp = &parent->tcam[idx];
+       if (!tp->valid) {
+               netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
+                           parent->index, (u16)nfc->fs.location, idx);
+               return -EINVAL;
+       }
+
+       /* fill the flow spec entry */
+       class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
+               TCAM_V4KEY0_CLASS_CODE_SHIFT;
+       /* niu_class_to_ethflow() returns 0 on failure and 1 on success,
+        * never a negative value, so a "ret < 0" test here could never
+        * detect an unrecognized class code and fsp->flow_type would be
+        * left holding whatever userspace passed in.  Check for failure
+        * explicitly.
+        */
+       if (!niu_class_to_ethflow(class, &fsp->flow_type)) {
+               netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
+                           parent->index);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* An AH_ESP class is reported as ESP when the stored protocol
+        * key says so, otherwise as AH.
+        */
+       if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
+               u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
+                       TCAM_V4KEY2_PROTO_SHIFT;
+               if (proto == IPPROTO_ESP) {
+                       if (fsp->flow_type == AH_V4_FLOW)
+                               fsp->flow_type = ESP_V4_FLOW;
+                       else
+                               fsp->flow_type = ESP_V6_FLOW;
+               }
+       }
+
+       switch (fsp->flow_type) {
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+       case SCTP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+               niu_get_ip4fs_from_tcam_key(tp, fsp);
+               break;
+       case TCP_V6_FLOW:
+       case UDP_V6_FLOW:
+       case SCTP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+               /* Not yet implemented */
+               ret = -EINVAL;
+               break;
+       case IP_USER_FLOW:
+               niu_get_ip4fs_from_tcam_key(tp, fsp);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       if (ret < 0)
+               goto out;
+
+       if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
+               fsp->ring_cookie = RX_CLS_FLOW_DISC;
+       else
+               fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
+                       TCAM_ASSOCDATA_OFFSET_SHIFT;
+
+       /* put the tcam size here */
+       nfc->data = tcam_get_size(np);
+out:
+       return ret;
+}
+
+/* ETHTOOL_GRXCLSRLALL: record the locations of all valid TCAM entries
+ * into rule_locs, up to nfc->rule_cnt of them.  Returns -EMSGSIZE if
+ * the caller's array is too small.
+ */
+static int niu_get_ethtool_tcam_all(struct niu *np,
+                                   struct ethtool_rxnfc *nfc,
+                                   u32 *rule_locs)
+{
+       struct niu_parent *parent = np->parent;
+       struct niu_tcam_entry *tp;
+       int i, idx, cnt;
+       unsigned long flags;
+       int ret = 0;
+
+       /* put the tcam size here */
+       nfc->data = tcam_get_size(np);
+
+       niu_lock_parent(np, flags);
+       for (cnt = 0, i = 0; i < nfc->data; i++) {
+               idx = tcam_get_index(np, i);
+               tp = &parent->tcam[idx];
+               if (!tp->valid)
+                       continue;
+               if (cnt == nfc->rule_cnt) {
+                       ret = -EMSGSIZE;
+                       break;
+               }
+               rule_locs[cnt] = i;
+               cnt++;
+       }
+       niu_unlock_parent(np, flags);
+
+       return ret;
+}
+
+/* ethtool get_rxnfc dispatcher: route each ETHTOOL_GRX* sub-command
+ * to its handler.
+ */
+static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+                      void *rule_locs)
+{
+       struct niu *np = netdev_priv(dev);
+       int ret = 0;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_GRXFH:
+               ret = niu_get_hash_opts(np, cmd);
+               break;
+       case ETHTOOL_GRXRINGS:
+               cmd->data = np->num_rx_rings;
+               break;
+       case ETHTOOL_GRXCLSRLCNT:
+               cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
+               break;
+       case ETHTOOL_GRXCLSRULE:
+               ret = niu_get_ethtool_tcam_entry(np, cmd);
+               break;
+       case ETHTOOL_GRXCLSRLALL:
+               ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* ETHTOOL_SRXFH: program the RX hash fields (or discard) for the
+ * class corresponding to nfc->flow_type.  The TCAM_KEY and FLOW_KEY
+ * registers are indexed relative to CLASS_CODE_USER_PROG1.
+ */
+static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
+{
+       u64 class;
+       u64 flow_key = 0;
+       unsigned long flags;
+
+       if (!niu_ethflow_to_class(nfc->flow_type, &class))
+               return -EINVAL;
+
+       if (class < CLASS_CODE_USER_PROG1 ||
+           class > CLASS_CODE_SCTP_IPV6)
+               return -EINVAL;
+
+       if (nfc->data & RXH_DISCARD) {
+               /* Set the discard bit in the class' TCAM key. */
+               niu_lock_parent(np, flags);
+               flow_key = np->parent->tcam_key[class -
+                                              CLASS_CODE_USER_PROG1];
+               flow_key |= TCAM_KEY_DISC;
+               nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
+               np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
+               niu_unlock_parent(np, flags);
+               return 0;
+       } else {
+               /* Discard was set before, but is not set now */
+               if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
+                   TCAM_KEY_DISC) {
+                       niu_lock_parent(np, flags);
+                       flow_key = np->parent->tcam_key[class -
+                                              CLASS_CODE_USER_PROG1];
+                       flow_key &= ~TCAM_KEY_DISC;
+                       nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
+                            flow_key);
+                       np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
+                               flow_key;
+                       niu_unlock_parent(np, flags);
+               }
+       }
+
+       /* Program the new hash-field selection. */
+       if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
+               return -EINVAL;
+
+       niu_lock_parent(np, flags);
+       nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
+       np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
+       niu_unlock_parent(np, flags);
+
+       return 0;
+}
+
+/* Encode an ethtool IPv4 flow spec into TCAM key/mask words for the
+ * given class and L2 RDC table.  Inverse of
+ * niu_get_ip4fs_from_tcam_key().
+ */
+static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
+                                      struct niu_tcam_entry *tp,
+                                      int l2_rdc_tab, u64 class)
+{
+       u8 pid = 0;
+       u32 sip, dip, sipm, dipm, spi, spim;
+       u16 sport, dport, spm, dpm;
+
+       sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
+       sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
+       dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
+       dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);
+
+       tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
+       tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
+       tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
+       tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;
+
+       /* Key word 3 holds source address (high) and destination
+        * address (low).
+        */
+       tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
+       tp->key[3] |= dip;
+
+       tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
+       tp->key_mask[3] |= dipm;
+
+       tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
+                      TCAM_V4KEY2_TOS_SHIFT);
+       tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
+                           TCAM_V4KEY2_TOS_SHIFT);
+       switch (fsp->flow_type) {
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+       case SCTP_V4_FLOW:
+               /* PORT_SPI packs source port high, destination port low. */
+               sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
+               spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
+               dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
+               dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
+
+               tp->key[2] |= (((u64)sport << 16) | dport);
+               tp->key_mask[2] |= (((u64)spm << 16) | dpm);
+               niu_ethflow_to_l3proto(fsp->flow_type, &pid);
+               break;
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+               /* For AH/ESP the PORT_SPI field carries the SPI. */
+               spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
+               spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);
+
+               tp->key[2] |= spi;
+               tp->key_mask[2] |= spim;
+               niu_ethflow_to_l3proto(fsp->flow_type, &pid);
+               break;
+       case IP_USER_FLOW:
+               /* User classes match raw L4 bytes and an explicit protocol. */
+               spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
+               spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);
+
+               tp->key[2] |= spi;
+               tp->key_mask[2] |= spim;
+               pid = fsp->h_u.usr_ip4_spec.proto;
+               break;
+       default:
+               break;
+       }
+
+       /* Only match on the protocol field when one was selected. */
+       tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
+       if (pid) {
+               tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
+       }
+}
+
+/* ETHTOOL_SRXCLSRLINS handler: install one flow-steering rule into the
+ * TCAM slot given by nfc->fs.location.  For IP_USER_FLOW rules a user
+ * programmable L3 class is looked up or programmed first; the class is
+ * refcounted in the shared parent state.  IPv6 flow types are rejected
+ * (not implemented).  Returns 0 or a negative errno.
+ */
+static int niu_add_ethtool_tcam_entry(struct niu *np,
+                                     struct ethtool_rxnfc *nfc)
+{
+       struct niu_parent *parent = np->parent;
+       struct niu_tcam_entry *tp;
+       struct ethtool_rx_flow_spec *fsp = &nfc->fs;
+       struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
+       int l2_rdc_table = rdc_table->first_table_num;
+       u16 idx;
+       u64 class;
+       unsigned long flags;
+       int err, ret;
+
+       ret = 0;
+
+       /* Reject locations beyond this port's TCAM partition. */
+       idx = nfc->fs.location;
+       if (idx >= tcam_get_size(np))
+               return -EINVAL;
+
+       if (fsp->flow_type == IP_USER_FLOW) {
+               int i;
+               int add_usr_cls = 0;
+               struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
+               struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
+
+               if (uspec->ip_ver != ETH_RX_NFC_IP4)
+                       return -EINVAL;
+
+               niu_lock_parent(np, flags);
+
+               /* Reuse an existing user class programmed for this IP
+                * protocol, or claim the first free slot and program it.
+                */
+               for (i = 0; i < NIU_L3_PROG_CLS; i++) {
+                       if (parent->l3_cls[i]) {
+                               if (uspec->proto == parent->l3_cls_pid[i]) {
+                                       class = parent->l3_cls[i];
+                                       parent->l3_cls_refcnt[i]++;
+                                       add_usr_cls = 1;
+                                       break;
+                               }
+                       } else {
+                               /* Program new user IP class */
+                               switch (i) {
+                               case 0:
+                                       class = CLASS_CODE_USER_PROG1;
+                                       break;
+                               case 1:
+                                       class = CLASS_CODE_USER_PROG2;
+                                       break;
+                               case 2:
+                                       class = CLASS_CODE_USER_PROG3;
+                                       break;
+                               case 3:
+                                       class = CLASS_CODE_USER_PROG4;
+                                       break;
+                               default:
+                                       break;
+                               }
+                               ret = tcam_user_ip_class_set(np, class, 0,
+                                                            uspec->proto,
+                                                            uspec->tos,
+                                                            umask->tos);
+                               if (ret)
+                                       goto out;
+
+                               ret = tcam_user_ip_class_enable(np, class, 1);
+                               if (ret)
+                                       goto out;
+                               parent->l3_cls[i] = class;
+                               parent->l3_cls_pid[i] = uspec->proto;
+                               parent->l3_cls_refcnt[i]++;
+                               add_usr_cls = 1;
+                               break;
+                       }
+               }
+               if (!add_usr_cls) {
+                       netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
+                                   parent->index, __func__, uspec->proto);
+                       ret = -EINVAL;
+                       goto out;
+               }
+               /* Parent lock is dropped here and re-taken below for the
+                * common TCAM-write path.
+                */
+               niu_unlock_parent(np, flags);
+       } else {
+               if (!niu_ethflow_to_class(fsp->flow_type, &class)) {
+                       return -EINVAL;
+               }
+       }
+
+       niu_lock_parent(np, flags);
+
+       idx = tcam_get_index(np, idx);
+       tp = &parent->tcam[idx];
+
+       memset(tp, 0, sizeof(*tp));
+
+       /* fill in the tcam key and mask */
+       switch (fsp->flow_type) {
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+       case SCTP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+               niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
+               break;
+       case TCP_V6_FLOW:
+       case UDP_V6_FLOW:
+       case SCTP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+               /* Not yet implemented */
+               netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
+                           parent->index, __func__, fsp->flow_type);
+               ret = -EINVAL;
+               goto out;
+       case IP_USER_FLOW:
+               niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
+               break;
+       default:
+               netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
+                           parent->index, __func__, fsp->flow_type);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* fill in the assoc data */
+       if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+               tp->assoc_data = TCAM_ASSOCDATA_DISC;
+       } else {
+               if (fsp->ring_cookie >= np->num_rx_rings) {
+                       netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
+                                   parent->index, __func__,
+                                   (long long)fsp->ring_cookie);
+                       ret = -EINVAL;
+                       goto out;
+               }
+               tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
+                                 (fsp->ring_cookie <<
+                                  TCAM_ASSOCDATA_OFFSET_SHIFT));
+       }
+
+       err = tcam_write(np, idx, tp->key, tp->key_mask);
+       if (err) {
+               ret = -EINVAL;
+               goto out;
+       }
+       err = tcam_assoc_write(np, idx, tp->assoc_data);
+       if (err) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* validate the entry */
+       tp->valid = 1;
+       np->clas.tcam_valid_entries++;
+out:
+       niu_unlock_parent(np, flags);
+
+       return ret;
+}
+
+/* ETHTOOL_SRXCLSRLDEL handler: remove the TCAM rule at 'loc'.  If the
+ * rule referenced a user-programmable L3 class, drop its refcount and
+ * disable/free the class when it reaches zero.  Returns 0 or a
+ * negative errno.
+ */
+static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
+{
+       struct niu_parent *parent = np->parent;
+       struct niu_tcam_entry *tp;
+       u16 idx;
+       unsigned long flags;
+       u64 class;
+       int ret = 0;
+
+       if (loc >= tcam_get_size(np))
+               return -EINVAL;
+
+       niu_lock_parent(np, flags);
+
+       idx = tcam_get_index(np, loc);
+       tp = &parent->tcam[idx];
+
+       /* if the entry is of a user defined class, then update*/
+       class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
+               TCAM_V4KEY0_CLASS_CODE_SHIFT;
+
+       if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
+               int i;
+               for (i = 0; i < NIU_L3_PROG_CLS; i++) {
+                       if (parent->l3_cls[i] == class) {
+                               parent->l3_cls_refcnt[i]--;
+                               if (!parent->l3_cls_refcnt[i]) {
+                                       /* disable class */
+                                       ret = tcam_user_ip_class_enable(np,
+                                                                       class,
+                                                                       0);
+                                       if (ret)
+                                               goto out;
+                                       parent->l3_cls[i] = 0;
+                                       parent->l3_cls_pid[i] = 0;
+                               }
+                               break;
+                       }
+               }
+               /* Class code was in the user range but not in our table:
+                * refuse rather than flush an inconsistent entry.
+                */
+               if (i == NIU_L3_PROG_CLS) {
+                       netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
+                                   parent->index, __func__,
+                                   (unsigned long long)class);
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       ret = tcam_flush(np, idx);
+       if (ret)
+               goto out;
+
+       /* invalidate the entry */
+       tp->valid = 0;
+       np->clas.tcam_valid_entries--;
+out:
+       niu_unlock_parent(np, flags);
+
+       return ret;
+}
+
+/* ethtool .set_rxnfc entry point: dispatch the sub-command to the
+ * hash-option, rule-insert, or rule-delete handler.  Unknown
+ * sub-commands yield -EINVAL.
+ */
+static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+       struct niu *np = netdev_priv(dev);
+
+       if (cmd->cmd == ETHTOOL_SRXFH)
+               return niu_set_hash_opts(np, cmd);
+       if (cmd->cmd == ETHTOOL_SRXCLSRLINS)
+               return niu_add_ethtool_tcam_entry(np, cmd);
+       if (cmd->cmd == ETHTOOL_SRXCLSRLDEL)
+               return niu_del_ethtool_tcam_entry(np, cmd->fs.location);
+
+       return -EINVAL;
+}
+
+/* ethtool stat name table for ports driven by the XMAC.  The order
+ * corresponds to the u64-by-u64 layout of struct niu_xmac_stats copied
+ * out in niu_get_ethtool_stats().
+ */
+static const struct {
+       const char string[ETH_GSTRING_LEN];
+} niu_xmac_stat_keys[] = {
+       { "tx_frames" },
+       { "tx_bytes" },
+       { "tx_fifo_errors" },
+       { "tx_overflow_errors" },
+       { "tx_max_pkt_size_errors" },
+       { "tx_underflow_errors" },
+       { "rx_local_faults" },
+       { "rx_remote_faults" },
+       { "rx_link_faults" },
+       { "rx_align_errors" },
+       { "rx_frags" },
+       { "rx_mcasts" },
+       { "rx_bcasts" },
+       { "rx_hist_cnt1" },
+       { "rx_hist_cnt2" },
+       { "rx_hist_cnt3" },
+       { "rx_hist_cnt4" },
+       { "rx_hist_cnt5" },
+       { "rx_hist_cnt6" },
+       { "rx_hist_cnt7" },
+       { "rx_octets" },
+       { "rx_code_violations" },
+       { "rx_len_errors" },
+       { "rx_crc_errors" },
+       { "rx_underflows" },
+       { "rx_overflows" },
+       { "pause_off_state" },
+       { "pause_on_state" },
+       { "pause_received" },
+};
+
+#define NUM_XMAC_STAT_KEYS     ARRAY_SIZE(niu_xmac_stat_keys)
+
+/* ethtool stat name table for ports driven by the BMAC; order
+ * corresponds to the u64 layout of struct niu_bmac_stats.
+ */
+static const struct {
+       const char string[ETH_GSTRING_LEN];
+} niu_bmac_stat_keys[] = {
+       { "tx_underflow_errors" },
+       { "tx_max_pkt_size_errors" },
+       { "tx_bytes" },
+       { "tx_frames" },
+       { "rx_overflows" },
+       { "rx_frames" },
+       { "rx_align_errors" },
+       { "rx_crc_errors" },
+       { "rx_len_errors" },
+       { "pause_off_state" },
+       { "pause_on_state" },
+       { "pause_received" },
+};
+
+#define NUM_BMAC_STAT_KEYS     ARRAY_SIZE(niu_bmac_stat_keys)
+
+/* Per-RX-channel stat names; order matches the five values written per
+ * ring in niu_get_ethtool_stats().
+ */
+static const struct {
+       const char string[ETH_GSTRING_LEN];
+} niu_rxchan_stat_keys[] = {
+       { "rx_channel" },
+       { "rx_packets" },
+       { "rx_bytes" },
+       { "rx_dropped" },
+       { "rx_errors" },
+};
+
+#define NUM_RXCHAN_STAT_KEYS   ARRAY_SIZE(niu_rxchan_stat_keys)
+
+/* Per-TX-channel stat names; order matches the four values written per
+ * ring in niu_get_ethtool_stats().
+ */
+static const struct {
+       const char string[ETH_GSTRING_LEN];
+} niu_txchan_stat_keys[] = {
+       { "tx_channel" },
+       { "tx_packets" },
+       { "tx_bytes" },
+       { "tx_errors" },
+};
+
+#define NUM_TXCHAN_STAT_KEYS   ARRAY_SIZE(niu_txchan_stat_keys)
+
+/* ethtool .get_strings: emit the ETH_SS_STATS name table — the MAC key
+ * set (XMAC or BMAC depending on port flags), then one rx key set per
+ * RX ring and one tx key set per TX ring.  Must stay in sync with
+ * niu_get_sset_count() and niu_get_ethtool_stats().
+ */
+static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+       struct niu *np = netdev_priv(dev);
+       int i;
+
+       if (stringset != ETH_SS_STATS)
+               return;
+
+       if (np->flags & NIU_FLAGS_XMAC) {
+               memcpy(data, niu_xmac_stat_keys,
+                      sizeof(niu_xmac_stat_keys));
+               data += sizeof(niu_xmac_stat_keys);
+       } else {
+               memcpy(data, niu_bmac_stat_keys,
+                      sizeof(niu_bmac_stat_keys));
+               data += sizeof(niu_bmac_stat_keys);
+       }
+       for (i = 0; i < np->num_rx_rings; i++) {
+               memcpy(data, niu_rxchan_stat_keys,
+                      sizeof(niu_rxchan_stat_keys));
+               data += sizeof(niu_rxchan_stat_keys);
+       }
+       for (i = 0; i < np->num_tx_rings; i++) {
+               memcpy(data, niu_txchan_stat_keys,
+                      sizeof(niu_txchan_stat_keys));
+               data += sizeof(niu_txchan_stat_keys);
+       }
+}
+
+/* ethtool .get_sset_count: number of ETH_SS_STATS entries — one MAC key
+ * set (XMAC or BMAC) plus the per-channel key sets.  Must agree with
+ * niu_get_strings() and niu_get_ethtool_stats().
+ */
+static int niu_get_sset_count(struct net_device *dev, int stringset)
+{
+       struct niu *np = netdev_priv(dev);
+       int count;
+
+       if (stringset != ETH_SS_STATS)
+               return -EINVAL;
+
+       if (np->flags & NIU_FLAGS_XMAC)
+               count = NUM_XMAC_STAT_KEYS;
+       else
+               count = NUM_BMAC_STAT_KEYS;
+
+       count += np->num_rx_rings * NUM_RXCHAN_STAT_KEYS;
+       count += np->num_tx_rings * NUM_TXCHAN_STAT_KEYS;
+
+       return count;
+}
+
+/* ethtool .get_ethtool_stats: fill 'data' with the MAC counters (XMAC
+ * or BMAC struct copied as an array of u64s), then 5 values per RX ring
+ * and 4 per TX ring — the same layout the key tables above describe.
+ */
+static void niu_get_ethtool_stats(struct net_device *dev,
+                                 struct ethtool_stats *stats, u64 *data)
+{
+       struct niu *np = netdev_priv(dev);
+       int i;
+
+       /* Pull fresh hardware counters into np->mac_stats first. */
+       niu_sync_mac_stats(np);
+       if (np->flags & NIU_FLAGS_XMAC) {
+               memcpy(data, &np->mac_stats.xmac,
+                      sizeof(struct niu_xmac_stats));
+               data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
+       } else {
+               memcpy(data, &np->mac_stats.bmac,
+                      sizeof(struct niu_bmac_stats));
+               data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
+       }
+       for (i = 0; i < np->num_rx_rings; i++) {
+               struct rx_ring_info *rp = &np->rx_rings[i];
+
+               niu_sync_rx_discard_stats(np, rp, 0);
+
+               data[0] = rp->rx_channel;
+               data[1] = rp->rx_packets;
+               data[2] = rp->rx_bytes;
+               data[3] = rp->rx_dropped;
+               data[4] = rp->rx_errors;
+               data += 5;
+       }
+       for (i = 0; i < np->num_tx_rings; i++) {
+               struct tx_ring_info *rp = &np->tx_rings[i];
+
+               data[0] = rp->tx_channel;
+               data[1] = rp->tx_packets;
+               data[2] = rp->tx_bytes;
+               data[3] = rp->tx_errors;
+               data += 4;
+       }
+}
+
+/* Snapshot the MAC config register that carries the LED control bit so
+ * it can be restored after an ethtool identify-blink sequence.
+ */
+static u64 niu_led_state_save(struct niu *np)
+{
+       if (!(np->flags & NIU_FLAGS_XMAC))
+               return nr64_mac(BMAC_XIF_CONFIG);
+
+       return nr64_mac(XMAC_CONFIG);
+}
+
+/* Write back a MAC config value previously captured by
+ * niu_led_state_save().
+ */
+static void niu_led_state_restore(struct niu *np, u64 val)
+{
+       if (!(np->flags & NIU_FLAGS_XMAC)) {
+               nw64_mac(BMAC_XIF_CONFIG, val);
+               return;
+       }
+
+       nw64_mac(XMAC_CONFIG, val);
+}
+
+/* Force the port LED on or off by setting/clearing the appropriate bit
+ * in the XMAC or BMAC config register (used by ethtool identify).
+ */
+static void niu_force_led(struct niu *np, int on)
+{
+       int is_xmac = !!(np->flags & NIU_FLAGS_XMAC);
+       u64 reg = is_xmac ? XMAC_CONFIG : BMAC_XIF_CONFIG;
+       u64 bit = is_xmac ? XMAC_CONFIG_FORCE_LED_ON : BMAC_XIF_CONFIG_LINK_LED;
+       u64 val = nr64_mac(reg);
+
+       if (on)
+               val |= bit;
+       else
+               val &= ~bit;
+
+       nw64_mac(reg, val);
+}
+
+/* ethtool .set_phys_id: blink the port LED for identification.  On
+ * ACTIVE we save the current LED register state and return 1 so the
+ * ethtool core drives a one-second on/off cycle via ID_ON/ID_OFF;
+ * INACTIVE restores the saved state.  Requires the interface to be up.
+ */
+static int niu_set_phys_id(struct net_device *dev,
+                          enum ethtool_phys_id_state state)
+
+{
+       struct niu *np = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EAGAIN;
+
+       switch (state) {
+       case ETHTOOL_ID_ACTIVE:
+               np->orig_led_state = niu_led_state_save(np);
+               return 1;       /* cycle on/off once per second */
+
+       case ETHTOOL_ID_ON:
+               niu_force_led(np, 1);
+               break;
+
+       case ETHTOOL_ID_OFF:
+               niu_force_led(np, 0);
+               break;
+
+       case ETHTOOL_ID_INACTIVE:
+               niu_led_state_restore(np, np->orig_led_state);
+       }
+
+       return 0;
+}
+
+/* ethtool operations table for the niu driver. */
+static const struct ethtool_ops niu_ethtool_ops = {
+       .get_drvinfo            = niu_get_drvinfo,
+       .get_link               = ethtool_op_get_link,
+       .get_msglevel           = niu_get_msglevel,
+       .set_msglevel           = niu_set_msglevel,
+       .nway_reset             = niu_nway_reset,
+       .get_eeprom_len         = niu_get_eeprom_len,
+       .get_eeprom             = niu_get_eeprom,
+       .get_settings           = niu_get_settings,
+       .set_settings           = niu_set_settings,
+       .get_strings            = niu_get_strings,
+       .get_sset_count         = niu_get_sset_count,
+       .get_ethtool_stats      = niu_get_ethtool_stats,
+       .set_phys_id            = niu_set_phys_id,
+       .get_rxnfc              = niu_get_nfc,
+       .set_rxnfc              = niu_set_nfc,
+};
+
+/* Record and (on non-N2 hardware) program the mapping of logical device
+ * number 'ldn' to logical device group 'ldg'.  On PLAT_TYPE_NIU (N2)
+ * the firmware owns the mapping, so we only validate it.  Returns 0 or
+ * -EINVAL on a bad/mismatched mapping.
+ */
+static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
+                             int ldg, int ldn)
+{
+       if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
+               return -EINVAL;
+       if (ldn < 0 || ldn > LDN_MAX)
+               return -EINVAL;
+
+       parent->ldg_map[ldn] = ldg;
+
+       if (np->parent->plat_type == PLAT_TYPE_NIU) {
+               /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
+                * the firmware, and we're not supposed to change them.
+                * Validate the mapping, because if it's wrong we probably
+                * won't get any interrupts and that's painful to debug.
+                */
+               if (nr64(LDG_NUM(ldn)) != ldg) {
+                       dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
+                               np->port, ldn, ldg,
+                               (unsigned long long) nr64(LDG_NUM(ldn)));
+                       return -EINVAL;
+               }
+       } else
+               nw64(LDG_NUM(ldn), ldg);
+
+       return 0;
+}
+
+/* Program the LDG interrupt timer resolution register; 'res' must fit
+ * the LDG_TIMER_RES_VAL field.  Returns 0 or -EINVAL.
+ */
+static int niu_set_ldg_timer_res(struct niu *np, int res)
+{
+       if (res >= 0 && res <= LDG_TIMER_RES_VAL) {
+               nw64(LDG_TIMER_RES, res);
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+/* Program the system interrupt data (SID) register for logical device
+ * group 'ldg' with the given function number and vector.  Returns 0 or
+ * -EINVAL when any field is out of range.
+ */
+static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
+{
+       if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
+               return -EINVAL;
+       if (func < 0 || func > 3)
+               return -EINVAL;
+       if (vector < 0 || vector > 0x1f)
+               return -EINVAL;
+
+       nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
+
+       return 0;
+}
+
+/* Read one byte from the on-board serial EEPROM through the ESPC PIO
+ * status register.  Returns the byte value (0-255) or a negative errno.
+ * NOTE(review): the issue+poll sequence is deliberately performed
+ * twice; presumably a quirk of the ESPC PIO interface requiring the
+ * read to be re-triggered — preserved as-is, confirm before changing.
+ */
+static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
+{
+       u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
+                                (addr << ESPC_PIO_STAT_ADDR_SHIFT));
+       int limit;
+
+       /* Reject addresses that don't fit the register's address field. */
+       if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
+               return -EINVAL;
+
+       frame = frame_base;
+       nw64(ESPC_PIO_STAT, frame);
+       limit = 64;
+       do {
+               udelay(5);
+               frame = nr64(ESPC_PIO_STAT);
+               if (frame & ESPC_PIO_STAT_READ_END)
+                       break;
+       } while (limit--);
+       if (!(frame & ESPC_PIO_STAT_READ_END)) {
+               dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
+                       (unsigned long long) frame);
+               return -ENODEV;
+       }
+
+       frame = frame_base;
+       nw64(ESPC_PIO_STAT, frame);
+       limit = 64;
+       do {
+               udelay(5);
+               frame = nr64(ESPC_PIO_STAT);
+               if (frame & ESPC_PIO_STAT_READ_END)
+                       break;
+       } while (limit--);
+       if (!(frame & ESPC_PIO_STAT_READ_END)) {
+               dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
+                       (unsigned long long) frame);
+               return -ENODEV;
+       }
+
+       frame = nr64(ESPC_PIO_STAT);
+       return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
+}
+
+/* Read a big-endian 16-bit word from the EEPROM: the byte at 'off' is
+ * the high byte.  Returns the value or a negative errno from the
+ * underlying byte read.
+ */
+static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
+{
+       int hi, lo;
+       u16 val;
+
+       hi = niu_pci_eeprom_read(np, off);
+       if (hi < 0)
+               return hi;
+
+       lo = niu_pci_eeprom_read(np, off + 1);
+       if (lo < 0)
+               return lo;
+
+       val = (hi << 8) | (lo & 0xff);
+       return val;
+}
+
+/* Read a little-endian (byte-swapped) 16-bit word from the EEPROM: the
+ * byte at 'off' is the low byte.  Returns the value or a negative errno
+ * from the underlying byte read.
+ */
+static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
+{
+       int lo, hi;
+       u16 val;
+
+       lo = niu_pci_eeprom_read(np, off);
+       if (lo < 0)
+               return lo;
+
+       hi = niu_pci_eeprom_read(np, off + 1);
+       if (hi < 0)
+               return hi;
+
+       val = (lo & 0xff) | ((hi & 0xff) << 8);
+       return val;
+}
+
+/* Copy a NUL-terminated VPD property name starting at EEPROM offset
+ * 'off' into 'namebuf'.  Returns the number of bytes consumed
+ * (including the NUL), -EINVAL if no terminator was found within
+ * namebuf_len bytes, or a negative errno on read failure.
+ */
+static int __devinit niu_pci_vpd_get_propname(struct niu *np,
+                                             u32 off,
+                                             char *namebuf,
+                                             int namebuf_len)
+{
+       int i = 0;
+
+       while (i < namebuf_len) {
+               int c = niu_pci_eeprom_read(np, off + i);
+
+               if (c < 0)
+                       return c;
+               namebuf[i++] = c;
+               if (!c)
+                       return i;
+       }
+
+       return -EINVAL;
+}
+
+/* Parse the "FCode <major>.<minor>" revision out of the VPD version
+ * string and set NIU_FLAGS_VPD_VALID when the firmware is at least
+ * NIU_VPD_MIN_MAJOR.NIU_VPD_MIN_MINOR.  Silently returns if the marker
+ * is absent.
+ */
+static void __devinit niu_vpd_parse_version(struct niu *np)
+{
+       struct niu_vpd *vpd = &np->vpd;
+       int len = strlen(vpd->version) + 1;
+       const char *s = vpd->version;
+       int i;
+
+       for (i = 0; i < len - 5; i++) {
+               if (!strncmp(s + i, "FCode ", 6))
+                       break;
+       }
+       if (i >= len - 5)
+               return;
+
+       /* Land on the space after "FCode"; sscanf's %d skips it. */
+       s += i + 5;
+       sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
+
+       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                    "VPD_SCAN: FCODE major(%d) minor(%d)\n",
+                    vpd->fcode_major, vpd->fcode_minor);
+       if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
+           (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
+            vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
+               np->flags |= NIU_FLAGS_VPD_VALID;
+}
+
+/* ESPC_PIO_EN_ENABLE must be set */
+static int __devinit niu_pci_vpd_scan_props(struct niu *np,
+                                           u32 start, u32 end)
+{
+       unsigned int found_mask = 0;
+#define FOUND_MASK_MODEL       0x00000001
+#define FOUND_MASK_BMODEL      0x00000002
+#define FOUND_MASK_VERS                0x00000004
+#define FOUND_MASK_MAC         0x00000008
+#define FOUND_MASK_NMAC                0x00000010
+#define FOUND_MASK_PHY         0x00000020
+#define FOUND_MASK_ALL         0x0000003f
+
+       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                    "VPD_SCAN: start[%x] end[%x]\n", start, end);
+       while (start < end) {
+               int len, err, prop_len;
+               char namebuf[64];
+               u8 *prop_buf;
+               int max_len;
+
+               if (found_mask == FOUND_MASK_ALL) {
+                       niu_vpd_parse_version(np);
+                       return 1;
+               }
+
+               err = niu_pci_eeprom_read(np, start + 2);
+               if (err < 0)
+                       return err;
+               len = err;
+               start += 3;
+
+               prop_len = niu_pci_eeprom_read(np, start + 4);
+               err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
+               if (err < 0)
+                       return err;
+
+               prop_buf = NULL;
+               max_len = 0;
+               if (!strcmp(namebuf, "model")) {
+                       prop_buf = np->vpd.model;
+                       max_len = NIU_VPD_MODEL_MAX;
+                       found_mask |= FOUND_MASK_MODEL;
+               } else if (!strcmp(namebuf, "board-model")) {
+                       prop_buf = np->vpd.board_model;
+                       max_len = NIU_VPD_BD_MODEL_MAX;
+                       found_mask |= FOUND_MASK_BMODEL;
+               } else if (!strcmp(namebuf, "version")) {
+                       prop_buf = np->vpd.version;
+                       max_len = NIU_VPD_VERSION_MAX;
+                       found_mask |= FOUND_MASK_VERS;
+               } else if (!strcmp(namebuf, "local-mac-address")) {
+                       prop_buf = np->vpd.local_mac;
+                       max_len = ETH_ALEN;
+                       found_mask |= FOUND_MASK_MAC;
+               } else if (!strcmp(namebuf, "num-mac-addresses")) {
+                       prop_buf = &np->vpd.mac_num;
+                       max_len = 1;
+                       found_mask |= FOUND_MASK_NMAC;
+               } else if (!strcmp(namebuf, "phy-type")) {
+                       prop_buf = np->vpd.phy_type;
+                       max_len = NIU_VPD_PHY_TYPE_MAX;
+                       found_mask |= FOUND_MASK_PHY;
+               }
+
+               if (max_len && prop_len > max_len) {
+                       dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
+                       return -EINVAL;
+               }
+
+               if (prop_buf) {
+                       u32 off = start + 5 + err;
+                       int i;
+
+                       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                                    "VPD_SCAN: Reading in property [%s] len[%d]\n",
+                                    namebuf, prop_len);
+                       for (i = 0; i < prop_len; i++)
+                               *prop_buf++ = niu_pci_eeprom_read(np, off + i);
+               }
+
+               start += len;
+       }
+
+       return 0;
+}
+
+/* ESPC_PIO_EN_ENABLE must be set */
+/* Walk the chain of VPD resource blocks starting at 'start' and feed
+ * each 0x90 (VPD-R) block to niu_pci_vpd_scan_props(); stops at the
+ * first non-0x90 tag, a read error, or once all properties are found.
+ */
+static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
+{
+       u32 offset;
+       int err;
+
+       err = niu_pci_eeprom_read16_swp(np, start + 1);
+       if (err < 0)
+               return;
+
+       offset = err + 3;
+
+       while (start + offset < ESPC_EEPROM_SIZE) {
+               u32 here = start + offset;
+               u32 end;
+
+               /* A negative read error also fails this tag check. */
+               err = niu_pci_eeprom_read(np, here);
+               if (err != 0x90)
+                       return;
+
+               err = niu_pci_eeprom_read16_swp(np, here + 1);
+               if (err < 0)
+                       return;
+
+               here = start + offset + 3;
+               end = start + offset + err;
+
+               offset += err;
+
+               err = niu_pci_vpd_scan_props(np, here, end);
+               if (err < 0 || err == 1)
+                       return;
+       }
+}
+
+/* ESPC_PIO_EN_ENABLE must be set */
+/* Walk the expansion-ROM image chain in the EEPROM looking for the OBP
+ * image and return the offset of its VPD area (0x82 descriptor), or 0
+ * if none is found.  All failure paths return 0 so the caller can test
+ * for a non-zero offset.
+ *
+ * Fix: one error path used "return err" — returning a negative errno
+ * through the u32 return type would hand the caller a huge bogus
+ * "valid" offset; it now returns 0 like every other failure path.
+ */
+static u32 __devinit niu_pci_vpd_offset(struct niu *np)
+{
+       u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
+       int err;
+
+       while (start < end) {
+               ret = start;
+
+               /* ROM header signature?  */
+               err = niu_pci_eeprom_read16(np, start +  0);
+               if (err != 0x55aa)
+                       return 0;
+
+               /* Apply offset to PCI data structure.  */
+               err = niu_pci_eeprom_read16(np, start + 23);
+               if (err < 0)
+                       return 0;
+               start += err;
+
+               /* Check for "PCIR" signature.  */
+               err = niu_pci_eeprom_read16(np, start +  0);
+               if (err != 0x5043)
+                       return 0;
+               err = niu_pci_eeprom_read16(np, start +  2);
+               if (err != 0x4952)
+                       return 0;
+
+               /* Check for OBP image type.  */
+               err = niu_pci_eeprom_read(np, start + 20);
+               if (err < 0)
+                       return 0;
+               if (err != 0x01) {
+                       err = niu_pci_eeprom_read(np, ret + 2);
+                       if (err < 0)
+                               return 0;
+
+                       /* Not the OBP image: skip ahead by its length
+                        * (in 512-byte units) and try the next one.
+                        */
+                       start = ret + (err * 512);
+                       continue;
+               }
+
+               err = niu_pci_eeprom_read16_swp(np, start + 8);
+               if (err < 0)
+                       return 0;
+               ret += err;
+
+               err = niu_pci_eeprom_read(np, ret + 0);
+               if (err != 0x82)
+                       return 0;
+
+               return ret;
+       }
+
+       return 0;
+}
+
+/* Decode the VPD "phy-type" property string into the NIU_FLAGS_10G /
+ * NIU_FLAGS_FIBER / NIU_FLAGS_XCVR_SERDES flag bits and the MAC
+ * transceiver mode.  Returns 0 on success, -EINVAL for an unknown
+ * phy-type string.
+ */
+static int __devinit niu_phy_type_prop_decode(struct niu *np,
+                                             const char *phy_prop)
+{
+       if (!strcmp(phy_prop, "mif")) {
+               /* 1G copper, MII */
+               np->flags &= ~(NIU_FLAGS_FIBER |
+                              NIU_FLAGS_10G);
+               np->mac_xcvr = MAC_XCVR_MII;
+       } else if (!strcmp(phy_prop, "xgf")) {
+               /* 10G fiber, XPCS */
+               np->flags |= (NIU_FLAGS_10G |
+                             NIU_FLAGS_FIBER);
+               np->mac_xcvr = MAC_XCVR_XPCS;
+       } else if (!strcmp(phy_prop, "pcs")) {
+               /* 1G fiber, PCS */
+               np->flags &= ~NIU_FLAGS_10G;
+               np->flags |= NIU_FLAGS_FIBER;
+               np->mac_xcvr = MAC_XCVR_PCS;
+       } else if (!strcmp(phy_prop, "xgc")) {
+               /* 10G copper, XPCS */
+               np->flags |= NIU_FLAGS_10G;
+               np->flags &= ~NIU_FLAGS_FIBER;
+               np->mac_xcvr = MAC_XCVR_XPCS;
+       } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
+               /* 10G Serdes or 1G Serdes, default to 10G */
+               np->flags |= NIU_FLAGS_10G;
+               np->flags &= ~NIU_FLAGS_FIBER;
+               np->flags |= NIU_FLAGS_XCVR_SERDES;
+               np->mac_xcvr = MAC_XCVR_XPCS;
+       } else {
+               return -EINVAL;
+       }
+       return 0;
+}
+
+/* Map the VPD model string to the board's port count: 4 for the QGC /
+ * Maramba / Kimi / Alonso boards, 2 for the 2XGF / Foxxy variants, 0
+ * when the model is not recognized.
+ */
+static int niu_pci_vpd_get_nports(struct niu *np)
+{
+       static const char * const four_port_models[] = {
+               NIU_QGC_LP_MDL_STR,
+               NIU_QGC_PEM_MDL_STR,
+               NIU_MARAMBA_MDL_STR,
+               NIU_KIMI_MDL_STR,
+               NIU_ALONSO_MDL_STR,
+       };
+       static const char * const two_port_models[] = {
+               NIU_2XGF_LP_MDL_STR,
+               NIU_2XGF_PEM_MDL_STR,
+               NIU_FOXXY_MDL_STR,
+               NIU_2XGF_MRVL_MDL_STR,
+       };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(four_port_models); i++)
+               if (!strcmp(np->vpd.model, four_port_models[i]))
+                       return 4;
+
+       for (i = 0; i < ARRAY_SIZE(two_port_models); i++)
+               if (!strcmp(np->vpd.model, two_port_models[i]))
+                       return 2;
+
+       return 0;
+}
+
+/* Validate the VPD contents gathered by niu_pci_vpd_scan_props(): check
+ * the MAC address, derive the port flags/transceiver mode from the
+ * model (or the phy-type property), and install the per-port MAC
+ * address.  On any failure NIU_FLAGS_VPD_VALID is cleared so the caller
+ * falls back to the SPROM.
+ */
+static void __devinit niu_pci_vpd_validate(struct niu *np)
+{
+       struct net_device *dev = np->dev;
+       struct niu_vpd *vpd = &np->vpd;
+       u8 val8;
+
+       if (!is_valid_ether_addr(&vpd->local_mac[0])) {
+               dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
+
+               np->flags &= ~NIU_FLAGS_VPD_VALID;
+               return;
+       }
+
+       if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
+           !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
+               np->flags |= NIU_FLAGS_10G;
+               np->flags &= ~NIU_FLAGS_FIBER;
+               np->flags |= NIU_FLAGS_XCVR_SERDES;
+               np->mac_xcvr = MAC_XCVR_PCS;
+               /* On these boards ports 2/3 are 1G fiber. */
+               if (np->port > 1) {
+                       np->flags |= NIU_FLAGS_FIBER;
+                       np->flags &= ~NIU_FLAGS_10G;
+               }
+               if (np->flags & NIU_FLAGS_10G)
+                       np->mac_xcvr = MAC_XCVR_XPCS;
+       } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
+               np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
+                             NIU_FLAGS_HOTPLUG_PHY);
+       } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
+               dev_err(np->device, "Illegal phy string [%s]\n",
+                       np->vpd.phy_type);
+               dev_err(np->device, "Falling back to SPROM\n");
+               np->flags &= ~NIU_FLAGS_VPD_VALID;
+               return;
+       }
+
+       memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);
+
+       /* Per-port MAC: add the port number to the last address byte,
+        * carrying into byte 4 on overflow.
+        */
+       val8 = dev->perm_addr[5];
+       dev->perm_addr[5] += np->port;
+       if (dev->perm_addr[5] < val8)
+               dev->perm_addr[4]++;
+
+       memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+}
+
+static int __devinit niu_pci_probe_sprom(struct niu *np)
+{
+       struct net_device *dev = np->dev;
+       int len, i;
+       u64 val, sum;
+       u8 val8;
+
+       val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
+       val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
+       len = val / 4;
+
+       np->eeprom_len = len;
+
+       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                    "SPROM: Image size %llu\n", (unsigned long long)val);
+
+       sum = 0;
+       for (i = 0; i < len; i++) {
+               val = nr64(ESPC_NCR(i));
+               sum += (val >>  0) & 0xff;
+               sum += (val >>  8) & 0xff;
+               sum += (val >> 16) & 0xff;
+               sum += (val >> 24) & 0xff;
+       }
+       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                    "SPROM: Checksum %x\n", (int)(sum & 0xff));
+       if ((sum & 0xff) != 0xab) {
+               dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
+               return -EINVAL;
+       }
+
+       val = nr64(ESPC_PHY_TYPE);
+       switch (np->port) {
+       case 0:
+               val8 = (val & ESPC_PHY_TYPE_PORT0) >>
+                       ESPC_PHY_TYPE_PORT0_SHIFT;
+               break;
+       case 1:
+               val8 = (val & ESPC_PHY_TYPE_PORT1) >>
+                       ESPC_PHY_TYPE_PORT1_SHIFT;
+               break;
+       case 2:
+               val8 = (val & ESPC_PHY_TYPE_PORT2) >>
+                       ESPC_PHY_TYPE_PORT2_SHIFT;
+               break;
+       case 3:
+               val8 = (val & ESPC_PHY_TYPE_PORT3) >>
+                       ESPC_PHY_TYPE_PORT3_SHIFT;
+               break;
+       default:
+               dev_err(np->device, "Bogus port number %u\n",
+                       np->port);
+               return -EINVAL;
+       }
+       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                    "SPROM: PHY type %x\n", val8);
+
+       switch (val8) {
+       case ESPC_PHY_TYPE_1G_COPPER:
+               /* 1G copper, MII */
+               np->flags &= ~(NIU_FLAGS_FIBER |
+                              NIU_FLAGS_10G);
+               np->mac_xcvr = MAC_XCVR_MII;
+               break;
+
+       case ESPC_PHY_TYPE_1G_FIBER:
+               /* 1G fiber, PCS */
+               np->flags &= ~NIU_FLAGS_10G;
+               np->flags |= NIU_FLAGS_FIBER;
+               np->mac_xcvr = MAC_XCVR_PCS;
+               break;
+
+       case ESPC_PHY_TYPE_10G_COPPER:
+               /* 10G copper, XPCS */
+               np->flags |= NIU_FLAGS_10G;
+               np->flags &= ~NIU_FLAGS_FIBER;
+               np->mac_xcvr = MAC_XCVR_XPCS;
+               break;
+
+       case ESPC_PHY_TYPE_10G_FIBER:
+               /* 10G fiber, XPCS */
+               np->flags |= (NIU_FLAGS_10G |
+                             NIU_FLAGS_FIBER);
+               np->mac_xcvr = MAC_XCVR_XPCS;
+               break;
+
+       default:
+               dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
+               return -EINVAL;
+       }
+
+       val = nr64(ESPC_MAC_ADDR0);
+       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                    "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
+       dev->perm_addr[0] = (val >>  0) & 0xff;
+       dev->perm_addr[1] = (val >>  8) & 0xff;
+       dev->perm_addr[2] = (val >> 16) & 0xff;
+       dev->perm_addr[3] = (val >> 24) & 0xff;
+
+       val = nr64(ESPC_MAC_ADDR1);
+       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                    "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
+       dev->perm_addr[4] = (val >>  0) & 0xff;
+       dev->perm_addr[5] = (val >>  8) & 0xff;
+
+       if (!is_valid_ether_addr(&dev->perm_addr[0])) {
+               dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
+                       dev->perm_addr);
+               return -EINVAL;
+       }
+
+       val8 = dev->perm_addr[5];
+       dev->perm_addr[5] += np->port;
+       if (dev->perm_addr[5] < val8)
+               dev->perm_addr[4]++;
+
+       memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+
+       val = nr64(ESPC_MOD_STR_LEN);
+       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                    "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
+       if (val >= 8 * 4)
+               return -EINVAL;
+
+       for (i = 0; i < val; i += 4) {
+               u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));
+
+               np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
+               np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
+               np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
+               np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
+       }
+       np->vpd.model[val] = '\0';
+
+       val = nr64(ESPC_BD_MOD_STR_LEN);
+       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                    "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
+       if (val >= 4 * 4)
+               return -EINVAL;
+
+       for (i = 0; i < val; i += 4) {
+               u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));
+
+               np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
+               np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
+               np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
+               np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
+       }
+       np->vpd.board_model[val] = '\0';
+
+       np->vpd.mac_num =
+               nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
+       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                    "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
+
+       return 0;
+}
+
+/* Determine parent->num_ports if it is not yet known and verify that
+ * this device's port index is within range.  Returns 0 or -ENODEV.
+ */
+static int __devinit niu_get_and_validate_port(struct niu *np)
+{
+       struct niu_parent *parent = np->parent;
+
+       /* Ports 0 and 1 are serviced by the XMAC, the others by the BMAC. */
+       if (np->port <= 1)
+               np->flags |= NIU_FLAGS_XMAC;
+
+       if (!parent->num_ports) {
+               if (parent->plat_type == PLAT_TYPE_NIU) {
+                       parent->num_ports = 2;
+               } else {
+                       /* Prefer the PCI VPD image, then SPROM, then a
+                        * hard-coded default, in that order.
+                        */
+                       parent->num_ports = niu_pci_vpd_get_nports(np);
+                       if (!parent->num_ports) {
+                               /* Fall back to SPROM as last resort.
+                                * This will fail on most cards.
+                                */
+                               parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
+                                       ESPC_NUM_PORTS_MACS_VAL;
+
+                               /* All of the current probing methods fail on
+                                * Maramba on-board parts.
+                                */
+                               if (!parent->num_ports)
+                                       parent->num_ports = 4;
+                       }
+               }
+       }
+
+       if (np->port >= parent->num_ports)
+               return -ENODEV;
+
+       return 0;
+}
+
+/* Record one discovered PHY in the probe table @p.  A negative dev_id
+ * means the MDIO/MII ID read failed (no device at that port) and is
+ * silently skipped, as are PHY IDs this driver does not recognize.
+ * Returns 0, or -EINVAL if the per-type table overflows.
+ */
+static int __devinit phy_record(struct niu_parent *parent,
+                               struct phy_probe_info *p,
+                               int dev_id_1, int dev_id_2, u8 phy_port,
+                               int type)
+{
+       u32 id = (dev_id_1 << 16) | dev_id_2;
+       u8 idx;
+
+       if (dev_id_1 < 0 || dev_id_2 < 0)
+               return 0;
+       if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
+               /* Only the 10G PHY chips this driver supports. */
+               if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
+                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
+                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
+                       return 0;
+       } else {
+               /* 1G/MII: only the BCM5464R is supported. */
+               if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
+                       return 0;
+       }
+
+       pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
+               parent->index, id,
+               type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
+               type == PHY_TYPE_PCS ? "PCS" : "MII",
+               phy_port);
+
+       if (p->cur[type] >= NIU_MAX_PORTS) {
+               pr_err("Too many PHY ports\n");
+               return -EINVAL;
+       }
+       idx = p->cur[type];
+       p->phy_id[type][idx] = id;
+       p->phy_port[type][idx] = phy_port;
+       p->cur[type] = idx + 1;
+       return 0;
+}
+
+/* Return 1 if any recorded PMA/PMD or PCS PHY sits at @port, else 0. */
+static int __devinit port_has_10g(struct phy_probe_info *p, int port)
+{
+       static const int types[] = { PHY_TYPE_PMA_PMD, PHY_TYPE_PCS };
+       int t, idx;
+
+       for (t = 0; t < 2; t++) {
+               int type = types[t];
+
+               for (idx = 0; idx < p->cur[type]; idx++)
+                       if (p->phy_port[type][idx] == port)
+                               return 1;
+       }
+
+       return 0;
+}
+
+/* Count 10G-capable external ports (8-31) and report the lowest such
+ * port number in *lowest (32 when none were found).
+ */
+static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
+{
+       int cnt = 0;
+       int port;
+
+       *lowest = 32;
+       for (port = 8; port < 32; port++) {
+               if (!port_has_10g(p, port))
+                       continue;
+               if (cnt == 0)
+                       *lowest = port;
+               cnt++;
+       }
+
+       return cnt;
+}
+
+/* Report how many 1G (MII) PHYs were recorded; *lowest receives the
+ * first MII phy_port seen, or 32 when there are none.
+ */
+static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
+{
+       int num = p->cur[PHY_TYPE_MII];
+
+       *lowest = num ? p->phy_port[PHY_TYPE_MII][0] : 32;
+       return num;
+}
+
+/* On the N2 NIU every port gets an equal share of the 16 RX and
+ * 16 TX DMA channels.
+ */
+static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
+{
+       int num_ports = parent->num_ports;
+       int i;
+
+       for (i = 0; i < num_ports; i++) {
+               parent->rxchan_per_port[i] = (16 / num_ports);
+               parent->txchan_per_port[i] = (16 / num_ports);
+
+               pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
+                       parent->index, i,
+                       parent->rxchan_per_port[i],
+                       parent->txchan_per_port[i]);
+       }
+}
+
+/* Split the RX/TX DMA channels across the ports of a mixed 10G/1G
+ * configuration: each 1G port gets a small fixed share and the 10G
+ * ports divide the remainder.  If the totals would exceed the hardware
+ * channel counts the layout collapses to one channel per port; an
+ * undershoot merely wastes channels and logs a warning.
+ */
+static void __devinit niu_divide_channels(struct niu_parent *parent,
+                                         int num_10g, int num_1g)
+{
+       int num_ports = parent->num_ports;
+       int rx_chans_per_10g, rx_chans_per_1g;
+       int tx_chans_per_10g, tx_chans_per_1g;
+       int i, tot_rx, tot_tx;
+
+       if (!num_10g || !num_1g) {
+               /* Homogeneous config: divide evenly across all ports. */
+               rx_chans_per_10g = rx_chans_per_1g =
+                       (NIU_NUM_RXCHAN / num_ports);
+               tx_chans_per_10g = tx_chans_per_1g =
+                       (NIU_NUM_TXCHAN / num_ports);
+       } else {
+               rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
+               rx_chans_per_10g = (NIU_NUM_RXCHAN -
+                                   (rx_chans_per_1g * num_1g)) /
+                       num_10g;
+
+               tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
+               tx_chans_per_10g = (NIU_NUM_TXCHAN -
+                                   (tx_chans_per_1g * num_1g)) /
+                       num_10g;
+       }
+
+       tot_rx = tot_tx = 0;
+       for (i = 0; i < num_ports; i++) {
+               int type = phy_decode(parent->port_phy, i);
+
+               if (type == PORT_TYPE_10G) {
+                       parent->rxchan_per_port[i] = rx_chans_per_10g;
+                       parent->txchan_per_port[i] = tx_chans_per_10g;
+               } else {
+                       parent->rxchan_per_port[i] = rx_chans_per_1g;
+                       parent->txchan_per_port[i] = tx_chans_per_1g;
+               }
+               pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
+                       parent->index, i,
+                       parent->rxchan_per_port[i],
+                       parent->txchan_per_port[i]);
+               tot_rx += parent->rxchan_per_port[i];
+               tot_tx += parent->txchan_per_port[i];
+       }
+
+       /* Sanity-clamp: never program more channels than exist. */
+       if (tot_rx > NIU_NUM_RXCHAN) {
+               pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
+                      parent->index, tot_rx);
+               for (i = 0; i < num_ports; i++)
+                       parent->rxchan_per_port[i] = 1;
+       }
+       if (tot_tx > NIU_NUM_TXCHAN) {
+               pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
+                      parent->index, tot_tx);
+               for (i = 0; i < num_ports; i++)
+                       parent->txchan_per_port[i] = 1;
+       }
+       if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
+               pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
+                          parent->index, tot_rx, tot_tx);
+       }
+}
+
+/* Partition the RDC (receive DMA channel) tables evenly across the
+ * ports and fill each table's slots with that port's RX channels in
+ * round-robin order.  Also records each port's default RDC channel.
+ */
+static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
+                                           int num_10g, int num_1g)
+{
+       int i, num_ports = parent->num_ports;
+       int rdc_group, rdc_groups_per_port;
+       int rdc_channel_base;
+
+       rdc_group = 0;
+       rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;
+
+       rdc_channel_base = 0;
+
+       for (i = 0; i < num_ports; i++) {
+               struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
+               int grp, num_channels = parent->rxchan_per_port[i];
+               int this_channel_offset;
+
+               tp->first_table_num = rdc_group;
+               tp->num_tables = rdc_groups_per_port;
+               this_channel_offset = 0;
+               for (grp = 0; grp < tp->num_tables; grp++) {
+                       struct rdc_table *rt = &tp->tables[grp];
+                       int slot;
+
+                       pr_info("niu%d: Port %d RDC tbl(%d) [ ",
+                               parent->index, i, tp->first_table_num + grp);
+                       /* Cycle the port's channels through every slot. */
+                       for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
+                               rt->rxdma_channel[slot] =
+                                       rdc_channel_base + this_channel_offset;
+
+                               pr_cont("%d ", rt->rxdma_channel[slot]);
+
+                               if (++this_channel_offset == num_channels)
+                                       this_channel_offset = 0;
+                       }
+                       pr_cont("]\n");
+               }
+
+               parent->rdc_default[i] = rdc_channel_base;
+
+               rdc_channel_base += num_channels;
+               rdc_group += rdc_groups_per_port;
+       }
+}
+
+/* Walk MDIO/MII ports 8-31 under the parent lock and record every
+ * PMA/PMD, PCS and MII PHY found into @info.  Failed ID reads come
+ * back negative and are filtered inside phy_record().  Returns 0 or
+ * the first phy_record() error.
+ */
+static int __devinit fill_phy_probe_info(struct niu *np,
+                                        struct niu_parent *parent,
+                                        struct phy_probe_info *info)
+{
+       unsigned long flags;
+       int port, err;
+
+       memset(info, 0, sizeof(*info));
+
+       /* Port 0 to 7 are reserved for onboard Serdes, probe the rest.  */
+       niu_lock_parent(np, flags);
+       err = 0;
+       for (port = 8; port < 32; port++) {
+               int dev_id_1, dev_id_2;
+
+               dev_id_1 = mdio_read(np, port,
+                                    NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
+               dev_id_2 = mdio_read(np, port,
+                                    NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
+               err = phy_record(parent, info, dev_id_1, dev_id_2, port,
+                                PHY_TYPE_PMA_PMD);
+               if (err)
+                       break;
+               dev_id_1 = mdio_read(np, port,
+                                    NIU_PCS_DEV_ADDR, MII_PHYSID1);
+               dev_id_2 = mdio_read(np, port,
+                                    NIU_PCS_DEV_ADDR, MII_PHYSID2);
+               err = phy_record(parent, info, dev_id_1, dev_id_2, port,
+                                PHY_TYPE_PCS);
+               if (err)
+                       break;
+               dev_id_1 = mii_read(np, port, MII_PHYSID1);
+               dev_id_2 = mii_read(np, port, MII_PHYSID2);
+               err = phy_record(parent, info, dev_id_1, dev_id_2, port,
+                                PHY_TYPE_MII);
+               if (err)
+                       break;
+       }
+       niu_unlock_parent(np, flags);
+
+       return err;
+}
+
+/* Work out the parent's overall port/PHY layout.  Known board models
+ * (Alonso/Kimi ATCA, Foxxy, and the Monza serdes case) are handled by
+ * table; otherwise the MDIO bus is probed and the (num_10g, num_1g)
+ * combination is decoded.  On success parent->port_phy and the
+ * per-port channel and RDC tables are filled in.
+ */
+static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
+{
+       struct phy_probe_info *info = &parent->phy_probe_info;
+       int lowest_10g, lowest_1g;
+       int num_10g, num_1g;
+       u32 val;
+       int err;
+
+       num_10g = num_1g = 0;
+
+       if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
+           !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
+               num_10g = 0;
+               num_1g = 2;
+               parent->plat_type = PLAT_TYPE_ATCA_CP3220;
+               parent->num_ports = 4;
+               val = (phy_encode(PORT_TYPE_1G, 0) |
+                      phy_encode(PORT_TYPE_1G, 1) |
+                      phy_encode(PORT_TYPE_1G, 2) |
+                      phy_encode(PORT_TYPE_1G, 3));
+       } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
+               num_10g = 2;
+               num_1g = 0;
+               parent->num_ports = 2;
+               val = (phy_encode(PORT_TYPE_10G, 0) |
+                      phy_encode(PORT_TYPE_10G, 1));
+       } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
+                  (parent->plat_type == PLAT_TYPE_NIU)) {
+               /* this is the Monza case */
+               if (np->flags & NIU_FLAGS_10G) {
+                       val = (phy_encode(PORT_TYPE_10G, 0) |
+                              phy_encode(PORT_TYPE_10G, 1));
+               } else {
+                       val = (phy_encode(PORT_TYPE_1G, 0) |
+                              phy_encode(PORT_TYPE_1G, 1));
+               }
+       } else {
+               err = fill_phy_probe_info(np, parent, info);
+               if (err)
+                       return err;
+
+               num_10g = count_10g_ports(info, &lowest_10g);
+               num_1g = count_1g_ports(info, &lowest_1g);
+
+               /* Switch key is (10G port count << 4) | 1G port count. */
+               switch ((num_10g << 4) | num_1g) {
+               case 0x24:
+                       /* 2x10G + 4x1G: VF platform, variant chosen by
+                        * where the first 1G PHY sits.
+                        */
+                       if (lowest_1g == 10)
+                               parent->plat_type = PLAT_TYPE_VF_P0;
+                       else if (lowest_1g == 26)
+                               parent->plat_type = PLAT_TYPE_VF_P1;
+                       else
+                               goto unknown_vg_1g_port;
+
+                       /* fallthru */
+               case 0x22:
+                       val = (phy_encode(PORT_TYPE_10G, 0) |
+                              phy_encode(PORT_TYPE_10G, 1) |
+                              phy_encode(PORT_TYPE_1G, 2) |
+                              phy_encode(PORT_TYPE_1G, 3));
+                       break;
+
+               case 0x20:
+                       val = (phy_encode(PORT_TYPE_10G, 0) |
+                              phy_encode(PORT_TYPE_10G, 1));
+                       break;
+
+               case 0x10:
+                       /* Single 10G PHY: encode it at this port's slot. */
+                       val = phy_encode(PORT_TYPE_10G, np->port);
+                       break;
+
+               case 0x14:
+                       if (lowest_1g == 10)
+                               parent->plat_type = PLAT_TYPE_VF_P0;
+                       else if (lowest_1g == 26)
+                               parent->plat_type = PLAT_TYPE_VF_P1;
+                       else
+                               goto unknown_vg_1g_port;
+
+                       /* fallthru */
+               case 0x13:
+                       /* One 10G port; its position depends on which
+                        * MDIO slot the 10G PHY answered from.
+                        */
+                       if ((lowest_10g & 0x7) == 0)
+                               val = (phy_encode(PORT_TYPE_10G, 0) |
+                                      phy_encode(PORT_TYPE_1G, 1) |
+                                      phy_encode(PORT_TYPE_1G, 2) |
+                                      phy_encode(PORT_TYPE_1G, 3));
+                       else
+                               val = (phy_encode(PORT_TYPE_1G, 0) |
+                                      phy_encode(PORT_TYPE_10G, 1) |
+                                      phy_encode(PORT_TYPE_1G, 2) |
+                                      phy_encode(PORT_TYPE_1G, 3));
+                       break;
+
+               case 0x04:
+                       if (lowest_1g == 10)
+                               parent->plat_type = PLAT_TYPE_VF_P0;
+                       else if (lowest_1g == 26)
+                               parent->plat_type = PLAT_TYPE_VF_P1;
+                       else
+                               goto unknown_vg_1g_port;
+
+                       val = (phy_encode(PORT_TYPE_1G, 0) |
+                              phy_encode(PORT_TYPE_1G, 1) |
+                              phy_encode(PORT_TYPE_1G, 2) |
+                              phy_encode(PORT_TYPE_1G, 3));
+                       break;
+
+               default:
+                       pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
+                              num_10g, num_1g);
+                       return -EINVAL;
+               }
+       }
+
+       parent->port_phy = val;
+
+       if (parent->plat_type == PLAT_TYPE_NIU)
+               niu_n2_divide_channels(parent);
+       else
+               niu_divide_channels(parent, num_10g, num_1g);
+
+       niu_divide_rdc_groups(parent, num_10g, num_1g);
+
+       return 0;
+
+unknown_vg_1g_port:
+       pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
+       return -EINVAL;
+}
+
+/* Run the (once per parent) PHY walk if the port layout is still
+ * unknown, then quiesce the LDG timers and interrupts.  Returns 0,
+ * a walk_phys() error, or -EINVAL if a previous probe left the
+ * layout marked invalid.
+ */
+static int __devinit niu_probe_ports(struct niu *np)
+{
+       struct niu_parent *parent = np->parent;
+       int err, i;
+
+       if (parent->port_phy == PORT_PHY_UNKNOWN) {
+               err = walk_phys(np, parent);
+               if (err)
+                       return err;
+
+               niu_set_ldg_timer_res(np, 2);
+               /* Start with every logical device interrupt disabled. */
+               for (i = 0; i <= LDN_MAX; i++)
+                       niu_ldn_irq_enable(np, i, 0);
+       }
+
+       if (parent->port_phy == PORT_PHY_INVALID)
+               return -EINVAL;
+
+       return 0;
+}
+
+/* Initialize the software view of the flow classifier (per-port TCAM
+ * region and hash seeds) and kick off early FFLP hardware init.
+ */
+static int __devinit niu_classifier_swstate_init(struct niu *np)
+{
+       struct niu_classifier *cp = &np->clas;
+
+       /* Each port owns an equal slice of the TCAM, starting at its
+        * port index.
+        */
+       cp->tcam_top = (u16) np->port;
+       cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
+       cp->h1_init = 0xffffffff;
+       cp->h2_init = 0xffff;
+
+       return fflp_early_init(np);
+}
+
+/* Seed the link configuration with "advertise everything, autoneg on,
+ * loopback off".  The #if 0 arm below is a debug aid that forces 10G
+ * full-duplex MAC loopback instead.
+ */
+static void __devinit niu_link_config_init(struct niu *np)
+{
+       struct niu_link_config *lp = &np->link_config;
+
+       lp->advertising = (ADVERTISED_10baseT_Half |
+                          ADVERTISED_10baseT_Full |
+                          ADVERTISED_100baseT_Half |
+                          ADVERTISED_100baseT_Full |
+                          ADVERTISED_1000baseT_Half |
+                          ADVERTISED_1000baseT_Full |
+                          ADVERTISED_10000baseT_Full |
+                          ADVERTISED_Autoneg);
+       lp->speed = lp->active_speed = SPEED_INVALID;
+       lp->duplex = DUPLEX_FULL;
+       lp->active_duplex = DUPLEX_INVALID;
+       lp->autoneg = 1;
+#if 0
+       lp->loopback_mode = LOOPBACK_MAC;
+       lp->active_speed = SPEED_10000;
+       lp->active_duplex = DUPLEX_FULL;
+#else
+       lp->loopback_mode = LOOPBACK_DISABLED;
+#endif
+}
+
+/* Compute the per-port MAC, IPP, PCS and XPCS register block offsets.
+ * Returns 0, or -EINVAL for an out-of-range port number.
+ */
+static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
+{
+       switch (np->port) {
+       case 0:
+               np->mac_regs = np->regs + XMAC_PORT0_OFF;
+               np->ipp_off  = 0x00000;
+               np->pcs_off  = 0x04000;
+               np->xpcs_off = 0x02000;
+               break;
+
+       case 1:
+               np->mac_regs = np->regs + XMAC_PORT1_OFF;
+               np->ipp_off  = 0x08000;
+               np->pcs_off  = 0x0a000;
+               np->xpcs_off = 0x08000;
+               break;
+
+       case 2:
+               /* Ports 2/3 use the BMAC; xpcs_off of ~0UL presumably
+                * marks "no XPCS block" — confirm against the users of
+                * xpcs_off before relying on it.
+                */
+               np->mac_regs = np->regs + BMAC_PORT2_OFF;
+               np->ipp_off  = 0x04000;
+               np->pcs_off  = 0x0e000;
+               np->xpcs_off = ~0UL;
+               break;
+
+       case 3:
+               np->mac_regs = np->regs + BMAC_PORT3_OFF;
+               np->ipp_off  = 0x0c000;
+               np->pcs_off  = 0x12000;
+               np->xpcs_off = ~0UL;
+               break;
+
+       default:
+               dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Try to enable MSI-X with one vector per needed interrupt (RX chans +
+ * TX chans + MAC, plus MIF/SYSERR on port 0).  pci_enable_msix()
+ * returning a positive value means only that many vectors are
+ * available, so retry with the smaller count; a negative return means
+ * fall back to non-MSI-X operation (NIU_FLAGS_MSIX cleared).
+ */
+static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
+{
+       struct msix_entry msi_vec[NIU_NUM_LDG];
+       struct niu_parent *parent = np->parent;
+       struct pci_dev *pdev = np->pdev;
+       int i, num_irqs, err;
+       u8 first_ldg;
+
+       /* Each port owns a contiguous slice of the LDG number space. */
+       first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
+       for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
+               ldg_num_map[i] = first_ldg + i;
+
+       num_irqs = (parent->rxchan_per_port[np->port] +
+                   parent->txchan_per_port[np->port] +
+                   (np->port == 0 ? 3 : 1));
+       BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
+
+retry:
+       for (i = 0; i < num_irqs; i++) {
+               msi_vec[i].vector = 0;
+               msi_vec[i].entry = i;
+       }
+
+       err = pci_enable_msix(pdev, msi_vec, num_irqs);
+       if (err < 0) {
+               np->flags &= ~NIU_FLAGS_MSIX;
+               return;
+       }
+       if (err > 0) {
+               num_irqs = err;
+               goto retry;
+       }
+
+       np->flags |= NIU_FLAGS_MSIX;
+       for (i = 0; i < num_irqs; i++)
+               np->ldg[i].irq = msi_vec[i].vector;
+       np->num_ldg = num_irqs;
+}
+
+/* N2 NIU (sparc64 only): map the OF "interrupts" property entries to
+ * LDG numbers and take the virtual IRQs the platform layer resolved.
+ * Returns 0, or -ENODEV/-EINVAL when the property or platform support
+ * is missing.
+ */
+static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
+{
+#ifdef CONFIG_SPARC64
+       struct platform_device *op = np->op;
+       const u32 *int_prop;
+       int i;
+
+       int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
+       if (!int_prop)
+               return -ENODEV;
+
+       for (i = 0; i < op->archdata.num_irqs; i++) {
+               ldg_num_map[i] = int_prop[i];
+               np->ldg[i].irq = op->archdata.irqs[i];
+       }
+
+       np->num_ldg = op->archdata.num_irqs;
+
+       return 0;
+#else
+       return -EINVAL;
+#endif
+}
+
+/* Set up the logical device groups (LDGs): acquire interrupt vectors
+ * (N2 OF properties or PCI MSI-X), register one NAPI context per LDG,
+ * program the SID routing where needed, and assign every logical
+ * device (MAC, MIF, SYSERR, RX/TX channels) to an LDG in round-robin
+ * "rotor" order.  Returns 0 or a negative errno.
+ */
+static int __devinit niu_ldg_init(struct niu *np)
+{
+       struct niu_parent *parent = np->parent;
+       u8 ldg_num_map[NIU_NUM_LDG];
+       int first_chan, num_chan;
+       int i, err, ldg_rotor;
+       u8 port;
+
+       /* Default: a single LDG sharing the device's legacy IRQ. */
+       np->num_ldg = 1;
+       np->ldg[0].irq = np->dev->irq;
+       if (parent->plat_type == PLAT_TYPE_NIU) {
+               err = niu_n2_irq_init(np, ldg_num_map);
+               if (err)
+                       return err;
+       } else
+               niu_try_msix(np, ldg_num_map);
+
+       port = np->port;
+       for (i = 0; i < np->num_ldg; i++) {
+               struct niu_ldg *lp = &np->ldg[i];
+
+               netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
+
+               lp->np = np;
+               lp->ldg_num = ldg_num_map[i];
+               lp->timer = 2; /* XXX */
+
+               /* On N2 NIU the firmware has setup the SID mappings so they go
+                * to the correct values that will route the LDG to the proper
+                * interrupt in the NCU interrupt table.
+                */
+               if (np->parent->plat_type != PLAT_TYPE_NIU) {
+                       err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
+                       if (err)
+                               return err;
+               }
+       }
+
+       /* We adopt the LDG assignment ordering used by the N2 NIU
+        * 'interrupt' properties because that simplifies a lot of
+        * things.  This ordering is:
+        *
+        *      MAC
+        *      MIF     (if port zero)
+        *      SYSERR  (if port zero)
+        *      RX channels
+        *      TX channels
+        */
+
+       ldg_rotor = 0;
+
+       err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
+                                 LDN_MAC(port));
+       if (err)
+               return err;
+
+       ldg_rotor++;
+       if (ldg_rotor == np->num_ldg)
+               ldg_rotor = 0;
+
+       if (port == 0) {
+               err = niu_ldg_assign_ldn(np, parent,
+                                        ldg_num_map[ldg_rotor],
+                                        LDN_MIF);
+               if (err)
+                       return err;
+
+               ldg_rotor++;
+               if (ldg_rotor == np->num_ldg)
+                       ldg_rotor = 0;
+
+               err = niu_ldg_assign_ldn(np, parent,
+                                        ldg_num_map[ldg_rotor],
+                                        LDN_DEVICE_ERROR);
+               if (err)
+                       return err;
+
+               ldg_rotor++;
+               if (ldg_rotor == np->num_ldg)
+                       ldg_rotor = 0;
+
+       }
+
+       /* This port's RX channels start after all earlier ports'. */
+       first_chan = 0;
+       for (i = 0; i < port; i++)
+               first_chan += parent->rxchan_per_port[i];
+       num_chan = parent->rxchan_per_port[port];
+
+       for (i = first_chan; i < (first_chan + num_chan); i++) {
+               err = niu_ldg_assign_ldn(np, parent,
+                                        ldg_num_map[ldg_rotor],
+                                        LDN_RXDMA(i));
+               if (err)
+                       return err;
+               ldg_rotor++;
+               if (ldg_rotor == np->num_ldg)
+                       ldg_rotor = 0;
+       }
+
+       /* Likewise for the TX channels. */
+       first_chan = 0;
+       for (i = 0; i < port; i++)
+               first_chan += parent->txchan_per_port[i];
+       num_chan = parent->txchan_per_port[port];
+       for (i = first_chan; i < (first_chan + num_chan); i++) {
+               err = niu_ldg_assign_ldn(np, parent,
+                                        ldg_num_map[ldg_rotor],
+                                        LDN_TXDMA(i));
+               if (err)
+                       return err;
+               ldg_rotor++;
+               if (ldg_rotor == np->num_ldg)
+                       ldg_rotor = 0;
+       }
+
+       return 0;
+}
+
+/* Tear down the interrupt resources taken by niu_ldg_init(): only
+ * MSI-X needs explicit release.
+ */
+static void __devexit niu_ldg_free(struct niu *np)
+{
+       if (np->flags & NIU_FLAGS_MSIX)
+               pci_disable_msix(np->pdev);
+}
+
+/* Read device invariants from the OpenFirmware device tree (sparc64
+ * only): PHY type, permanent MAC address, model string and the
+ * hot-swappable-PHY flag.  Returns 0 on success, -ENODEV for a
+ * phy-type of "none", -EINVAL on bad/missing properties or off
+ * sparc64.
+ */
+static int __devinit niu_get_of_props(struct niu *np)
+{
+#ifdef CONFIG_SPARC64
+       struct net_device *dev = np->dev;
+       struct device_node *dp;
+       const char *phy_type;
+       const u8 *mac_addr;
+       const char *model;
+       int prop_len;
+
+       if (np->parent->plat_type == PLAT_TYPE_NIU)
+               dp = np->op->dev.of_node;
+       else
+               dp = pci_device_to_OF_node(np->pdev);
+
+       phy_type = of_get_property(dp, "phy-type", &prop_len);
+       if (!phy_type) {
+               netdev_err(dev, "%s: OF node lacks phy-type property\n",
+                          dp->full_name);
+               return -EINVAL;
+       }
+
+       if (!strcmp(phy_type, "none"))
+               return -ENODEV;
+
+       strcpy(np->vpd.phy_type, phy_type);
+
+       if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
+               netdev_err(dev, "%s: Illegal phy string [%s]\n",
+                          dp->full_name, np->vpd.phy_type);
+               return -EINVAL;
+       }
+
+       mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
+       if (!mac_addr) {
+               netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
+                          dp->full_name);
+               return -EINVAL;
+       }
+       /* NOTE(review): a prop_len mismatch is only logged, not treated
+        * as fatal — the copy below still uses dev->addr_len bytes.
+        * Confirm this leniency is intentional.
+        */
+       if (prop_len != dev->addr_len) {
+               netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
+                          dp->full_name, prop_len);
+       }
+       memcpy(dev->perm_addr, mac_addr, dev->addr_len);
+       if (!is_valid_ether_addr(&dev->perm_addr[0])) {
+               netdev_err(dev, "%s: OF MAC address is invalid\n",
+                          dp->full_name);
+               netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
+               return -EINVAL;
+       }
+
+       memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+
+       model = of_get_property(dp, "model", &prop_len);
+
+       if (model)
+               strcpy(np->vpd.model, model);
+
+       if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
+               np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
+                       NIU_FLAGS_HOTPLUG_PHY);
+       }
+
+       return 0;
+#else
+       return -EINVAL;
+#endif
+}
+
+/* Gather all per-device invariants: OF properties (sparc64) or PCI
+ * VPD/SPROM contents, MAC/IPP/PCS register offsets, port validation,
+ * LDG interrupt setup, classifier software state and the initial link
+ * configuration.  Returns 0 on success or a negative errno.
+ */
+static int __devinit niu_get_invariants(struct niu *np)
+{
+       int err, have_props;
+       u32 offset;
+
+       err = niu_get_of_props(np);
+       if (err == -ENODEV)
+               return err;
+
+       /* Any other niu_get_of_props() error just means no usable OF
+        * properties; fall back to VPD/SPROM probing below.
+        */
+       have_props = !err;
+
+       err = niu_init_mac_ipp_pcs_base(np);
+       if (err)
+               return err;
+
+       if (have_props) {
+               err = niu_get_and_validate_port(np);
+               if (err)
+                       return err;
+
+       } else  {
+               /* N2 NIU must have OF properties; it has no VPD/SPROM. */
+               if (np->parent->plat_type == PLAT_TYPE_NIU)
+                       return -EINVAL;
+
+               /* Enable EEPROM PIO access just long enough to pull the
+                * VPD image out.
+                */
+               nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
+               offset = niu_pci_vpd_offset(np);
+               netif_printk(np, probe, KERN_DEBUG, np->dev,
+                            "%s() VPD offset [%08x]\n", __func__, offset);
+               if (offset)
+                       niu_pci_vpd_fetch(np, offset);
+               nw64(ESPC_PIO_EN, 0);
+
+               if (np->flags & NIU_FLAGS_VPD_VALID) {
+                       niu_pci_vpd_validate(np);
+                       err = niu_get_and_validate_port(np);
+                       if (err)
+                               return err;
+               }
+
+               if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
+                       err = niu_get_and_validate_port(np);
+                       if (err)
+                               return err;
+                       err = niu_pci_probe_sprom(np);
+                       if (err)
+                               return err;
+               }
+       }
+
+       err = niu_probe_ports(np);
+       if (err)
+               return err;
+
+       /* niu_ldg_init() can fail (missing "interrupts" OF property on
+        * N2, or a SID programming error); propagate the error instead
+        * of silently continuing with a bogus interrupt layout.
+        */
+       err = niu_ldg_init(np);
+       if (err)
+               return err;
+
+       niu_classifier_swstate_init(np);
+       niu_link_config_init(np);
+
+       err = niu_determine_phy_disposition(np);
+       if (!err)
+               err = niu_init_link(np);
+
+       return err;
+}
+
+/* Registry of shared "parent" objects (one parent can back several
+ * ports/netdevs) and a counter for naming new ones; the mutex
+ * presumably guards both — confirm against get/put users.
+ */
+static LIST_HEAD(niu_parent_list);
+static DEFINE_MUTEX(niu_parent_lock);
+static int niu_parent_index;
+
+/* sysfs show: print the per-port PHY types ("10G"/"1G", space
+ * separated) of this parent, or nothing while the layout is still
+ * unknown or invalid.
+ */
+static ssize_t show_port_phy(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       struct platform_device *plat_dev = to_platform_device(dev);
+       struct niu_parent *p = plat_dev->dev.platform_data;
+       u32 port_phy = p->port_phy;
+       char *orig_buf = buf;
+       int i;
+
+       if (port_phy == PORT_PHY_UNKNOWN ||
+           port_phy == PORT_PHY_INVALID)
+               return 0;
+
+       for (i = 0; i < p->num_ports; i++) {
+               const char *type_str;
+               int type;
+
+               type = phy_decode(port_phy, i);
+               if (type == PORT_TYPE_10G)
+                       type_str = "10G";
+               else
+                       type_str = "1G";
+               buf += sprintf(buf,
+                              (i == 0) ? "%s" : " %s",
+                              type_str);
+       }
+       buf += sprintf(buf, "\n");
+       return buf - orig_buf;
+}
+
+/* sysfs: print a human-readable name for the board/platform type. */
+static ssize_t show_plat_type(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct platform_device *plat_dev = to_platform_device(dev);
+       struct niu_parent *p = plat_dev->dev.platform_data;
+       const char *type_str;
+
+       switch (p->plat_type) {
+       case PLAT_TYPE_ATLAS:
+               type_str = "atlas";
+               break;
+       case PLAT_TYPE_NIU:
+               type_str = "niu";
+               break;
+       case PLAT_TYPE_VF_P0:
+               type_str = "vf_p0";
+               break;
+       case PLAT_TYPE_VF_P1:
+               type_str = "vf_p1";
+               break;
+       default:
+               type_str = "unknown";
+               break;
+       }
+
+       return sprintf(buf, "%s\n", type_str);
+}
+
+/* Common helper for the {rx,tx}chan_per_port attributes: prints the
+ * per-port DMA channel counts on one space-separated line.  @rx != 0
+ * selects the RX channel table, otherwise the TX table is shown.
+ */
+static ssize_t __show_chan_per_port(struct device *dev,
+                                   struct device_attribute *attr, char *buf,
+                                   int rx)
+{
+       struct platform_device *plat_dev = to_platform_device(dev);
+       struct niu_parent *p = plat_dev->dev.platform_data;
+       char *orig_buf = buf;
+       u8 *arr;
+       int i;
+
+       arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
+
+       for (i = 0; i < p->num_ports; i++) {
+               buf += sprintf(buf,
+                              (i == 0) ? "%d" : " %d",
+                              arr[i]);
+       }
+       buf += sprintf(buf, "\n");
+
+       return buf - orig_buf;
+}
+
+/* sysfs: show the number of RX DMA channels assigned to each port. */
+static ssize_t show_rxchan_per_port(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       return __show_chan_per_port(dev, attr, buf, 1);
+}
+
+/* sysfs: show the number of TX DMA channels assigned to each port.
+ * Pass rx == 0 so the TX table is printed; the original passed 1 and
+ * therefore duplicated the rxchan_per_port output here.
+ */
+static ssize_t show_txchan_per_port(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       return __show_chan_per_port(dev, attr, buf, 0);
+}
+
+/* sysfs: show how many ports the parent board exposes. */
+static ssize_t show_num_ports(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct niu_parent *parent;
+
+       parent = to_platform_device(dev)->dev.platform_data;
+       return sprintf(buf, "%d\n", parent->num_ports);
+}
+
+/* Read-only attributes exposed on the "niu-board" platform device;
+ * the empty sentinel terminates the attr_name() loop in niu_new_parent.
+ */
+static struct device_attribute niu_parent_attributes[] = {
+       __ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
+       __ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
+       __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
+       __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
+       __ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
+       {}
+};
+
+/* Allocate and register a new board-level parent for @id: creates the
+ * "niu-board" platform device with its sysfs attributes, links the
+ * parent into niu_parent_list, and seeds the default TCAM/flow-key and
+ * LDG tables.  Returns NULL on failure.  Called with niu_parent_lock
+ * held (via niu_get_parent).
+ */
+static struct niu_parent * __devinit niu_new_parent(struct niu *np,
+                                                   union niu_parent_id *id,
+                                                   u8 ptype)
+{
+       struct platform_device *plat_dev;
+       struct niu_parent *p;
+       int i;
+
+       plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
+                                                  NULL, 0);
+       if (IS_ERR(plat_dev))
+               return NULL;
+
+       /* NOTE(review): the attribute files become readable before
+        * plat_dev->dev.platform_data is assigned below; a concurrent
+        * sysfs read in that window would dereference NULL — confirm
+        * this cannot happen in practice.
+        */
+       for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
+               int err = device_create_file(&plat_dev->dev,
+                                            &niu_parent_attributes[i]);
+               if (err)
+                       goto fail_unregister;
+       }
+
+       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       if (!p)
+               goto fail_unregister;
+
+       p->index = niu_parent_index++;
+
+       plat_dev->dev.platform_data = p;
+       p->plat_dev = plat_dev;
+
+       memcpy(&p->id, id, sizeof(*id));
+       p->plat_type = ptype;
+       INIT_LIST_HEAD(&p->list);
+       atomic_set(&p->refcnt, 0);
+       list_add(&p->list, &niu_parent_list);
+       spin_lock_init(&p->lock);
+
+       p->rxdma_clock_divider = 7500;
+
+       /* The non-PCI (NIU) variant has a larger TCAM. */
+       p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
+       if (p->plat_type == PLAT_TYPE_NIU)
+               p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
+
+       /* Default classification: hash on IP src/dst, protocol and two
+        * L4 bytes for every supported class code.
+        */
+       for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
+               int index = i - CLASS_CODE_USER_PROG1;
+
+               p->tcam_key[index] = TCAM_KEY_TSEL;
+               p->flow_key[index] = (FLOW_KEY_IPSA |
+                                     FLOW_KEY_IPDA |
+                                     FLOW_KEY_PROTO |
+                                     (FLOW_KEY_L4_BYTE12 <<
+                                      FLOW_KEY_L4_0_SHIFT) |
+                                     (FLOW_KEY_L4_BYTE12 <<
+                                      FLOW_KEY_L4_1_SHIFT));
+       }
+
+       for (i = 0; i < LDN_MAX + 1; i++)
+               p->ldg_map[i] = LDG_INVALID;
+
+       return p;
+
+fail_unregister:
+       platform_device_unregister(plat_dev);
+       return NULL;
+}
+
+/* Find (or create) the shared parent structure for board @id and bind
+ * this port to it: a "portN" sysfs link is created under the parent's
+ * platform device and the parent refcount is bumped.  Returns the
+ * parent, or NULL on failure.
+ */
+static struct niu_parent * __devinit niu_get_parent(struct niu *np,
+                                                   union niu_parent_id *id,
+                                                   u8 ptype)
+{
+       struct niu_parent *p, *tmp;
+       int port = np->port;
+
+       mutex_lock(&niu_parent_lock);
+       p = NULL;
+       list_for_each_entry(tmp, &niu_parent_list, list) {
+               if (!memcmp(id, &tmp->id, sizeof(*id))) {
+                       p = tmp;
+                       break;
+               }
+       }
+       if (!p)
+               p = niu_new_parent(np, id, ptype);
+
+       if (p) {
+               /* "port%d" needs up to 7 chars + NUL for a u8 port;
+                * the original char[6] overflowed for port >= 10.
+                */
+               char port_name[8];
+               int err;
+
+               sprintf(port_name, "port%d", port);
+               err = sysfs_create_link(&p->plat_dev->dev.kobj,
+                                       &np->device->kobj,
+                                       port_name);
+               if (!err) {
+                       p->ports[port] = np;
+                       atomic_inc(&p->refcnt);
+               }
+       }
+       mutex_unlock(&niu_parent_lock);
+
+       return p;
+}
+
+/* Detach this port from its parent: remove the "portN" sysfs link,
+ * clear the port slot, and unregister/free the parent's platform
+ * device when the last port reference is dropped.
+ */
+static void niu_put_parent(struct niu *np)
+{
+       struct niu_parent *p = np->parent;
+       u8 port = np->port;
+       /* "port%d" needs up to 7 chars + NUL for a u8 port; the
+        * original char[6] overflowed for port >= 10.
+        */
+       char port_name[8];
+
+       BUG_ON(!p || p->ports[port] != np);
+
+       netif_printk(np, probe, KERN_DEBUG, np->dev,
+                    "%s() port[%u]\n", __func__, port);
+
+       sprintf(port_name, "port%d", port);
+
+       mutex_lock(&niu_parent_lock);
+
+       sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
+
+       p->ports[port] = NULL;
+       np->parent = NULL;
+
+       if (atomic_dec_and_test(&p->refcnt)) {
+               list_del(&p->list);
+               platform_device_unregister(p->plat_dev);
+       }
+
+       mutex_unlock(&niu_parent_lock);
+}
+
+/* niu_ops.alloc_coherent for PCI devices: wraps dma_alloc_coherent(),
+ * converting the dma_addr_t into the u64 handle the core code stores.
+ */
+static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
+                                   u64 *handle, gfp_t flag)
+{
+       dma_addr_t dh;
+       void *ret;
+
+       ret = dma_alloc_coherent(dev, size, &dh, flag);
+       if (ret)
+               *handle = dh;
+       return ret;
+}
+
+/* niu_ops.free_coherent for PCI devices: releases a buffer obtained
+ * from niu_pci_alloc_coherent().
+ */
+static void niu_pci_free_coherent(struct device *dev, size_t size,
+                                 void *cpu_addr, u64 handle)
+{
+       dma_free_coherent(dev, size, cpu_addr, handle);
+}
+
+/* niu_ops.map_page for PCI devices: thin wrapper over dma_map_page(). */
+static u64 niu_pci_map_page(struct device *dev, struct page *page,
+                           unsigned long offset, size_t size,
+                           enum dma_data_direction direction)
+{
+       return dma_map_page(dev, page, offset, size, direction);
+}
+
+/* niu_ops.unmap_page for PCI devices: thin wrapper over dma_unmap_page(). */
+static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
+                              size_t size, enum dma_data_direction direction)
+{
+       dma_unmap_page(dev, dma_address, size, direction);
+}
+
+/* niu_ops.map_single for PCI devices: thin wrapper over dma_map_single(). */
+static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
+                             size_t size,
+                             enum dma_data_direction direction)
+{
+       return dma_map_single(dev, cpu_addr, size, direction);
+}
+
+/* niu_ops.unmap_single for PCI devices: thin wrapper over dma_unmap_single(). */
+static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
+                                size_t size,
+                                enum dma_data_direction direction)
+{
+       dma_unmap_single(dev, dma_address, size, direction);
+}
+
+/* DMA operations used when the device sits behind the PCI core. */
+static const struct niu_ops niu_pci_ops = {
+       .alloc_coherent = niu_pci_alloc_coherent,
+       .free_coherent  = niu_pci_free_coherent,
+       .map_page       = niu_pci_map_page,
+       .unmap_page     = niu_pci_unmap_page,
+       .map_single     = niu_pci_map_single,
+       .unmap_single   = niu_pci_unmap_single,
+};
+
+/* Print the driver version banner exactly once, on the first probe. */
+static void __devinit niu_driver_version(void)
+{
+       static int printed;
+
+       if (!printed) {
+               printed = 1;
+               pr_info("%s", version);
+       }
+}
+
+/* Allocate a multiqueue net_device plus niu private area and set up the
+ * fields common to the PCI and OF probe paths (device pointers, DMA ops,
+ * lock, reset work, port number).  Returns NULL on allocation failure.
+ */
+static struct net_device * __devinit niu_alloc_and_init(
+       struct device *gen_dev, struct pci_dev *pdev,
+       struct platform_device *op, const struct niu_ops *ops,
+       u8 port)
+{
+       struct net_device *dev;
+       struct niu *np;
+
+       dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
+       if (!dev) {
+               dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
+               return NULL;
+       }
+
+       SET_NETDEV_DEV(dev, gen_dev);
+
+       np = netdev_priv(dev);
+       np->dev = dev;
+       np->pdev = pdev;
+       np->op = op;
+       np->device = gen_dev;
+       np->ops = ops;
+
+       np->msg_enable = niu_debug;
+
+       spin_lock_init(&np->lock);
+       INIT_WORK(&np->reset_task, niu_reset_task);
+
+       np->port = port;
+
+       return dev;
+}
+
+/* net_device callbacks shared by the PCI and OF variants. */
+static const struct net_device_ops niu_netdev_ops = {
+       .ndo_open               = niu_open,
+       .ndo_stop               = niu_close,
+       .ndo_start_xmit         = niu_start_xmit,
+       .ndo_get_stats64        = niu_get_stats,
+       .ndo_set_multicast_list = niu_set_rx_mode,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = niu_set_mac_addr,
+       .ndo_do_ioctl           = niu_ioctl,
+       .ndo_tx_timeout         = niu_tx_timeout,
+       .ndo_change_mtu         = niu_change_mtu,
+};
+
+/* Attach the shared netdev/ethtool callbacks and the TX watchdog timeout. */
+static void __devinit niu_assign_netdev_ops(struct net_device *dev)
+{
+       dev->netdev_ops = &niu_netdev_ops;
+       dev->ethtool_ops = &niu_ethtool_ops;
+       dev->watchdog_timeo = NIU_TX_TIMEOUT;
+}
+
+/* Log the MAC address and port configuration discovered at probe time.
+ * The ATCA CP3220 variant reports "RGMII FIBER"/"SERDES" instead of the
+ * generic FIBER/SERDES/COPPER wording.
+ */
+static void __devinit niu_device_announce(struct niu *np)
+{
+       struct net_device *dev = np->dev;
+
+       pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
+
+       if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
+               pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
+                               dev->name,
+                               (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
+                               (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
+                               (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
+                               (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
+                                (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
+                               np->vpd.phy_type);
+       } else {
+               pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
+                               dev->name,
+                               (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
+                               (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
+                               (np->flags & NIU_FLAGS_FIBER ? "FIBER" :
+                                (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
+                                 "COPPER")),
+                               (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
+                                (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
+                               np->vpd.phy_type);
+       }
+}
+
+/* Advertise the feature set common to both probe paths: scatter/gather,
+ * hardware checksumming and RX flow hashing; RX checksum is always on.
+ */
+static void __devinit niu_set_basic_features(struct net_device *dev)
+{
+       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
+       dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+}
+
+/* PCI probe: enable the device, validate BARs, attach to the shared
+ * board parent, configure 44-bit DMA (falling back to 32-bit), map the
+ * register BAR, fetch chip invariants and register the net device.
+ * Returns 0 on success or a negative errno, unwinding on every path.
+ */
+static int __devinit niu_pci_init_one(struct pci_dev *pdev,
+                                     const struct pci_device_id *ent)
+{
+       union niu_parent_id parent_id;
+       struct net_device *dev;
+       struct niu *np;
+       int err, pos;
+       u64 dma_mask;
+       u16 val16;
+
+       niu_driver_version();
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+               return err;
+       }
+
+       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
+           !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+               dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
+               err = -ENODEV;
+               goto err_out_disable_pdev;
+       }
+
+       err = pci_request_regions(pdev, DRV_MODULE_NAME);
+       if (err) {
+               dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+               goto err_out_disable_pdev;
+       }
+
+       pos = pci_pcie_cap(pdev);
+       if (pos <= 0) {
+               dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
+               /* err still held 0 here in the original code, so probe
+                * "succeeded" after releasing its resources.
+                */
+               err = -ENODEV;
+               goto err_out_free_res;
+       }
+
+       dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
+                                &niu_pci_ops, PCI_FUNC(pdev->devfn));
+       if (!dev) {
+               err = -ENOMEM;
+               goto err_out_free_res;
+       }
+       np = netdev_priv(dev);
+
+       memset(&parent_id, 0, sizeof(parent_id));
+       parent_id.pci.domain = pci_domain_nr(pdev->bus);
+       parent_id.pci.bus = pdev->bus->number;
+       parent_id.pci.device = PCI_SLOT(pdev->devfn);
+
+       np->parent = niu_get_parent(np, &parent_id,
+                                   PLAT_TYPE_ATLAS);
+       if (!np->parent) {
+               err = -ENOMEM;
+               goto err_out_free_dev;
+       }
+
+       /* Enable PCIe error reporting and relaxed ordering; disable
+        * the no-snoop attribute.
+        */
+       pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
+       val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+       val16 |= (PCI_EXP_DEVCTL_CERE |
+                 PCI_EXP_DEVCTL_NFERE |
+                 PCI_EXP_DEVCTL_FERE |
+                 PCI_EXP_DEVCTL_URRE |
+                 PCI_EXP_DEVCTL_RELAX_EN);
+       pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
+
+       dma_mask = DMA_BIT_MASK(44);
+       err = pci_set_dma_mask(pdev, dma_mask);
+       if (!err) {
+               dev->features |= NETIF_F_HIGHDMA;
+               err = pci_set_consistent_dma_mask(pdev, dma_mask);
+               if (err) {
+                       dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
+                       goto err_out_release_parent;
+               }
+       }
+       if (err || dma_mask == DMA_BIT_MASK(32)) {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err) {
+                       dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+                       goto err_out_release_parent;
+               }
+       }
+
+       niu_set_basic_features(dev);
+
+       np->regs = pci_ioremap_bar(pdev, 0);
+       if (!np->regs) {
+               dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+               err = -ENOMEM;
+               goto err_out_release_parent;
+       }
+
+       pci_set_master(pdev);
+       pci_save_state(pdev);
+
+       dev->irq = pdev->irq;
+
+       niu_assign_netdev_ops(dev);
+
+       err = niu_get_invariants(np);
+       if (err) {
+               if (err != -ENODEV)
+                       dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
+               goto err_out_iounmap;
+       }
+
+       err = register_netdev(dev);
+       if (err) {
+               dev_err(&pdev->dev, "Cannot register net device, aborting\n");
+               goto err_out_iounmap;
+       }
+
+       pci_set_drvdata(pdev, dev);
+
+       niu_device_announce(np);
+
+       return 0;
+
+err_out_iounmap:
+       if (np->regs) {
+               iounmap(np->regs);
+               np->regs = NULL;
+       }
+
+err_out_release_parent:
+       niu_put_parent(np);
+
+err_out_free_dev:
+       free_netdev(dev);
+
+err_out_free_res:
+       pci_release_regions(pdev);
+
+err_out_disable_pdev:
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+
+       return err;
+}
+
+/* PCI remove: tear down in reverse of niu_pci_init_one() — unregister
+ * the netdev, unmap registers, free LDG interrupts, drop the parent
+ * reference and release PCI resources.
+ */
+static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+
+       if (dev) {
+               struct niu *np = netdev_priv(dev);
+
+               unregister_netdev(dev);
+               if (np->regs) {
+                       iounmap(np->regs);
+                       np->regs = NULL;
+               }
+
+               niu_ldg_free(np);
+
+               niu_put_parent(np);
+
+               free_netdev(dev);
+               pci_release_regions(pdev);
+               pci_disable_device(pdev);
+               pci_set_drvdata(pdev, NULL);
+       }
+}
+
+/* PM suspend: quiesce a running interface — flush pending reset work,
+ * stop NAPI/queues and the timer, mask interrupts, detach the netdev,
+ * stop the hardware, then save PCI state.  A non-running interface
+ * needs nothing.
+ */
+static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct niu *np = netdev_priv(dev);
+       unsigned long flags;
+
+       if (!netif_running(dev))
+               return 0;
+
+       flush_work_sync(&np->reset_task);
+       niu_netif_stop(np);
+
+       del_timer_sync(&np->timer);
+
+       spin_lock_irqsave(&np->lock, flags);
+       niu_enable_interrupts(np, 0);
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       netif_device_detach(dev);
+
+       spin_lock_irqsave(&np->lock, flags);
+       niu_stop_hw(np);
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       pci_save_state(pdev);
+
+       return 0;
+}
+
+/* PM resume: restore PCI state, re-attach the netdev and, if the
+ * interface was running, re-initialize the hardware and restart the
+ * maintenance timer and queues.  Returns the niu_init_hw() result.
+ */
+static int niu_resume(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct niu *np = netdev_priv(dev);
+       unsigned long flags;
+       int err;
+
+       if (!netif_running(dev))
+               return 0;
+
+       pci_restore_state(pdev);
+
+       netif_device_attach(dev);
+
+       spin_lock_irqsave(&np->lock, flags);
+
+       err = niu_init_hw(np);
+       if (!err) {
+               np->timer.expires = jiffies + HZ;
+               add_timer(&np->timer);
+               niu_netif_start(np);
+       }
+
+       spin_unlock_irqrestore(&np->lock, flags);
+
+       return err;
+}
+
+/* PCI driver glue for the Atlas (PCI-E) variant of the chip. */
+static struct pci_driver niu_pci_driver = {
+       .name           = DRV_MODULE_NAME,
+       .id_table       = niu_pci_tbl,
+       .probe          = niu_pci_init_one,
+       .remove         = __devexit_p(niu_pci_remove_one),
+       .suspend        = niu_suspend,
+       .resume         = niu_resume,
+};
+
+#ifdef CONFIG_SPARC64
+/* niu_ops.alloc_coherent for the SPARC64 OF variant: hand back zeroed
+ * free pages and their physical address — the NIU accesses memory by
+ * physical address on these systems.
+ */
+static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
+                                    u64 *dma_addr, gfp_t flag)
+{
+       unsigned long order = get_order(size);
+       unsigned long page = __get_free_pages(flag, order);
+
+       if (page == 0UL)
+               return NULL;
+       memset((char *)page, 0, PAGE_SIZE << order);
+       *dma_addr = __pa(page);
+
+       return (void *) page;
+}
+
+/* niu_ops.free_coherent for the OF variant: release pages obtained from
+ * niu_phys_alloc_coherent().
+ */
+static void niu_phys_free_coherent(struct device *dev, size_t size,
+                                  void *cpu_addr, u64 handle)
+{
+       unsigned long order = get_order(size);
+
+       free_pages((unsigned long) cpu_addr, order);
+}
+
+/* niu_ops.map_page for the OF variant: DMA address is simply the
+ * physical address of the page plus offset.
+ */
+static u64 niu_phys_map_page(struct device *dev, struct page *page,
+                            unsigned long offset, size_t size,
+                            enum dma_data_direction direction)
+{
+       return page_to_phys(page) + offset;
+}
+
+/* niu_ops.unmap_page for the OF variant: physical mappings need no teardown. */
+static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
+                               size_t size, enum dma_data_direction direction)
+{
+       /* Nothing to do.  */
+}
+
+/* niu_ops.map_single for the OF variant: DMA address is the physical
+ * address of the buffer.
+ */
+static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
+                              size_t size,
+                              enum dma_data_direction direction)
+{
+       return __pa(cpu_addr);
+}
+
+/* niu_ops.unmap_single for the OF variant: physical mappings need no teardown. */
+static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
+                                 size_t size,
+                                 enum dma_data_direction direction)
+{
+       /* Nothing to do.  */
+}
+
+/* DMA operations used when the device is probed via OpenFirmware on
+ * SPARC64 and addressed physically.
+ */
+static const struct niu_ops niu_phys_ops = {
+       .alloc_coherent = niu_phys_alloc_coherent,
+       .free_coherent  = niu_phys_free_coherent,
+       .map_page       = niu_phys_map_page,
+       .unmap_page     = niu_phys_unmap_page,
+       .map_single     = niu_phys_map_single,
+       .unmap_single   = niu_phys_unmap_single,
+};
+
+/* OpenFirmware (SPARC64) probe: read the port number from the "reg"
+ * property, attach to the parent keyed by the OF parent node, map the
+ * register and the two virtualization-register resources, then fetch
+ * invariants and register the net device.  Unwinds fully on error.
+ */
+static int __devinit niu_of_probe(struct platform_device *op)
+{
+       union niu_parent_id parent_id;
+       struct net_device *dev;
+       struct niu *np;
+       const u32 *reg;
+       int err;
+
+       niu_driver_version();
+
+       reg = of_get_property(op->dev.of_node, "reg", NULL);
+       if (!reg) {
+               dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
+                       op->dev.of_node->full_name);
+               return -ENODEV;
+       }
+
+       /* Low bit of the first "reg" cell selects the port. */
+       dev = niu_alloc_and_init(&op->dev, NULL, op,
+                                &niu_phys_ops, reg[0] & 0x1);
+       if (!dev) {
+               err = -ENOMEM;
+               goto err_out;
+       }
+       np = netdev_priv(dev);
+
+       memset(&parent_id, 0, sizeof(parent_id));
+       parent_id.of = of_get_parent(op->dev.of_node);
+
+       np->parent = niu_get_parent(np, &parent_id,
+                                   PLAT_TYPE_NIU);
+       if (!np->parent) {
+               err = -ENOMEM;
+               goto err_out_free_dev;
+       }
+
+       niu_set_basic_features(dev);
+
+       np->regs = of_ioremap(&op->resource[1], 0,
+                             resource_size(&op->resource[1]),
+                             "niu regs");
+       if (!np->regs) {
+               dev_err(&op->dev, "Cannot map device registers, aborting\n");
+               err = -ENOMEM;
+               goto err_out_release_parent;
+       }
+
+       np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
+                                   resource_size(&op->resource[2]),
+                                   "niu vregs-1");
+       if (!np->vir_regs_1) {
+               dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
+               err = -ENOMEM;
+               goto err_out_iounmap;
+       }
+
+       np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
+                                   resource_size(&op->resource[3]),
+                                   "niu vregs-2");
+       if (!np->vir_regs_2) {
+               dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
+               err = -ENOMEM;
+               goto err_out_iounmap;
+       }
+
+       niu_assign_netdev_ops(dev);
+
+       err = niu_get_invariants(np);
+       if (err) {
+               if (err != -ENODEV)
+                       dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
+               goto err_out_iounmap;
+       }
+
+       err = register_netdev(dev);
+       if (err) {
+               dev_err(&op->dev, "Cannot register net device, aborting\n");
+               goto err_out_iounmap;
+       }
+
+       dev_set_drvdata(&op->dev, dev);
+
+       niu_device_announce(np);
+
+       return 0;
+
+err_out_iounmap:
+       /* The shared unwind label handles all three mappings; only the
+        * ones actually established are non-NULL here.
+        */
+       if (np->vir_regs_1) {
+               of_iounmap(&op->resource[2], np->vir_regs_1,
+                          resource_size(&op->resource[2]));
+               np->vir_regs_1 = NULL;
+       }
+
+       if (np->vir_regs_2) {
+               of_iounmap(&op->resource[3], np->vir_regs_2,
+                          resource_size(&op->resource[3]));
+               np->vir_regs_2 = NULL;
+       }
+
+       if (np->regs) {
+               of_iounmap(&op->resource[1], np->regs,
+                          resource_size(&op->resource[1]));
+               np->regs = NULL;
+       }
+
+err_out_release_parent:
+       niu_put_parent(np);
+
+err_out_free_dev:
+       free_netdev(dev);
+
+err_out:
+       return err;
+}
+
+/* OF remove: tear down in reverse of niu_of_probe() — unregister the
+ * netdev, unmap all three register regions, free LDG interrupts, drop
+ * the parent reference and free the net device.
+ */
+static int __devexit niu_of_remove(struct platform_device *op)
+{
+       struct net_device *dev = dev_get_drvdata(&op->dev);
+
+       if (dev) {
+               struct niu *np = netdev_priv(dev);
+
+               unregister_netdev(dev);
+
+               if (np->vir_regs_1) {
+                       of_iounmap(&op->resource[2], np->vir_regs_1,
+                                  resource_size(&op->resource[2]));
+                       np->vir_regs_1 = NULL;
+               }
+
+               if (np->vir_regs_2) {
+                       of_iounmap(&op->resource[3], np->vir_regs_2,
+                                  resource_size(&op->resource[3]));
+                       np->vir_regs_2 = NULL;
+               }
+
+               if (np->regs) {
+                       of_iounmap(&op->resource[1], np->regs,
+                                  resource_size(&op->resource[1]));
+                       np->regs = NULL;
+               }
+
+               niu_ldg_free(np);
+
+               niu_put_parent(np);
+
+               free_netdev(dev);
+               dev_set_drvdata(&op->dev, NULL);
+       }
+       return 0;
+}
+
+/* OF match table for the SPARC64 (platform bus) variant of the device. */
+static const struct of_device_id niu_match[] = {
+       {
+               .name = "network",
+               .compatible = "SUNW,niusl",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, niu_match);
+
+/* Platform driver glue for the OF-probed SPARC64 variant. */
+static struct platform_driver niu_of_driver = {
+       .driver = {
+               .name = "niu",
+               .owner = THIS_MODULE,
+               .of_match_table = niu_match,
+       },
+       .probe          = niu_of_probe,
+       .remove         = __devexit_p(niu_of_remove),
+};
+
+#endif /* CONFIG_SPARC64 */
+
+/* Module init: register the OF platform driver (SPARC64 only) and the
+ * PCI driver; unregister the former if the latter fails.  The
+ * BUILD_BUG_ON enforces the driver's minimum 4KB page-size assumption.
+ */
+static int __init niu_init(void)
+{
+       int err = 0;
+
+       BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
+
+       niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
+
+#ifdef CONFIG_SPARC64
+       err = platform_driver_register(&niu_of_driver);
+#endif
+
+       if (!err) {
+               err = pci_register_driver(&niu_pci_driver);
+#ifdef CONFIG_SPARC64
+               if (err)
+                       platform_driver_unregister(&niu_of_driver);
+#endif
+       }
+
+       return err;
+}
+
+/* Module exit: unregister both driver front-ends. */
+static void __exit niu_exit(void)
+{
+       pci_unregister_driver(&niu_pci_driver);
+#ifdef CONFIG_SPARC64
+       platform_driver_unregister(&niu_of_driver);
+#endif
+}
+
+module_init(niu_init);
+module_exit(niu_exit);
diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h
new file mode 100644 (file)
index 0000000..51e177e
--- /dev/null
@@ -0,0 +1,3306 @@
+/* niu.h: Definitions for Neptune ethernet driver.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#ifndef _NIU_H
+#define _NIU_H
+
+#define PIO                    0x000000UL
+#define FZC_PIO                        0x080000UL
+#define FZC_MAC                        0x180000UL
+#define FZC_IPP                        0x280000UL
+#define FFLP                   0x300000UL
+#define FZC_FFLP               0x380000UL
+#define PIO_VADDR              0x400000UL
+#define ZCP                    0x500000UL
+#define FZC_ZCP                        0x580000UL
+#define DMC                    0x600000UL
+#define FZC_DMC                        0x680000UL
+#define TXC                    0x700000UL
+#define FZC_TXC                        0x780000UL
+#define PIO_LDSV               0x800000UL
+#define PIO_PIO_LDGIM          0x900000UL
+#define PIO_IMASK0             0xa00000UL
+#define PIO_IMASK1             0xb00000UL
+#define FZC_PROM               0xc80000UL
+#define FZC_PIM                        0xd80000UL
+
+#define LDSV0(LDG)             (PIO_LDSV + 0x00000UL + (LDG) * 0x2000UL)
+#define LDSV1(LDG)             (PIO_LDSV + 0x00008UL + (LDG) * 0x2000UL)
+#define LDSV2(LDG)             (PIO_LDSV + 0x00010UL + (LDG) * 0x2000UL)
+
+#define LDG_IMGMT(LDG)         (PIO_LDSV + 0x00018UL + (LDG) * 0x2000UL)
+#define  LDG_IMGMT_ARM         0x0000000080000000ULL
+#define  LDG_IMGMT_TIMER       0x000000000000003fULL
+
+#define LD_IM0(IDX)            (PIO_IMASK0 + 0x00000UL + (IDX) * 0x2000UL)
+#define  LD_IM0_MASK           0x0000000000000003ULL
+
+#define LD_IM1(IDX)            (PIO_IMASK1 + 0x00000UL + (IDX) * 0x2000UL)
+#define  LD_IM1_MASK           0x0000000000000003ULL
+
+#define LDG_TIMER_RES          (FZC_PIO + 0x00008UL)
+#define  LDG_TIMER_RES_VAL     0x00000000000fffffULL
+
+#define DIRTY_TID_CTL          (FZC_PIO + 0x00010UL)
+#define  DIRTY_TID_CTL_NPTHRED 0x00000000003f0000ULL
+#define  DIRTY_TID_CTL_RDTHRED 0x00000000000003f0ULL
+#define  DIRTY_TID_CTL_DTIDCLR 0x0000000000000002ULL
+#define  DIRTY_TID_CTL_DTIDENAB        0x0000000000000001ULL
+
+#define DIRTY_TID_STAT         (FZC_PIO + 0x00018UL)
+#define  DIRTY_TID_STAT_NPWSTAT        0x0000000000003f00ULL
+#define  DIRTY_TID_STAT_RDSTAT 0x000000000000003fULL
+
+#define RST_CTL                        (FZC_PIO + 0x00038UL)
+#define  RST_CTL_MAC_RST3      0x0000000000400000ULL
+#define  RST_CTL_MAC_RST2      0x0000000000200000ULL
+#define  RST_CTL_MAC_RST1      0x0000000000100000ULL
+#define  RST_CTL_MAC_RST0      0x0000000000080000ULL
+#define  RST_CTL_ACK_TO_EN     0x0000000000000800ULL
+#define  RST_CTL_ACK_TO_VAL    0x00000000000007feULL
+
+#define SMX_CFIG_DAT           (FZC_PIO + 0x00040UL)
+#define  SMX_CFIG_DAT_RAS_DET  0x0000000080000000ULL
+#define  SMX_CFIG_DAT_RAS_INJ  0x0000000040000000ULL
+#define  SMX_CFIG_DAT_XACT_TO  0x000000000fffffffULL
+
+#define SMX_INT_STAT           (FZC_PIO + 0x00048UL)
+#define  SMX_INT_STAT_STAT     0x00000000ffffffffULL
+
+#define SMX_CTL                        (FZC_PIO + 0x00050UL)
+#define  SMX_CTL_CTL           0x00000000ffffffffULL
+
+#define SMX_DBG_VEC            (FZC_PIO + 0x00058UL)
+#define  SMX_DBG_VEC_VEC       0x00000000ffffffffULL
+
+#define PIO_DBG_SEL            (FZC_PIO + 0x00060UL)
+#define  PIO_DBG_SEL_SEL       0x000000000000003fULL
+
+#define PIO_TRAIN_VEC          (FZC_PIO + 0x00068UL)
+#define  PIO_TRAIN_VEC_VEC     0x00000000ffffffffULL
+
+#define PIO_ARB_CTL            (FZC_PIO + 0x00070UL)
+#define  PIO_ARB_CTL_CTL       0x00000000ffffffffULL
+
+#define PIO_ARB_DBG_VEC                (FZC_PIO + 0x00078UL)
+#define  PIO_ARB_DBG_VEC_VEC   0x00000000ffffffffULL
+
+#define SYS_ERR_MASK           (FZC_PIO + 0x00090UL)
+#define  SYS_ERR_MASK_META2    0x0000000000000400ULL
+#define  SYS_ERR_MASK_META1    0x0000000000000200ULL
+#define  SYS_ERR_MASK_PEU      0x0000000000000100ULL
+#define  SYS_ERR_MASK_TXC      0x0000000000000080ULL
+#define  SYS_ERR_MASK_RDMC     0x0000000000000040ULL
+#define  SYS_ERR_MASK_TDMC     0x0000000000000020ULL
+#define  SYS_ERR_MASK_ZCP      0x0000000000000010ULL
+#define  SYS_ERR_MASK_FFLP     0x0000000000000008ULL
+#define  SYS_ERR_MASK_IPP      0x0000000000000004ULL
+#define  SYS_ERR_MASK_MAC      0x0000000000000002ULL
+#define  SYS_ERR_MASK_SMX      0x0000000000000001ULL
+
+#define SYS_ERR_STAT                   (FZC_PIO + 0x00098UL)
+#define  SYS_ERR_STAT_META2            0x0000000000000400ULL
+#define  SYS_ERR_STAT_META1            0x0000000000000200ULL
+#define  SYS_ERR_STAT_PEU              0x0000000000000100ULL
+#define  SYS_ERR_STAT_TXC              0x0000000000000080ULL
+#define  SYS_ERR_STAT_RDMC             0x0000000000000040ULL
+#define  SYS_ERR_STAT_TDMC             0x0000000000000020ULL
+#define  SYS_ERR_STAT_ZCP              0x0000000000000010ULL
+#define  SYS_ERR_STAT_FFLP             0x0000000000000008ULL
+#define  SYS_ERR_STAT_IPP              0x0000000000000004ULL
+#define  SYS_ERR_STAT_MAC              0x0000000000000002ULL
+#define  SYS_ERR_STAT_SMX              0x0000000000000001ULL
+
+#define SID(LDG)                       (FZC_PIO + 0x10200UL + (LDG) * 8UL)
+#define  SID_FUNC                      0x0000000000000060ULL
+#define  SID_FUNC_SHIFT                        5
+#define  SID_VECTOR                    0x000000000000001fULL
+#define  SID_VECTOR_SHIFT              0
+
+#define LDG_NUM(LDN)                   (FZC_PIO + 0x20000UL + (LDN) * 8UL)
+
+#define XMAC_PORT0_OFF                 (FZC_MAC + 0x000000)
+#define XMAC_PORT1_OFF                 (FZC_MAC + 0x006000)
+#define BMAC_PORT2_OFF                 (FZC_MAC + 0x00c000)
+#define BMAC_PORT3_OFF                 (FZC_MAC + 0x010000)
+
+/* XMAC registers, offset from np->mac_regs  */
+
+#define XTXMAC_SW_RST                  0x00000UL
+#define  XTXMAC_SW_RST_REG_RS          0x0000000000000002ULL
+#define  XTXMAC_SW_RST_SOFT_RST                0x0000000000000001ULL
+
+#define XRXMAC_SW_RST                  0x00008UL
+#define  XRXMAC_SW_RST_REG_RS          0x0000000000000002ULL
+#define  XRXMAC_SW_RST_SOFT_RST                0x0000000000000001ULL
+
+#define XTXMAC_STATUS                  0x00020UL
+#define  XTXMAC_STATUS_FRAME_CNT_EXP   0x0000000000000800ULL
+#define  XTXMAC_STATUS_BYTE_CNT_EXP    0x0000000000000400ULL
+#define  XTXMAC_STATUS_TXFIFO_XFR_ERR  0x0000000000000010ULL
+#define  XTXMAC_STATUS_TXMAC_OFLOW     0x0000000000000008ULL
+#define  XTXMAC_STATUS_MAX_PSIZE_ERR   0x0000000000000004ULL
+#define  XTXMAC_STATUS_TXMAC_UFLOW     0x0000000000000002ULL
+#define  XTXMAC_STATUS_FRAME_XMITED    0x0000000000000001ULL
+
+/* XMAC receive status register bits. */
+#define XRXMAC_STATUS                  0x00028UL
+#define  XRXMAC_STATUS_RXHIST7_CNT_EXP 0x0000000000100000ULL
+#define  XRXMAC_STATUS_LCL_FLT_STATUS  0x0000000000080000ULL
+#define  XRXMAC_STATUS_RFLT_DET                0x0000000000040000ULL
+#define  XRXMAC_STATUS_LFLT_CNT_EXP    0x0000000000020000ULL
+/* NOTE(review): PHY_MDINT and ALIGNERR_CNT_EXP both map to bit 16
+ * (0x10000).  Verify against the Neptune register documentation
+ * whether the two events really share a bit or one value is a typo.
+ */
+#define  XRXMAC_STATUS_PHY_MDINT       0x0000000000010000ULL
+#define  XRXMAC_STATUS_ALIGNERR_CNT_EXP        0x0000000000010000ULL
+#define  XRXMAC_STATUS_RXFRAG_CNT_EXP  0x0000000000008000ULL
+#define  XRXMAC_STATUS_RXMULTF_CNT_EXP 0x0000000000004000ULL
+#define  XRXMAC_STATUS_RXBCAST_CNT_EXP 0x0000000000002000ULL
+#define  XRXMAC_STATUS_RXHIST6_CNT_EXP 0x0000000000001000ULL
+#define  XRXMAC_STATUS_RXHIST5_CNT_EXP 0x0000000000000800ULL
+#define  XRXMAC_STATUS_RXHIST4_CNT_EXP 0x0000000000000400ULL
+#define  XRXMAC_STATUS_RXHIST3_CNT_EXP 0x0000000000000200ULL
+#define  XRXMAC_STATUS_RXHIST2_CNT_EXP 0x0000000000000100ULL
+#define  XRXMAC_STATUS_RXHIST1_CNT_EXP 0x0000000000000080ULL
+#define  XRXMAC_STATUS_RXOCTET_CNT_EXP 0x0000000000000040ULL
+#define  XRXMAC_STATUS_CVIOLERR_CNT_EXP        0x0000000000000020ULL
+#define  XRXMAC_STATUS_LENERR_CNT_EXP  0x0000000000000010ULL
+#define  XRXMAC_STATUS_CRCERR_CNT_EXP  0x0000000000000008ULL
+#define  XRXMAC_STATUS_RXUFLOW         0x0000000000000004ULL
+#define  XRXMAC_STATUS_RXOFLOW         0x0000000000000002ULL
+#define  XRXMAC_STATUS_FRAME_RCVD      0x0000000000000001ULL
+
+#define XMAC_FC_STAT                   0x00030UL
+#define  XMAC_FC_STAT_RX_RCV_PAUSE_TIME        0x00000000ffff0000ULL
+#define  XMAC_FC_STAT_TX_MAC_NPAUSE    0x0000000000000004ULL
+#define  XMAC_FC_STAT_TX_MAC_PAUSE     0x0000000000000002ULL
+#define  XMAC_FC_STAT_RX_MAC_RPAUSE    0x0000000000000001ULL
+
+#define XTXMAC_STAT_MSK                        0x00040UL
+#define  XTXMAC_STAT_MSK_FRAME_CNT_EXP 0x0000000000000800ULL
+#define  XTXMAC_STAT_MSK_BYTE_CNT_EXP  0x0000000000000400ULL
+#define  XTXMAC_STAT_MSK_TXFIFO_XFR_ERR        0x0000000000000010ULL
+#define  XTXMAC_STAT_MSK_TXMAC_OFLOW   0x0000000000000008ULL
+#define  XTXMAC_STAT_MSK_MAX_PSIZE_ERR 0x0000000000000004ULL
+#define  XTXMAC_STAT_MSK_TXMAC_UFLOW   0x0000000000000002ULL
+#define  XTXMAC_STAT_MSK_FRAME_XMITED  0x0000000000000001ULL
+
+#define XRXMAC_STAT_MSK                                0x00048UL
+#define  XRXMAC_STAT_MSK_LCL_FLT_STAT_MSK      0x0000000000080000ULL
+#define  XRXMAC_STAT_MSK_RFLT_DET              0x0000000000040000ULL
+#define  XRXMAC_STAT_MSK_LFLT_CNT_EXP          0x0000000000020000ULL
+#define  XRXMAC_STAT_MSK_PHY_MDINT             0x0000000000010000ULL
+#define  XRXMAC_STAT_MSK_RXFRAG_CNT_EXP                0x0000000000008000ULL
+#define  XRXMAC_STAT_MSK_RXMULTF_CNT_EXP       0x0000000000004000ULL
+#define  XRXMAC_STAT_MSK_RXBCAST_CNT_EXP       0x0000000000002000ULL
+#define  XRXMAC_STAT_MSK_RXHIST6_CNT_EXP       0x0000000000001000ULL
+#define  XRXMAC_STAT_MSK_RXHIST5_CNT_EXP       0x0000000000000800ULL
+#define  XRXMAC_STAT_MSK_RXHIST4_CNT_EXP       0x0000000000000400ULL
+#define  XRXMAC_STAT_MSK_RXHIST3_CNT_EXP       0x0000000000000200ULL
+#define  XRXMAC_STAT_MSK_RXHIST2_CNT_EXP       0x0000000000000100ULL
+#define  XRXMAC_STAT_MSK_RXHIST1_CNT_EXP       0x0000000000000080ULL
+#define  XRXMAC_STAT_MSK_RXOCTET_CNT_EXP       0x0000000000000040ULL
+#define  XRXMAC_STAT_MSK_CVIOLERR_CNT_EXP      0x0000000000000020ULL
+#define  XRXMAC_STAT_MSK_LENERR_CNT_EXP                0x0000000000000010ULL
+#define  XRXMAC_STAT_MSK_CRCERR_CNT_EXP                0x0000000000000008ULL
+#define  XRXMAC_STAT_MSK_RXUFLOW_CNT_EXP       0x0000000000000004ULL
+#define  XRXMAC_STAT_MSK_RXOFLOW_CNT_EXP       0x0000000000000002ULL
+#define  XRXMAC_STAT_MSK_FRAME_RCVD            0x0000000000000001ULL
+
+#define XMAC_FC_MSK                    0x00050UL
+#define  XMAC_FC_MSK_TX_MAC_NPAUSE     0x0000000000000004ULL
+#define  XMAC_FC_MSK_TX_MAC_PAUSE      0x0000000000000002ULL
+#define  XMAC_FC_MSK_RX_MAC_RPAUSE     0x0000000000000001ULL
+
+#define XMAC_CONFIG                    0x00060UL
+#define  XMAC_CONFIG_SEL_CLK_25MHZ     0x0000000080000000ULL
+#define  XMAC_CONFIG_1G_PCS_BYPASS     0x0000000040000000ULL
+#define  XMAC_CONFIG_10G_XPCS_BYPASS   0x0000000020000000ULL
+#define  XMAC_CONFIG_MODE_MASK         0x0000000018000000ULL
+#define  XMAC_CONFIG_MODE_XGMII                0x0000000000000000ULL
+#define  XMAC_CONFIG_MODE_GMII         0x0000000008000000ULL
+#define  XMAC_CONFIG_MODE_MII          0x0000000010000000ULL
+#define  XMAC_CONFIG_LFS_DISABLE       0x0000000004000000ULL
+#define  XMAC_CONFIG_LOOPBACK          0x0000000002000000ULL
+#define  XMAC_CONFIG_TX_OUTPUT_EN      0x0000000001000000ULL
+#define  XMAC_CONFIG_SEL_POR_CLK_SRC   0x0000000000800000ULL
+#define  XMAC_CONFIG_LED_POLARITY      0x0000000000400000ULL
+#define  XMAC_CONFIG_FORCE_LED_ON      0x0000000000200000ULL
+#define  XMAC_CONFIG_PASS_FLOW_CTRL    0x0000000000100000ULL
+#define  XMAC_CONFIG_RCV_PAUSE_ENABLE  0x0000000000080000ULL
+#define  XMAC_CONFIG_MAC2IPP_PKT_CNT_EN        0x0000000000040000ULL
+#define  XMAC_CONFIG_STRIP_CRC         0x0000000000020000ULL
+#define  XMAC_CONFIG_ADDR_FILTER_EN    0x0000000000010000ULL
+#define  XMAC_CONFIG_HASH_FILTER_EN    0x0000000000008000ULL
+#define  XMAC_CONFIG_RX_CODEV_CHK_DIS  0x0000000000004000ULL
+#define  XMAC_CONFIG_RESERVED_MULTICAST        0x0000000000002000ULL
+#define  XMAC_CONFIG_RX_CRC_CHK_DIS    0x0000000000001000ULL
+#define  XMAC_CONFIG_ERR_CHK_DIS       0x0000000000000800ULL
+#define  XMAC_CONFIG_PROMISC_GROUP     0x0000000000000400ULL
+#define  XMAC_CONFIG_PROMISCUOUS       0x0000000000000200ULL
+#define  XMAC_CONFIG_RX_MAC_ENABLE     0x0000000000000100ULL
+#define  XMAC_CONFIG_WARNING_MSG_EN    0x0000000000000080ULL
+#define  XMAC_CONFIG_ALWAYS_NO_CRC     0x0000000000000008ULL
+#define  XMAC_CONFIG_VAR_MIN_IPG_EN    0x0000000000000004ULL
+#define  XMAC_CONFIG_STRETCH_MODE      0x0000000000000002ULL
+#define  XMAC_CONFIG_TX_ENABLE         0x0000000000000001ULL
+
+#define XMAC_IPG                       0x00080UL
+#define  XMAC_IPG_STRETCH_CONST                0x0000000000e00000ULL
+#define  XMAC_IPG_STRETCH_CONST_SHIFT  21
+#define  XMAC_IPG_STRETCH_RATIO                0x00000000001f0000ULL
+#define  XMAC_IPG_STRETCH_RATIO_SHIFT  16
+#define  XMAC_IPG_IPG_MII_GMII         0x000000000000ff00ULL
+#define  XMAC_IPG_IPG_MII_GMII_SHIFT   8
+#define  XMAC_IPG_IPG_XGMII            0x0000000000000007ULL
+#define  XMAC_IPG_IPG_XGMII_SHIFT      0
+
+#define IPG_12_15_XGMII                        3
+#define IPG_16_19_XGMII                        4
+#define IPG_20_23_XGMII                        5
+#define IPG_12_MII_GMII                        10
+#define IPG_13_MII_GMII                        11
+#define IPG_14_MII_GMII                        12
+#define IPG_15_MII_GMII                        13
+#define IPG_16_MII_GMII                        14
+
+#define XMAC_MIN                       0x00088UL
+#define  XMAC_MIN_RX_MIN_PKT_SIZE      0x000000003ff00000ULL
+#define  XMAC_MIN_RX_MIN_PKT_SIZE_SHFT 20
+#define  XMAC_MIN_SLOT_TIME            0x000000000003fc00ULL
+#define  XMAC_MIN_SLOT_TIME_SHFT       10
+#define  XMAC_MIN_TX_MIN_PKT_SIZE      0x00000000000003ffULL
+#define  XMAC_MIN_TX_MIN_PKT_SIZE_SHFT 0
+
+#define XMAC_MAX                       0x00090UL
+#define  XMAC_MAX_FRAME_SIZE           0x0000000000003fffULL
+#define  XMAC_MAX_FRAME_SIZE_SHFT      0
+
+#define XMAC_ADDR0                     0x000a0UL
+#define  XMAC_ADDR0_ADDR0              0x000000000000ffffULL
+
+#define XMAC_ADDR1                     0x000a8UL
+#define  XMAC_ADDR1_ADDR1              0x000000000000ffffULL
+
+#define XMAC_ADDR2                     0x000b0UL
+#define  XMAC_ADDR2_ADDR2              0x000000000000ffffULL
+
+#define XMAC_ADDR_CMPEN                        0x00208UL
+#define  XMAC_ADDR_CMPEN_EN15          0x0000000000008000ULL
+#define  XMAC_ADDR_CMPEN_EN14          0x0000000000004000ULL
+#define  XMAC_ADDR_CMPEN_EN13          0x0000000000002000ULL
+#define  XMAC_ADDR_CMPEN_EN12          0x0000000000001000ULL
+#define  XMAC_ADDR_CMPEN_EN11          0x0000000000000800ULL
+#define  XMAC_ADDR_CMPEN_EN10          0x0000000000000400ULL
+#define  XMAC_ADDR_CMPEN_EN9           0x0000000000000200ULL
+#define  XMAC_ADDR_CMPEN_EN8           0x0000000000000100ULL
+#define  XMAC_ADDR_CMPEN_EN7           0x0000000000000080ULL
+#define  XMAC_ADDR_CMPEN_EN6           0x0000000000000040ULL
+#define  XMAC_ADDR_CMPEN_EN5           0x0000000000000020ULL
+#define  XMAC_ADDR_CMPEN_EN4           0x0000000000000010ULL
+#define  XMAC_ADDR_CMPEN_EN3           0x0000000000000008ULL
+#define  XMAC_ADDR_CMPEN_EN2           0x0000000000000004ULL
+#define  XMAC_ADDR_CMPEN_EN1           0x0000000000000002ULL
+#define  XMAC_ADDR_CMPEN_EN0           0x0000000000000001ULL
+
+#define XMAC_NUM_ALT_ADDR              16
+
+#define XMAC_ALT_ADDR0(NUM)            (0x00218UL + (NUM)*0x18UL)
+#define  XMAC_ALT_ADDR0_ADDR0          0x000000000000ffffULL
+
+#define XMAC_ALT_ADDR1(NUM)            (0x00220UL + (NUM)*0x18UL)
+#define  XMAC_ALT_ADDR1_ADDR1          0x000000000000ffffULL
+
+#define XMAC_ALT_ADDR2(NUM)            (0x00228UL + (NUM)*0x18UL)
+#define  XMAC_ALT_ADDR2_ADDR2          0x000000000000ffffULL
+
+#define XMAC_ADD_FILT0                 0x00818UL
+#define  XMAC_ADD_FILT0_FILT0          0x000000000000ffffULL
+
+#define XMAC_ADD_FILT1                 0x00820UL
+#define  XMAC_ADD_FILT1_FILT1          0x000000000000ffffULL
+
+#define XMAC_ADD_FILT2                 0x00828UL
+#define  XMAC_ADD_FILT2_FILT2          0x000000000000ffffULL
+
+#define XMAC_ADD_FILT12_MASK           0x00830UL
+#define  XMAC_ADD_FILT12_MASK_VAL      0x00000000000000ffULL
+
+#define XMAC_ADD_FILT00_MASK           0x00838UL
+#define  XMAC_ADD_FILT00_MASK_VAL      0x000000000000ffffULL
+
+#define XMAC_HASH_TBL(NUM)             (0x00840UL + (NUM) * 0x8UL)
+#define XMAC_HASH_TBL_VAL              0x000000000000ffffULL
+
+#define XMAC_NUM_HOST_INFO             20
+
+#define XMAC_HOST_INFO(NUM)            (0x00900UL + (NUM) * 0x8UL)
+
+#define XMAC_PA_DATA0                  0x00b80UL
+#define XMAC_PA_DATA0_VAL              0x00000000ffffffffULL
+
+#define XMAC_PA_DATA1                  0x00b88UL
+#define XMAC_PA_DATA1_VAL              0x00000000ffffffffULL
+
+#define XMAC_DEBUG_SEL                 0x00b90UL
+#define  XMAC_DEBUG_SEL_XMAC           0x0000000000000078ULL
+#define  XMAC_DEBUG_SEL_MAC            0x0000000000000007ULL
+
+#define XMAC_TRAIN_VEC                 0x00b98UL
+#define  XMAC_TRAIN_VEC_VAL            0x00000000ffffffffULL
+
+#define RXMAC_BT_CNT                   0x00100UL
+#define  RXMAC_BT_CNT_COUNT            0x00000000ffffffffULL
+
+#define RXMAC_BC_FRM_CNT               0x00108UL
+#define  RXMAC_BC_FRM_CNT_COUNT                0x00000000001fffffULL
+
+#define RXMAC_MC_FRM_CNT               0x00110UL
+#define  RXMAC_MC_FRM_CNT_COUNT                0x00000000001fffffULL
+
+#define RXMAC_FRAG_CNT                 0x00118UL
+#define  RXMAC_FRAG_CNT_COUNT          0x00000000001fffffULL
+
+#define RXMAC_HIST_CNT1                        0x00120UL
+#define  RXMAC_HIST_CNT1_COUNT         0x00000000001fffffULL
+
+#define RXMAC_HIST_CNT2                        0x00128UL
+#define  RXMAC_HIST_CNT2_COUNT         0x00000000001fffffULL
+
+#define RXMAC_HIST_CNT3                        0x00130UL
+#define  RXMAC_HIST_CNT3_COUNT         0x00000000000fffffULL
+
+#define RXMAC_HIST_CNT4                        0x00138UL
+#define  RXMAC_HIST_CNT4_COUNT         0x000000000007ffffULL
+
+#define RXMAC_HIST_CNT5                        0x00140UL
+#define  RXMAC_HIST_CNT5_COUNT         0x000000000003ffffULL
+
+#define RXMAC_HIST_CNT6                        0x00148UL
+#define  RXMAC_HIST_CNT6_COUNT         0x000000000000ffffULL
+
+#define RXMAC_MPSZER_CNT               0x00150UL
+#define  RXMAC_MPSZER_CNT_COUNT                0x00000000000000ffULL
+
+#define RXMAC_CRC_ER_CNT               0x00158UL
+#define  RXMAC_CRC_ER_CNT_COUNT                0x00000000000000ffULL
+
+#define RXMAC_CD_VIO_CNT               0x00160UL
+#define  RXMAC_CD_VIO_CNT_COUNT                0x00000000000000ffULL
+
+#define RXMAC_ALIGN_ERR_CNT            0x00168UL
+#define  RXMAC_ALIGN_ERR_CNT_COUNT     0x00000000000000ffULL
+
+#define TXMAC_FRM_CNT                  0x00170UL
+#define  TXMAC_FRM_CNT_COUNT           0x00000000ffffffffULL
+
+#define TXMAC_BYTE_CNT                 0x00178UL
+#define  TXMAC_BYTE_CNT_COUNT          0x00000000ffffffffULL
+
+#define LINK_FAULT_CNT                 0x00180UL
+#define  LINK_FAULT_CNT_COUNT          0x00000000000000ffULL
+
+#define RXMAC_HIST_CNT7                        0x00188UL
+#define  RXMAC_HIST_CNT7_COUNT         0x0000000007ffffffULL
+
+#define XMAC_SM_REG                    0x001a8UL
+#define  XMAC_SM_REG_STATE             0x00000000ffffffffULL
+
+#define XMAC_INTER1                    0x001b0UL
+#define  XMAC_INTERN1_SIGNALS1         0x00000000ffffffffULL
+
+#define XMAC_INTER2                    0x001b8UL
+#define  XMAC_INTERN2_SIGNALS2         0x00000000ffffffffULL
+
+/* BMAC registers, offset from np->mac_regs  */
+
+#define BTXMAC_SW_RST                  0x00000UL
+#define  BTXMAC_SW_RST_RESET           0x0000000000000001ULL
+
+#define BRXMAC_SW_RST                  0x00008UL
+#define  BRXMAC_SW_RST_RESET           0x0000000000000001ULL
+
+#define BMAC_SEND_PAUSE                        0x00010UL
+#define  BMAC_SEND_PAUSE_SEND          0x0000000000010000ULL
+#define  BMAC_SEND_PAUSE_TIME          0x000000000000ffffULL
+
+#define BTXMAC_STATUS                  0x00020UL
+#define  BTXMAC_STATUS_XMIT            0x0000000000000001ULL
+#define  BTXMAC_STATUS_UNDERRUN                0x0000000000000002ULL
+#define  BTXMAC_STATUS_MAX_PKT_ERR     0x0000000000000004ULL
+#define  BTXMAC_STATUS_BYTE_CNT_EXP    0x0000000000000400ULL
+#define  BTXMAC_STATUS_FRAME_CNT_EXP   0x0000000000000800ULL
+
+#define BRXMAC_STATUS                  0x00028UL
+#define  BRXMAC_STATUS_RX_PKT          0x0000000000000001ULL
+#define  BRXMAC_STATUS_OVERFLOW                0x0000000000000002ULL
+#define  BRXMAC_STATUS_FRAME_CNT_EXP   0x0000000000000004ULL
+#define  BRXMAC_STATUS_ALIGN_ERR_EXP   0x0000000000000008ULL
+#define  BRXMAC_STATUS_CRC_ERR_EXP     0x0000000000000010ULL
+#define  BRXMAC_STATUS_LEN_ERR_EXP     0x0000000000000020ULL
+
+#define BMAC_CTRL_STATUS               0x00030UL
+#define  BMAC_CTRL_STATUS_PAUSE_RECV   0x0000000000000001ULL
+#define  BMAC_CTRL_STATUS_PAUSE                0x0000000000000002ULL
+#define  BMAC_CTRL_STATUS_NOPAUSE      0x0000000000000004ULL
+#define  BMAC_CTRL_STATUS_TIME         0x00000000ffff0000ULL
+#define  BMAC_CTRL_STATUS_TIME_SHIFT   16
+
+#define BTXMAC_STATUS_MASK             0x00040UL
+#define BRXMAC_STATUS_MASK             0x00048UL
+#define BMAC_CTRL_STATUS_MASK          0x00050UL
+
+#define BTXMAC_CONFIG                  0x00060UL
+#define  BTXMAC_CONFIG_ENABLE          0x0000000000000001ULL
+#define  BTXMAC_CONFIG_FCS_DISABLE     0x0000000000000002ULL
+
+#define BRXMAC_CONFIG                  0x00068UL
+#define  BRXMAC_CONFIG_DISCARD_DIS     0x0000000000000080ULL
+#define  BRXMAC_CONFIG_ADDR_FILT_EN    0x0000000000000040ULL
+#define  BRXMAC_CONFIG_HASH_FILT_EN    0x0000000000000020ULL
+#define  BRXMAC_CONFIG_PROMISC_GRP     0x0000000000000010ULL
+#define  BRXMAC_CONFIG_PROMISC         0x0000000000000008ULL
+#define  BRXMAC_CONFIG_STRIP_FCS       0x0000000000000004ULL
+#define  BRXMAC_CONFIG_STRIP_PAD       0x0000000000000002ULL
+#define  BRXMAC_CONFIG_ENABLE          0x0000000000000001ULL
+
+#define BMAC_CTRL_CONFIG               0x00070UL
+#define  BMAC_CTRL_CONFIG_TX_PAUSE_EN  0x0000000000000001ULL
+#define  BMAC_CTRL_CONFIG_RX_PAUSE_EN  0x0000000000000002ULL
+#define  BMAC_CTRL_CONFIG_PASS_CTRL    0x0000000000000004ULL
+
+#define BMAC_XIF_CONFIG                        0x00078UL
+#define  BMAC_XIF_CONFIG_TX_OUTPUT_EN  0x0000000000000001ULL
+#define  BMAC_XIF_CONFIG_MII_LOOPBACK  0x0000000000000002ULL
+#define  BMAC_XIF_CONFIG_GMII_MODE     0x0000000000000008ULL
+#define  BMAC_XIF_CONFIG_LINK_LED      0x0000000000000020ULL
+#define  BMAC_XIF_CONFIG_LED_POLARITY  0x0000000000000040ULL
+#define  BMAC_XIF_CONFIG_25MHZ_CLOCK   0x0000000000000080ULL
+
+#define BMAC_MIN_FRAME                 0x000a0UL
+#define  BMAC_MIN_FRAME_VAL            0x00000000000003ffULL
+
+#define BMAC_MAX_FRAME                 0x000a8UL
+#define  BMAC_MAX_FRAME_MAX_BURST      0x000000003fff0000ULL
+#define  BMAC_MAX_FRAME_MAX_BURST_SHIFT        16
+#define  BMAC_MAX_FRAME_MAX_FRAME      0x0000000000003fffULL
+#define  BMAC_MAX_FRAME_MAX_FRAME_SHIFT        0
+
+#define BMAC_PREAMBLE_SIZE             0x000b0UL
+#define  BMAC_PREAMBLE_SIZE_VAL                0x00000000000003ffULL
+
+#define BMAC_CTRL_TYPE                 0x000c8UL
+
+#define BMAC_ADDR0                     0x00100UL
+#define  BMAC_ADDR0_ADDR0              0x000000000000ffffULL
+
+#define BMAC_ADDR1                     0x00108UL
+#define  BMAC_ADDR1_ADDR1              0x000000000000ffffULL
+
+#define BMAC_ADDR2                     0x00110UL
+#define  BMAC_ADDR2_ADDR2              0x000000000000ffffULL
+
+#define BMAC_NUM_ALT_ADDR              6
+
+#define BMAC_ALT_ADDR0(NUM)            (0x00118UL + (NUM)*0x18UL)
+#define  BMAC_ALT_ADDR0_ADDR0          0x000000000000ffffULL
+
+#define BMAC_ALT_ADDR1(NUM)            (0x00120UL + (NUM)*0x18UL)
+#define  BMAC_ALT_ADDR1_ADDR1          0x000000000000ffffULL
+
+#define BMAC_ALT_ADDR2(NUM)            (0x00128UL + (NUM)*0x18UL)
+#define  BMAC_ALT_ADDR2_ADDR2          0x000000000000ffffULL
+
+#define BMAC_FC_ADDR0                  0x00268UL
+#define  BMAC_FC_ADDR0_ADDR0           0x000000000000ffffULL
+
+#define BMAC_FC_ADDR1                  0x00270UL
+#define  BMAC_FC_ADDR1_ADDR1           0x000000000000ffffULL
+
+#define BMAC_FC_ADDR2                  0x00278UL
+#define  BMAC_FC_ADDR2_ADDR2           0x000000000000ffffULL
+
+#define BMAC_ADD_FILT0                 0x00298UL
+#define  BMAC_ADD_FILT0_FILT0          0x000000000000ffffULL
+
+#define BMAC_ADD_FILT1                 0x002a0UL
+#define  BMAC_ADD_FILT1_FILT1          0x000000000000ffffULL
+
+#define BMAC_ADD_FILT2                 0x002a8UL
+#define  BMAC_ADD_FILT2_FILT2          0x000000000000ffffULL
+
+#define BMAC_ADD_FILT12_MASK           0x002b0UL
+#define  BMAC_ADD_FILT12_MASK_VAL      0x00000000000000ffULL
+
+#define BMAC_ADD_FILT00_MASK           0x002b8UL
+#define  BMAC_ADD_FILT00_MASK_VAL      0x000000000000ffffULL
+
+#define BMAC_HASH_TBL(NUM)             (0x002c0UL + (NUM) * 0x8UL)
+#define BMAC_HASH_TBL_VAL              0x000000000000ffffULL
+
+/* BMAC receive-side error/frame counters. */
+#define BRXMAC_FRAME_CNT               0x00370
+#define  BRXMAC_FRAME_CNT_COUNT                0x000000000000ffffULL
+
+#define BRXMAC_MAX_LEN_ERR_CNT         0x00378
+
+#define BRXMAC_ALIGN_ERR_CNT           0x00380
+#define  BRXMAC_ALIGN_ERR_CNT_COUNT    0x000000000000ffffULL
+
+#define BRXMAC_CRC_ERR_CNT             0x00388
+/* NOTE(review): the line below re-defines BRXMAC_ALIGN_ERR_CNT_COUNT
+ * (identical value, so it is a benign duplicate) where a
+ * BRXMAC_CRC_ERR_CNT_COUNT mask was presumably intended -- confirm.
+ */
+#define  BRXMAC_ALIGN_ERR_CNT_COUNT    0x000000000000ffffULL
+
+#define BRXMAC_CODE_VIOL_ERR_CNT       0x00390
+#define  BRXMAC_CODE_VIOL_ERR_CNT_COUNT        0x000000000000ffffULL
+
+#define BMAC_STATE_MACHINE             0x003a0
+
+#define BMAC_ADDR_CMPEN                        0x003f8UL
+#define  BMAC_ADDR_CMPEN_EN15          0x0000000000008000ULL
+#define  BMAC_ADDR_CMPEN_EN14          0x0000000000004000ULL
+#define  BMAC_ADDR_CMPEN_EN13          0x0000000000002000ULL
+#define  BMAC_ADDR_CMPEN_EN12          0x0000000000001000ULL
+#define  BMAC_ADDR_CMPEN_EN11          0x0000000000000800ULL
+#define  BMAC_ADDR_CMPEN_EN10          0x0000000000000400ULL
+#define  BMAC_ADDR_CMPEN_EN9           0x0000000000000200ULL
+#define  BMAC_ADDR_CMPEN_EN8           0x0000000000000100ULL
+#define  BMAC_ADDR_CMPEN_EN7           0x0000000000000080ULL
+#define  BMAC_ADDR_CMPEN_EN6           0x0000000000000040ULL
+#define  BMAC_ADDR_CMPEN_EN5           0x0000000000000020ULL
+#define  BMAC_ADDR_CMPEN_EN4           0x0000000000000010ULL
+#define  BMAC_ADDR_CMPEN_EN3           0x0000000000000008ULL
+#define  BMAC_ADDR_CMPEN_EN2           0x0000000000000004ULL
+#define  BMAC_ADDR_CMPEN_EN1           0x0000000000000002ULL
+#define  BMAC_ADDR_CMPEN_EN0           0x0000000000000001ULL
+
+#define BMAC_NUM_HOST_INFO             9
+
+#define BMAC_HOST_INFO(NUM)            (0x00400UL + (NUM) * 0x8UL)
+
+#define BTXMAC_BYTE_CNT                        0x00448UL
+#define  BTXMAC_BYTE_CNT_COUNT         0x00000000ffffffffULL
+
+#define BTXMAC_FRM_CNT                 0x00450UL
+#define  BTXMAC_FRM_CNT_COUNT          0x00000000ffffffffULL
+
+#define BRXMAC_BYTE_CNT                        0x00458UL
+#define  BRXMAC_BYTE_CNT_COUNT         0x00000000ffffffffULL
+
+#define HOST_INFO_MPR                  0x0000000000000100ULL
+#define HOST_INFO_MACRDCTBLN           0x0000000000000007ULL
+
+/* XPCS registers, offset from np->regs + np->xpcs_off  */
+
+#define XPCS_CONTROL1                  (FZC_MAC + 0x00000UL)
+#define  XPCS_CONTROL1_RESET           0x0000000000008000ULL
+#define  XPCS_CONTROL1_LOOPBACK                0x0000000000004000ULL
+#define  XPCS_CONTROL1_SPEED_SELECT3   0x0000000000002000ULL
+#define  XPCS_CONTROL1_CSR_LOW_PWR     0x0000000000000800ULL
+#define  XPCS_CONTROL1_CSR_SPEED1      0x0000000000000040ULL
+#define  XPCS_CONTROL1_CSR_SPEED0      0x000000000000003cULL
+
+#define XPCS_STATUS1                   (FZC_MAC + 0x00008UL)
+#define  XPCS_STATUS1_CSR_FAULT                0x0000000000000080ULL
+#define  XPCS_STATUS1_CSR_RXLNK_STAT   0x0000000000000004ULL
+#define  XPCS_STATUS1_CSR_LPWR_ABLE    0x0000000000000002ULL
+
+#define XPCS_DEVICE_IDENTIFIER         (FZC_MAC + 0x00010UL)
+#define  XPCS_DEVICE_IDENTIFIER_VAL    0x00000000ffffffffULL
+
+#define XPCS_SPEED_ABILITY             (FZC_MAC + 0x00018UL)
+#define  XPCS_SPEED_ABILITY_10GIG      0x0000000000000001ULL
+
+#define XPCS_DEV_IN_PKG                        (FZC_MAC + 0x00020UL)
+#define  XPCS_DEV_IN_PKG_CSR_VEND2     0x0000000080000000ULL
+#define  XPCS_DEV_IN_PKG_CSR_VEND1     0x0000000040000000ULL
+#define  XPCS_DEV_IN_PKG_DTE_XS                0x0000000000000020ULL
+#define  XPCS_DEV_IN_PKG_PHY_XS                0x0000000000000010ULL
+#define  XPCS_DEV_IN_PKG_PCS           0x0000000000000008ULL
+#define  XPCS_DEV_IN_PKG_WIS           0x0000000000000004ULL
+#define  XPCS_DEV_IN_PKG_PMD_PMA       0x0000000000000002ULL
+#define  XPCS_DEV_IN_PKG_CLS22         0x0000000000000001ULL
+
+#define XPCS_CONTROL2                  (FZC_MAC + 0x00028UL)
+#define  XPCS_CONTROL2_CSR_PSC_SEL     0x0000000000000003ULL
+
+#define XPCS_STATUS2                   (FZC_MAC + 0x00030UL)
+#define  XPCS_STATUS2_CSR_DEV_PRES     0x000000000000c000ULL
+#define  XPCS_STATUS2_CSR_TX_FAULT     0x0000000000000800ULL
+#define  XPCS_STATUS2_CSR_RCV_FAULT    0x0000000000000400ULL
+#define  XPCS_STATUS2_TEN_GBASE_W      0x0000000000000004ULL
+#define  XPCS_STATUS2_TEN_GBASE_X      0x0000000000000002ULL
+#define  XPCS_STATUS2_TEN_GBASE_R      0x0000000000000001ULL
+
+#define XPCS_PKG_ID                    (FZC_MAC + 0x00038UL)
+#define  XPCS_PKG_ID_VAL               0x00000000ffffffffULL
+
+/* NOTE(review): the IDX argument is unused in the expansion -- every
+ * index yields the same register offset.  Confirm whether per-index
+ * addressing was intended here.
+ */
+#define XPCS_STATUS(IDX)               (FZC_MAC + 0x00040UL)
+#define  XPCS_STATUS_CSR_LANE_ALIGN    0x0000000000001000ULL
+#define  XPCS_STATUS_CSR_PATTEST_CAP   0x0000000000000800ULL
+#define  XPCS_STATUS_CSR_LANE3_SYNC    0x0000000000000008ULL
+#define  XPCS_STATUS_CSR_LANE2_SYNC    0x0000000000000004ULL
+#define  XPCS_STATUS_CSR_LANE1_SYNC    0x0000000000000002ULL
+#define  XPCS_STATUS_CSR_LANE0_SYNC    0x0000000000000001ULL
+
+#define XPCS_TEST_CONTROL              (FZC_MAC + 0x00048UL)
+#define  XPCS_TEST_CONTROL_TXTST_EN    0x0000000000000004ULL
+#define  XPCS_TEST_CONTROL_TPAT_SEL    0x0000000000000003ULL
+
+#define XPCS_CFG_VENDOR1               (FZC_MAC + 0x00050UL)
+#define  XPCS_CFG_VENDOR1_DBG_IOTST    0x0000000000000080ULL
+#define  XPCS_CFG_VENDOR1_DBG_SEL      0x0000000000000078ULL
+#define  XPCS_CFG_VENDOR1_BYPASS_DET   0x0000000000000004ULL
+#define  XPCS_CFG_VENDOR1_TXBUF_EN     0x0000000000000002ULL
+#define  XPCS_CFG_VENDOR1_XPCS_EN      0x0000000000000001ULL
+
+#define XPCS_DIAG_VENDOR2              (FZC_MAC + 0x00058UL)
+#define  XPCS_DIAG_VENDOR2_SSM_LANE3   0x0000000001e00000ULL
+#define  XPCS_DIAG_VENDOR2_SSM_LANE2   0x00000000001e0000ULL
+#define  XPCS_DIAG_VENDOR2_SSM_LANE1   0x000000000001e000ULL
+#define  XPCS_DIAG_VENDOR2_SSM_LANE0   0x0000000000001e00ULL
+#define  XPCS_DIAG_VENDOR2_EBUF_SM     0x00000000000001feULL
+#define  XPCS_DIAG_VENDOR2_RCV_SM      0x0000000000000001ULL
+
+#define XPCS_MASK1                     (FZC_MAC + 0x00060UL)
+#define  XPCS_MASK1_FAULT_MASK         0x0000000000000080ULL
+#define  XPCS_MASK1_RXALIGN_STAT_MSK   0x0000000000000004ULL
+
+#define XPCS_PKT_COUNT                 (FZC_MAC + 0x00068UL)
+#define  XPCS_PKT_COUNT_TX             0x00000000ffff0000ULL
+#define  XPCS_PKT_COUNT_RX             0x000000000000ffffULL
+
+#define XPCS_TX_SM                     (FZC_MAC + 0x00070UL)
+#define  XPCS_TX_SM_VAL                        0x000000000000000fULL
+
+#define XPCS_DESKEW_ERR_CNT            (FZC_MAC + 0x00078UL)
+#define  XPCS_DESKEW_ERR_CNT_VAL       0x00000000000000ffULL
+
+#define XPCS_SYMERR_CNT01              (FZC_MAC + 0x00080UL)
+#define  XPCS_SYMERR_CNT01_LANE1       0x00000000ffff0000ULL
+#define  XPCS_SYMERR_CNT01_LANE0       0x000000000000ffffULL
+
+#define XPCS_SYMERR_CNT23              (FZC_MAC + 0x00088UL)
+#define  XPCS_SYMERR_CNT23_LANE3       0x00000000ffff0000ULL
+#define  XPCS_SYMERR_CNT23_LANE2       0x000000000000ffffULL
+
+#define XPCS_TRAINING_VECTOR           (FZC_MAC + 0x00090UL)
+#define  XPCS_TRAINING_VECTOR_VAL      0x00000000ffffffffULL
+
+/* PCS registers, offset from np->regs + np->pcs_off  */
+
+#define PCS_MII_CTL                    (FZC_MAC + 0x00000UL)
+#define  PCS_MII_CTL_RST               0x0000000000008000ULL
+#define  PCS_MII_CTL_10_100_SPEED      0x0000000000002000ULL
+#define  PCS_MII_AUTONEG_EN            0x0000000000001000ULL
+#define  PCS_MII_PWR_DOWN              0x0000000000000800ULL
+#define  PCS_MII_ISOLATE               0x0000000000000400ULL
+#define  PCS_MII_AUTONEG_RESTART       0x0000000000000200ULL
+#define  PCS_MII_DUPLEX                        0x0000000000000100ULL
+#define  PCS_MII_COLL_TEST             0x0000000000000080ULL
+#define  PCS_MII_1000MB_SPEED          0x0000000000000040ULL
+
+#define PCS_MII_STAT                   (FZC_MAC + 0x00008UL)
+#define  PCS_MII_STAT_EXT_STATUS       0x0000000000000100ULL
+#define  PCS_MII_STAT_AUTONEG_DONE     0x0000000000000020ULL
+#define  PCS_MII_STAT_REMOTE_FAULT     0x0000000000000010ULL
+#define  PCS_MII_STAT_AUTONEG_ABLE     0x0000000000000008ULL
+#define  PCS_MII_STAT_LINK_STATUS      0x0000000000000004ULL
+#define  PCS_MII_STAT_JABBER_DET       0x0000000000000002ULL
+#define  PCS_MII_STAT_EXT_CAP          0x0000000000000001ULL
+
+#define PCS_MII_ADV                    (FZC_MAC + 0x00010UL)
+#define  PCS_MII_ADV_NEXT_PAGE         0x0000000000008000ULL
+#define  PCS_MII_ADV_ACK               0x0000000000004000ULL
+#define  PCS_MII_ADV_REMOTE_FAULT      0x0000000000003000ULL
+#define  PCS_MII_ADV_ASM_DIR           0x0000000000000100ULL
+#define  PCS_MII_ADV_PAUSE             0x0000000000000080ULL
+#define  PCS_MII_ADV_HALF_DUPLEX       0x0000000000000040ULL
+#define  PCS_MII_ADV_FULL_DUPLEX       0x0000000000000020ULL
+
+#define PCS_MII_PARTNER                        (FZC_MAC + 0x00018UL)
+#define  PCS_MII_PARTNER_NEXT_PAGE     0x0000000000008000ULL
+#define  PCS_MII_PARTNER_ACK           0x0000000000004000ULL
+#define  PCS_MII_PARTNER_REMOTE_FAULT  0x0000000000002000ULL
+#define  PCS_MII_PARTNER_PAUSE         0x0000000000000180ULL
+#define  PCS_MII_PARTNER_HALF_DUPLEX   0x0000000000000040ULL
+#define  PCS_MII_PARTNER_FULL_DUPLEX   0x0000000000000020ULL
+
+#define PCS_CONF                       (FZC_MAC + 0x00020UL)
+#define  PCS_CONF_MASK                 0x0000000000000040ULL
+#define  PCS_CONF_10MS_TMR_OVERRIDE    0x0000000000000020ULL
+#define  PCS_CONF_JITTER_STUDY         0x0000000000000018ULL
+#define  PCS_CONF_SIGDET_ACTIVE_LOW    0x0000000000000004ULL
+#define  PCS_CONF_SIGDET_OVERRIDE      0x0000000000000002ULL
+#define  PCS_CONF_ENABLE               0x0000000000000001ULL
+
+#define PCS_STATE                      (FZC_MAC + 0x00028UL)
+#define  PCS_STATE_D_PARTNER_FAIL      0x0000000020000000ULL
+#define  PCS_STATE_D_WAIT_C_CODES_ACK  0x0000000010000000ULL
+#define  PCS_STATE_D_SYNC_LOSS         0x0000000008000000ULL
+#define  PCS_STATE_D_NO_GOOD_C_CODES   0x0000000004000000ULL
+#define  PCS_STATE_D_SERDES            0x0000000002000000ULL
+#define  PCS_STATE_D_BREAKLINK_C_CODES 0x0000000001000000ULL
+#define  PCS_STATE_L_SIGDET            0x0000000000400000ULL
+#define  PCS_STATE_L_SYNC_LOSS         0x0000000000200000ULL
+#define  PCS_STATE_L_C_CODES           0x0000000000100000ULL
+#define  PCS_STATE_LINK_CFG_STATE      0x000000000001e000ULL
+#define  PCS_STATE_SEQ_DET_STATE       0x0000000000001800ULL
+#define  PCS_STATE_WORD_SYNC_STATE     0x0000000000000700ULL
+#define  PCS_STATE_NO_IDLE             0x000000000000000fULL
+
+#define PCS_INTERRUPT                  (FZC_MAC + 0x00030UL)
+#define  PCS_INTERRUPT_LSTATUS         0x0000000000000004ULL
+
+#define PCS_DPATH_MODE                 (FZC_MAC + 0x000a0UL)
+#define  PCS_DPATH_MODE_PCS            0x0000000000000000ULL
+#define  PCS_DPATH_MODE_MII            0x0000000000000002ULL
+#define  PCS_DPATH_MODE_LINKUP_F_ENAB  0x0000000000000001ULL
+
+#define PCS_PKT_CNT                    (FZC_MAC + 0x000c0UL)
+#define  PCS_PKT_CNT_RX                        0x0000000007ff0000ULL
+#define  PCS_PKT_CNT_TX                        0x00000000000007ffULL
+
+#define MIF_BB_MDC                     (FZC_MAC + 0x16000UL)
+#define  MIF_BB_MDC_CLK                        0x0000000000000001ULL
+
+#define MIF_BB_MDO                     (FZC_MAC + 0x16008UL)
+#define  MIF_BB_MDO_DAT                        0x0000000000000001ULL
+
+#define MIF_BB_MDO_EN                  (FZC_MAC + 0x16010UL)
+#define  MIF_BB_MDO_EN_VAL             0x0000000000000001ULL
+
+#define MIF_FRAME_OUTPUT               (FZC_MAC + 0x16018UL)
+#define  MIF_FRAME_OUTPUT_ST           0x00000000c0000000ULL
+#define  MIF_FRAME_OUTPUT_ST_SHIFT     30
+#define  MIF_FRAME_OUTPUT_OP_ADDR      0x0000000000000000ULL
+#define  MIF_FRAME_OUTPUT_OP_WRITE     0x0000000010000000ULL
+#define  MIF_FRAME_OUTPUT_OP_READ_INC  0x0000000020000000ULL
+#define  MIF_FRAME_OUTPUT_OP_READ      0x0000000030000000ULL
+#define  MIF_FRAME_OUTPUT_OP_SHIFT     28
+#define  MIF_FRAME_OUTPUT_PORT         0x000000000f800000ULL
+#define  MIF_FRAME_OUTPUT_PORT_SHIFT   23
+#define  MIF_FRAME_OUTPUT_REG          0x00000000007c0000ULL
+#define  MIF_FRAME_OUTPUT_REG_SHIFT    18
+#define  MIF_FRAME_OUTPUT_TA           0x0000000000030000ULL
+#define  MIF_FRAME_OUTPUT_TA_SHIFT     16
+#define  MIF_FRAME_OUTPUT_DATA         0x000000000000ffffULL
+#define  MIF_FRAME_OUTPUT_DATA_SHIFT   0
+
+/* Build a MIF frame for an MDIO address cycle (ST field = 0, OP =
+ * address; device in the REG field, register number in the DATA field).
+ * Arguments are parenthesized so expression arguments expand safely.
+ */
+#define MDIO_ADDR_OP(port, dev, reg) \
+       ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
+        MIF_FRAME_OUTPUT_OP_ADDR | \
+        ((port) << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
+        ((dev) << MIF_FRAME_OUTPUT_REG_SHIFT) | \
+        (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \
+        ((reg) << MIF_FRAME_OUTPUT_DATA_SHIFT))
+
+/* Build a MIF frame for an MDIO read (ST field = 0, OP = read).
+ * Arguments are parenthesized so expression arguments expand safely.
+ */
+#define MDIO_READ_OP(port, dev) \
+       ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
+        MIF_FRAME_OUTPUT_OP_READ | \
+        ((port) << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
+        ((dev) << MIF_FRAME_OUTPUT_REG_SHIFT) | \
+        (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT))
+
+/* Build a MIF frame for an MDIO write (ST field = 0, OP = write; the
+ * 16-bit payload goes in the DATA field).  Arguments are parenthesized
+ * so expression arguments expand safely.
+ */
+#define MDIO_WRITE_OP(port, dev, data) \
+       ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
+        MIF_FRAME_OUTPUT_OP_WRITE | \
+        ((port) << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
+        ((dev) << MIF_FRAME_OUTPUT_REG_SHIFT) | \
+        (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \
+        ((data) << MIF_FRAME_OUTPUT_DATA_SHIFT))
+
+/* Build a MIF frame for an MII read (ST field = 1, OP = 2).
+ * Arguments are parenthesized so expression arguments expand safely.
+ */
+#define MII_READ_OP(port, reg) \
+       ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
+        (2 << MIF_FRAME_OUTPUT_OP_SHIFT) | \
+        ((port) << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
+        ((reg) << MIF_FRAME_OUTPUT_REG_SHIFT) | \
+        (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT))
+
+/* Build a MIF frame for an MII write (ST field = 1, OP = 1; the 16-bit
+ * payload goes in the DATA field).  Arguments are parenthesized so
+ * expression arguments expand safely.
+ */
+#define MII_WRITE_OP(port, reg, data) \
+       ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
+        (1 << MIF_FRAME_OUTPUT_OP_SHIFT) | \
+        ((port) << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
+        ((reg) << MIF_FRAME_OUTPUT_REG_SHIFT) | \
+        (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \
+        ((data) << MIF_FRAME_OUTPUT_DATA_SHIFT))
+
+#define MIF_CONFIG                     (FZC_MAC + 0x16020UL)
+#define  MIF_CONFIG_ATCA_GE            0x0000000000010000ULL
+#define  MIF_CONFIG_INDIRECT_MODE      0x0000000000008000ULL
+#define  MIF_CONFIG_POLL_PRT_PHYADDR   0x0000000000003c00ULL
+#define  MIF_CONFIG_POLL_DEV_REG_ADDR  0x00000000000003e0ULL
+#define  MIF_CONFIG_BB_MODE            0x0000000000000010ULL
+#define  MIF_CONFIG_POLL_EN            0x0000000000000008ULL
+#define  MIF_CONFIG_BB_SER_SEL         0x0000000000000006ULL
+#define  MIF_CONFIG_MANUAL_MODE                0x0000000000000001ULL
+
+#define MIF_POLL_STATUS                        (FZC_MAC + 0x16028UL)
+#define  MIF_POLL_STATUS_DATA          0x00000000ffff0000ULL
+#define  MIF_POLL_STATUS_STAT          0x000000000000ffffULL
+
+#define MIF_POLL_MASK                  (FZC_MAC + 0x16030UL)
+#define  MIF_POLL_MASK_VAL             0x000000000000ffffULL
+
+#define MIF_SM                         (FZC_MAC + 0x16038UL)
+#define  MIF_SM_PORT_ADDR              0x00000000001f0000ULL
+#define  MIF_SM_MDI_1                  0x0000000000004000ULL
+#define  MIF_SM_MDI_0                  0x0000000000002400ULL
+#define  MIF_SM_MDCLK                  0x0000000000001000ULL
+#define  MIF_SM_MDO_EN                 0x0000000000000800ULL
+#define  MIF_SM_MDO                    0x0000000000000400ULL
+#define  MIF_SM_MDI                    0x0000000000000200ULL
+#define  MIF_SM_CTL                    0x00000000000001c0ULL
+#define  MIF_SM_EX                     0x000000000000003fULL
+
+#define MIF_STATUS                     (FZC_MAC + 0x16040UL)
+#define  MIF_STATUS_MDINT1             0x0000000000000020ULL
+#define  MIF_STATUS_MDINT0             0x0000000000000010ULL
+
+#define MIF_MASK                       (FZC_MAC + 0x16048UL)
+#define  MIF_MASK_MDINT1               0x0000000000000020ULL
+#define  MIF_MASK_MDINT0               0x0000000000000010ULL
+#define  MIF_MASK_PEU_ERR              0x0000000000000008ULL
+#define  MIF_MASK_YC                   0x0000000000000004ULL
+#define  MIF_MASK_XGE_ERR0             0x0000000000000002ULL
+#define  MIF_MASK_MIF_INIT_DONE                0x0000000000000001ULL
+
+#define ENET_SERDES_RESET              (FZC_MAC + 0x14000UL)
+#define  ENET_SERDES_RESET_1           0x0000000000000002ULL
+#define  ENET_SERDES_RESET_0           0x0000000000000001ULL
+
+#define ENET_SERDES_CFG                        (FZC_MAC + 0x14008UL)
+#define  ENET_SERDES_BE_LOOPBACK       0x0000000000000002ULL
+#define  ENET_SERDES_CFG_FORCE_RDY     0x0000000000000001ULL
+
+#define ENET_SERDES_0_PLL_CFG          (FZC_MAC + 0x14010UL)
+#define  ENET_SERDES_PLL_FBDIV0                0x0000000000000001ULL
+#define  ENET_SERDES_PLL_FBDIV1                0x0000000000000002ULL
+#define  ENET_SERDES_PLL_FBDIV2                0x0000000000000004ULL
+#define  ENET_SERDES_PLL_HRATE0                0x0000000000000008ULL
+#define  ENET_SERDES_PLL_HRATE1                0x0000000000000010ULL
+#define  ENET_SERDES_PLL_HRATE2                0x0000000000000020ULL
+#define  ENET_SERDES_PLL_HRATE3                0x0000000000000040ULL
+
+#define ENET_SERDES_0_CTRL_CFG         (FZC_MAC + 0x14018UL)
+#define  ENET_SERDES_CTRL_SDET_0       0x0000000000000001ULL
+#define  ENET_SERDES_CTRL_SDET_1       0x0000000000000002ULL
+#define  ENET_SERDES_CTRL_SDET_2       0x0000000000000004ULL
+#define  ENET_SERDES_CTRL_SDET_3       0x0000000000000008ULL
+#define  ENET_SERDES_CTRL_EMPH_0       0x0000000000000070ULL
+#define  ENET_SERDES_CTRL_EMPH_0_SHIFT 4
+#define  ENET_SERDES_CTRL_EMPH_1       0x0000000000000380ULL
+#define  ENET_SERDES_CTRL_EMPH_1_SHIFT 7
+#define  ENET_SERDES_CTRL_EMPH_2       0x0000000000001c00ULL
+#define  ENET_SERDES_CTRL_EMPH_2_SHIFT 10
+#define  ENET_SERDES_CTRL_EMPH_3       0x000000000000e000ULL
+#define  ENET_SERDES_CTRL_EMPH_3_SHIFT 13
+#define  ENET_SERDES_CTRL_LADJ_0       0x0000000000070000ULL
+#define  ENET_SERDES_CTRL_LADJ_0_SHIFT 16
+#define  ENET_SERDES_CTRL_LADJ_1       0x0000000000380000ULL
+#define  ENET_SERDES_CTRL_LADJ_1_SHIFT 19
+#define  ENET_SERDES_CTRL_LADJ_2       0x0000000001c00000ULL
+#define  ENET_SERDES_CTRL_LADJ_2_SHIFT 22
+#define  ENET_SERDES_CTRL_LADJ_3       0x000000000e000000ULL
+#define  ENET_SERDES_CTRL_LADJ_3_SHIFT 25
+#define  ENET_SERDES_CTRL_RXITERM_0    0x0000000010000000ULL
+#define  ENET_SERDES_CTRL_RXITERM_1    0x0000000020000000ULL
+#define  ENET_SERDES_CTRL_RXITERM_2    0x0000000040000000ULL
+#define  ENET_SERDES_CTRL_RXITERM_3    0x0000000080000000ULL
+
+#define ENET_SERDES_0_TEST_CFG         (FZC_MAC + 0x14020UL)
+#define  ENET_SERDES_TEST_MD_0         0x0000000000000003ULL
+#define  ENET_SERDES_TEST_MD_0_SHIFT   0
+#define  ENET_SERDES_TEST_MD_1         0x000000000000000cULL
+#define  ENET_SERDES_TEST_MD_1_SHIFT   2
+#define  ENET_SERDES_TEST_MD_2         0x0000000000000030ULL
+#define  ENET_SERDES_TEST_MD_2_SHIFT   4
+#define  ENET_SERDES_TEST_MD_3         0x00000000000000c0ULL
+#define  ENET_SERDES_TEST_MD_3_SHIFT   6
+
+#define ENET_TEST_MD_NO_LOOPBACK       0x0
+#define ENET_TEST_MD_EWRAP             0x1
+#define ENET_TEST_MD_PAD_LOOPBACK      0x2
+#define ENET_TEST_MD_REV_LOOPBACK      0x3
+
+#define ENET_SERDES_1_PLL_CFG          (FZC_MAC + 0x14028UL)
+#define ENET_SERDES_1_CTRL_CFG         (FZC_MAC + 0x14030UL)
+#define ENET_SERDES_1_TEST_CFG         (FZC_MAC + 0x14038UL)
+
+#define ENET_RGMII_CFG_REG             (FZC_MAC + 0x14040UL)
+
+#define ESR_INT_SIGNALS                        (FZC_MAC + 0x14800UL)
+#define  ESR_INT_SIGNALS_ALL           0x00000000ffffffffULL
+#define  ESR_INT_SIGNALS_P0_BITS       0x0000000033e0000fULL
+#define  ESR_INT_SIGNALS_P1_BITS       0x000000000c1f00f0ULL
+#define  ESR_INT_SRDY0_P0              0x0000000020000000ULL
+#define  ESR_INT_DET0_P0               0x0000000010000000ULL
+#define  ESR_INT_SRDY0_P1              0x0000000008000000ULL
+#define  ESR_INT_DET0_P1               0x0000000004000000ULL
+#define  ESR_INT_XSRDY_P0              0x0000000002000000ULL
+#define  ESR_INT_XDP_P0_CH3            0x0000000001000000ULL
+#define  ESR_INT_XDP_P0_CH2            0x0000000000800000ULL
+#define  ESR_INT_XDP_P0_CH1            0x0000000000400000ULL
+#define  ESR_INT_XDP_P0_CH0            0x0000000000200000ULL
+#define  ESR_INT_XSRDY_P1              0x0000000000100000ULL
+#define  ESR_INT_XDP_P1_CH3            0x0000000000080000ULL
+#define  ESR_INT_XDP_P1_CH2            0x0000000000040000ULL
+#define  ESR_INT_XDP_P1_CH1            0x0000000000020000ULL
+#define  ESR_INT_XDP_P1_CH0            0x0000000000010000ULL
+#define  ESR_INT_SLOSS_P1_CH3          0x0000000000000080ULL
+#define  ESR_INT_SLOSS_P1_CH2          0x0000000000000040ULL
+#define  ESR_INT_SLOSS_P1_CH1          0x0000000000000020ULL
+#define  ESR_INT_SLOSS_P1_CH0          0x0000000000000010ULL
+#define  ESR_INT_SLOSS_P0_CH3          0x0000000000000008ULL
+#define  ESR_INT_SLOSS_P0_CH2          0x0000000000000004ULL
+#define  ESR_INT_SLOSS_P0_CH1          0x0000000000000002ULL
+#define  ESR_INT_SLOSS_P0_CH0          0x0000000000000001ULL
+
+#define ESR_DEBUG_SEL                  (FZC_MAC + 0x14808UL)
+#define  ESR_DEBUG_SEL_VAL             0x000000000000003fULL
+
+/* SerDes registers behind MIF */
+#define NIU_ESR_DEV_ADDR               0x1e
+#define ESR_BASE                       0x0000
+
+#define ESR_RXTX_COMM_CTRL_L           (ESR_BASE + 0x0000)
+#define ESR_RXTX_COMM_CTRL_H           (ESR_BASE + 0x0001)
+
+#define ESR_RXTX_RESET_CTRL_L          (ESR_BASE + 0x0002)
+#define ESR_RXTX_RESET_CTRL_H          (ESR_BASE + 0x0003)
+
+#define ESR_RX_POWER_CTRL_L            (ESR_BASE + 0x0004)
+#define ESR_RX_POWER_CTRL_H            (ESR_BASE + 0x0005)
+
+#define ESR_TX_POWER_CTRL_L            (ESR_BASE + 0x0006)
+#define ESR_TX_POWER_CTRL_H            (ESR_BASE + 0x0007)
+
+#define ESR_MISC_POWER_CTRL_L          (ESR_BASE + 0x0008)
+#define ESR_MISC_POWER_CTRL_H          (ESR_BASE + 0x0009)
+
+#define ESR_RXTX_CTRL_L(CHAN)          (ESR_BASE + 0x0080 + (CHAN) * 0x10)
+#define ESR_RXTX_CTRL_H(CHAN)          (ESR_BASE + 0x0081 + (CHAN) * 0x10)
+#define  ESR_RXTX_CTRL_BIASCNTL                0x80000000
+#define  ESR_RXTX_CTRL_RESV1           0x7c000000
+#define  ESR_RXTX_CTRL_TDENFIFO                0x02000000
+#define  ESR_RXTX_CTRL_TDWS20          0x01000000
+#define  ESR_RXTX_CTRL_VMUXLO          0x00c00000
+#define  ESR_RXTX_CTRL_VMUXLO_SHIFT    22
+#define  ESR_RXTX_CTRL_VPULSELO                0x00300000
+#define  ESR_RXTX_CTRL_VPULSELO_SHIFT  20
+#define  ESR_RXTX_CTRL_RESV2           0x000f0000
+#define  ESR_RXTX_CTRL_RESV3           0x0000c000
+#define  ESR_RXTX_CTRL_RXPRESWIN       0x00003000
+#define  ESR_RXTX_CTRL_RXPRESWIN_SHIFT 12
+#define  ESR_RXTX_CTRL_RESV4           0x00000800
+#define  ESR_RXTX_CTRL_RISEFALL                0x00000700
+#define  ESR_RXTX_CTRL_RISEFALL_SHIFT  8
+#define  ESR_RXTX_CTRL_RESV5           0x000000fe
+#define  ESR_RXTX_CTRL_ENSTRETCH       0x00000001
+
+#define ESR_RXTX_TUNING_L(CHAN)                (ESR_BASE + 0x0082 + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING_H(CHAN)                (ESR_BASE + 0x0083 + (CHAN) * 0x10)
+
+#define ESR_RX_SYNCCHAR_L(CHAN)                (ESR_BASE + 0x0084 + (CHAN) * 0x10)
+#define ESR_RX_SYNCCHAR_H(CHAN)                (ESR_BASE + 0x0085 + (CHAN) * 0x10)
+
+#define ESR_RXTX_TEST_L(CHAN)          (ESR_BASE + 0x0086 + (CHAN) * 0x10)
+#define ESR_RXTX_TEST_H(CHAN)          (ESR_BASE + 0x0087 + (CHAN) * 0x10)
+
+#define ESR_GLUE_CTRL0_L(CHAN)         (ESR_BASE + 0x0088 + (CHAN) * 0x10)
+#define ESR_GLUE_CTRL0_H(CHAN)         (ESR_BASE + 0x0089 + (CHAN) * 0x10)
+#define  ESR_GLUE_CTRL0_RESV1          0xf8000000
+#define  ESR_GLUE_CTRL0_BLTIME         0x07000000
+#define  ESR_GLUE_CTRL0_BLTIME_SHIFT   24
+#define  ESR_GLUE_CTRL0_RESV2          0x00ff0000
+#define  ESR_GLUE_CTRL0_RXLOS_TEST     0x00008000
+#define  ESR_GLUE_CTRL0_RESV3          0x00004000
+#define  ESR_GLUE_CTRL0_RXLOSENAB      0x00002000
+#define  ESR_GLUE_CTRL0_FASTRESYNC     0x00001000
+#define  ESR_GLUE_CTRL0_SRATE          0x00000f00
+#define  ESR_GLUE_CTRL0_SRATE_SHIFT    8
+#define  ESR_GLUE_CTRL0_THCNT          0x000000ff
+#define  ESR_GLUE_CTRL0_THCNT_SHIFT    0
+
+#define BLTIME_64_CYCLES               0
+#define BLTIME_128_CYCLES              1
+#define BLTIME_256_CYCLES              2
+#define BLTIME_300_CYCLES              3
+#define BLTIME_384_CYCLES              4
+#define BLTIME_512_CYCLES              5
+#define BLTIME_1024_CYCLES             6
+#define BLTIME_2048_CYCLES             7
+
+#define ESR_GLUE_CTRL1_L(CHAN)         (ESR_BASE + 0x008a + (CHAN) * 0x10)
+#define ESR_GLUE_CTRL1_H(CHAN)         (ESR_BASE + 0x008b + (CHAN) * 0x10)
+/* NOTE(review): each TUNINGn_H below shares its offset with the _L twin
+ * (0x00c2/0x00c2, 0x0102/0x0102, 0x0142/0x0142), unlike every other
+ * _L/_H pair in this file, which differ by 1 (e.g. ESR_RXTX_TUNING_L/H
+ * at 0x0082/0x0083).  Possible copy-paste slip -- confirm against the
+ * SerDes register documentation before relying on the _H variants.
+ */
+#define ESR_RXTX_TUNING1_L(CHAN)       (ESR_BASE + 0x00c2 + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING1_H(CHAN)       (ESR_BASE + 0x00c2 + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING2_L(CHAN)       (ESR_BASE + 0x0102 + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING2_H(CHAN)       (ESR_BASE + 0x0102 + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING3_L(CHAN)       (ESR_BASE + 0x0142 + (CHAN) * 0x10)
+#define ESR_RXTX_TUNING3_H(CHAN)       (ESR_BASE + 0x0142 + (CHAN) * 0x10)
+
+#define NIU_ESR2_DEV_ADDR              0x1e
+#define ESR2_BASE                      0x8000
+
+#define ESR2_TI_PLL_CFG_L              (ESR2_BASE + 0x000)
+#define ESR2_TI_PLL_CFG_H              (ESR2_BASE + 0x001)
+#define  PLL_CFG_STD                   0x00000c00
+#define  PLL_CFG_STD_SHIFT             10
+#define  PLL_CFG_LD                    0x00000300
+#define  PLL_CFG_LD_SHIFT              8
+#define  PLL_CFG_MPY                   0x0000001e
+#define  PLL_CFG_MPY_SHIFT             1
+#define  PLL_CFG_MPY_4X                0x0
+#define  PLL_CFG_MPY_5X                0x00000002
+#define  PLL_CFG_MPY_6X                0x00000004
+#define  PLL_CFG_MPY_8X                0x00000008
+#define  PLL_CFG_MPY_10X               0x0000000a
+#define  PLL_CFG_MPY_12X               0x0000000c
+#define  PLL_CFG_MPY_12P5X             0x0000000e
+#define  PLL_CFG_ENPLL                 0x00000001
+
+#define ESR2_TI_PLL_STS_L              (ESR2_BASE + 0x002)
+#define ESR2_TI_PLL_STS_H              (ESR2_BASE + 0x003)
+#define  PLL_STS_LOCK                  0x00000001
+
+#define ESR2_TI_PLL_TEST_CFG_L         (ESR2_BASE + 0x004)
+#define ESR2_TI_PLL_TEST_CFG_H         (ESR2_BASE + 0x005)
+#define  PLL_TEST_INVPATT              0x00004000
+#define  PLL_TEST_RATE                 0x00003000
+#define  PLL_TEST_RATE_SHIFT           12
+#define  PLL_TEST_CFG_ENBSAC           0x00000400
+#define  PLL_TEST_CFG_ENBSRX           0x00000200
+#define  PLL_TEST_CFG_ENBSTX           0x00000100
+#define  PLL_TEST_CFG_LOOPBACK_PAD     0x00000040
+#define  PLL_TEST_CFG_LOOPBACK_CML_DIS 0x00000080
+#define  PLL_TEST_CFG_LOOPBACK_CML_EN  0x000000c0
+#define  PLL_TEST_CFG_CLKBYP           0x00000030
+#define  PLL_TEST_CFG_CLKBYP_SHIFT     4
+#define  PLL_TEST_CFG_EN_RXPATT                0x00000008
+#define  PLL_TEST_CFG_EN_TXPATT                0x00000004
+#define  PLL_TEST_CFG_TPATT            0x00000003
+#define  PLL_TEST_CFG_TPATT_SHIFT      0
+
+#define ESR2_TI_PLL_TX_CFG_L(CHAN)     (ESR2_BASE + 0x100 + (CHAN) * 4)
+#define ESR2_TI_PLL_TX_CFG_H(CHAN)     (ESR2_BASE + 0x101 + (CHAN) * 4)
+#define  PLL_TX_CFG_RDTCT              0x00600000
+#define  PLL_TX_CFG_RDTCT_SHIFT                21
+#define  PLL_TX_CFG_ENIDL              0x00100000
+#define  PLL_TX_CFG_BSTX               0x00020000
+#define  PLL_TX_CFG_ENFTP              0x00010000
+#define  PLL_TX_CFG_DE                 0x0000f000
+#define  PLL_TX_CFG_DE_SHIFT           12
+#define  PLL_TX_CFG_SWING_125MV                0x00000000
+#define  PLL_TX_CFG_SWING_250MV                0x00000200
+#define  PLL_TX_CFG_SWING_500MV                0x00000400
+#define  PLL_TX_CFG_SWING_625MV                0x00000600
+#define  PLL_TX_CFG_SWING_750MV                0x00000800
+#define  PLL_TX_CFG_SWING_1000MV       0x00000a00
+#define  PLL_TX_CFG_SWING_1250MV       0x00000c00
+#define  PLL_TX_CFG_SWING_1375MV       0x00000e00
+#define  PLL_TX_CFG_CM                 0x00000100
+#define  PLL_TX_CFG_INVPAIR            0x00000080
+#define  PLL_TX_CFG_RATE               0x00000060
+#define  PLL_TX_CFG_RATE_SHIFT         5
+#define  PLL_TX_CFG_RATE_FULL          0x0
+#define  PLL_TX_CFG_RATE_HALF          0x20
+#define  PLL_TX_CFG_RATE_QUAD          0x40
+#define  PLL_TX_CFG_BUSWIDTH           0x0000001c
+#define  PLL_TX_CFG_BUSWIDTH_SHIFT     2
+#define  PLL_TX_CFG_ENTEST             0x00000002
+#define  PLL_TX_CFG_ENTX               0x00000001
+
+#define ESR2_TI_PLL_TX_STS_L(CHAN)     (ESR2_BASE + 0x102 + (CHAN) * 4)
+#define ESR2_TI_PLL_TX_STS_H(CHAN)     (ESR2_BASE + 0x103 + (CHAN) * 4)
+#define  PLL_TX_STS_RDTCTIP            0x00000002
+#define  PLL_TX_STS_TESTFAIL           0x00000001
+
+#define ESR2_TI_PLL_RX_CFG_L(CHAN)     (ESR2_BASE + 0x120 + (CHAN) * 4)
+#define ESR2_TI_PLL_RX_CFG_H(CHAN)     (ESR2_BASE + 0x121 + (CHAN) * 4)
+#define  PLL_RX_CFG_BSINRXN            0x02000000
+#define  PLL_RX_CFG_BSINRXP            0x01000000
+#define  PLL_RX_CFG_EQ_MAX_LF          0x00000000
+#define  PLL_RX_CFG_EQ_LP_ADAPTIVE     0x00080000
+#define  PLL_RX_CFG_EQ_LP_1084MHZ      0x00400000
+#define  PLL_RX_CFG_EQ_LP_805MHZ       0x00480000
+#define  PLL_RX_CFG_EQ_LP_573MHZ       0x00500000
+#define  PLL_RX_CFG_EQ_LP_402MHZ       0x00580000
+#define  PLL_RX_CFG_EQ_LP_304MHZ       0x00600000
+#define  PLL_RX_CFG_EQ_LP_216MHZ       0x00680000
+#define  PLL_RX_CFG_EQ_LP_156MHZ       0x00700000
+#define  PLL_RX_CFG_EQ_LP_135MHZ       0x00780000
+#define  PLL_RX_CFG_EQ_SHIFT           19
+#define  PLL_RX_CFG_CDR                        0x00070000
+#define  PLL_RX_CFG_CDR_SHIFT          16
+#define  PLL_RX_CFG_LOS_DIS            0x00000000
+#define  PLL_RX_CFG_LOS_HTHRESH                0x00004000
+#define  PLL_RX_CFG_LOS_LTHRESH                0x00008000
+#define  PLL_RX_CFG_ALIGN_DIS          0x00000000
+#define  PLL_RX_CFG_ALIGN_ENA          0x00001000
+#define  PLL_RX_CFG_ALIGN_JOG          0x00002000
+#define  PLL_RX_CFG_TERM_VDDT          0x00000000
+#define  PLL_RX_CFG_TERM_0P8VDDT       0x00000100
+#define  PLL_RX_CFG_TERM_FLOAT         0x00000300
+#define  PLL_RX_CFG_INVPAIR            0x00000080
+#define  PLL_RX_CFG_RATE               0x00000060
+#define  PLL_RX_CFG_RATE_SHIFT         5
+#define  PLL_RX_CFG_RATE_FULL          0x0
+#define  PLL_RX_CFG_RATE_HALF          0x20
+#define  PLL_RX_CFG_RATE_QUAD          0x40
+#define  PLL_RX_CFG_BUSWIDTH           0x0000001c
+#define  PLL_RX_CFG_BUSWIDTH_SHIFT     2
+#define  PLL_RX_CFG_ENTEST             0x00000002
+#define  PLL_RX_CFG_ENRX               0x00000001
+
+#define ESR2_TI_PLL_RX_STS_L(CHAN)     (ESR2_BASE + 0x122 + (CHAN) * 4)
+#define ESR2_TI_PLL_RX_STS_H(CHAN)     (ESR2_BASE + 0x123 + (CHAN) * 4)
+#define  PLL_RX_STS_CRCIDTCT           0x00000200
+#define  PLL_RX_STS_CWDTCT             0x00000100
+#define  PLL_RX_STS_BSRXN              0x00000020
+#define  PLL_RX_STS_BSRXP              0x00000010
+#define  PLL_RX_STS_LOSDTCT            0x00000008
+#define  PLL_RX_STS_ODDCG              0x00000004
+#define  PLL_RX_STS_SYNC               0x00000002
+#define  PLL_RX_STS_TESTFAIL           0x00000001
+
+#define ENET_VLAN_TBL(IDX)             (FZC_FFLP + 0x00000UL + (IDX) * 8UL)
+#define  ENET_VLAN_TBL_PARITY1         0x0000000000020000ULL
+#define  ENET_VLAN_TBL_PARITY0         0x0000000000010000ULL
+#define  ENET_VLAN_TBL_VPR             0x0000000000000008ULL
+#define  ENET_VLAN_TBL_VLANRDCTBLN     0x0000000000000007ULL
+#define  ENET_VLAN_TBL_SHIFT(PORT)     ((PORT) * 4)
+
+#define ENET_VLAN_TBL_NUM_ENTRIES      4096
+
+#define FFLP_VLAN_PAR_ERR              (FZC_FFLP + 0x0800UL)
+#define  FFLP_VLAN_PAR_ERR_ERR         0x0000000080000000ULL
+#define  FFLP_VLAN_PAR_ERR_M_ERR       0x0000000040000000ULL
+#define  FFLP_VLAN_PAR_ERR_ADDR                0x000000003ffc0000ULL
+#define  FFLP_VLAN_PAR_ERR_DATA                0x000000000003ffffULL
+
+#define L2_CLS(IDX)                    (FZC_FFLP + 0x20000UL + (IDX) * 8UL)
+#define  L2_CLS_VLD                    0x0000000000010000ULL
+#define  L2_CLS_ETYPE                  0x000000000000ffffULL
+#define  L2_CLS_ETYPE_SHIFT            0
+
+#define L3_CLS(IDX)                    (FZC_FFLP + 0x20010UL + (IDX) * 8UL)
+#define  L3_CLS_VALID                  0x0000000002000000ULL
+#define  L3_CLS_IPVER                  0x0000000001000000ULL
+#define  L3_CLS_PID                    0x0000000000ff0000ULL
+#define  L3_CLS_PID_SHIFT              16
+#define  L3_CLS_TOSMASK                        0x000000000000ff00ULL
+#define  L3_CLS_TOSMASK_SHIFT          8
+#define  L3_CLS_TOS                    0x00000000000000ffULL
+#define  L3_CLS_TOS_SHIFT              0
+
+#define TCAM_KEY(IDX)                  (FZC_FFLP + 0x20030UL + (IDX) * 8UL)
+#define  TCAM_KEY_DISC                 0x0000000000000008ULL
+#define  TCAM_KEY_TSEL                 0x0000000000000004ULL
+#define  TCAM_KEY_IPADDR               0x0000000000000001ULL
+
+#define TCAM_KEY_0                     (FZC_FFLP + 0x20090UL)
+#define  TCAM_KEY_0_KEY                        0x00000000000000ffULL /* bits 192-199 */
+
+#define TCAM_KEY_1                     (FZC_FFLP + 0x20098UL)
+#define  TCAM_KEY_1_KEY                        0xffffffffffffffffULL /* bits 128-191 */
+
+#define TCAM_KEY_2                     (FZC_FFLP + 0x200a0UL)
+#define  TCAM_KEY_2_KEY                        0xffffffffffffffffULL /* bits 64-127 */
+
+#define TCAM_KEY_3                     (FZC_FFLP + 0x200a8UL)
+#define  TCAM_KEY_3_KEY                        0xffffffffffffffffULL /* bits 0-63 */
+
+#define TCAM_KEY_MASK_0                        (FZC_FFLP + 0x200b0UL)
+#define  TCAM_KEY_MASK_0_KEY_SEL       0x00000000000000ffULL /* bits 192-199 */
+
+#define TCAM_KEY_MASK_1                        (FZC_FFLP + 0x200b8UL)
+#define  TCAM_KEY_MASK_1_KEY_SEL       0xffffffffffffffffULL /* bits 128-191 */
+
+#define TCAM_KEY_MASK_2                        (FZC_FFLP + 0x200c0UL)
+#define  TCAM_KEY_MASK_2_KEY_SEL       0xffffffffffffffffULL /* bits 64-127 */
+
+#define TCAM_KEY_MASK_3                        (FZC_FFLP + 0x200c8UL)
+#define  TCAM_KEY_MASK_3_KEY_SEL       0xffffffffffffffffULL /* bits 0-63 */
+
+#define TCAM_CTL                       (FZC_FFLP + 0x200d0UL)
+#define  TCAM_CTL_RWC                  0x00000000001c0000ULL
+#define  TCAM_CTL_RWC_TCAM_WRITE       0x0000000000000000ULL
+#define  TCAM_CTL_RWC_TCAM_READ                0x0000000000040000ULL
+#define  TCAM_CTL_RWC_TCAM_COMPARE     0x0000000000080000ULL
+#define  TCAM_CTL_RWC_RAM_WRITE                0x0000000000100000ULL
+#define  TCAM_CTL_RWC_RAM_READ         0x0000000000140000ULL
+#define  TCAM_CTL_STAT                 0x0000000000020000ULL
+#define  TCAM_CTL_MATCH                        0x0000000000010000ULL
+#define  TCAM_CTL_LOC                  0x00000000000003ffULL
+
+#define TCAM_ERR                       (FZC_FFLP + 0x200d8UL)
+#define  TCAM_ERR_ERR                  0x0000000080000000ULL
+#define  TCAM_ERR_P_ECC                        0x0000000040000000ULL
+#define  TCAM_ERR_MULT                 0x0000000020000000ULL
+#define  TCAM_ERR_ADDR                 0x0000000000ff0000ULL
+#define  TCAM_ERR_SYNDROME             0x000000000000ffffULL
+
+#define HASH_LOOKUP_ERR_LOG1           (FZC_FFLP + 0x200e0UL)
+#define  HASH_LOOKUP_ERR_LOG1_ERR      0x0000000000000008ULL
+#define  HASH_LOOKUP_ERR_LOG1_MULT_LK  0x0000000000000004ULL
+#define  HASH_LOOKUP_ERR_LOG1_CU       0x0000000000000002ULL
+#define  HASH_LOOKUP_ERR_LOG1_MULT_BIT 0x0000000000000001ULL
+
+#define HASH_LOOKUP_ERR_LOG2           (FZC_FFLP + 0x200e8UL)
+#define  HASH_LOOKUP_ERR_LOG2_H1       0x000000007ffff800ULL
+#define  HASH_LOOKUP_ERR_LOG2_SUBAREA  0x0000000000000700ULL
+#define  HASH_LOOKUP_ERR_LOG2_SYNDROME 0x00000000000000ffULL
+
+#define FFLP_CFG_1                     (FZC_FFLP + 0x20100UL)
+#define  FFLP_CFG_1_TCAM_DIS           0x0000000004000000ULL
+#define  FFLP_CFG_1_PIO_DBG_SEL                0x0000000003800000ULL
+#define  FFLP_CFG_1_PIO_FIO_RST                0x0000000000400000ULL
+#define  FFLP_CFG_1_PIO_FIO_LAT                0x0000000000300000ULL
+#define  FFLP_CFG_1_CAMLAT             0x00000000000f0000ULL
+#define  FFLP_CFG_1_CAMLAT_SHIFT       16
+#define  FFLP_CFG_1_CAMRATIO           0x000000000000f000ULL
+#define  FFLP_CFG_1_CAMRATIO_SHIFT     12
+#define  FFLP_CFG_1_FCRAMRATIO         0x0000000000000f00ULL
+#define  FFLP_CFG_1_FCRAMRATIO_SHIFT   8
+#define  FFLP_CFG_1_FCRAMOUTDR_MASK    0x00000000000000f0ULL
+#define  FFLP_CFG_1_FCRAMOUTDR_NORMAL  0x0000000000000000ULL
+#define  FFLP_CFG_1_FCRAMOUTDR_STRONG  0x0000000000000050ULL
+#define  FFLP_CFG_1_FCRAMOUTDR_WEAK    0x00000000000000a0ULL
+#define  FFLP_CFG_1_FCRAMQS            0x0000000000000008ULL
+#define  FFLP_CFG_1_ERRORDIS           0x0000000000000004ULL
+#define  FFLP_CFG_1_FFLPINITDONE       0x0000000000000002ULL
+#define  FFLP_CFG_1_LLCSNAP            0x0000000000000001ULL
+
+#define DEFAULT_FCRAMRATIO             10
+
+#define DEFAULT_TCAM_LATENCY           4
+#define DEFAULT_TCAM_ACCESS_RATIO      10
+
+#define TCP_CFLAG_MSK                  (FZC_FFLP + 0x20108UL)
+#define  TCP_CFLAG_MSK_MASK            0x0000000000000fffULL
+
+#define FCRAM_REF_TMR                  (FZC_FFLP + 0x20110UL)
+#define  FCRAM_REF_TMR_MAX             0x00000000ffff0000ULL
+#define  FCRAM_REF_TMR_MAX_SHIFT       16
+#define  FCRAM_REF_TMR_MIN             0x000000000000ffffULL
+#define  FCRAM_REF_TMR_MIN_SHIFT       0
+
+#define DEFAULT_FCRAM_REFRESH_MAX      512
+#define DEFAULT_FCRAM_REFRESH_MIN      512
+
+#define FCRAM_FIO_ADDR                 (FZC_FFLP + 0x20118UL)
+#define  FCRAM_FIO_ADDR_ADDR           0x00000000000000ffULL
+
+#define FCRAM_FIO_DAT                  (FZC_FFLP + 0x20120UL)
+#define  FCRAM_FIO_DAT_DATA            0x000000000000ffffULL
+
+#define FCRAM_ERR_TST0                 (FZC_FFLP + 0x20128UL)
+#define  FCRAM_ERR_TST0_SYND           0x00000000000000ffULL
+
+#define FCRAM_ERR_TST1                 (FZC_FFLP + 0x20130UL)
+#define  FCRAM_ERR_TST1_DAT            0x00000000ffffffffULL
+
+#define FCRAM_ERR_TST2                 (FZC_FFLP + 0x20138UL)
+#define  FCRAM_ERR_TST2_DAT            0x00000000ffffffffULL
+
+#define FFLP_ERR_MASK                  (FZC_FFLP + 0x20140UL)
+#define  FFLP_ERR_MASK_HSH_TBL_DAT     0x00000000000007f8ULL
+#define  FFLP_ERR_MASK_HSH_TBL_LKUP    0x0000000000000004ULL
+#define  FFLP_ERR_MASK_TCAM            0x0000000000000002ULL
+#define  FFLP_ERR_MASK_VLAN            0x0000000000000001ULL
+
+#define FFLP_DBG_TRAIN_VCT             (FZC_FFLP + 0x20148UL)
+#define  FFLP_DBG_TRAIN_VCT_VECTOR     0x00000000ffffffffULL
+
+#define FCRAM_PHY_RD_LAT               (FZC_FFLP + 0x20150UL)
+#define  FCRAM_PHY_RD_LAT_LAT          0x00000000000000ffULL
+
+/* Ethernet TCAM format */
+#define TCAM_ETHKEY0_RESV1             0xffffffffffffff00ULL
+#define TCAM_ETHKEY0_CLASS_CODE                0x00000000000000f8ULL
+#define TCAM_ETHKEY0_CLASS_CODE_SHIFT  3
+#define TCAM_ETHKEY0_RESV2             0x0000000000000007ULL
+/* Mask for frame byte NUM (0 through 7) of key word 1; byte 0 sits in
+ * the most significant byte.  The constant must be unsigned 64-bit and
+ * NUM must be parenthesized: the former "(0xff << ((7 - NUM) * 8))"
+ * shifted a 32-bit int by up to 56 bits, which is undefined behavior,
+ * and mis-expanded for compound arguments such as BYTE0_7(i + 1).
+ */
+#define TCAM_ETHKEY1_FRAME_BYTE0_7(NUM)        (0xffULL << ((7 - (NUM)) * 8))
+#define TCAM_ETHKEY2_FRAME_BYTE8       0xff00000000000000ULL
+#define TCAM_ETHKEY2_FRAME_BYTE8_SHIFT 56
+#define TCAM_ETHKEY2_FRAME_BYTE9       0x00ff000000000000ULL
+#define TCAM_ETHKEY2_FRAME_BYTE9_SHIFT 48
+#define TCAM_ETHKEY2_FRAME_BYTE10      0x0000ff0000000000ULL
+#define TCAM_ETHKEY2_FRAME_BYTE10_SHIFT        40
+#define TCAM_ETHKEY2_FRAME_RESV                0x000000ffffffffffULL
+#define TCAM_ETHKEY3_FRAME_RESV                0xffffffffffffffffULL
+
+/* IPV4 TCAM format */
+#define TCAM_V4KEY0_RESV1              0xffffffffffffff00ULL
+#define TCAM_V4KEY0_CLASS_CODE         0x00000000000000f8ULL
+#define TCAM_V4KEY0_CLASS_CODE_SHIFT   3
+#define TCAM_V4KEY0_RESV2              0x0000000000000007ULL
+#define TCAM_V4KEY1_L2RDCNUM           0xf800000000000000ULL
+#define TCAM_V4KEY1_L2RDCNUM_SHIFT     59
+#define TCAM_V4KEY1_NOPORT             0x0400000000000000ULL
+#define TCAM_V4KEY1_RESV               0x03ffffffffffffffULL
+#define TCAM_V4KEY2_RESV               0xffff000000000000ULL
+#define TCAM_V4KEY2_TOS                        0x0000ff0000000000ULL
+#define TCAM_V4KEY2_TOS_SHIFT          40
+#define TCAM_V4KEY2_PROTO              0x000000ff00000000ULL
+#define TCAM_V4KEY2_PROTO_SHIFT                32
+#define TCAM_V4KEY2_PORT_SPI           0x00000000ffffffffULL
+#define TCAM_V4KEY2_PORT_SPI_SHIFT     0
+#define TCAM_V4KEY3_SADDR              0xffffffff00000000ULL
+#define TCAM_V4KEY3_SADDR_SHIFT                32
+#define TCAM_V4KEY3_DADDR              0x00000000ffffffffULL
+#define TCAM_V4KEY3_DADDR_SHIFT                0
+
+/* IPV6 TCAM format */
+#define TCAM_V6KEY0_RESV1              0xffffffffffffff00ULL
+#define TCAM_V6KEY0_CLASS_CODE         0x00000000000000f8ULL
+#define TCAM_V6KEY0_CLASS_CODE_SHIFT   3
+#define TCAM_V6KEY0_RESV2              0x0000000000000007ULL
+#define TCAM_V6KEY1_L2RDCNUM           0xf800000000000000ULL
+#define TCAM_V6KEY1_L2RDCNUM_SHIFT     59
+#define TCAM_V6KEY1_NOPORT             0x0400000000000000ULL
+#define TCAM_V6KEY1_RESV               0x03ff000000000000ULL
+#define TCAM_V6KEY1_TOS                        0x0000ff0000000000ULL
+#define TCAM_V6KEY1_TOS_SHIFT          40
+#define TCAM_V6KEY1_NEXT_HDR           0x000000ff00000000ULL
+#define TCAM_V6KEY1_NEXT_HDR_SHIFT     32
+#define TCAM_V6KEY1_PORT_SPI           0x00000000ffffffffULL
+#define TCAM_V6KEY1_PORT_SPI_SHIFT     0
+#define TCAM_V6KEY2_ADDR_HIGH          0xffffffffffffffffULL
+#define TCAM_V6KEY3_ADDR_LOW           0xffffffffffffffffULL
+
+#define TCAM_ASSOCDATA_SYNDROME                0x000003fffc000000ULL
+#define TCAM_ASSOCDATA_SYNDROME_SHIFT  26
+#define TCAM_ASSOCDATA_ZFID            0x0000000003ffc000ULL
+#define TCAM_ASSOCDATA_ZFID_SHIFT      14
+#define TCAM_ASSOCDATA_V4_ECC_OK       0x0000000000002000ULL
+#define TCAM_ASSOCDATA_DISC            0x0000000000001000ULL
+#define TCAM_ASSOCDATA_TRES_MASK       0x0000000000000c00ULL
+#define TCAM_ASSOCDATA_TRES_USE_L2RDC  0x0000000000000000ULL
+#define TCAM_ASSOCDATA_TRES_USE_OFFSET 0x0000000000000400ULL
+#define TCAM_ASSOCDATA_TRES_OVR_RDC    0x0000000000000800ULL
+#define TCAM_ASSOCDATA_TRES_OVR_RDC_OFF        0x0000000000000c00ULL
+#define TCAM_ASSOCDATA_RDCTBL          0x0000000000000380ULL
+#define TCAM_ASSOCDATA_RDCTBL_SHIFT    7
+#define TCAM_ASSOCDATA_OFFSET          0x000000000000007cULL
+#define TCAM_ASSOCDATA_OFFSET_SHIFT    2
+#define TCAM_ASSOCDATA_ZFVLD           0x0000000000000002ULL
+#define TCAM_ASSOCDATA_AGE             0x0000000000000001ULL
+
+#define FLOW_KEY(IDX)                  (FZC_FFLP + 0x40000UL + (IDX) * 8UL)
+#define  FLOW_KEY_PORT                 0x0000000000000200ULL
+#define  FLOW_KEY_L2DA                 0x0000000000000100ULL
+#define  FLOW_KEY_VLAN                 0x0000000000000080ULL
+#define  FLOW_KEY_IPSA                 0x0000000000000040ULL
+#define  FLOW_KEY_IPDA                 0x0000000000000020ULL
+#define  FLOW_KEY_PROTO                        0x0000000000000010ULL
+#define  FLOW_KEY_L4_0                 0x000000000000000cULL
+#define  FLOW_KEY_L4_0_SHIFT           2
+#define  FLOW_KEY_L4_1                 0x0000000000000003ULL
+#define  FLOW_KEY_L4_1_SHIFT           0
+
+#define  FLOW_KEY_L4_NONE              0x0
+#define  FLOW_KEY_L4_RESV              0x1
+#define  FLOW_KEY_L4_BYTE12            0x2
+#define  FLOW_KEY_L4_BYTE56            0x3
+
+#define H1POLY                         (FZC_FFLP + 0x40060UL)
+#define  H1POLY_INITVAL                        0x00000000ffffffffULL
+
+#define H2POLY                         (FZC_FFLP + 0x40068UL)
+#define  H2POLY_INITVAL                        0x000000000000ffffULL
+
+#define FLW_PRT_SEL(IDX)               (FZC_FFLP + 0x40070UL + (IDX) * 8UL)
+#define  FLW_PRT_SEL_EXT               0x0000000000010000ULL
+#define  FLW_PRT_SEL_MASK              0x0000000000001f00ULL
+#define  FLW_PRT_SEL_MASK_SHIFT                8
+#define  FLW_PRT_SEL_BASE              0x000000000000001fULL
+#define  FLW_PRT_SEL_BASE_SHIFT                0
+
+#define HASH_TBL_ADDR(IDX)             (FFLP + 0x00000UL + (IDX) * 8192UL)
+#define  HASH_TBL_ADDR_AUTOINC         0x0000000000800000ULL
+#define  HASH_TBL_ADDR_ADDR            0x00000000007fffffULL
+
+#define HASH_TBL_DATA(IDX)             (FFLP + 0x00008UL + (IDX) * 8192UL)
+#define  HASH_TBL_DATA_DATA            0xffffffffffffffffULL
+
+/* FCRAM hash table entries are up to 8 64-bit words in size.
+ * The layout of each entry is determined by the settings in the
+ * first word, which is the header.
+ *
+ * The indexing is controllable per partition (there is one partition
+ * per RDC group, thus a total of eight) using the BASE and MASK fields
+ * of FLW_PRT_SEL above.
+ */
+#define FCRAM_SIZE                     0x800000
+#define FCRAM_NUM_PARTITIONS           8
+
+/* Generic HASH entry header, used for all non-optimized formats.  */
+#define HASH_HEADER_FMT                        0x8000000000000000ULL
+#define HASH_HEADER_EXT                        0x4000000000000000ULL
+#define HASH_HEADER_VALID              0x2000000000000000ULL
+#define HASH_HEADER_RESVD              0x1000000000000000ULL
+#define HASH_HEADER_L2_DADDR           0x0ffffffffffff000ULL
+#define HASH_HEADER_L2_DADDR_SHIFT     12
+#define HASH_HEADER_VLAN               0x0000000000000fffULL
+#define HASH_HEADER_VLAN_SHIFT         0
+
+/* Optimized format, just a header with a special layout defined below.
+ * Set FMT and EXT both to zero to indicate this layout is being used.
+ */
+#define HASH_OPT_HEADER_FMT            0x8000000000000000ULL
+#define HASH_OPT_HEADER_EXT            0x4000000000000000ULL
+#define HASH_OPT_HEADER_VALID          0x2000000000000000ULL
+#define HASH_OPT_HEADER_RDCOFF         0x1f00000000000000ULL
+#define HASH_OPT_HEADER_RDCOFF_SHIFT   56
+#define HASH_OPT_HEADER_HASH2          0x00ffff0000000000ULL
+#define HASH_OPT_HEADER_HASH2_SHIFT    40
+#define HASH_OPT_HEADER_RESVD          0x000000ff00000000ULL
+#define HASH_OPT_HEADER_USERINFO       0x00000000ffffffffULL
+#define HASH_OPT_HEADER_USERINFO_SHIFT 0
+
+/* Port and protocol word used for ipv4 and ipv6 layouts.  */
+#define HASH_PORT_DPORT                        0xffff000000000000ULL
+#define HASH_PORT_DPORT_SHIFT          48
+#define HASH_PORT_SPORT                        0x0000ffff00000000ULL
+#define HASH_PORT_SPORT_SHIFT          32
+#define HASH_PORT_PROTO                        0x00000000ff000000ULL
+#define HASH_PORT_PROTO_SHIFT          24
+#define HASH_PORT_PORT_OFF             0x0000000000c00000ULL
+#define HASH_PORT_PORT_OFF_SHIFT       22
+#define HASH_PORT_PORT_RESV            0x00000000003fffffULL
+
+/* Action word used for ipv4 and ipv6 layouts.  */
+#define HASH_ACTION_RESV1              0xe000000000000000ULL
+#define HASH_ACTION_RDCOFF             0x1f00000000000000ULL
+#define HASH_ACTION_RDCOFF_SHIFT       56
+#define HASH_ACTION_ZFVALID            0x0080000000000000ULL
+#define HASH_ACTION_RESV2              0x0070000000000000ULL
+#define HASH_ACTION_ZFID               0x000fff0000000000ULL
+#define HASH_ACTION_ZFID_SHIFT         40
+#define HASH_ACTION_RESV3              0x000000ff00000000ULL
+#define HASH_ACTION_USERINFO           0x00000000ffffffffULL
+#define HASH_ACTION_USERINFO_SHIFT     0
+
+/* IPV4 address word.  Addresses are in network endian. */
+#define HASH_IP4ADDR_SADDR             0xffffffff00000000ULL
+#define HASH_IP4ADDR_SADDR_SHIFT       32
+#define HASH_IP4ADDR_DADDR             0x00000000ffffffffULL
+#define HASH_IP4ADDR_DADDR_SHIFT       0
+
+/* IPV6 address layout is 4 words, first two are saddr, next two
+ * are daddr.  Addresses are in network endian.
+ */
+
+/* EXT=0, FMT=0: optimized FCRAM hash entry, a single header word laid
+ * out per the HASH_OPT_HEADER_* masks above.
+ */
+struct fcram_hash_opt {
+       u64     header;
+};
+
+/* EXT=1, FMT=0: IPv4 FCRAM hash entry.
+ * header - generic HASH_HEADER_* word
+ * addrs  - source/destination IPv4 addresses (HASH_IP4ADDR_* layout,
+ *          network endian)
+ * ports  - ports/protocol word (HASH_PORT_* layout)
+ * action - result word (HASH_ACTION_* layout)
+ */
+struct fcram_hash_ipv4 {
+       u64     header;
+       u64     addrs;
+       u64     ports;
+       u64     action;
+};
+
+/* EXT=1, FMT=1: IPv6 FCRAM hash entry.
+ * header - generic HASH_HEADER_* word
+ * addrs  - four words: first two are the source address, next two the
+ *          destination address (network endian)
+ * ports  - ports/protocol word (HASH_PORT_* layout)
+ * action - result word (HASH_ACTION_* layout)
+ */
+struct fcram_hash_ipv6 {
+       u64     header;
+       u64     addrs[4];
+       u64     ports;
+       u64     action;
+};
+
+#define HASH_TBL_DATA_LOG(IDX)         (FFLP + 0x00010UL + (IDX) * 8192UL)
+#define  HASH_TBL_DATA_LOG_ERR         0x0000000080000000ULL
+#define  HASH_TBL_DATA_LOG_ADDR                0x000000007fffff00ULL
+#define  HASH_TBL_DATA_LOG_SYNDROME    0x00000000000000ffULL
+
+#define RX_DMA_CK_DIV                  (FZC_DMC + 0x00000UL)
+#define  RX_DMA_CK_DIV_CNT             0x000000000000ffffULL
+
+#define DEF_RDC(IDX)                   (FZC_DMC + 0x00008UL + (IDX) * 0x8UL)
+#define  DEF_RDC_VAL                   0x000000000000001fULL
+
+#define PT_DRR_WT(IDX)                 (FZC_DMC + 0x00028UL + (IDX) * 0x8UL)
+#define  PT_DRR_WT_VAL                 0x000000000000ffffULL
+
+#define PT_DRR_WEIGHT_DEFAULT_10G      0x0400
+#define PT_DRR_WEIGHT_DEFAULT_1G       0x0066
+
+#define PT_USE(IDX)                    (FZC_DMC + 0x00048UL + (IDX) * 0x8UL)
+#define  PT_USE_CNT                    0x00000000000fffffULL
+
+#define RED_RAN_INIT                   (FZC_DMC + 0x00068UL)
+#define  RED_RAN_INIT_OPMODE           0x0000000000010000ULL
+#define  RED_RAN_INIT_VAL              0x000000000000ffffULL
+
+#define RX_ADDR_MD                     (FZC_DMC + 0x00070UL)
+#define  RX_ADDR_MD_DBG_PT_MUX_SEL     0x000000000000000cULL
+#define  RX_ADDR_MD_RAM_ACC            0x0000000000000002ULL
+#define  RX_ADDR_MD_MODE32             0x0000000000000001ULL
+
+#define RDMC_PRE_PAR_ERR               (FZC_DMC + 0x00078UL)
+#define  RDMC_PRE_PAR_ERR_ERR          0x0000000000008000ULL
+#define  RDMC_PRE_PAR_ERR_MERR         0x0000000000004000ULL
+#define  RDMC_PRE_PAR_ERR_ADDR         0x00000000000000ffULL
+
+#define RDMC_SHA_PAR_ERR               (FZC_DMC + 0x00080UL)
+#define  RDMC_SHA_PAR_ERR_ERR          0x0000000000008000ULL
+#define  RDMC_SHA_PAR_ERR_MERR         0x0000000000004000ULL
+#define  RDMC_SHA_PAR_ERR_ADDR         0x00000000000000ffULL
+
+#define RDMC_MEM_ADDR                  (FZC_DMC + 0x00088UL)
+#define  RDMC_MEM_ADDR_PRE_SHAD                0x0000000000000100ULL
+#define  RDMC_MEM_ADDR_ADDR            0x00000000000000ffULL
+
+#define RDMC_MEM_DAT0                  (FZC_DMC + 0x00090UL)
+#define  RDMC_MEM_DAT0_DATA            0x00000000ffffffffULL /* bits 31:0 */
+
+#define RDMC_MEM_DAT1                  (FZC_DMC + 0x00098UL)
+#define  RDMC_MEM_DAT1_DATA            0x00000000ffffffffULL /* bits 63:32 */
+
+#define RDMC_MEM_DAT2                  (FZC_DMC + 0x000a0UL)
+#define  RDMC_MEM_DAT2_DATA            0x00000000ffffffffULL /* bits 95:64 */
+
+#define RDMC_MEM_DAT3                  (FZC_DMC + 0x000a8UL)
+#define  RDMC_MEM_DAT3_DATA            0x00000000ffffffffULL /* bits 127:96 */
+
+#define RDMC_MEM_DAT4                  (FZC_DMC + 0x000b0UL)
+#define  RDMC_MEM_DAT4_DATA            0x00000000000fffffULL /* bits 147:128 */
+
+#define RX_CTL_DAT_FIFO_STAT                   (FZC_DMC + 0x000b8UL)
+#define  RX_CTL_DAT_FIFO_STAT_ID_MISMATCH      0x0000000000000100ULL
+#define  RX_CTL_DAT_FIFO_STAT_ZCP_EOP_ERR      0x00000000000000f0ULL
+#define  RX_CTL_DAT_FIFO_STAT_IPP_EOP_ERR      0x000000000000000fULL
+
+#define RX_CTL_DAT_FIFO_MASK                   (FZC_DMC + 0x000c0UL)
+#define  RX_CTL_DAT_FIFO_MASK_ID_MISMATCH      0x0000000000000100ULL
+#define  RX_CTL_DAT_FIFO_MASK_ZCP_EOP_ERR      0x00000000000000f0ULL
+#define  RX_CTL_DAT_FIFO_MASK_IPP_EOP_ERR      0x000000000000000fULL
+
+#define RDMC_TRAINING_VECTOR                   (FZC_DMC + 0x000c8UL)
+#define  RDMC_TRAINING_VECTOR_TRAINING_VECTOR  0x00000000ffffffffULL
+
+#define RX_CTL_DAT_FIFO_STAT_DBG               (FZC_DMC + 0x000d0UL)
+#define  RX_CTL_DAT_FIFO_STAT_DBG_ID_MISMATCH  0x0000000000000100ULL
+#define  RX_CTL_DAT_FIFO_STAT_DBG_ZCP_EOP_ERR  0x00000000000000f0ULL
+#define  RX_CTL_DAT_FIFO_STAT_DBG_IPP_EOP_ERR  0x000000000000000fULL
+
+#define RDC_TBL(TBL,SLOT)              (FZC_ZCP + 0x10000UL + \
+                                        (TBL) * (8UL * 16UL) + \
+                                        (SLOT) * 8UL)
+#define  RDC_TBL_RDC                   0x000000000000000fULL
+
+#define RX_LOG_PAGE_VLD(IDX)           (FZC_DMC + 0x20000UL + (IDX) * 0x40UL)
+#define  RX_LOG_PAGE_VLD_FUNC          0x000000000000000cULL
+#define  RX_LOG_PAGE_VLD_FUNC_SHIFT    2
+#define  RX_LOG_PAGE_VLD_PAGE1         0x0000000000000002ULL
+#define  RX_LOG_PAGE_VLD_PAGE0         0x0000000000000001ULL
+
+#define RX_LOG_MASK1(IDX)              (FZC_DMC + 0x20008UL + (IDX) * 0x40UL)
+#define  RX_LOG_MASK1_MASK             0x00000000ffffffffULL
+
+#define RX_LOG_VAL1(IDX)               (FZC_DMC + 0x20010UL + (IDX) * 0x40UL)
+#define  RX_LOG_VAL1_VALUE             0x00000000ffffffffULL
+
+#define RX_LOG_MASK2(IDX)              (FZC_DMC + 0x20018UL + (IDX) * 0x40UL)
+#define  RX_LOG_MASK2_MASK             0x00000000ffffffffULL
+
+#define RX_LOG_VAL2(IDX)               (FZC_DMC + 0x20020UL + (IDX) * 0x40UL)
+#define  RX_LOG_VAL2_VALUE             0x00000000ffffffffULL
+
+#define RX_LOG_PAGE_RELO1(IDX)         (FZC_DMC + 0x20028UL + (IDX) * 0x40UL)
+#define  RX_LOG_PAGE_RELO1_RELO                0x00000000ffffffffULL
+
+#define RX_LOG_PAGE_RELO2(IDX)         (FZC_DMC + 0x20030UL + (IDX) * 0x40UL)
+#define  RX_LOG_PAGE_RELO2_RELO                0x00000000ffffffffULL
+
+#define RX_LOG_PAGE_HDL(IDX)           (FZC_DMC + 0x20038UL + (IDX) * 0x40UL)
+#define  RX_LOG_PAGE_HDL_HANDLE                0x00000000000fffffULL
+
+#define TX_LOG_PAGE_VLD(IDX)           (FZC_DMC + 0x40000UL + (IDX) * 0x200UL)
+#define  TX_LOG_PAGE_VLD_FUNC          0x000000000000000cULL
+#define  TX_LOG_PAGE_VLD_FUNC_SHIFT    2
+#define  TX_LOG_PAGE_VLD_PAGE1         0x0000000000000002ULL
+#define  TX_LOG_PAGE_VLD_PAGE0         0x0000000000000001ULL
+
+#define TX_LOG_MASK1(IDX)              (FZC_DMC + 0x40008UL + (IDX) * 0x200UL)
+#define  TX_LOG_MASK1_MASK             0x00000000ffffffffULL
+
+#define TX_LOG_VAL1(IDX)               (FZC_DMC + 0x40010UL + (IDX) * 0x200UL)
+#define  TX_LOG_VAL1_VALUE             0x00000000ffffffffULL
+
+#define TX_LOG_MASK2(IDX)              (FZC_DMC + 0x40018UL + (IDX) * 0x200UL)
+#define  TX_LOG_MASK2_MASK             0x00000000ffffffffULL
+
+#define TX_LOG_VAL2(IDX)               (FZC_DMC + 0x40020UL + (IDX) * 0x200UL)
+#define  TX_LOG_VAL2_VALUE             0x00000000ffffffffULL
+
+#define TX_LOG_PAGE_RELO1(IDX)         (FZC_DMC + 0x40028UL + (IDX) * 0x200UL)
+#define  TX_LOG_PAGE_RELO1_RELO                0x00000000ffffffffULL
+
+#define TX_LOG_PAGE_RELO2(IDX)         (FZC_DMC + 0x40030UL + (IDX) * 0x200UL)
+#define  TX_LOG_PAGE_RELO2_RELO                0x00000000ffffffffULL
+
+#define TX_LOG_PAGE_HDL(IDX)           (FZC_DMC + 0x40038UL + (IDX) * 0x200UL)
+#define  TX_LOG_PAGE_HDL_HANDLE                0x00000000000fffffULL
+
+#define TX_ADDR_MD                     (FZC_DMC + 0x45000UL)
+#define  TX_ADDR_MD_MODE32             0x0000000000000001ULL
+
+#define RDC_RED_PARA(IDX)              (FZC_DMC + 0x30000UL + (IDX) * 0x40UL)
+#define  RDC_RED_PARA_THRE_SYN         0x00000000fff00000ULL
+#define  RDC_RED_PARA_THRE_SYN_SHIFT   20
+#define  RDC_RED_PARA_WIN_SYN          0x00000000000f0000ULL
+#define  RDC_RED_PARA_WIN_SYN_SHIFT    16
+#define  RDC_RED_PARA_THRE             0x000000000000fff0ULL
+#define  RDC_RED_PARA_THRE_SHIFT       4
+#define  RDC_RED_PARA_WIN              0x000000000000000fULL
+#define  RDC_RED_PARA_WIN_SHIFT                0
+
+#define RED_DIS_CNT(IDX)               (FZC_DMC + 0x30008UL + (IDX) * 0x40UL)
+#define  RED_DIS_CNT_OFLOW             0x0000000000010000ULL
+#define  RED_DIS_CNT_COUNT             0x000000000000ffffULL
+
+#define IPP_CFIG                       (FZC_IPP + 0x00000UL)
+#define  IPP_CFIG_SOFT_RST             0x0000000080000000ULL
+#define  IPP_CFIG_IP_MAX_PKT           0x0000000001ffff00ULL
+#define  IPP_CFIG_IP_MAX_PKT_SHIFT     8
+#define  IPP_CFIG_FFLP_CS_PIO_W                0x0000000000000080ULL
+#define  IPP_CFIG_PFIFO_PIO_W          0x0000000000000040ULL
+#define  IPP_CFIG_DFIFO_PIO_W          0x0000000000000020ULL
+#define  IPP_CFIG_CKSUM_EN             0x0000000000000010ULL
+#define  IPP_CFIG_DROP_BAD_CRC         0x0000000000000008ULL
+#define  IPP_CFIG_DFIFO_ECC_EN         0x0000000000000004ULL
+#define  IPP_CFIG_DEBUG_BUS_OUT_EN     0x0000000000000002ULL
+#define  IPP_CFIG_IPP_ENABLE           0x0000000000000001ULL
+
+#define IPP_PKT_DIS                    (FZC_IPP + 0x00020UL)
+#define  IPP_PKT_DIS_COUNT             0x0000000000003fffULL
+
+#define IPP_BAD_CS_CNT                 (FZC_IPP + 0x00028UL)
+#define  IPP_BAD_CS_CNT_COUNT          0x0000000000003fffULL
+
+#define IPP_ECC                                (FZC_IPP + 0x00030UL)
+#define  IPP_ECC_COUNT                 0x00000000000000ffULL
+
+#define IPP_INT_STAT                   (FZC_IPP + 0x00040UL)
+#define  IPP_INT_STAT_SOP_MISS         0x0000000080000000ULL
+#define  IPP_INT_STAT_EOP_MISS         0x0000000040000000ULL
+#define  IPP_INT_STAT_DFIFO_UE         0x0000000030000000ULL
+#define  IPP_INT_STAT_DFIFO_CE         0x000000000c000000ULL
+#define  IPP_INT_STAT_DFIFO_ECC                0x0000000003000000ULL
+#define  IPP_INT_STAT_DFIFO_ECC_IDX    0x00000000007ff000ULL
+#define  IPP_INT_STAT_PFIFO_PERR       0x0000000000000800ULL
+#define  IPP_INT_STAT_ECC_ERR_MAX      0x0000000000000400ULL
+#define  IPP_INT_STAT_PFIFO_ERR_IDX    0x00000000000003f0ULL
+#define  IPP_INT_STAT_PFIFO_OVER       0x0000000000000008ULL
+#define  IPP_INT_STAT_PFIFO_UND                0x0000000000000004ULL
+#define  IPP_INT_STAT_BAD_CS_MX                0x0000000000000002ULL
+#define  IPP_INT_STAT_PKT_DIS_MX       0x0000000000000001ULL
+#define  IPP_INT_STAT_ALL              0x00000000ff7fffffULL
+
+#define IPP_MSK                                (FZC_IPP + 0x00048UL)
+#define  IPP_MSK_ECC_ERR_MX            0x0000000000000080ULL
+#define  IPP_MSK_DFIFO_EOP_SOP         0x0000000000000040ULL
+#define  IPP_MSK_DFIFO_UC              0x0000000000000020ULL
+#define  IPP_MSK_PFIFO_PAR             0x0000000000000010ULL
+#define  IPP_MSK_PFIFO_OVER            0x0000000000000008ULL
+#define  IPP_MSK_PFIFO_UND             0x0000000000000004ULL
+#define  IPP_MSK_BAD_CS                        0x0000000000000002ULL
+#define  IPP_MSK_PKT_DIS_CNT           0x0000000000000001ULL
+#define  IPP_MSK_ALL                   0x00000000000000ffULL
+
+#define IPP_PFIFO_RD0                  (FZC_IPP + 0x00060UL)
+#define  IPP_PFIFO_RD0_DATA            0x00000000ffffffffULL /* bits 31:0 */
+
+#define IPP_PFIFO_RD1                  (FZC_IPP + 0x00068UL)
+#define  IPP_PFIFO_RD1_DATA            0x00000000ffffffffULL /* bits 63:32 */
+
+#define IPP_PFIFO_RD2                  (FZC_IPP + 0x00070UL)
+#define  IPP_PFIFO_RD2_DATA            0x00000000ffffffffULL /* bits 95:64 */
+
+#define IPP_PFIFO_RD3                  (FZC_IPP + 0x00078UL)
+#define  IPP_PFIFO_RD3_DATA            0x00000000ffffffffULL /* bits 127:96 */
+
+#define IPP_PFIFO_RD4                  (FZC_IPP + 0x00080UL)
+#define  IPP_PFIFO_RD4_DATA            0x00000000ffffffffULL /* bits 145:128 */
+
+#define IPP_PFIFO_WR0                  (FZC_IPP + 0x00088UL)
+#define  IPP_PFIFO_WR0_DATA            0x00000000ffffffffULL /* bits 31:0 */
+
+#define IPP_PFIFO_WR1                  (FZC_IPP + 0x00090UL)
+#define  IPP_PFIFO_WR1_DATA            0x00000000ffffffffULL /* bits 63:32 */
+
+#define IPP_PFIFO_WR2                  (FZC_IPP + 0x00098UL)
+#define  IPP_PFIFO_WR2_DATA            0x00000000ffffffffULL /* bits 95:64 */
+
+#define IPP_PFIFO_WR3                  (FZC_IPP + 0x000a0UL)
+#define  IPP_PFIFO_WR3_DATA            0x00000000ffffffffULL /* bits 127:96 */
+
+#define IPP_PFIFO_WR4                  (FZC_IPP + 0x000a8UL)
+#define  IPP_PFIFO_WR4_DATA            0x00000000ffffffffULL /* bits 145:128 */
+
+#define IPP_PFIFO_RD_PTR               (FZC_IPP + 0x000b0UL)
+#define  IPP_PFIFO_RD_PTR_PTR          0x000000000000003fULL
+
+#define IPP_PFIFO_WR_PTR               (FZC_IPP + 0x000b8UL)
+#define  IPP_PFIFO_WR_PTR_PTR          0x000000000000007fULL
+
+#define IPP_DFIFO_RD0                  (FZC_IPP + 0x000c0UL)
+#define  IPP_DFIFO_RD0_DATA            0x00000000ffffffffULL /* bits 31:0 */
+
+#define IPP_DFIFO_RD1                  (FZC_IPP + 0x000c8UL)
+#define  IPP_DFIFO_RD1_DATA            0x00000000ffffffffULL /* bits 63:32 */
+
+#define IPP_DFIFO_RD2                  (FZC_IPP + 0x000d0UL)
+#define  IPP_DFIFO_RD2_DATA            0x00000000ffffffffULL /* bits 95:64 */
+
+#define IPP_DFIFO_RD3                  (FZC_IPP + 0x000d8UL)
+#define  IPP_DFIFO_RD3_DATA            0x00000000ffffffffULL /* bits 127:96 */
+
+#define IPP_DFIFO_RD4                  (FZC_IPP + 0x000e0UL)
+#define  IPP_DFIFO_RD4_DATA            0x00000000ffffffffULL /* bits 145:128 */
+
+#define IPP_DFIFO_WR0                  (FZC_IPP + 0x000e8UL)
+#define  IPP_DFIFO_WR0_DATA            0x00000000ffffffffULL /* bits 31:0 */
+
+#define IPP_DFIFO_WR1                  (FZC_IPP + 0x000f0UL)
+#define  IPP_DFIFO_WR1_DATA            0x00000000ffffffffULL /* bits 63:32 */
+
+#define IPP_DFIFO_WR2                  (FZC_IPP + 0x000f8UL)
+#define  IPP_DFIFO_WR2_DATA            0x00000000ffffffffULL /* bits 95:64 */
+
+#define IPP_DFIFO_WR3                  (FZC_IPP + 0x00100UL)
+#define  IPP_DFIFO_WR3_DATA            0x00000000ffffffffULL /* bits 127:96 */
+
+#define IPP_DFIFO_WR4                  (FZC_IPP + 0x00108UL)
+#define  IPP_DFIFO_WR4_DATA            0x00000000ffffffffULL /* bits 145:128 */
+
+#define IPP_DFIFO_RD_PTR               (FZC_IPP + 0x00110UL)
+#define  IPP_DFIFO_RD_PTR_PTR          0x0000000000000fffULL
+
+#define IPP_DFIFO_WR_PTR               (FZC_IPP + 0x00118UL)
+#define  IPP_DFIFO_WR_PTR_PTR          0x0000000000000fffULL
+
+#define IPP_SM                         (FZC_IPP + 0x00120UL)
+#define  IPP_SM_SM                     0x00000000ffffffffULL
+
+#define IPP_CS_STAT                    (FZC_IPP + 0x00128UL)
+#define  IPP_CS_STAT_BCYC_CNT          0x00000000ff000000ULL
+#define  IPP_CS_STAT_IP_LEN            0x0000000000fff000ULL
+#define  IPP_CS_STAT_CS_FAIL           0x0000000000000800ULL
+#define  IPP_CS_STAT_TERM              0x0000000000000400ULL
+#define  IPP_CS_STAT_BAD_NUM           0x0000000000000200ULL
+#define  IPP_CS_STAT_CS_STATE          0x00000000000001ffULL
+
+#define IPP_FFLP_CS_INFO               (FZC_IPP + 0x00130UL)
+#define  IPP_FFLP_CS_INFO_PKT_ID       0x0000000000003c00ULL
+#define  IPP_FFLP_CS_INFO_L4_PROTO     0x0000000000000300ULL
+#define  IPP_FFLP_CS_INFO_V4_HD_LEN    0x00000000000000f0ULL
+#define  IPP_FFLP_CS_INFO_L3_VER       0x000000000000000cULL
+#define  IPP_FFLP_CS_INFO_L2_OP                0x0000000000000003ULL
+
+#define IPP_DBG_SEL                    (FZC_IPP + 0x00138UL)
+#define  IPP_DBG_SEL_SEL               0x000000000000000fULL
+
+#define IPP_DFIFO_ECC_SYND             (FZC_IPP + 0x00140UL)
+#define  IPP_DFIFO_ECC_SYND_SYND       0x000000000000ffffULL
+
+#define IPP_DFIFO_EOP_RD_PTR           (FZC_IPP + 0x00148UL)
+#define  IPP_DFIFO_EOP_RD_PTR_PTR      0x0000000000000fffULL
+
+#define IPP_ECC_CTL                    (FZC_IPP + 0x00150UL)
+#define  IPP_ECC_CTL_DIS_DBL           0x0000000080000000ULL
+#define  IPP_ECC_CTL_COR_DBL           0x0000000000020000ULL
+#define  IPP_ECC_CTL_COR_SNG           0x0000000000010000ULL
+#define  IPP_ECC_CTL_COR_ALL           0x0000000000000400ULL
+#define  IPP_ECC_CTL_COR_1             0x0000000000000100ULL
+#define  IPP_ECC_CTL_COR_LST           0x0000000000000004ULL
+#define  IPP_ECC_CTL_COR_SND           0x0000000000000002ULL
+#define  IPP_ECC_CTL_COR_FSR           0x0000000000000001ULL
+
+#define NIU_DFIFO_ENTRIES              1024
+#define ATLAS_P0_P1_DFIFO_ENTRIES      2048
+#define ATLAS_P2_P3_DFIFO_ENTRIES      1024
+
+#define ZCP_CFIG                       (FZC_ZCP + 0x00000UL)
+#define  ZCP_CFIG_ZCP_32BIT_MODE       0x0000000001000000ULL
+#define  ZCP_CFIG_ZCP_DEBUG_SEL                0x0000000000ff0000ULL
+#define  ZCP_CFIG_DMA_TH               0x000000000000ffe0ULL
+#define  ZCP_CFIG_ECC_CHK_DIS          0x0000000000000010ULL
+#define  ZCP_CFIG_PAR_CHK_DIS          0x0000000000000008ULL
+#define  ZCP_CFIG_DIS_BUFF_RSP_IF      0x0000000000000004ULL
+#define  ZCP_CFIG_DIS_BUFF_REQ_IF      0x0000000000000002ULL
+#define  ZCP_CFIG_ZC_ENABLE            0x0000000000000001ULL
+
+#define ZCP_INT_STAT                   (FZC_ZCP + 0x00008UL)
+#define  ZCP_INT_STAT_RRFIFO_UNDERRUN  0x0000000000008000ULL
+#define  ZCP_INT_STAT_RRFIFO_OVERRUN   0x0000000000004000ULL
+#define  ZCP_INT_STAT_RSPFIFO_UNCOR_ERR        0x0000000000001000ULL
+#define  ZCP_INT_STAT_BUFFER_OVERFLOW  0x0000000000000800ULL
+#define  ZCP_INT_STAT_STAT_TBL_PERR    0x0000000000000400ULL
+#define  ZCP_INT_STAT_DYN_TBL_PERR     0x0000000000000200ULL
+#define  ZCP_INT_STAT_BUF_TBL_PERR     0x0000000000000100ULL
+#define  ZCP_INT_STAT_TT_PROGRAM_ERR   0x0000000000000080ULL
+#define  ZCP_INT_STAT_RSP_TT_INDEX_ERR 0x0000000000000040ULL
+#define  ZCP_INT_STAT_SLV_TT_INDEX_ERR 0x0000000000000020ULL
+#define  ZCP_INT_STAT_ZCP_TT_INDEX_ERR 0x0000000000000010ULL
+#define  ZCP_INT_STAT_CFIFO_ECC3       0x0000000000000008ULL
+#define  ZCP_INT_STAT_CFIFO_ECC2       0x0000000000000004ULL
+#define  ZCP_INT_STAT_CFIFO_ECC1       0x0000000000000002ULL
+#define  ZCP_INT_STAT_CFIFO_ECC0       0x0000000000000001ULL
+#define  ZCP_INT_STAT_ALL              0x000000000000ffffULL
+
+#define ZCP_INT_MASK                   (FZC_ZCP + 0x00010UL)
+#define  ZCP_INT_MASK_RRFIFO_UNDERRUN  0x0000000000008000ULL
+#define  ZCP_INT_MASK_RRFIFO_OVERRUN   0x0000000000004000ULL
+#define  ZCP_INT_MASK_LOJ              0x0000000000002000ULL
+#define  ZCP_INT_MASK_RSPFIFO_UNCOR_ERR        0x0000000000001000ULL
+#define  ZCP_INT_MASK_BUFFER_OVERFLOW  0x0000000000000800ULL
+#define  ZCP_INT_MASK_STAT_TBL_PERR    0x0000000000000400ULL
+#define  ZCP_INT_MASK_DYN_TBL_PERR     0x0000000000000200ULL
+#define  ZCP_INT_MASK_BUF_TBL_PERR     0x0000000000000100ULL
+#define  ZCP_INT_MASK_TT_PROGRAM_ERR   0x0000000000000080ULL
+#define  ZCP_INT_MASK_RSP_TT_INDEX_ERR 0x0000000000000040ULL
+#define  ZCP_INT_MASK_SLV_TT_INDEX_ERR 0x0000000000000020ULL
+#define  ZCP_INT_MASK_ZCP_TT_INDEX_ERR 0x0000000000000010ULL
+#define  ZCP_INT_MASK_CFIFO_ECC3       0x0000000000000008ULL
+#define  ZCP_INT_MASK_CFIFO_ECC2       0x0000000000000004ULL
+#define  ZCP_INT_MASK_CFIFO_ECC1       0x0000000000000002ULL
+#define  ZCP_INT_MASK_CFIFO_ECC0       0x0000000000000001ULL
+#define  ZCP_INT_MASK_ALL              0x000000000000ffffULL
+
+#define BAM4BUF                                (FZC_ZCP + 0x00018UL)
+#define  BAM4BUF_LOJ                   0x0000000080000000ULL
+#define  BAM4BUF_EN_CK                 0x0000000040000000ULL
+#define  BAM4BUF_IDX_END0              0x000000003ff00000ULL
+#define  BAM4BUF_IDX_ST0               0x00000000000ffc00ULL
+#define  BAM4BUF_OFFSET0               0x00000000000003ffULL
+
+#define BAM8BUF                                (FZC_ZCP + 0x00020UL)
+#define  BAM8BUF_LOJ                   0x0000000080000000ULL
+#define  BAM8BUF_EN_CK                 0x0000000040000000ULL
+#define  BAM8BUF_IDX_END1              0x000000003ff00000ULL
+#define  BAM8BUF_IDX_ST1               0x00000000000ffc00ULL
+#define  BAM8BUF_OFFSET1               0x00000000000003ffULL
+
+#define BAM16BUF                       (FZC_ZCP + 0x00028UL)
+#define  BAM16BUF_LOJ                  0x0000000080000000ULL
+#define  BAM16BUF_EN_CK                        0x0000000040000000ULL
+#define  BAM16BUF_IDX_END2             0x000000003ff00000ULL
+#define  BAM16BUF_IDX_ST2              0x00000000000ffc00ULL
+#define  BAM16BUF_OFFSET2              0x00000000000003ffULL
+
+#define BAM32BUF                       (FZC_ZCP + 0x00030UL)
+#define  BAM32BUF_LOJ                  0x0000000080000000ULL
+#define  BAM32BUF_EN_CK                        0x0000000040000000ULL
+#define  BAM32BUF_IDX_END3             0x000000003ff00000ULL
+#define  BAM32BUF_IDX_ST3              0x00000000000ffc00ULL
+#define  BAM32BUF_OFFSET3              0x00000000000003ffULL
+
+#define DST4BUF                                (FZC_ZCP + 0x00038UL)
+#define  DST4BUF_DS_OFFSET0            0x00000000000003ffULL
+
+#define DST8BUF                                (FZC_ZCP + 0x00040UL)
+#define  DST8BUF_DS_OFFSET1            0x00000000000003ffULL
+
+#define DST16BUF                       (FZC_ZCP + 0x00048UL)
+#define  DST16BUF_DS_OFFSET2           0x00000000000003ffULL
+
+#define DST32BUF                       (FZC_ZCP + 0x00050UL)
+#define  DST32BUF_DS_OFFSET3           0x00000000000003ffULL
+
+#define ZCP_RAM_DATA0                  (FZC_ZCP + 0x00058UL)
+#define  ZCP_RAM_DATA0_DAT0            0x00000000ffffffffULL
+
+#define ZCP_RAM_DATA1                  (FZC_ZCP + 0x00060UL)
+/* "DAT10" is a historical typo for "DATA1"; the misspelled name is
+ * retained so existing users keep compiling, and a correctly spelled
+ * alias is provided to match ZCP_RAM_DATA0/2/3/4 naming.
+ */
+#define  ZCP_RAM_DAT10_DAT1            0x00000000ffffffffULL
+#define  ZCP_RAM_DATA1_DAT1            ZCP_RAM_DAT10_DAT1
+
+#define ZCP_RAM_DATA2                  (FZC_ZCP + 0x00068UL)
+#define  ZCP_RAM_DATA2_DAT2            0x00000000ffffffffULL
+
+#define ZCP_RAM_DATA3                  (FZC_ZCP + 0x00070UL)
+#define  ZCP_RAM_DATA3_DAT3            0x00000000ffffffffULL
+
+#define ZCP_RAM_DATA4                  (FZC_ZCP + 0x00078UL)
+#define  ZCP_RAM_DATA4_DAT4            0x00000000000000ffULL
+
+#define ZCP_RAM_BE                     (FZC_ZCP + 0x00080UL)
+#define  ZCP_RAM_BE_VAL                        0x000000000001ffffULL
+
+#define ZCP_RAM_ACC                    (FZC_ZCP + 0x00088UL) /* indirect RAM access: R/W op, RAM select, CFIFO address */
+#define  ZCP_RAM_ACC_BUSY              0x0000000080000000ULL
+#define  ZCP_RAM_ACC_READ              0x0000000040000000ULL
+#define  ZCP_RAM_ACC_WRITE             0x0000000000000000ULL
+#define  ZCP_RAM_ACC_LOJ               0x0000000020000000ULL
+#define  ZCP_RAM_ACC_ZFCID             0x000000001ffe0000ULL
+#define  ZCP_RAM_ACC_ZFCID_SHIFT       17
+#define  ZCP_RAM_ACC_RAM_SEL           0x000000000001f000ULL
+#define  ZCP_RAM_ACC_RAM_SEL_SHIFT     12
+#define  ZCP_RAM_ACC_CFIFOADDR         0x0000000000000fffULL
+#define  ZCP_RAM_ACC_CFIFOADDR_SHIFT   0
+
+#define ZCP_RAM_SEL_BAM(INDEX)         (0x00 + (INDEX)) /* values for the ZCP_RAM_ACC_RAM_SEL field */
+#define ZCP_RAM_SEL_TT_STATIC          0x08
+#define ZCP_RAM_SEL_TT_DYNAMIC         0x09
+#define ZCP_RAM_SEL_CFIFO(PORT)                (0x10 + (PORT))
+
+#define NIU_CFIFO_ENTRIES              1024 /* CFIFO depth per chip variant */
+#define ATLAS_P0_P1_CFIFO_ENTRIES      2048
+#define ATLAS_P2_P3_CFIFO_ENTRIES      1024
+
+#define CHK_BIT_DATA                   (FZC_ZCP + 0x00090UL)
+#define  CHK_BIT_DATA_DATA             0x000000000000ffffULL
+
+#define RESET_CFIFO                    (FZC_ZCP + 0x00098UL)
+#define  RESET_CFIFO_RST(PORT)         (0x1 << (PORT))
+
+#define CFIFO_ECC(PORT)                        (FZC_ZCP + 0x000a0UL + (PORT) * 8UL)
+#define  CFIFO_ECC_DIS_DBLBIT_ERR      0x0000000080000000ULL
+#define  CFIFO_ECC_DBLBIT_ERR          0x0000000000020000ULL
+#define  CFIFO_ECC_SINGLEBIT_ERR       0x0000000000010000ULL
+#define  CFIFO_ECC_ALL_PKT             0x0000000000000400ULL
+#define  CFIFO_ECC_LAST_LINE           0x0000000000000004ULL
+#define  CFIFO_ECC_2ND_LINE            0x0000000000000002ULL
+#define  CFIFO_ECC_1ST_LINE            0x0000000000000001ULL
+
+#define ZCP_TRAINING_VECTOR            (FZC_ZCP + 0x000c0UL)
+#define  ZCP_TRAINING_VECTOR_VECTOR    0x00000000ffffffffULL
+
+#define ZCP_STATE_MACHINE              (FZC_ZCP + 0x000c8UL)
+#define  ZCP_STATE_MACHINE_SM          0x00000000ffffffffULL
+
+/* Same bits as ZCP_INT_STAT */
+#define ZCP_INT_STAT_TEST              (FZC_ZCP + 0x00108UL)
+
+#define RXDMA_CFIG1(IDX)               (DMC + 0x00000UL + (IDX) * 0x200UL) /* per-RX-channel registers: 0x200UL stride per IDX */
+#define  RXDMA_CFIG1_EN                        0x0000000080000000ULL
+#define  RXDMA_CFIG1_RST               0x0000000040000000ULL
+#define  RXDMA_CFIG1_QST               0x0000000020000000ULL
+#define  RXDMA_CFIG1_MBADDR_H          0x0000000000000fffULL /* mboxaddr 43:32 */
+
+#define RXDMA_CFIG2(IDX)               (DMC + 0x00008UL + (IDX) * 0x200UL)
+#define  RXDMA_CFIG2_MBADDR_L          0x00000000ffffffc0ULL /* mboxaddr 31:6 */
+#define  RXDMA_CFIG2_OFFSET            0x0000000000000006ULL
+#define  RXDMA_CFIG2_OFFSET_SHIFT      1
+#define  RXDMA_CFIG2_FULL_HDR          0x0000000000000001ULL
+
+#define RBR_CFIG_A(IDX)                        (DMC + 0x00010UL + (IDX) * 0x200UL)
+#define  RBR_CFIG_A_LEN                        0xffff000000000000ULL
+#define  RBR_CFIG_A_LEN_SHIFT          48
+#define  RBR_CFIG_A_STADDR_BASE                0x00000ffffffc0000ULL
+#define  RBR_CFIG_A_STADDR             0x000000000003ffc0ULL
+
+#define RBR_CFIG_B(IDX)                        (DMC + 0x00018UL + (IDX) * 0x200UL)
+#define  RBR_CFIG_B_BLKSIZE            0x0000000003000000ULL
+#define  RBR_CFIG_B_BLKSIZE_SHIFT      24
+#define  RBR_CFIG_B_VLD2               0x0000000000800000ULL
+#define  RBR_CFIG_B_BUFSZ2             0x0000000000030000ULL
+#define  RBR_CFIG_B_BUFSZ2_SHIFT       16
+#define  RBR_CFIG_B_VLD1               0x0000000000008000ULL
+#define  RBR_CFIG_B_BUFSZ1             0x0000000000000300ULL
+#define  RBR_CFIG_B_BUFSZ1_SHIFT       8
+#define  RBR_CFIG_B_VLD0               0x0000000000000080ULL
+#define  RBR_CFIG_B_BUFSZ0             0x0000000000000003ULL
+#define  RBR_CFIG_B_BUFSZ0_SHIFT       0
+
+#define RBR_BLKSIZE_4K                 0x0 /* encodings for the BLKSIZE/BUFSZ fields above */
+#define RBR_BLKSIZE_8K                 0x1
+#define RBR_BLKSIZE_16K                        0x2
+#define RBR_BLKSIZE_32K                        0x3
+#define RBR_BUFSZ2_2K                  0x0
+#define RBR_BUFSZ2_4K                  0x1
+#define RBR_BUFSZ2_8K                  0x2
+#define RBR_BUFSZ2_16K                 0x3
+#define RBR_BUFSZ1_1K                  0x0
+#define RBR_BUFSZ1_2K                  0x1
+#define RBR_BUFSZ1_4K                  0x2
+#define RBR_BUFSZ1_8K                  0x3
+#define RBR_BUFSZ0_256                 0x0
+#define RBR_BUFSZ0_512                 0x1
+#define RBR_BUFSZ0_1K                  0x2
+#define RBR_BUFSZ0_2K                  0x3
+
+#define RBR_KICK(IDX)                  (DMC + 0x00020UL + (IDX) * 0x200UL)
+#define  RBR_KICK_BKADD                        0x000000000000ffffULL
+
+#define RBR_STAT(IDX)                  (DMC + 0x00028UL + (IDX) * 0x200UL)
+#define  RBR_STAT_QLEN                 0x000000000000ffffULL
+
+#define RBR_HDH(IDX)                   (DMC + 0x00030UL + (IDX) * 0x200UL)
+#define  RBR_HDH_HEAD_H                        0x0000000000000fffULL
+
+#define RBR_HDL(IDX)                   (DMC + 0x00038UL + (IDX) * 0x200UL)
+#define  RBR_HDL_HEAD_L                        0x00000000fffffffcULL
+
+#define RCRCFIG_A(IDX)                 (DMC + 0x00040UL + (IDX) * 0x200UL)
+#define  RCRCFIG_A_LEN                 0xffff000000000000ULL
+#define  RCRCFIG_A_LEN_SHIFT           48
+#define  RCRCFIG_A_STADDR_BASE         0x00000ffffff80000ULL
+#define  RCRCFIG_A_STADDR              0x000000000007ffc0ULL
+
+#define RCRCFIG_B(IDX)                 (DMC + 0x00048UL + (IDX) * 0x200UL)
+#define  RCRCFIG_B_PTHRES              0x00000000ffff0000ULL
+#define  RCRCFIG_B_PTHRES_SHIFT                16
+#define  RCRCFIG_B_ENTOUT              0x0000000000008000ULL
+#define  RCRCFIG_B_TIMEOUT             0x000000000000003fULL
+#define  RCRCFIG_B_TIMEOUT_SHIFT       0
+
+#define RCRSTAT_A(IDX)                 (DMC + 0x00050UL + (IDX) * 0x200UL)
+#define  RCRSTAT_A_QLEN                        0x000000000000ffffULL
+
+#define RCRSTAT_B(IDX)                 (DMC + 0x00058UL + (IDX) * 0x200UL)
+#define  RCRSTAT_B_TIPTR_H             0x0000000000000fffULL
+
+#define RCRSTAT_C(IDX)                 (DMC + 0x00060UL + (IDX) * 0x200UL)
+#define  RCRSTAT_C_TIPTR_L             0x00000000fffffff8ULL
+
+#define RX_DMA_CTL_STAT(IDX)           (DMC + 0x00070UL + (IDX) * 0x200UL)
+#define  RX_DMA_CTL_STAT_RBR_TMOUT     0x0020000000000000ULL
+#define  RX_DMA_CTL_STAT_RSP_CNT_ERR   0x0010000000000000ULL
+#define  RX_DMA_CTL_STAT_BYTE_EN_BUS   0x0008000000000000ULL
+#define  RX_DMA_CTL_STAT_RSP_DAT_ERR   0x0004000000000000ULL
+#define  RX_DMA_CTL_STAT_RCR_ACK_ERR   0x0002000000000000ULL
+#define  RX_DMA_CTL_STAT_DC_FIFO_ERR   0x0001000000000000ULL
+#define  RX_DMA_CTL_STAT_MEX           0x0000800000000000ULL
+#define  RX_DMA_CTL_STAT_RCRTHRES      0x0000400000000000ULL
+#define  RX_DMA_CTL_STAT_RCRTO         0x0000200000000000ULL
+#define  RX_DMA_CTL_STAT_RCR_SHA_PAR   0x0000100000000000ULL
+#define  RX_DMA_CTL_STAT_RBR_PRE_PAR   0x0000080000000000ULL
+#define  RX_DMA_CTL_STAT_PORT_DROP_PKT 0x0000040000000000ULL
+#define  RX_DMA_CTL_STAT_WRED_DROP     0x0000020000000000ULL
+#define  RX_DMA_CTL_STAT_RBR_PRE_EMTY  0x0000010000000000ULL
+#define  RX_DMA_CTL_STAT_RCRSHADOW_FULL        0x0000008000000000ULL
+#define  RX_DMA_CTL_STAT_CONFIG_ERR    0x0000004000000000ULL
+#define  RX_DMA_CTL_STAT_RCRINCON      0x0000002000000000ULL
+#define  RX_DMA_CTL_STAT_RCRFULL       0x0000001000000000ULL
+#define  RX_DMA_CTL_STAT_RBR_EMPTY     0x0000000800000000ULL
+#define  RX_DMA_CTL_STAT_RBRFULL       0x0000000400000000ULL
+#define  RX_DMA_CTL_STAT_RBRLOGPAGE    0x0000000200000000ULL
+#define  RX_DMA_CTL_STAT_CFIGLOGPAGE   0x0000000100000000ULL
+#define  RX_DMA_CTL_STAT_PTRREAD       0x00000000ffff0000ULL
+#define  RX_DMA_CTL_STAT_PTRREAD_SHIFT 16
+#define  RX_DMA_CTL_STAT_PKTREAD       0x000000000000ffffULL
+#define  RX_DMA_CTL_STAT_PKTREAD_SHIFT 0
+
+#define  RX_DMA_CTL_STAT_CHAN_FATAL    (RX_DMA_CTL_STAT_RBR_TMOUT | \
+                                        RX_DMA_CTL_STAT_RSP_CNT_ERR | \
+                                        RX_DMA_CTL_STAT_BYTE_EN_BUS | \
+                                        RX_DMA_CTL_STAT_RSP_DAT_ERR | \
+                                        RX_DMA_CTL_STAT_RCR_ACK_ERR | \
+                                        RX_DMA_CTL_STAT_RCR_SHA_PAR | \
+                                        RX_DMA_CTL_STAT_RBR_PRE_PAR | \
+                                        RX_DMA_CTL_STAT_CONFIG_ERR | \
+                                        RX_DMA_CTL_STAT_RCRINCON | \
+                                        RX_DMA_CTL_STAT_RCRFULL | \
+                                        RX_DMA_CTL_STAT_RBRFULL | \
+                                        RX_DMA_CTL_STAT_RBRLOGPAGE | \
+                                        RX_DMA_CTL_STAT_CFIGLOGPAGE)
+
+#define RX_DMA_CTL_STAT_PORT_FATAL     (RX_DMA_CTL_STAT_DC_FIFO_ERR)
+
+#define RX_DMA_CTL_WRITE_CLEAR_ERRS    (RX_DMA_CTL_STAT_RBR_EMPTY | \
+                                        RX_DMA_CTL_STAT_RCRSHADOW_FULL | \
+                                        RX_DMA_CTL_STAT_RBR_PRE_EMTY | \
+                                        RX_DMA_CTL_STAT_WRED_DROP | \
+                                        RX_DMA_CTL_STAT_PORT_DROP_PKT | \
+                                        RX_DMA_CTL_STAT_RCRTO | \
+                                        RX_DMA_CTL_STAT_RCRTHRES | \
+                                        RX_DMA_CTL_STAT_DC_FIFO_ERR)
+
+#define RCR_FLSH(IDX)                  (DMC + 0x00078UL + (IDX) * 0x200UL)
+#define  RCR_FLSH_FLSH                 0x0000000000000001ULL
+
+#define RXMISC(IDX)                    (DMC + 0x00090UL + (IDX) * 0x200UL)
+#define  RXMISC_OFLOW                  0x0000000000010000ULL
+#define  RXMISC_COUNT                  0x000000000000ffffULL
+
+#define RX_DMA_CTL_STAT_DBG(IDX)       (DMC + 0x00098UL + (IDX) * 0x200UL) /* same bit layout as RX_DMA_CTL_STAT above */
+#define  RX_DMA_CTL_STAT_DBG_RBR_TMOUT         0x0020000000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RSP_CNT_ERR       0x0010000000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_BYTE_EN_BUS       0x0008000000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RSP_DAT_ERR       0x0004000000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RCR_ACK_ERR       0x0002000000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_DC_FIFO_ERR       0x0001000000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_MEX               0x0000800000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RCRTHRES          0x0000400000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RCRTO             0x0000200000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RCR_SHA_PAR       0x0000100000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RBR_PRE_PAR       0x0000080000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_PORT_DROP_PKT     0x0000040000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_WRED_DROP         0x0000020000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RBR_PRE_EMTY      0x0000010000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RCRSHADOW_FULL    0x0000008000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_CONFIG_ERR                0x0000004000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RCRINCON          0x0000002000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RCRFULL           0x0000001000000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RBR_EMPTY         0x0000000800000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RBRFULL           0x0000000400000000ULL
+#define  RX_DMA_CTL_STAT_DBG_RBRLOGPAGE                0x0000000200000000ULL
+#define  RX_DMA_CTL_STAT_DBG_CFIGLOGPAGE       0x0000000100000000ULL
+#define  RX_DMA_CTL_STAT_DBG_PTRREAD           0x00000000ffff0000ULL
+#define  RX_DMA_CTL_STAT_DBG_PKTREAD           0x000000000000ffffULL
+
+#define RX_DMA_ENT_MSK(IDX)            (DMC + 0x00068UL + (IDX) * 0x200UL) /* note: offset 0x68, listed out of address order */
+#define  RX_DMA_ENT_MSK_RBR_TMOUT      0x0000000000200000ULL
+#define  RX_DMA_ENT_MSK_RSP_CNT_ERR    0x0000000000100000ULL
+#define  RX_DMA_ENT_MSK_BYTE_EN_BUS    0x0000000000080000ULL
+#define  RX_DMA_ENT_MSK_RSP_DAT_ERR    0x0000000000040000ULL
+#define  RX_DMA_ENT_MSK_RCR_ACK_ERR    0x0000000000020000ULL
+#define  RX_DMA_ENT_MSK_DC_FIFO_ERR    0x0000000000010000ULL
+#define  RX_DMA_ENT_MSK_RCRTHRES       0x0000000000004000ULL
+#define  RX_DMA_ENT_MSK_RCRTO          0x0000000000002000ULL
+#define  RX_DMA_ENT_MSK_RCR_SHA_PAR    0x0000000000001000ULL
+#define  RX_DMA_ENT_MSK_RBR_PRE_PAR    0x0000000000000800ULL
+#define  RX_DMA_ENT_MSK_PORT_DROP_PKT  0x0000000000000400ULL
+#define  RX_DMA_ENT_MSK_WRED_DROP      0x0000000000000200ULL
+#define  RX_DMA_ENT_MSK_RBR_PRE_EMTY   0x0000000000000100ULL
+#define  RX_DMA_ENT_MSK_RCR_SHADOW_FULL        0x0000000000000080ULL
+#define  RX_DMA_ENT_MSK_CONFIG_ERR     0x0000000000000040ULL
+#define  RX_DMA_ENT_MSK_RCRINCON       0x0000000000000020ULL
+#define  RX_DMA_ENT_MSK_RCRFULL                0x0000000000000010ULL
+#define  RX_DMA_ENT_MSK_RBR_EMPTY      0x0000000000000008ULL
+#define  RX_DMA_ENT_MSK_RBRFULL                0x0000000000000004ULL
+#define  RX_DMA_ENT_MSK_RBRLOGPAGE     0x0000000000000002ULL
+#define  RX_DMA_ENT_MSK_CFIGLOGPAGE    0x0000000000000001ULL
+#define  RX_DMA_ENT_MSK_ALL            0x00000000003f7fffULL
+
+
+#define TX_RNG_CFIG(IDX)               (DMC + 0x40000UL + (IDX) * 0x200UL) /* per-TX-channel registers: 0x200UL stride per IDX */
+#define  TX_RNG_CFIG_LEN               0x1fff000000000000ULL
+#define  TX_RNG_CFIG_LEN_SHIFT         48
+#define  TX_RNG_CFIG_STADDR_BASE       0x00000ffffff80000ULL
+#define  TX_RNG_CFIG_STADDR            0x000000000007ffc0ULL
+
+#define TX_RING_HDL(IDX)               (DMC + 0x40010UL + (IDX) * 0x200UL)
+#define  TX_RING_HDL_WRAP              0x0000000000080000ULL
+#define  TX_RING_HDL_HEAD              0x000000000007fff8ULL
+#define  TX_RING_HDL_HEAD_SHIFT                3
+
+#define TX_RING_KICK(IDX)              (DMC + 0x40018UL + (IDX) * 0x200UL)
+#define  TX_RING_KICK_WRAP             0x0000000000080000ULL
+#define  TX_RING_KICK_TAIL             0x000000000007fff8ULL
+
+#define TX_ENT_MSK(IDX)                        (DMC + 0x40020UL + (IDX) * 0x200UL)
+#define  TX_ENT_MSK_MK                 0x0000000000008000ULL
+#define  TX_ENT_MSK_MBOX_ERR           0x0000000000000080ULL
+#define  TX_ENT_MSK_PKT_SIZE_ERR       0x0000000000000040ULL
+#define  TX_ENT_MSK_TX_RING_OFLOW      0x0000000000000020ULL
+#define  TX_ENT_MSK_PREF_BUF_ECC_ERR   0x0000000000000010ULL
+#define  TX_ENT_MSK_NACK_PREF          0x0000000000000008ULL
+#define  TX_ENT_MSK_NACK_PKT_RD                0x0000000000000004ULL
+#define  TX_ENT_MSK_CONF_PART_ERR      0x0000000000000002ULL
+#define  TX_ENT_MSK_PKT_PRT_ERR                0x0000000000000001ULL
+
+#define TX_CS(IDX)                     (DMC + 0x40028UL + (IDX)*0x200UL)
+#define  TX_CS_PKT_CNT                 0x0fff000000000000ULL
+#define  TX_CS_PKT_CNT_SHIFT           48
+#define  TX_CS_LASTMARK                        0x00000fff00000000ULL
+#define  TX_CS_LASTMARK_SHIFT          32
+#define  TX_CS_RST                     0x0000000080000000ULL
+#define  TX_CS_RST_STATE               0x0000000040000000ULL
+#define  TX_CS_MB                      0x0000000020000000ULL
+#define  TX_CS_STOP_N_GO               0x0000000010000000ULL
+#define  TX_CS_SNG_STATE               0x0000000008000000ULL
+#define  TX_CS_MK                      0x0000000000008000ULL
+#define  TX_CS_MMK                     0x0000000000004000ULL
+#define  TX_CS_MBOX_ERR                        0x0000000000000080ULL
+#define  TX_CS_PKT_SIZE_ERR            0x0000000000000040ULL
+#define  TX_CS_TX_RING_OFLOW           0x0000000000000020ULL
+#define  TX_CS_PREF_BUF_PAR_ERR                0x0000000000000010ULL
+#define  TX_CS_NACK_PREF               0x0000000000000008ULL
+#define  TX_CS_NACK_PKT_RD             0x0000000000000004ULL
+#define  TX_CS_CONF_PART_ERR           0x0000000000000002ULL
+#define  TX_CS_PKT_PRT_ERR             0x0000000000000001ULL
+
+#define TXDMA_MBH(IDX)                 (DMC + 0x40030UL + (IDX) * 0x200UL)
+#define  TXDMA_MBH_MBADDR              0x0000000000000fffULL
+
+#define TXDMA_MBL(IDX)                 (DMC + 0x40038UL + (IDX) * 0x200UL)
+#define  TXDMA_MBL_MBADDR              0x00000000ffffffc0ULL
+
+#define TX_DMA_PRE_ST(IDX)             (DMC + 0x40040UL + (IDX) * 0x200UL)
+#define  TX_DMA_PRE_ST_SHADOW_HD       0x000000000007ffffULL
+
+#define TX_RNG_ERR_LOGH(IDX)           (DMC + 0x40048UL + (IDX) * 0x200UL)
+#define  TX_RNG_ERR_LOGH_ERR           0x0000000080000000ULL
+#define  TX_RNG_ERR_LOGH_MERR          0x0000000040000000ULL
+#define  TX_RNG_ERR_LOGH_ERRCODE       0x0000000038000000ULL
+#define  TX_RNG_ERR_LOGH_ERRADDR       0x0000000000000fffULL
+
+#define TX_RNG_ERR_LOGL(IDX)           (DMC + 0x40050UL + (IDX) * 0x200UL)
+#define  TX_RNG_ERR_LOGL_ERRADDR       0x00000000ffffffffULL
+
+#define TDMC_INTR_DBG(IDX)             (DMC + 0x40060UL + (IDX) * 0x200UL) /* same bit layout as TX_ENT_MSK above */
+#define  TDMC_INTR_DBG_MK              0x0000000000008000ULL
+#define  TDMC_INTR_DBG_MBOX_ERR                0x0000000000000080ULL
+#define  TDMC_INTR_DBG_PKT_SIZE_ERR    0x0000000000000040ULL
+#define  TDMC_INTR_DBG_TX_RING_OFLOW   0x0000000000000020ULL
+#define  TDMC_INTR_DBG_PREF_BUF_PAR_ERR        0x0000000000000010ULL
+#define  TDMC_INTR_DBG_NACK_PREF       0x0000000000000008ULL
+#define  TDMC_INTR_DBG_NACK_PKT_RD     0x0000000000000004ULL
+#define  TDMC_INTR_DBG_CONF_PART_ERR   0x0000000000000002ULL
+#define  TDMC_INTR_DBG_PKT_PART_ERR    0x0000000000000001ULL
+
+#define TX_CS_DBG(IDX)                 (DMC + 0x40068UL + (IDX) * 0x200UL)
+#define  TX_CS_DBG_PKT_CNT             0x0fff000000000000ULL
+
+#define TDMC_INJ_PAR_ERR(IDX)          (DMC + 0x45040UL + (IDX) * 0x200UL)
+#define  TDMC_INJ_PAR_ERR_VAL          0x000000000000ffffULL
+
+#define TDMC_DBG_SEL(IDX)              (DMC + 0x45080UL + (IDX) * 0x200UL)
+#define  TDMC_DBG_SEL_DBG_SEL          0x000000000000003fULL
+
+#define TDMC_TRAINING_VECTOR(IDX)      (DMC + 0x45088UL + (IDX) * 0x200UL)
+#define  TDMC_TRAINING_VECTOR_VEC      0x00000000ffffffffULL
+
+
+#define TXC_DMA_MAX(CHAN)              (FZC_TXC + 0x00000UL + (CHAN)*0x1000UL) /* TXC block: 0x1000UL stride per CHAN, 0x100UL stride per PORT below */
+#define TXC_DMA_MAX_LEN(CHAN)          (FZC_TXC + 0x00008UL + (CHAN)*0x1000UL)
+
+#define TXC_CONTROL                    (FZC_TXC + 0x20000UL)
+#define  TXC_CONTROL_ENABLE            0x0000000000000010ULL
+#define  TXC_CONTROL_PORT_ENABLE(X)    (1 << (X))
+
+#define TXC_TRAINING_VEC               (FZC_TXC + 0x20008UL)
+#define  TXC_TRAINING_VEC_MASK         0x00000000ffffffffULL
+
+#define TXC_DEBUG                      (FZC_TXC + 0x20010UL)
+#define  TXC_DEBUG_SELECT              0x000000000000003fULL
+
+#define TXC_MAX_REORDER                        (FZC_TXC + 0x20018UL)
+#define  TXC_MAX_REORDER_PORT3         0x000000000f000000ULL
+#define  TXC_MAX_REORDER_PORT2         0x00000000000f0000ULL
+#define  TXC_MAX_REORDER_PORT1         0x0000000000000f00ULL
+#define  TXC_MAX_REORDER_PORT0         0x000000000000000fULL
+
+#define TXC_PORT_CTL(PORT)             (FZC_TXC + 0x20020UL + (PORT)*0x100UL)
+#define  TXC_PORT_CTL_CLR_ALL_STAT     0x0000000000000001ULL
+
+#define TXC_PKT_STUFFED(PORT)          (FZC_TXC + 0x20030UL + (PORT)*0x100UL)
+#define  TXC_PKT_STUFFED_PP_REORDER    0x00000000ffff0000ULL
+#define  TXC_PKT_STUFFED_PP_PACKETASSY 0x000000000000ffffULL
+
+#define TXC_PKT_XMIT(PORT)             (FZC_TXC + 0x20038UL + (PORT)*0x100UL)
+#define  TXC_PKT_XMIT_BYTES            0x00000000ffff0000ULL
+#define  TXC_PKT_XMIT_PKTS             0x000000000000ffffULL
+
+#define TXC_ROECC_CTL(PORT)            (FZC_TXC + 0x20040UL + (PORT)*0x100UL)
+#define  TXC_ROECC_CTL_DISABLE_UE      0x0000000080000000ULL
+#define  TXC_ROECC_CTL_DBL_BIT_ERR     0x0000000000020000ULL
+#define  TXC_ROECC_CTL_SNGL_BIT_ERR    0x0000000000010000ULL
+#define  TXC_ROECC_CTL_ALL_PKTS                0x0000000000000400ULL
+#define  TXC_ROECC_CTL_ALT_PKTS                0x0000000000000200ULL
+#define  TXC_ROECC_CTL_ONE_PKT_ONLY    0x0000000000000100ULL
+#define  TXC_ROECC_CTL_LST_PKT_LINE    0x0000000000000004ULL
+#define  TXC_ROECC_CTL_2ND_PKT_LINE    0x0000000000000002ULL
+#define  TXC_ROECC_CTL_1ST_PKT_LINE    0x0000000000000001ULL
+
+#define TXC_ROECC_ST(PORT)             (FZC_TXC + 0x20048UL + (PORT)*0x100UL)
+#define  TXC_ROECC_CLR_ST              0x0000000080000000ULL
+#define  TXC_ROECC_CE                  0x0000000000020000ULL
+#define  TXC_ROECC_UE                  0x0000000000010000ULL
+#define  TXC_ROECC_ST_ECC_ADDR         0x00000000000003ffULL
+
+#define TXC_RO_DATA0(PORT)             (FZC_TXC + 0x20050UL + (PORT)*0x100UL)
+#define  TXC_RO_DATA0_DATA0            0x00000000ffffffffULL /* bits 31:0 */
+
+#define TXC_RO_DATA1(PORT)             (FZC_TXC + 0x20058UL + (PORT)*0x100UL)
+#define  TXC_RO_DATA1_DATA1            0x00000000ffffffffULL /* bits 63:32 */
+
+#define TXC_RO_DATA2(PORT)             (FZC_TXC + 0x20060UL + (PORT)*0x100UL)
+#define  TXC_RO_DATA2_DATA2            0x00000000ffffffffULL /* bits 95:64 */
+
+#define TXC_RO_DATA3(PORT)             (FZC_TXC + 0x20068UL + (PORT)*0x100UL)
+#define  TXC_RO_DATA3_DATA3            0x00000000ffffffffULL /* bits 127:96 */
+
+#define TXC_RO_DATA4(PORT)             (FZC_TXC + 0x20070UL + (PORT)*0x100UL)
+#define  TXC_RO_DATA4_DATA4            0x0000000000ffffffULL /* bits 151:128 */
+
+#define TXC_SFECC_CTL(PORT)            (FZC_TXC + 0x20078UL + (PORT)*0x100UL) /* same layout as TXC_ROECC_CTL */
+#define  TXC_SFECC_CTL_DISABLE_UE      0x0000000080000000ULL
+#define  TXC_SFECC_CTL_DBL_BIT_ERR     0x0000000000020000ULL
+#define  TXC_SFECC_CTL_SNGL_BIT_ERR    0x0000000000010000ULL
+#define  TXC_SFECC_CTL_ALL_PKTS                0x0000000000000400ULL
+#define  TXC_SFECC_CTL_ALT_PKTS                0x0000000000000200ULL
+#define  TXC_SFECC_CTL_ONE_PKT_ONLY    0x0000000000000100ULL
+#define  TXC_SFECC_CTL_LST_PKT_LINE    0x0000000000000004ULL
+#define  TXC_SFECC_CTL_2ND_PKT_LINE    0x0000000000000002ULL
+#define  TXC_SFECC_CTL_1ST_PKT_LINE    0x0000000000000001ULL
+
+#define TXC_SFECC_ST(PORT)             (FZC_TXC + 0x20080UL + (PORT)*0x100UL)
+#define  TXC_SFECC_ST_CLR_ST           0x0000000080000000ULL
+#define  TXC_SFECC_ST_CE               0x0000000000020000ULL
+#define  TXC_SFECC_ST_UE               0x0000000000010000ULL
+#define  TXC_SFECC_ST_ECC_ADDR         0x00000000000003ffULL
+
+#define TXC_SF_DATA0(PORT)             (FZC_TXC + 0x20088UL + (PORT)*0x100UL)
+#define  TXC_SF_DATA0_DATA0            0x00000000ffffffffULL /* bits 31:0 */
+
+#define TXC_SF_DATA1(PORT)             (FZC_TXC + 0x20090UL + (PORT)*0x100UL)
+#define  TXC_SF_DATA1_DATA1            0x00000000ffffffffULL /* bits 63:32 */
+
+#define TXC_SF_DATA2(PORT)             (FZC_TXC + 0x20098UL + (PORT)*0x100UL)
+#define  TXC_SF_DATA2_DATA2            0x00000000ffffffffULL /* bits 95:64 */
+
+#define TXC_SF_DATA3(PORT)             (FZC_TXC + 0x200a0UL + (PORT)*0x100UL)
+#define  TXC_SF_DATA3_DATA3            0x00000000ffffffffULL /* bits 127:96 */
+
+#define TXC_SF_DATA4(PORT)             (FZC_TXC + 0x200a8UL + (PORT)*0x100UL)
+#define  TXC_SF_DATA4_DATA4            0x0000000000ffffffULL /* bits 151:128 */
+
+#define TXC_RO_TIDS(PORT)              (FZC_TXC + 0x200b0UL + (PORT)*0x100UL)
+#define  TXC_RO_TIDS_IN_USE            0x00000000ffffffffULL
+
+#define TXC_RO_STATE0(PORT)            (FZC_TXC + 0x200b8UL + (PORT)*0x100UL)
+#define  TXC_RO_STATE0_DUPLICATE_TID   0x00000000ffffffffULL
+
+#define TXC_RO_STATE1(PORT)            (FZC_TXC + 0x200c0UL + (PORT)*0x100UL)
+#define  TXC_RO_STATE1_UNUSED_TID      0x00000000ffffffffULL
+
+#define TXC_RO_STATE2(PORT)            (FZC_TXC + 0x200c8UL + (PORT)*0x100UL)
+#define  TXC_RO_STATE2_TRANS_TIMEOUT   0x00000000ffffffffULL
+
+#define TXC_RO_STATE3(PORT)            (FZC_TXC + 0x200d0UL + (PORT)*0x100UL)
+#define  TXC_RO_STATE3_ENAB_SPC_WMARK  0x0000000080000000ULL
+#define  TXC_RO_STATE3_RO_SPC_WMARK    0x000000007fe00000ULL
+#define  TXC_RO_STATE3_ROFIFO_SPC_AVAIL        0x00000000001ff800ULL
+#define  TXC_RO_STATE3_ENAB_RO_WMARK   0x0000000000000100ULL
+#define  TXC_RO_STATE3_HIGH_RO_USED    0x00000000000000f0ULL
+#define  TXC_RO_STATE3_NUM_RO_USED     0x000000000000000fULL
+
+#define TXC_RO_CTL(PORT)               (FZC_TXC + 0x200d8UL + (PORT)*0x100UL)
+#define  TXC_RO_CTL_CLR_FAIL_STATE     0x0000000080000000ULL
+#define  TXC_RO_CTL_RO_ADDR            0x000000000f000000ULL
+#define  TXC_RO_CTL_ADDR_FAILED                0x0000000000400000ULL
+#define  TXC_RO_CTL_DMA_FAILED         0x0000000000200000ULL
+#define  TXC_RO_CTL_LEN_FAILED         0x0000000000100000ULL
+#define  TXC_RO_CTL_CAPT_ADDR_FAILED   0x0000000000040000ULL
+#define  TXC_RO_CTL_CAPT_DMA_FAILED    0x0000000000020000ULL
+#define  TXC_RO_CTL_CAPT_LEN_FAILED    0x0000000000010000ULL
+#define  TXC_RO_CTL_RO_STATE_RD_DONE   0x0000000000000080ULL
+#define  TXC_RO_CTL_RO_STATE_WR_DONE   0x0000000000000040ULL
+#define  TXC_RO_CTL_RO_STATE_RD                0x0000000000000020ULL
+#define  TXC_RO_CTL_RO_STATE_WR                0x0000000000000010ULL
+#define  TXC_RO_CTL_RO_STATE_ADDR      0x000000000000000fULL
+
+#define TXC_RO_ST_DATA0(PORT)          (FZC_TXC + 0x200e0UL + (PORT)*0x100UL)
+#define  TXC_RO_ST_DATA0_DATA0         0x00000000ffffffffULL
+
+#define TXC_RO_ST_DATA1(PORT)          (FZC_TXC + 0x200e8UL + (PORT)*0x100UL)
+#define  TXC_RO_ST_DATA1_DATA1         0x00000000ffffffffULL
+
+#define TXC_RO_ST_DATA2(PORT)          (FZC_TXC + 0x200f0UL + (PORT)*0x100UL)
+#define  TXC_RO_ST_DATA2_DATA2         0x00000000ffffffffULL
+
+#define TXC_RO_ST_DATA3(PORT)          (FZC_TXC + 0x200f8UL + (PORT)*0x100UL)
+#define  TXC_RO_ST_DATA3_DATA3         0x00000000ffffffffULL
+
+#define TXC_PORT_PACKET_REQ(PORT)      (FZC_TXC + 0x20100UL + (PORT)*0x100UL)
+#define  TXC_PORT_PACKET_REQ_GATHER_REQ        0x00000000f0000000ULL
+#define  TXC_PORT_PACKET_REQ_PKT_REQ   0x000000000fff0000ULL
+#define  TXC_PORT_PACKET_REQ_PERR_ABRT 0x000000000000ffffULL
+
+       /* bits are same as TXC_INT_STAT */
+#define TXC_INT_STAT_DBG               (FZC_TXC + 0x20420UL)
+
+#define TXC_INT_STAT                   (FZC_TXC + 0x20428UL)
+#define  TXC_INT_STAT_VAL_SHIFT(PORT)  ((PORT) * 8) /* 8 bits reserved per port, low 6 used */
+#define  TXC_INT_STAT_VAL(PORT)                (0x3f << TXC_INT_STAT_VAL_SHIFT(PORT))
+#define  TXC_INT_STAT_SF_CE(PORT)      (0x01 << TXC_INT_STAT_VAL_SHIFT(PORT))
+#define  TXC_INT_STAT_SF_UE(PORT)      (0x02 << TXC_INT_STAT_VAL_SHIFT(PORT))
+#define  TXC_INT_STAT_RO_CE(PORT)      (0x04 << TXC_INT_STAT_VAL_SHIFT(PORT))
+#define  TXC_INT_STAT_RO_UE(PORT)      (0x08 << TXC_INT_STAT_VAL_SHIFT(PORT))
+#define  TXC_INT_STAT_REORDER_ERR(PORT)        (0x10 << TXC_INT_STAT_VAL_SHIFT(PORT))
+#define  TXC_INT_STAT_PKTASM_DEAD(PORT)        (0x20 << TXC_INT_STAT_VAL_SHIFT(PORT))
+
+#define TXC_INT_MASK                   (FZC_TXC + 0x20430UL)
+#define  TXC_INT_MASK_VAL_SHIFT(PORT)  ((PORT) * 8)
+#define  TXC_INT_MASK_VAL(PORT)                (0x3f << TXC_INT_MASK_VAL_SHIFT(PORT)) /* was TXC_INT_STAT_VAL_SHIFT; identical value, but use this register's own shift macro */
+
+#define TXC_INT_MASK_SF_CE             0x01 /* per-port bit values, mirroring the TXC_INT_STAT_* bits above */
+#define TXC_INT_MASK_SF_UE             0x02
+#define TXC_INT_MASK_RO_CE             0x04
+#define TXC_INT_MASK_RO_UE             0x08
+#define TXC_INT_MASK_REORDER_ERR       0x10
+#define TXC_INT_MASK_PKTASM_DEAD       0x20
+#define TXC_INT_MASK_ALL               0x3f
+
+#define TXC_PORT_DMA(IDX)              (FZC_TXC + 0x20028UL + (IDX)*0x100UL)
+
+#define ESPC_PIO_EN                    (FZC_PROM + 0x40000UL) /* EEPROM/PROM access registers (base FZC_PROM) */
+#define  ESPC_PIO_EN_ENABLE            0x0000000000000001ULL
+
+#define ESPC_PIO_STAT                  (FZC_PROM + 0x40008UL)
+#define  ESPC_PIO_STAT_READ_START      0x0000000080000000ULL
+#define  ESPC_PIO_STAT_READ_END                0x0000000040000000ULL
+#define  ESPC_PIO_STAT_WRITE_INIT      0x0000000020000000ULL
+#define  ESPC_PIO_STAT_WRITE_END       0x0000000010000000ULL
+#define  ESPC_PIO_STAT_ADDR            0x0000000003ffff00ULL
+#define  ESPC_PIO_STAT_ADDR_SHIFT      8
+#define  ESPC_PIO_STAT_DATA            0x00000000000000ffULL
+#define  ESPC_PIO_STAT_DATA_SHIFT      0
+
+#define ESPC_NCR(IDX)                  (FZC_PROM + 0x40020UL + (IDX)*0x8UL)
+#define  ESPC_NCR_VAL                  0x00000000ffffffffULL
+
+#define ESPC_MAC_ADDR0                 ESPC_NCR(0) /* aliases into the ESPC_NCR array; note ESPC_NCR(3) is unassigned here */
+#define ESPC_MAC_ADDR1                 ESPC_NCR(1)
+#define ESPC_NUM_PORTS_MACS            ESPC_NCR(2)
+#define  ESPC_NUM_PORTS_MACS_VAL       0x00000000000000ffULL
+#define ESPC_MOD_STR_LEN               ESPC_NCR(4)
+#define ESPC_MOD_STR_1                 ESPC_NCR(5)
+#define ESPC_MOD_STR_2                 ESPC_NCR(6)
+#define ESPC_MOD_STR_3                 ESPC_NCR(7)
+#define ESPC_MOD_STR_4                 ESPC_NCR(8)
+#define ESPC_MOD_STR_5                 ESPC_NCR(9)
+#define ESPC_MOD_STR_6                 ESPC_NCR(10)
+#define ESPC_MOD_STR_7                 ESPC_NCR(11)
+#define ESPC_MOD_STR_8                 ESPC_NCR(12)
+#define ESPC_BD_MOD_STR_LEN            ESPC_NCR(13)
+#define ESPC_BD_MOD_STR_1              ESPC_NCR(14)
+#define ESPC_BD_MOD_STR_2              ESPC_NCR(15)
+#define ESPC_BD_MOD_STR_3              ESPC_NCR(16)
+#define ESPC_BD_MOD_STR_4              ESPC_NCR(17)
+
+#define ESPC_PHY_TYPE                  ESPC_NCR(18)
+#define  ESPC_PHY_TYPE_PORT0           0x00000000ff000000ULL
+#define  ESPC_PHY_TYPE_PORT0_SHIFT     24
+#define  ESPC_PHY_TYPE_PORT1           0x0000000000ff0000ULL
+#define  ESPC_PHY_TYPE_PORT1_SHIFT     16
+#define  ESPC_PHY_TYPE_PORT2           0x000000000000ff00ULL
+#define  ESPC_PHY_TYPE_PORT2_SHIFT     8
+#define  ESPC_PHY_TYPE_PORT3           0x00000000000000ffULL
+#define  ESPC_PHY_TYPE_PORT3_SHIFT     0
+
+#define  ESPC_PHY_TYPE_1G_COPPER       3 /* values for the per-port PHY type fields above */
+#define  ESPC_PHY_TYPE_1G_FIBER                2
+#define  ESPC_PHY_TYPE_10G_COPPER      1
+#define  ESPC_PHY_TYPE_10G_FIBER       0
+
+#define ESPC_MAX_FM_SZ                 ESPC_NCR(19)
+
+#define ESPC_INTR_NUM                  ESPC_NCR(20)
+#define  ESPC_INTR_NUM_PORT0           0x00000000ff000000ULL
+#define  ESPC_INTR_NUM_PORT1           0x0000000000ff0000ULL
+#define  ESPC_INTR_NUM_PORT2           0x000000000000ff00ULL
+#define  ESPC_INTR_NUM_PORT3           0x00000000000000ffULL
+
+#define ESPC_VER_IMGSZ                 ESPC_NCR(21)
+#define  ESPC_VER_IMGSZ_IMGSZ          0x00000000ffff0000ULL
+#define  ESPC_VER_IMGSZ_IMGSZ_SHIFT    16
+#define  ESPC_VER_IMGSZ_VER            0x000000000000ffffULL
+#define  ESPC_VER_IMGSZ_VER_SHIFT      0
+
+#define ESPC_CHKSUM                    ESPC_NCR(22)
+#define  ESPC_CHKSUM_SUM               0x00000000000000ffULL
+
+#define ESPC_EEPROM_SIZE               0x100000
+
+
+#define CLASS_CODE_UNRECOG             0x00 /* packet class codes: ethertype, user-programmable, L4-over-IPv4/IPv6, ARP/RARP */
+#define CLASS_CODE_DUMMY1              0x01
+#define CLASS_CODE_ETHERTYPE1          0x02
+#define CLASS_CODE_ETHERTYPE2          0x03
+#define CLASS_CODE_USER_PROG1          0x04
+#define CLASS_CODE_USER_PROG2          0x05
+#define CLASS_CODE_USER_PROG3          0x06
+#define CLASS_CODE_USER_PROG4          0x07
+#define CLASS_CODE_TCP_IPV4            0x08
+#define CLASS_CODE_UDP_IPV4            0x09
+#define CLASS_CODE_AH_ESP_IPV4         0x0a
+#define CLASS_CODE_SCTP_IPV4           0x0b
+#define CLASS_CODE_TCP_IPV6            0x0c
+#define CLASS_CODE_UDP_IPV6            0x0d
+#define CLASS_CODE_AH_ESP_IPV6         0x0e
+#define CLASS_CODE_SCTP_IPV6           0x0f
+#define CLASS_CODE_ARP                 0x10
+#define CLASS_CODE_RARP                        0x11
+#define CLASS_CODE_DUMMY2              0x12 /* 0x12-0x1f reserved/unused placeholders */
+#define CLASS_CODE_DUMMY3              0x13
+#define CLASS_CODE_DUMMY4              0x14
+#define CLASS_CODE_DUMMY5              0x15
+#define CLASS_CODE_DUMMY6              0x16
+#define CLASS_CODE_DUMMY7              0x17
+#define CLASS_CODE_DUMMY8              0x18
+#define CLASS_CODE_DUMMY9              0x19
+#define CLASS_CODE_DUMMY10             0x1a
+#define CLASS_CODE_DUMMY11             0x1b
+#define CLASS_CODE_DUMMY12             0x1c
+#define CLASS_CODE_DUMMY13             0x1d
+#define CLASS_CODE_DUMMY14             0x1e
+#define CLASS_CODE_DUMMY15             0x1f
+
+
+/* Logical devices and device groups */
+#define LDN_RXDMA(CHAN)                        (0 + (CHAN)) /* LDN layout: 0+ RXDMA, 16+ reserved, 32+ TXDMA, 56+ reserved, 63 MIF, 64+ MAC, 68 device error */
+#define LDN_RESV1(OFF)                 (16 + (OFF))
+#define LDN_TXDMA(CHAN)                        (32 + (CHAN))
+#define LDN_RESV2(OFF)                 (56 + (OFF))
+#define LDN_MIF                                63
+#define LDN_MAC(PORT)                  (64 + (PORT))
+#define LDN_DEVICE_ERROR               68
+#define LDN_MAX                                LDN_DEVICE_ERROR
+
+#define NIU_LDG_MIN                    0
+#define NIU_LDG_MAX                    63
+#define NIU_NUM_LDG                    64
+#define LDG_INVALID                    0xff /* sentinel: logical device not assigned to any group */
+
+/* PHY stuff */
+#define NIU_PMA_PMD_DEV_ADDR           1
+#define NIU_PCS_DEV_ADDR               3
+
+#define NIU_PHY_ID_MASK                        0xfffff0f0 /* masks out two ID nibbles when matching — presumably revision bits; confirm against PHY datasheets */
+#define NIU_PHY_ID_BCM8704             0x00206030
+#define NIU_PHY_ID_BCM8706             0x00206035
+#define NIU_PHY_ID_BCM5464R            0x002060b0
+#define NIU_PHY_ID_MRVL88X2011         0x01410020
+
+/* MRVL88X2011 register addresses */
+#define MRVL88X2011_USER_DEV1_ADDR     1
+#define MRVL88X2011_USER_DEV2_ADDR     2
+#define MRVL88X2011_USER_DEV3_ADDR     3
+#define MRVL88X2011_USER_DEV4_ADDR     4
+#define MRVL88X2011_PMA_PMD_CTL_1      0x0000
+#define MRVL88X2011_PMA_PMD_STATUS_1   0x0001
+#define MRVL88X2011_10G_PMD_STATUS_2   0x0008
+#define MRVL88X2011_10G_PMD_TX_DIS     0x0009
+#define MRVL88X2011_10G_XGXS_LANE_STAT 0x0018
+#define MRVL88X2011_GENERAL_CTL                0x8300
+#define MRVL88X2011_LED_BLINK_CTL      0x8303
+#define MRVL88X2011_LED_8_TO_11_CTL    0x8306
+
+/* MRVL88X2011 register control */
+#define MRVL88X2011_ENA_XFPREFCLK      0x0001
+#define MRVL88X2011_ENA_PMDTX          0x0000
+#define MRVL88X2011_LOOPBACK            0x1
+#define MRVL88X2011_LED_ACT            0x1
+#define MRVL88X2011_LNK_STATUS_OK      0x4
+#define MRVL88X2011_LED_BLKRATE_MASK   0x70
+#define MRVL88X2011_LED_BLKRATE_034MS  0x0
+#define MRVL88X2011_LED_BLKRATE_067MS  0x1
+#define MRVL88X2011_LED_BLKRATE_134MS  0x2
+#define MRVL88X2011_LED_BLKRATE_269MS  0x3
+#define MRVL88X2011_LED_BLKRATE_538MS  0x4
+#define MRVL88X2011_LED_CTL_OFF                0x0
+#define MRVL88X2011_LED_CTL_PCS_ACT    0x5
+#define MRVL88X2011_LED_CTL_MASK       0x7
+#define MRVL88X2011_LED(n,v)           ((v)<<((n)*4)) /* 4 control bits per LED n */
+#define MRVL88X2011_LED_STAT(n,v)      ((v)>>((n)*4))
+
+#define BCM8704_PMA_PMD_DEV_ADDR       1
+#define BCM8704_PCS_DEV_ADDR           2
+#define BCM8704_USER_DEV3_ADDR         3
+#define BCM8704_PHYXS_DEV_ADDR         4
+#define BCM8704_USER_DEV4_ADDR         4
+
+#define BCM8704_PMD_RCV_SIGDET         0x000a
+#define  PMD_RCV_SIGDET_LANE3          0x0010
+#define  PMD_RCV_SIGDET_LANE2          0x0008
+#define  PMD_RCV_SIGDET_LANE1          0x0004
+#define  PMD_RCV_SIGDET_LANE0          0x0002
+#define  PMD_RCV_SIGDET_GLOBAL         0x0001
+
+#define BCM8704_PCS_10G_R_STATUS       0x0020
+#define  PCS_10G_R_STATUS_LINKSTAT     0x1000
+#define  PCS_10G_R_STATUS_PRBS31_ABLE  0x0004
+#define  PCS_10G_R_STATUS_HI_BER       0x0002
+#define  PCS_10G_R_STATUS_BLK_LOCK     0x0001
+
+#define BCM8704_USER_CONTROL           0xc800
+#define  USER_CONTROL_OPTXENB_LVL      0x8000
+#define  USER_CONTROL_OPTXRST_LVL      0x4000
+#define  USER_CONTROL_OPBIASFLT_LVL    0x2000
+#define  USER_CONTROL_OBTMPFLT_LVL     0x1000
+#define  USER_CONTROL_OPPRFLT_LVL      0x0800
+#define  USER_CONTROL_OPTXFLT_LVL      0x0400
+#define  USER_CONTROL_OPRXLOS_LVL      0x0200
+#define  USER_CONTROL_OPRXFLT_LVL      0x0100
+#define  USER_CONTROL_OPTXON_LVL       0x0080
+#define  USER_CONTROL_RES1             0x007f
+#define  USER_CONTROL_RES1_SHIFT       0
+
+#define BCM8704_USER_ANALOG_CLK                0xc801
+#define BCM8704_USER_PMD_RX_CONTROL    0xc802
+
+#define BCM8704_USER_PMD_TX_CONTROL    0xc803
+#define  USER_PMD_TX_CTL_RES1          0xfe00
+#define  USER_PMD_TX_CTL_XFP_CLKEN     0x0100
+#define  USER_PMD_TX_CTL_TX_DAC_TXD    0x00c0
+#define  USER_PMD_TX_CTL_TX_DAC_TXD_SH 6
+#define  USER_PMD_TX_CTL_TX_DAC_TXCK   0x0030
+#define  USER_PMD_TX_CTL_TX_DAC_TXCK_SH        4
+#define  USER_PMD_TX_CTL_TSD_LPWREN    0x0008
+#define  USER_PMD_TX_CTL_TSCK_LPWREN   0x0004
+#define  USER_PMD_TX_CTL_CMU_LPWREN    0x0002
+#define  USER_PMD_TX_CTL_SFIFORST      0x0001
+
+#define BCM8704_USER_ANALOG_STATUS0    0xc804
+#define BCM8704_USER_OPT_DIGITAL_CTRL  0xc808
+#define BCM8704_USER_TX_ALARM_STATUS   0x9004
+
+#define  USER_ODIG_CTRL_FMODE          0x8000
+#define  USER_ODIG_CTRL_TX_PDOWN       0x4000
+#define  USER_ODIG_CTRL_RX_PDOWN       0x2000
+#define  USER_ODIG_CTRL_EFILT_EN       0x1000
+#define  USER_ODIG_CTRL_OPT_RST                0x0800
+#define  USER_ODIG_CTRL_PCS_TIB                0x0400
+#define  USER_ODIG_CTRL_PCS_RI         0x0200
+#define  USER_ODIG_CTRL_RESV1          0x0180
+#define  USER_ODIG_CTRL_GPIOS          0x0060
+#define  USER_ODIG_CTRL_GPIOS_SHIFT    5
+#define  USER_ODIG_CTRL_RESV2          0x0010
+#define  USER_ODIG_CTRL_LB_ERR_DIS     0x0008
+#define  USER_ODIG_CTRL_RESV3          0x0006
+#define  USER_ODIG_CTRL_TXONOFF_PD_DIS 0x0001
+
+#define BCM8704_PHYXS_XGXS_LANE_STAT   0x0018
+#define  PHYXS_XGXS_LANE_STAT_ALINGED  0x1000
+#define  PHYXS_XGXS_LANE_STAT_PATTEST  0x0800
+#define  PHYXS_XGXS_LANE_STAT_MAGIC    0x0400
+#define  PHYXS_XGXS_LANE_STAT_LANE3    0x0008
+#define  PHYXS_XGXS_LANE_STAT_LANE2    0x0004
+#define  PHYXS_XGXS_LANE_STAT_LANE1    0x0002
+#define  PHYXS_XGXS_LANE_STAT_LANE0    0x0001
+
+#define BCM5464R_AUX_CTL               24
+#define  BCM5464R_AUX_CTL_EXT_LB       0x8000
+#define  BCM5464R_AUX_CTL_EXT_PLEN     0x4000
+#define  BCM5464R_AUX_CTL_ER1000       0x3000
+#define  BCM5464R_AUX_CTL_ER1000_SHIFT 12
+#define  BCM5464R_AUX_CTL_RESV1                0x0800
+#define  BCM5464R_AUX_CTL_WRITE_1      0x0400
+#define  BCM5464R_AUX_CTL_RESV2                0x0300
+#define  BCM5464R_AUX_CTL_PRESP_DIS    0x0080
+#define  BCM5464R_AUX_CTL_RESV3                0x0040
+#define  BCM5464R_AUX_CTL_ER100                0x0030
+#define  BCM5464R_AUX_CTL_ER100_SHIFT  4
+#define  BCM5464R_AUX_CTL_DIAG_MODE    0x0008
+#define  BCM5464R_AUX_CTL_SR_SEL       0x0007
+#define  BCM5464R_AUX_CTL_SR_SEL_SHIFT 0
+
+#define  BCM5464R_CTRL1000_AS_MASTER           0x0800
+#define  BCM5464R_CTRL1000_ENABLE_AS_MASTER    0x1000
+
+#define RCR_ENTRY_MULTI                        0x8000000000000000ULL
+#define RCR_ENTRY_PKT_TYPE             0x6000000000000000ULL
+#define RCR_ENTRY_PKT_TYPE_SHIFT       61
+#define RCR_ENTRY_ZERO_COPY            0x1000000000000000ULL
+#define RCR_ENTRY_NOPORT               0x0800000000000000ULL
+#define RCR_ENTRY_PROMISC              0x0400000000000000ULL
+#define RCR_ENTRY_ERROR                        0x0380000000000000ULL
+#define RCR_ENTRY_DCF_ERR              0x0040000000000000ULL
+#define RCR_ENTRY_L2_LEN               0x003fff0000000000ULL
+#define RCR_ENTRY_L2_LEN_SHIFT         40
+#define RCR_ENTRY_PKTBUFSZ             0x000000c000000000ULL
+#define RCR_ENTRY_PKTBUFSZ_SHIFT       38
+#define RCR_ENTRY_PKT_BUF_ADDR         0x0000003fffffffffULL /* bits 43:6 */
+#define RCR_ENTRY_PKT_BUF_ADDR_SHIFT   6
+
+#define RCR_PKT_TYPE_OTHER             0x0
+#define RCR_PKT_TYPE_TCP               0x1
+#define RCR_PKT_TYPE_UDP               0x2
+#define RCR_PKT_TYPE_SCTP              0x3
+
+#define NIU_RXPULL_MAX                 ETH_HLEN
+
+/* First two bytes of the hardware receive packet header: input
+ * port, MAC-check result and classifier code, plus per-packet flag
+ * bits.  Bitfield order depends on CPU endianness.
+ */
+struct rx_pkt_hdr0 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       u8      inputport:2,
+               maccheck:1,
+               class:5;
+       u8      vlan:1,
+               llcsnap:1,
+               noport:1,
+               badip:1,
+               tcamhit:1,
+               tres:2,
+               tzfvld:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+       u8      class:5,
+               maccheck:1,
+               inputport:2;
+       u8      tzfvld:1,
+               tres:2,
+               tcamhit:1,
+               badip:1,
+               noport:1,
+               llcsnap:1,
+               vlan:1;
+#endif
+};
+
+/* Remainder of the receive packet header: TCAM match index, hash
+ * results, zero-copy flow ID and 40 bits of user data.  Multi-byte
+ * values are split across fields as noted on each member.
+ */
+struct rx_pkt_hdr1 {
+       u8      hwrsvd1;
+       u8      tcammatch;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       u8      hwrsvd2:2,
+               hashit:1,
+               exact:1,
+               hzfvld:1,
+               hashsidx:3;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+       u8      hashsidx:3,
+               hzfvld:1,
+               exact:1,
+               hashit:1,
+               hwrsvd2:2;
+#endif
+       u8      zcrsvd;
+
+       /* Bits 11:8 of zero copy flow ID.  */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       u8      hwrsvd3:4, zflowid0:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+       u8      zflowid0:4, hwrsvd3:4;
+#endif
+
+       /* Bits 7:0 of zero copy flow ID.  */
+       u8      zflowid1;
+
+       /* Bits 15:8 of hash value, H2.  */
+       u8      hashval2_0;
+
+       /* Bits 7:0 of hash value, H2.  */
+       u8      hashval2_1;
+
+       /* Bits 19:16 of hash value, H1.  */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       u8      hwrsvd4:4, hashval1_0:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+       u8      hashval1_0:4, hwrsvd4:4;
+#endif
+
+       /* Bits 15:8 of hash value, H1.  */
+       u8      hashval1_1;
+
+       /* Bits 7:0 of hash value, H1.  */
+       u8      hashval1_2;
+
+       u8      hwrsvd5;
+       u8      hwrsvd6;
+
+       u8      usrdata_0;      /* Bits 39:32 of user data.  */
+       u8      usrdata_1;      /* Bits 31:24 of user data.  */
+       u8      usrdata_2;      /* Bits 23:16 of user data.  */
+       u8      usrdata_3;      /* Bits 15:8 of user data.  */
+       u8      usrdata_4;      /* Bits 7:0 of user data.  */
+};
+
+/* CPU-endian snapshot of the TXDMA channel state and error-log
+ * registers (compare the little-endian txdma_mailbox layout below).
+ */
+struct tx_dma_mbox {
+       u64     tx_dma_pre_st;
+       u64     tx_cs;
+       u64     tx_ring_kick;
+       u64     tx_ring_hdl;
+       u64     resv1;
+       u32     tx_rng_err_logl;
+       u32     tx_rng_err_logh;
+       u64     resv2;
+       u64     resv3;
+};
+
+/* Internal header prepended to each transmitted packet.  'flags'
+ * (little-endian) packs the pad, packet length, L3/L4 offsets and
+ * the checksum type to compute, per the masks below.
+ */
+struct tx_pkt_hdr {
+       __le64  flags;
+#define TXHDR_PAD              0x0000000000000007ULL
+#define  TXHDR_PAD_SHIFT       0
+#define TXHDR_LEN              0x000000003fff0000ULL
+#define  TXHDR_LEN_SHIFT       16
+#define TXHDR_L4STUFF          0x0000003f00000000ULL
+#define  TXHDR_L4STUFF_SHIFT   32
+#define TXHDR_L4START          0x00003f0000000000ULL
+#define  TXHDR_L4START_SHIFT   40
+#define TXHDR_L3START          0x000f000000000000ULL
+#define  TXHDR_L3START_SHIFT   48
+#define TXHDR_IHL              0x00f0000000000000ULL
+#define  TXHDR_IHL_SHIFT       52
+#define TXHDR_VLAN             0x0100000000000000ULL
+#define TXHDR_LLC              0x0200000000000000ULL
+#define TXHDR_IP_VER           0x2000000000000000ULL
+#define TXHDR_CSUM_NONE                0x0000000000000000ULL
+#define TXHDR_CSUM_TCP         0x4000000000000000ULL
+#define TXHDR_CSUM_UDP         0x8000000000000000ULL
+#define TXHDR_CSUM_SCTP                0xc000000000000000ULL
+       __le64  resv;
+};
+
+/* TX descriptor word layout: start-of-packet, mark, pointer count,
+ * transfer length and 44-bit buffer address.
+ */
+#define TX_DESC_SOP            0x8000000000000000ULL
+#define TX_DESC_MARK           0x4000000000000000ULL
+#define TX_DESC_NUM_PTR                0x3c00000000000000ULL
+#define TX_DESC_NUM_PTR_SHIFT  58
+#define TX_DESC_TR_LEN         0x01fff00000000000ULL
+#define TX_DESC_TR_LEN_SHIFT   44
+#define TX_DESC_SAD            0x00000fffffffffffULL
+#define TX_DESC_SAD_SHIFT      0
+
+/* Per-descriptor software bookkeeping: the skb and its DMA mapping. */
+struct tx_buff_info {
+       struct sk_buff *skb;
+       u64 mapping;
+};
+
+/* TXDMA mailbox image; little-endian fields, 64-byte aligned.
+ * NOTE(review): presumably written back by the chip via DMA (it
+ * mirrors the TXDMA state registers) — confirm against niu.c users.
+ */
+struct txdma_mailbox {
+       __le64  tx_dma_pre_st;
+       __le64  tx_cs;
+       __le64  tx_ring_kick;
+       __le64  tx_ring_hdl;
+       __le64  resv1;
+       __le32  tx_rng_err_logl;
+       __le32  tx_rng_err_logh;
+       __le64  resv2[2];
+} __attribute__((aligned(64)));
+
+#define MAX_TX_RING_SIZE       256
+#define MAX_TX_DESC_LEN                4076
+
+/* Software state for one TX channel: per-slot skb bookkeeping,
+ * producer/consumer indexes, MARK-bit pacing counters, mailbox and
+ * descriptor ring pointers (with their DMA addresses), and
+ * packet/byte/error accumulators.
+ */
+struct tx_ring_info {
+       struct tx_buff_info     tx_buffs[MAX_TX_RING_SIZE];
+       struct niu              *np;
+       u64                     tx_cs;
+       int                     pending;
+       int                     prod;
+       int                     cons;
+       int                     wrap_bit;
+       u16                     last_pkt_cnt;
+       u16                     tx_channel;
+       u16                     mark_counter;
+       u16                     mark_freq;
+       u16                     mark_pending;
+       u16                     __pad;
+       struct txdma_mailbox    *mbox;
+       __le64                  *descr;
+
+       u64                     tx_packets;
+       u64                     tx_bytes;
+       u64                     tx_errors;
+
+       u64                     mbox_dma;
+       u64                     descr_dma;
+       int                     max_burst;
+};
+
+#define NEXT_TX(tp, index) \
+       (((index) + 1) < (tp)->pending ? ((index) + 1) : 0)
+
+/* Number of free TX descriptors; the prod/cons distance is taken
+ * modulo MAX_TX_RING_SIZE (ring size is a power of two).
+ */
+static inline u32 niu_tx_avail(struct tx_ring_info *tp)
+{
+       return (tp->pending -
+               ((tp->prod - tp->cons) & (MAX_TX_RING_SIZE - 1)));
+}
+
+/* RXDMA mailbox image (control/status, RBR and RCR state);
+ * little-endian fields, 64-byte aligned.
+ */
+struct rxdma_mailbox {
+       __le64  rx_dma_ctl_stat;
+       __le64  rbr_stat;
+       __le32  rbr_hdl;
+       __le32  rbr_hdh;
+       __le64  resv1;
+       __le32  rcrstat_c;
+       __le32  rcrstat_b;
+       __le64  rcrstat_a;
+       __le64  resv2[2];
+} __attribute__((aligned(64)));
+
+#define MAX_RBR_RING_SIZE      128
+#define MAX_RCR_RING_SIZE      (MAX_RBR_RING_SIZE * 2)
+
+#define RBR_REFILL_MIN         16
+
+#define RX_SKB_ALLOC_SIZE      (128 + NET_IP_ALIGN)    /* parenthesized so the macro is safe inside expressions */
+
+/* Software state for one RX channel: RBR (buffer ring) and RCR
+ * (completion ring) indexes, sizes and DMA addresses, the page
+ * table used for buffer lookup ('rxhash'), WRED thresholds,
+ * interrupt-mitigation settings and packet/byte/error counters.
+ */
+struct rx_ring_info {
+       struct niu              *np;
+       int                     rx_channel;
+       u16                     rbr_block_size;
+       u16                     rbr_blocks_per_page;
+       u16                     rbr_sizes[4];
+       unsigned int            rcr_index;
+       unsigned int            rcr_table_size;
+       unsigned int            rbr_index;
+       unsigned int            rbr_pending;
+       unsigned int            rbr_refill_pending;
+       unsigned int            rbr_kick_thresh;
+       unsigned int            rbr_table_size;
+       struct page             **rxhash;
+       struct rxdma_mailbox    *mbox;
+       __le64                  *rcr;
+       __le32                  *rbr;
+#define RBR_DESCR_ADDR_SHIFT   12
+
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       u64                     rx_dropped;
+       u64                     rx_errors;
+
+       u64                     mbox_dma;
+       u64                     rcr_dma;
+       u64                     rbr_dma;
+
+       /* WRED */
+       int                     nonsyn_window;
+       int                     nonsyn_threshold;
+       int                     syn_window;
+       int                     syn_threshold;
+
+       /* interrupt mitigation */
+       int                     rcr_pkt_threshold;
+       int                     rcr_timeout;
+};
+
+/* Ring index advance helpers; both rings wrap at their table size. */
+#define NEXT_RCR(rp, index) \
+       (((index) + 1) < (rp)->rcr_table_size ? ((index) + 1) : 0)
+#define NEXT_RBR(rp, index) \
+       (((index) + 1) < (rp)->rbr_table_size ? ((index) + 1) : 0)
+
+#define NIU_MAX_PORTS          4
+#define NIU_NUM_RXCHAN         16
+#define NIU_NUM_TXCHAN         24
+#define MAC_NUM_HASH           16
+
+#define NIU_MAX_MTU            9216
+
+/* VPD strings */
+#define        NIU_QGC_LP_BM_STR       "501-7606"
+#define        NIU_2XGF_LP_BM_STR      "501-7283"
+#define        NIU_QGC_PEM_BM_STR      "501-7765"
+#define        NIU_2XGF_PEM_BM_STR     "501-7626"
+#define        NIU_ALONSO_BM_STR       "373-0202"
+#define        NIU_FOXXY_BM_STR        "501-7961"
+#define        NIU_2XGF_MRVL_BM_STR    "SK-6E82"
+#define        NIU_QGC_LP_MDL_STR      "SUNW,pcie-qgc"
+#define        NIU_2XGF_LP_MDL_STR     "SUNW,pcie-2xgf"
+#define        NIU_QGC_PEM_MDL_STR     "SUNW,pcie-qgc-pem"
+#define        NIU_2XGF_PEM_MDL_STR    "SUNW,pcie-2xgf-pem"
+#define        NIU_ALONSO_MDL_STR      "SUNW,CP3220"
+#define        NIU_KIMI_MDL_STR        "SUNW,CP3260"
+#define        NIU_MARAMBA_MDL_STR     "SUNW,pcie-neptune"
+#define        NIU_FOXXY_MDL_STR       "SUNW,pcie-rfem"
+#define        NIU_2XGF_MRVL_MDL_STR   "SysKonnect,pcie-2xgf"
+
+#define NIU_VPD_MIN_MAJOR      3
+#define NIU_VPD_MIN_MINOR      4
+
+#define NIU_VPD_MODEL_MAX      32
+#define NIU_VPD_BD_MODEL_MAX   16
+#define NIU_VPD_VERSION_MAX    64
+#define NIU_VPD_PHY_TYPE_MAX   8
+
+/* Fields parsed from the card's VPD: model/board/version strings,
+ * PHY type string, MAC count, local MAC address and the fcode
+ * version (checked against NIU_VPD_MIN_MAJOR/MINOR).
+ */
+struct niu_vpd {
+       char                    model[NIU_VPD_MODEL_MAX];
+       char                    board_model[NIU_VPD_BD_MODEL_MAX];
+       char                    version[NIU_VPD_VERSION_MAX];
+       char                    phy_type[NIU_VPD_PHY_TYPE_MAX];
+       u8                      mac_num;
+       u8                      __pad;
+       u8                      local_mac[6];
+       int                     fcode_major;
+       int                     fcode_minor;
+};
+
+/* Alternate-MAC-address to RDC table mapping entry. */
+struct niu_altmac_rdc {
+       u8                      alt_mac_num;
+       u8                      rdc_num;
+       u8                      mac_pref;
+};
+
+/* VLAN to RDC table mapping entry. */
+struct niu_vlan_rdc {
+       u8                      rdc_num;
+       u8                      vlan_pref;
+};
+
+/* Classifier state: alternate-MAC and VLAN to RDC mappings, TCAM
+ * occupancy accounting, and the H1/H2 hash initial values.
+ */
+struct niu_classifier {
+       struct niu_altmac_rdc   alt_mac_mappings[16];
+       struct niu_vlan_rdc     vlan_mappings[ENET_VLAN_TBL_NUM_ENTRIES];
+
+       u16                     tcam_top;
+       u16                     tcam_sz;
+       u16                     tcam_valid_entries;
+       u16                     num_alt_mac_mappings;
+
+       u32                     h1_init;
+       u16                     h2_init;
+};
+
+#define NIU_NUM_RDC_TABLES     8
+#define NIU_RDC_TABLE_SLOTS    16
+
+/* One receive-DMA-channel (RDC) table: 16 slots of RX channel numbers. */
+struct rdc_table {
+       u8                      rxdma_channel[NIU_RDC_TABLE_SLOTS];
+};
+
+/* The contiguous group of RDC tables configured for a port. */
+struct niu_rdc_tables {
+       struct rdc_table        tables[NIU_NUM_RDC_TABLES];
+       int                     first_table_num;
+       int                     num_tables;
+};
+
+#define PHY_TYPE_PMA_PMD       0
+#define PHY_TYPE_PCS           1
+#define PHY_TYPE_MII           2
+#define PHY_TYPE_MAX           3
+
+/* Results of PHY probing, indexed by PHY type and port, together
+ * with the device attributes used to export them via sysfs.
+ */
+struct phy_probe_info {
+       u32     phy_id[PHY_TYPE_MAX][NIU_MAX_PORTS];
+       u8      phy_port[PHY_TYPE_MAX][NIU_MAX_PORTS];
+       u8      cur[PHY_TYPE_MAX];
+
+       struct device_attribute phy_port_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS];
+       struct device_attribute phy_type_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS];
+       struct device_attribute phy_id_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS];
+};
+
+/* Software shadow of one TCAM entry: key, mask and association data. */
+struct niu_tcam_entry {
+       u8                      valid;
+       u64                     key[4];
+       u64                     key_mask[4];
+       u64                     assoc_data;
+};
+
+struct device_node;
+/* Identifies a parent device either by PCI location or by OF node. */
+union niu_parent_id {
+       struct {
+               int             domain;
+               int             bus;
+               int             device;
+       } pci;
+       struct device_node      *of;
+};
+
+struct niu;
+/* State shared by all ports of one NIU device: per-port channel
+ * assignments, RDC group configuration, LDG interrupt mapping,
+ * platform type, and the classifier/TCAM programming.  Reference
+ * counted ('refcnt'), linked on a global list, guarded by 'lock'.
+ */
+struct niu_parent {
+       struct platform_device  *plat_dev;
+       int                     index;
+
+       union niu_parent_id     id;
+
+       struct niu              *ports[NIU_MAX_PORTS];
+
+       atomic_t                refcnt;
+       struct list_head        list;
+
+       spinlock_t              lock;
+
+       u32                     flags;
+#define PARENT_FLGS_CLS_HWINIT 0x00000001
+
+       u32                     port_phy;
+#define PORT_PHY_UNKNOWN       0x00000000
+#define PORT_PHY_INVALID       0xffffffff
+#define PORT_TYPE_10G          0x01
+#define PORT_TYPE_1G           0x02
+#define PORT_TYPE_MASK         0x03
+
+       u8                      rxchan_per_port[NIU_MAX_PORTS];
+       u8                      txchan_per_port[NIU_MAX_PORTS];
+
+       struct niu_rdc_tables   rdc_group_cfg[NIU_MAX_PORTS];
+       u8                      rdc_default[NIU_MAX_PORTS];
+
+       /* Logical device number -> logical device group mapping. */
+       u8                      ldg_map[LDN_MAX + 1];
+
+       u8                      plat_type;
+#define PLAT_TYPE_INVALID      0x00
+#define PLAT_TYPE_ATLAS                0x01
+#define PLAT_TYPE_NIU          0x02
+#define PLAT_TYPE_VF_P0                0x03
+#define PLAT_TYPE_VF_P1                0x04
+#define PLAT_TYPE_ATCA_CP3220  0x08
+
+       u8                      num_ports;
+
+       u16                     tcam_num_entries;
+#define NIU_PCI_TCAM_ENTRIES   256
+#define NIU_NONPCI_TCAM_ENTRIES        128
+#define NIU_TCAM_ENTRIES_MAX   256
+
+       int                     rxdma_clock_divider;
+
+       struct phy_probe_info   phy_probe_info;
+
+       struct niu_tcam_entry   tcam[NIU_TCAM_ENTRIES_MAX];
+
+#define        NIU_L2_PROG_CLS         2
+#define        NIU_L3_PROG_CLS         4
+       u64                     l2_cls[NIU_L2_PROG_CLS];
+       u64                     l3_cls[NIU_L3_PROG_CLS];
+       u64                     tcam_key[12];
+       u64                     flow_key[12];
+       u16                     l3_cls_refcnt[NIU_L3_PROG_CLS];
+       u8                      l3_cls_pid[NIU_L3_PROG_CLS];
+};
+
+/* Indirected coherent-allocation and DMA-mapping primitives, so the
+ * same code paths serve both device models the driver supports (the
+ * PCI 'pdev' and platform 'op' members of struct niu).
+ */
+struct niu_ops {
+       void *(*alloc_coherent)(struct device *dev, size_t size,
+                               u64 *handle, gfp_t flag);
+       void (*free_coherent)(struct device *dev, size_t size,
+                             void *cpu_addr, u64 handle);
+       u64 (*map_page)(struct device *dev, struct page *page,
+                       unsigned long offset, size_t size,
+                       enum dma_data_direction direction);
+       void (*unmap_page)(struct device *dev, u64 dma_address,
+                          size_t size, enum dma_data_direction direction);
+       u64 (*map_single)(struct device *dev, void *cpu_addr,
+                         size_t size,
+                         enum dma_data_direction direction);
+       void (*unmap_single)(struct device *dev, u64 dma_address,
+                            size_t size, enum dma_data_direction direction);
+};
+
+/* Link parameters: what was requested (advertising/speed/duplex/
+ * autoneg), what was actually negotiated, and the loopback mode.
+ */
+struct niu_link_config {
+       u32                             supported;
+
+       /* Describes what we're trying to get. */
+       u32                             advertising;
+       u16                             speed;
+       u8                              duplex;
+       u8                              autoneg;
+
+       /* Describes what we actually have. */
+       u32                             active_advertising;
+       u16                             active_speed;
+       u8                              active_duplex;
+       u8                              active_autoneg;
+#define SPEED_INVALID          0xffff
+#define DUPLEX_INVALID         0xff
+#define AUTONEG_INVALID                0xff
+
+       u8                              loopback_mode;
+#define LOOPBACK_DISABLED      0x00
+#define LOOPBACK_PHY           0x01
+#define LOOPBACK_MAC           0x02
+};
+
+/* Per-logical-device-group state: NAPI context, group number, timer
+ * value, latest v0/v1/v2 interrupt status words, and the IRQ.
+ */
+struct niu_ldg {
+       struct napi_struct      napi;
+       struct niu      *np;
+       u8              ldg_num;
+       u8              timer;
+       u64             v0, v1, v2;
+       unsigned int    irq;
+};
+
+/* Software accumulators for the XMAC hardware counters. */
+struct niu_xmac_stats {
+       u64     tx_frames;
+       u64     tx_bytes;
+       u64     tx_fifo_errors;
+       u64     tx_overflow_errors;
+       u64     tx_max_pkt_size_errors;
+       u64     tx_underflow_errors;
+
+       u64     rx_local_faults;
+       u64     rx_remote_faults;
+       u64     rx_link_faults;
+       u64     rx_align_errors;
+       u64     rx_frags;
+       u64     rx_mcasts;
+       u64     rx_bcasts;
+       u64     rx_hist_cnt1;
+       u64     rx_hist_cnt2;
+       u64     rx_hist_cnt3;
+       u64     rx_hist_cnt4;
+       u64     rx_hist_cnt5;
+       u64     rx_hist_cnt6;
+       u64     rx_hist_cnt7;
+       u64     rx_octets;
+       u64     rx_code_violations;
+       u64     rx_len_errors;
+       u64     rx_crc_errors;
+       u64     rx_underflows;
+       u64     rx_overflows;
+
+       u64     pause_off_state;
+       u64     pause_on_state;
+       u64     pause_received;
+};
+
+/* Software accumulators for the BMAC hardware counters. */
+struct niu_bmac_stats {
+       u64     tx_underflow_errors;
+       u64     tx_max_pkt_size_errors;
+       u64     tx_bytes;
+       u64     tx_frames;
+
+       u64     rx_overflows;
+       u64     rx_frames;
+       u64     rx_align_errors;
+       u64     rx_crc_errors;
+       u64     rx_len_errors;
+
+       u64     pause_off_state;
+       u64     pause_on_state;
+       u64     pause_received;
+};
+
+/* A port has either an XMAC or a BMAC (selected by NIU_FLAGS_XMAC). */
+union niu_mac_stats {
+       struct niu_xmac_stats   xmac;
+       struct niu_bmac_stats   bmac;
+};
+
+/* Per-PHY-flavor operations: SERDES init, transceiver init, and
+ * link status query (result returned through the int pointer).
+ */
+struct niu_phy_ops {
+       int (*serdes_init)(struct niu *np);
+       int (*xcvr_init)(struct niu *np);
+       int (*link_status)(struct niu *np, int *);
+};
+
+struct platform_device;
+/* Per-netdev driver instance: register mappings, RX/TX ring and LDG
+ * state, link configuration, classifier programming and VPD data.
+ * 'lock' protects hardware programming and ring state.
+ */
+struct niu {
+       void __iomem                    *regs;
+       struct net_device               *dev;
+       struct pci_dev                  *pdev;
+       struct device                   *device;
+       struct niu_parent               *parent;
+
+       u32                             flags;
+#define NIU_FLAGS_HOTPLUG_PHY_PRESENT  0x02000000 /* Removeable PHY detected*/
+#define NIU_FLAGS_HOTPLUG_PHY          0x01000000 /* Removeable PHY */
+#define NIU_FLAGS_VPD_VALID            0x00800000 /* VPD has valid version */
+#define NIU_FLAGS_MSIX                 0x00400000 /* MSI-X in use */
+#define NIU_FLAGS_MCAST                        0x00200000 /* multicast filter enabled */
+#define NIU_FLAGS_PROMISC              0x00100000 /* PROMISC enabled */
+#define NIU_FLAGS_XCVR_SERDES          0x00080000 /* 0=PHY 1=SERDES */
+#define NIU_FLAGS_10G                  0x00040000 /* 0=1G 1=10G */
+#define NIU_FLAGS_FIBER                        0x00020000 /* 0=COPPER 1=FIBER */
+#define NIU_FLAGS_XMAC                 0x00010000 /* 0=BMAC 1=XMAC */
+
+       u32                             msg_enable;
+       char                            irq_name[NIU_NUM_RXCHAN+NIU_NUM_TXCHAN+3][IFNAMSIZ + 6];
+
+       /* Protects hw programming, and ring state.  */
+       spinlock_t                      lock;
+
+       const struct niu_ops            *ops;
+       union niu_mac_stats             mac_stats;
+
+       struct rx_ring_info             *rx_rings;
+       struct tx_ring_info             *tx_rings;
+       int                             num_rx_rings;
+       int                             num_tx_rings;
+
+       struct niu_ldg                  ldg[NIU_NUM_LDG];
+       int                             num_ldg;
+
+       /* MAC register block plus IPP/PCS/XPCS offsets within it. */
+       void __iomem                    *mac_regs;
+       unsigned long                   ipp_off;
+       unsigned long                   pcs_off;
+       unsigned long                   xpcs_off;
+
+       struct timer_list               timer;
+       u64                             orig_led_state;
+       const struct niu_phy_ops        *phy_ops;
+       int                             phy_addr;
+
+       struct niu_link_config          link_config;
+
+       struct work_struct              reset_task;
+
+       u8                              port;
+       u8                              mac_xcvr;
+#define MAC_XCVR_MII                   1
+#define MAC_XCVR_PCS                   2
+#define MAC_XCVR_XPCS                  3
+
+       struct niu_classifier           clas;
+
+       struct niu_vpd                  vpd;
+       u32                             eeprom_len;
+
+       /* Platform (non-PCI) device and its virtual register ranges. */
+       struct platform_device          *op;
+       void __iomem                    *vir_regs_1;
+       void __iomem                    *vir_regs_2;
+};
+
+#endif /* _NIU_H */
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
new file mode 100644 (file)
index 0000000..297a424
--- /dev/null
@@ -0,0 +1,1306 @@
+/* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/errno.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gfp.h>
+
+#include <asm/auxio.h>
+#include <asm/byteorder.h>
+#include <asm/dma.h>
+#include <asm/idprom.h>
+#include <asm/io.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+#include "sunbmac.h"
+
+#define DRV_NAME       "sunbmac"
+#define DRV_VERSION    "2.1"
+#define DRV_RELDATE    "August 26, 2008"
+#define DRV_AUTHOR     "David S. Miller (davem@davemloft.net)"
+
+static char version[] =
+       DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver");
+MODULE_LICENSE("GPL");
+
+#undef DEBUG_PROBE
+#undef DEBUG_TX
+#undef DEBUG_IRQ
+
+#ifdef DEBUG_PROBE
+#define DP(x)  printk x
+#else
+#define DP(x)
+#endif
+
+#ifdef DEBUG_TX
+#define DTX(x)  printk x
+#else
+#define DTX(x)
+#endif
+
+#ifdef DEBUG_IRQ
+#define DIRQ(x)  printk x
+#else
+#define DIRQ(x)
+#endif
+
+#define DEFAULT_JAMSIZE    4 /* Toe jam */
+
+#define QEC_RESET_TRIES 200
+
+/* Assert the QEC global reset bit and poll for the hardware to
+ * clear it, waiting 20us between reads for up to QEC_RESET_TRIES
+ * attempts.  Returns 0 on success, -1 if the reset never completed.
+ */
+static int qec_global_reset(void __iomem *gregs)
+{
+       int tries = QEC_RESET_TRIES;
+
+       sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
+       while (--tries) {
+               if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) {
+                       udelay(20);
+                       continue;
+               }
+               break;
+       }
+       if (tries)
+               return 0;
+       printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n");
+       return -1;
+}
+
+/* Program the QEC global registers: DMA burst size (plus the BMODE
+ * control bit), a 2048-byte packet size, and an even split of the
+ * controller's local memory (resource 1 of the QEC node) between
+ * the transmit and receive queues.
+ */
+static void qec_init(struct bigmac *bp)
+{
+       struct platform_device *qec_op = bp->qec_op;
+       void __iomem *gregs = bp->gregs;
+       u8 bsizes = bp->bigmac_bursts;
+       u32 regval;
+
+       /* 64byte bursts do not work at the moment, do
+        * not even try to enable them.  -DaveM
+        */
+       if (bsizes & DMA_BURST32)
+               regval = GLOB_CTRL_B32;
+       else
+               regval = GLOB_CTRL_B16;
+       sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL);
+       sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE);
+
+       /* All of memsize is given to bigmac. */
+       sbus_writel(resource_size(&qec_op->resource[1]),
+                   gregs + GLOB_MSIZE);
+
+       /* Half to the transmitter, half to the receiver. */
+       sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
+                   gregs + GLOB_TSIZE);
+       sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
+                   gregs + GLOB_RSIZE);
+}
+
+#define TX_RESET_TRIES     32
+#define RX_RESET_TRIES     32
+
+/* Disable the transmitter by clearing BMAC_TXCFG, then poll (20us
+ * apart, up to TX_RESET_TRIES) until every bit other than the
+ * read-only FIFO threshold field has cleared.  Logs the stuck
+ * configuration value if the reset times out.
+ */
+static void bigmac_tx_reset(void __iomem *bregs)
+{
+       int tries = TX_RESET_TRIES;
+
+       sbus_writel(0, bregs + BMAC_TXCFG);
+
+       /* The fifo threshold bit is read-only and does
+        * not clear.  -DaveM
+        */
+       while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 &&
+              --tries != 0)
+               udelay(20);
+
+       if (!tries) {
+               printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n");
+               printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n",
+                      sbus_readl(bregs + BMAC_TXCFG));
+       }
+}
+
+/* Disable the receiver by clearing BMAC_RXCFG, then poll (20us
+ * apart, up to RX_RESET_TRIES) for the register to read back zero.
+ * Logs the stuck configuration value if the reset times out.
+ */
+static void bigmac_rx_reset(void __iomem *bregs)
+{
+       int tries = RX_RESET_TRIES;
+
+       sbus_writel(0, bregs + BMAC_RXCFG);
+       while (sbus_readl(bregs + BMAC_RXCFG) && --tries)
+               udelay(20);
+
+       if (!tries) {
+               printk(KERN_ERR "BIGMAC: Receiver will not reset.\n");
+               printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n",
+                      sbus_readl(bregs + BMAC_RXCFG));
+       }
+}
+
+/* Quiesce the chip by resetting both the transmitter and receiver. */
+static void bigmac_stop(struct bigmac *bp)
+{
+       bigmac_tx_reset(bp->bregs);
+       bigmac_rx_reset(bp->bregs);
+}
+
+/* Fold the chip's error counter registers into the software
+ * net_device_stats and zero them.  BMAC_EXCTR is sampled twice:
+ * once for tx_aborted_errors and once more (together with
+ * BMAC_LTCTR) for the collision count, before both are cleared.
+ */
+static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
+{
+       struct net_device_stats *stats = &bp->enet_stats;
+
+       stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
+       sbus_writel(0, bregs + BMAC_RCRCECTR);
+
+       stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR);
+       sbus_writel(0, bregs + BMAC_UNALECTR);
+
+       stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR);
+       sbus_writel(0, bregs + BMAC_GLECTR);
+
+       stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR);
+
+       stats->collisions +=
+               (sbus_readl(bregs + BMAC_EXCTR) +
+                sbus_readl(bregs + BMAC_LTCTR));
+       sbus_writel(0, bregs + BMAC_EXCTR);
+       sbus_writel(0, bregs + BMAC_LTCTR);
+}
+
+/* Free every sk_buff still attached to the RX and TX rings and
+ * clear the slots.  dev_kfree_skb_any makes this safe from both
+ * process and interrupt context.
+ */
+static void bigmac_clean_rings(struct bigmac *bp)
+{
+       int i;
+
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               if (bp->rx_skbs[i] != NULL) {
+                       dev_kfree_skb_any(bp->rx_skbs[i]);
+                       bp->rx_skbs[i] = NULL;
+               }
+       }
+
+       for (i = 0; i < TX_RING_SIZE; i++) {
+               if (bp->tx_skbs[i] != NULL) {
+                       dev_kfree_skb_any(bp->tx_skbs[i]);
+                       bp->tx_skbs[i] = NULL;
+               }
+       }
+}
+
+/* Reset ring indexes, free any leftover skbs, and repopulate the
+ * receive ring: each RX slot gets a freshly allocated buffer,
+ * DMA-mapped and handed to the chip via RXD_OWN.  A failed
+ * allocation simply leaves that slot empty (NULL).  TX descriptors
+ * are zeroed.  GFP_ATOMIC is used when running in IRQ context.
+ */
+static void bigmac_init_rings(struct bigmac *bp, int from_irq)
+{
+       struct bmac_init_block *bb = bp->bmac_block;
+       struct net_device *dev = bp->dev;
+       int i;
+       gfp_t gfp_flags = GFP_KERNEL;
+
+       if (from_irq || in_interrupt())
+               gfp_flags = GFP_ATOMIC;
+
+       bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;
+
+       /* Free any skippy bufs left around in the rings. */
+       bigmac_clean_rings(bp);
+
+       /* Now get new skbufs for the receive ring. */
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               struct sk_buff *skb;
+
+               skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
+               if (!skb)
+                       continue;
+
+               bp->rx_skbs[i] = skb;
+               skb->dev = dev;
+
+               /* Because we reserve afterwards. */
+               skb_put(skb, ETH_FRAME_LEN);
+               skb_reserve(skb, 34);
+
+               bb->be_rxd[i].rx_addr =
+                       dma_map_single(&bp->bigmac_op->dev,
+                                      skb->data,
+                                      RX_BUF_ALLOC_SIZE - 34,
+                                      DMA_FROM_DEVICE);
+               bb->be_rxd[i].rx_flags =
+                       (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
+       }
+
+       for (i = 0; i < TX_RING_SIZE; i++)
+               bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0;
+}
+
+/* Management PAL values with the MDIO clock bit driven high/low;
+ * both MDIO data lines and output enable stay asserted.
+ */
+#define MGMT_CLKON  (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK)
+#define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB)
+
+/* Toggle the MDIO management clock 20 times with no frame on the
+ * wire, leaving the transceiver interface in a known idle state.
+ * Each write is flushed with a read-back of the register.
+ */
+static void idle_transceiver(void __iomem *tregs)
+{
+       int cycle;
+
+       for (cycle = 0; cycle < 20; cycle++) {
+               sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+               sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+       }
+}
+
+/* Clock one MDIO data bit out to the transceiver: present the bit,
+ * then pulse the management clock high.  Every write is flushed by
+ * reading the register back.
+ * NOTE(review): the internal case places the bit at shift 3 with
+ * MGMT_PAL_EXT_MDIO held set, the external case at shift 2 with
+ * MGMT_PAL_INT_MDIO held set -- presumably the unused MDIO line is
+ * parked high while the selected one is driven; confirm against the
+ * QEC management PAL documentation.
+ */
+static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit)
+{
+       if (bp->tcvr_type == internal) {
+               bit = (bit & 1) << 3;
+               sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO),
+                           tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+               sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
+                           tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+       } else if (bp->tcvr_type == external) {
+               bit = (bit & 1) << 2;
+               sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB,
+                           tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+               sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK,
+                           tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+       } else {
+               printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n");
+       }
+}
+
+/* Clock one MDIO data bit in from the transceiver, sampling the data
+ * line AFTER the management clock has been raised (compare
+ * read_tcvr_bit2, which samples before the clock edge).  Returns the
+ * bit value in bit 0, or 0 for an unknown transceiver type.
+ */
+static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs)
+{
+       int retval = 0;
+
+       if (bp->tcvr_type == internal) {
+               sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+               sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
+                           tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+               retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
+       } else if (bp->tcvr_type == external) {
+               sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+               sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+               retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
+       } else {
+               printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n");
+       }
+       return retval;
+}
+
+/* Like read_tcvr_bit, but samples the data line BEFORE raising the
+ * management clock.  Used by bigmac_tcvr_read for the external
+ * transceiver.  Returns the bit value in bit 0, or 0 for an unknown
+ * transceiver type.
+ */
+static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs)
+{
+       int retval = 0;
+
+       if (bp->tcvr_type == internal) {
+               sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+               retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
+               sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+       } else if (bp->tcvr_type == external) {
+               sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+               retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
+               sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
+               sbus_readl(tregs + TCVR_MPAL);
+       } else {
+               printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n");
+       }
+       return retval;
+}
+
+/* Bit-bang the low five bits of @byte out to the transceiver,
+ * most significant bit first (used for the 5-bit PHY and register
+ * address fields of an MDIO frame).
+ */
+static void put_tcvr_byte(struct bigmac *bp,
+                         void __iomem *tregs,
+                         unsigned int byte)
+{
+       int bitpos;
+
+       for (bitpos = 4; bitpos >= 0; bitpos--)
+               write_tcvr_bit(bp, tregs, (byte >> bitpos) & 1);
+}
+
+/* Bit-bang a 16-bit write of @val to PHY register @reg.  The frame
+ * is: start bits (0,1), opcode bits (0,1), the 5-bit PHY address,
+ * the 5-bit register address, a (1,0) turnaround, then the 16 data
+ * bits MSB first.  Silently returns if the transceiver type is
+ * unknown.
+ */
+static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
+                             int reg, unsigned short val)
+{
+       int shift;
+
+       reg &= 0xff;
+       val &= 0xffff;
+       switch(bp->tcvr_type) {
+       case internal:
+       case external:
+               break;
+
+       default:
+               /* Fix copy-paste: this message used to claim "bigmac_tcvr_read". */
+               printk(KERN_ERR "bigmac_tcvr_write: Whoops, no known transceiver type.\n");
+               return;
+       }
+
+       idle_transceiver(tregs);
+       write_tcvr_bit(bp, tregs, 0);
+       write_tcvr_bit(bp, tregs, 1);
+       write_tcvr_bit(bp, tregs, 0);
+       write_tcvr_bit(bp, tregs, 1);
+
+       put_tcvr_byte(bp, tregs,
+                     ((bp->tcvr_type == internal) ?
+                      BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));
+
+       put_tcvr_byte(bp, tregs, reg);
+
+       /* Turnaround. */
+       write_tcvr_bit(bp, tregs, 1);
+       write_tcvr_bit(bp, tregs, 0);
+
+       /* Data, MSB first. */
+       shift = 15;
+       do {
+               write_tcvr_bit(bp, tregs, (val >> shift) & 1);
+               shift -= 1;
+       } while (shift >= 0);
+}
+
+/* Bit-bang a 16-bit read of PHY register @reg.  The frame is: start
+ * bits (0,1), opcode bits (1,0), the 5-bit PHY address, the 5-bit
+ * register address, two turnaround clocks, then the 16 data bits MSB
+ * first, followed by three trailing idle clocks.  Returns 0xffff if
+ * the transceiver type is unknown.
+ */
+static unsigned short bigmac_tcvr_read(struct bigmac *bp,
+                                      void __iomem *tregs,
+                                      int reg)
+{
+       int (*read_bit)(struct bigmac *, void __iomem *);
+       unsigned short retval = 0;
+       int shift;
+
+       reg &= 0xff;
+       switch(bp->tcvr_type) {
+       case internal:
+       case external:
+               break;
+
+       default:
+               printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
+               return 0xffff;
+       }
+
+       idle_transceiver(tregs);
+       write_tcvr_bit(bp, tregs, 0);
+       write_tcvr_bit(bp, tregs, 1);
+       write_tcvr_bit(bp, tregs, 1);
+       write_tcvr_bit(bp, tregs, 0);
+
+       put_tcvr_byte(bp, tregs,
+                     ((bp->tcvr_type == internal) ?
+                      BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));
+
+       put_tcvr_byte(bp, tregs, reg);
+
+       /* The external transceiver samples MDIO before the clock edge
+        * (read_tcvr_bit2), the internal one after it (read_tcvr_bit).
+        * The two branches were otherwise line-for-line identical, so
+        * choose the sampler once and share the loop.
+        */
+       read_bit = (bp->tcvr_type == external) ? read_tcvr_bit2 : read_tcvr_bit;
+
+       /* Two turnaround clocks... */
+       (void) read_bit(bp, tregs);
+       (void) read_bit(bp, tregs);
+
+       /* ...the 16 data bits, MSB first... */
+       for (shift = 15; shift >= 0; shift--)
+               retval |= ((read_bit(bp, tregs) & 1) << shift);
+
+       /* ...and three trailing idle clocks. */
+       (void) read_bit(bp, tregs);
+       (void) read_bit(bp, tregs);
+       (void) read_bit(bp, tregs);
+
+       return retval;
+}
+
+/* Probe which transceiver (internal or external) is present: set
+ * both MDIO enable bits and read back -- only the bit for the
+ * transceiver that actually exists sticks -- then program the
+ * transceiver PAL accordingly.  Logs an error if neither sticks.
+ */
+static void bigmac_tcvr_init(struct bigmac *bp)
+{
+       void __iomem *tregs = bp->tregs;
+       u32 mpal;
+
+       idle_transceiver(tregs);
+       sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
+                   tregs + TCVR_MPAL);
+       sbus_readl(tregs + TCVR_MPAL);
+
+       /* Only the bit for the present transceiver (internal or
+        * external) will stick, set them both and see what stays.
+        */
+       sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
+       sbus_readl(tregs + TCVR_MPAL);
+       udelay(20);
+
+       mpal = sbus_readl(tregs + TCVR_MPAL);
+       if (mpal & MGMT_PAL_EXT_MDIO) {
+               bp->tcvr_type = external;
+               sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
+                           tregs + TCVR_TPAL);
+               sbus_readl(tregs + TCVR_TPAL);
+       } else if (mpal & MGMT_PAL_INT_MDIO) {
+               bp->tcvr_type = internal;
+               sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK |
+                             TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
+                           tregs + TCVR_TPAL);
+               sbus_readl(tregs + TCVR_TPAL);
+       } else {
+               printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor "
+                      "external MDIO available!\n");
+               printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n",
+                      sbus_readl(tregs + TCVR_MPAL),
+                      sbus_readl(tregs + TCVR_TPAL));
+       }
+}
+
+/* Forward declaration; defined below, needed by the link timer path. */
+static int bigmac_init_hw(struct bigmac *, int);
+
+/* Try the next link-speed setting.  If we were at 100baseT, reset
+ * the PHY and drop to 10baseT, returning 0; once 10baseT has also
+ * been tried there is nothing left, so return -1.
+ */
+static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
+{
+       if (bp->sw_bmcr & BMCR_SPEED100) {
+               int timeout;
+
+               /* Reset the PHY. */
+               bp->sw_bmcr     = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
+               bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+               bp->sw_bmcr     = (BMCR_RESET);
+               bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+
+               /* Poll for the self-clearing reset bit, ~64 * 20us. */
+               timeout = 64;
+               while (--timeout) {
+                       bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+                       if ((bp->sw_bmcr & BMCR_RESET) == 0)
+                               break;
+                       udelay(20);
+               }
+               if (timeout == 0)
+                       printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
+
+               bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+
+               /* Now we try 10baseT. */
+               bp->sw_bmcr &= ~(BMCR_SPEED100);
+               bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+               return 0;
+       }
+
+       /* We've tried them all. */
+       return -1;
+}
+
+/* Link poll timer.  While in ltrywait we watch BMSR for link-up;
+ * after 4 ticks (~4.8s, 1.2s per tick) without link we fall back to
+ * the next speed via try_next_permutation(), or re-init the whole
+ * chip when every permutation has failed.
+ */
+static void bigmac_timer(unsigned long data)
+{
+       struct bigmac *bp = (struct bigmac *) data;
+       void __iomem *tregs = bp->tregs;
+       int restart_timer = 0;
+
+       bp->timer_ticks++;
+       if (bp->timer_state == ltrywait) {
+               bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR);
+               bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+               if (bp->sw_bmsr & BMSR_LSTATUS) {
+                       printk(KERN_INFO "%s: Link is now up at %s.\n",
+                              bp->dev->name,
+                              (bp->sw_bmcr & BMCR_SPEED100) ?
+                              "100baseT" : "10baseT");
+                       bp->timer_state = asleep;
+                       restart_timer = 0;
+               } else {
+                       if (bp->timer_ticks >= 4) {
+                               int ret;
+
+                               ret = try_next_permutation(bp, tregs);
+                               if (ret == -1) {
+                                       printk(KERN_ERR "%s: Link down, cable problem?\n",
+                                              bp->dev->name);
+                                       ret = bigmac_init_hw(bp, 0);
+                                       if (ret) {
+                                               printk(KERN_ERR "%s: Error, cannot re-init the "
+                                                      "BigMAC.\n", bp->dev->name);
+                                       }
+                                       /* bigmac_init_hw rearms the timer itself. */
+                                       return;
+                               }
+                               bp->timer_ticks = 0;
+                               restart_timer = 1;
+                       } else {
+                               restart_timer = 1;
+                       }
+               }
+       } else {
+               /* Can't happens.... */
+               printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
+                      bp->dev->name);
+               restart_timer = 0;
+               bp->timer_ticks = 0;
+               bp->timer_state = asleep; /* foo on you */
+       }
+
+       if (restart_timer != 0) {
+               bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
+               add_timer(&bp->bigmac_timer);
+       }
+}
+
+/* Well, really we just force the chip into 100baseT then
+ * 10baseT, each time checking for a link status.
+ * (So this is a software speed probe, not true IEEE autoneg: reset
+ * the PHY, force 100baseT first, and let bigmac_timer poll BMSR and
+ * step down via try_next_permutation.)
+ */
+static void bigmac_begin_auto_negotiation(struct bigmac *bp)
+{
+       void __iomem *tregs = bp->tregs;
+       int timeout;
+
+       /* Grab new software copies of PHY registers. */
+       bp->sw_bmsr     = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR);
+       bp->sw_bmcr     = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+
+       /* Reset the PHY. */
+       bp->sw_bmcr     = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
+       bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+       bp->sw_bmcr     = (BMCR_RESET);
+       bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+
+       /* Poll for the self-clearing reset bit, ~64 * 20us. */
+       timeout = 64;
+       while (--timeout) {
+               bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+               if ((bp->sw_bmcr & BMCR_RESET) == 0)
+                       break;
+               udelay(20);
+       }
+       if (timeout == 0)
+               printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
+
+       bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
+
+       /* First we try 100baseT. */
+       bp->sw_bmcr |= BMCR_SPEED100;
+       bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
+
+       /* Arm the 1.2 sec link poll timer. */
+       bp->timer_state = ltrywait;
+       bp->timer_ticks = 0;
+       bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
+       bp->bigmac_timer.data = (unsigned long) bp;
+       bp->bigmac_timer.function = bigmac_timer;
+       add_timer(&bp->bigmac_timer);
+}
+
+/* Full hardware bring-up: latch the stat counters, reset and
+ * reprogram both the QEC global and BigMAC channel register blocks,
+ * rebuild the descriptor rings, and start the link speed probe.
+ * @from_irq selects GFP_ATOMIC allocation in bigmac_init_rings.
+ * The step ordering below follows the hardware's expectations; do
+ * not reorder casually.  Always returns 0 at present, but callers
+ * check the return value.
+ */
+static int bigmac_init_hw(struct bigmac *bp, int from_irq)
+{
+       void __iomem *gregs        = bp->gregs;
+       void __iomem *cregs        = bp->creg;
+       void __iomem *bregs        = bp->bregs;
+       unsigned char *e = &bp->dev->dev_addr[0];
+
+       /* Latch current counters into statistics. */
+       bigmac_get_counters(bp, bregs);
+
+       /* Reset QEC. */
+       qec_global_reset(gregs);
+
+       /* Init QEC. */
+       qec_init(bp);
+
+       /* Alloc and reset the tx/rx descriptor chains. */
+       bigmac_init_rings(bp, from_irq);
+
+       /* Initialize the PHY. */
+       bigmac_tcvr_init(bp);
+
+       /* Stop transmitter and receiver. */
+       bigmac_stop(bp);
+
+       /* Set hardware ethernet address. */
+       sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2);
+       sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1);
+       sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0);
+
+       /* Clear the hash table until mc upload occurs. */
+       sbus_writel(0, bregs + BMAC_HTABLE3);
+       sbus_writel(0, bregs + BMAC_HTABLE2);
+       sbus_writel(0, bregs + BMAC_HTABLE1);
+       sbus_writel(0, bregs + BMAC_HTABLE0);
+
+       /* Enable Big Mac hash table filter. */
+       sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO,
+                   bregs + BMAC_RXCFG);
+       udelay(20);
+
+       /* Ok, configure the Big Mac transmitter. */
+       sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG);
+
+       /* The HME docs recommend to use the 10LSB of our MAC here. */
+       sbus_writel(((e[5] | e[4] << 8) & 0x3ff),
+                   bregs + BMAC_RSEED);
+
+       /* Enable the output drivers no matter what. */
+       sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV,
+                   bregs + BMAC_XIFCFG);
+
+       /* Tell the QEC where the ring descriptors are. */
+       sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
+                   cregs + CREG_RXDS);
+       sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
+                   cregs + CREG_TXDS);
+
+       /* Setup the FIFO pointers into QEC local memory. */
+       sbus_writel(0, cregs + CREG_RXRBUFPTR);
+       sbus_writel(0, cregs + CREG_RXWBUFPTR);
+       sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
+                   cregs + CREG_TXRBUFPTR);
+       sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
+                   cregs + CREG_TXWBUFPTR);
+
+       /* Tell bigmac what interrupts we don't want to hear about. */
+       sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME,
+                   bregs + BMAC_IMASK);
+
+       /* Enable the various other irq's. */
+       sbus_writel(0, cregs + CREG_RIMASK);
+       sbus_writel(0, cregs + CREG_TIMASK);
+       sbus_writel(0, cregs + CREG_QMASK);
+       sbus_writel(0, cregs + CREG_BMASK);
+
+       /* Set jam size to a reasonable default. */
+       sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE);
+
+       /* Clear collision counter. */
+       sbus_writel(0, cregs + CREG_CCNT);
+
+       /* Enable transmitter and receiver. */
+       sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE,
+                   bregs + BMAC_TXCFG);
+       sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE,
+                   bregs + BMAC_RXCFG);
+
+       /* Ok, start detecting link speed/duplex. */
+       bigmac_begin_auto_negotiation(bp);
+
+       /* Success. */
+       return 0;
+}
+
+/* Error interrupts get sent here.  Decode and log each error bit in
+ * the QEC global status and BigMAC channel status words, then fully
+ * re-initialize the hardware to recover.
+ */
+static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status)
+{
+       printk(KERN_ERR "bigmac_is_medium_rare: ");
+       if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) {
+               if (qec_status & GLOB_STAT_ER)
+                       printk("QEC_ERROR, ");
+               if (qec_status & GLOB_STAT_BM)
+                       printk("QEC_BMAC_ERROR, ");
+       }
+       if (bmac_status & CREG_STAT_ERRORS) {
+               if (bmac_status & CREG_STAT_BERROR)
+                       printk("BMAC_ERROR, ");
+               if (bmac_status & CREG_STAT_TXDERROR)
+                       printk("TXD_ERROR, ");
+               if (bmac_status & CREG_STAT_TXLERR)
+                       printk("TX_LATE_ERROR, ");
+               if (bmac_status & CREG_STAT_TXPERR)
+                       printk("TX_PARITY_ERROR, ");
+               if (bmac_status & CREG_STAT_TXSERR)
+                       printk("TX_SBUS_ERROR, ");
+
+               if (bmac_status & CREG_STAT_RXDROP)
+                       printk("RX_DROP_ERROR, ");
+
+               if (bmac_status & CREG_STAT_RXSMALL)
+                       printk("RX_SMALL_ERROR, ");
+               if (bmac_status & CREG_STAT_RXLERR)
+                       printk("RX_LATE_ERROR, ");
+               if (bmac_status & CREG_STAT_RXPERR)
+                       printk("RX_PARITY_ERROR, ");
+               if (bmac_status & CREG_STAT_RXSERR)
+                       printk("RX_SBUS_ERROR, ");
+       }
+
+       printk(" RESET\n");
+       /* from_irq=1: we are in interrupt context. */
+       bigmac_init_hw(bp, 1);
+}
+
+/* BigMAC transmit complete service routines. */
+/* Reclaim completed TX descriptors between tx_old and tx_new: unmap
+ * the DMA buffer, free the skb, count the stats, and wake the queue
+ * if it had been flow-controlled.  Runs from the IRQ handler.
+ */
+static void bigmac_tx(struct bigmac *bp)
+{
+       struct be_txd *txbase = &bp->bmac_block->be_txd[0];
+       struct net_device *dev = bp->dev;
+       int elem;
+
+       spin_lock(&bp->lock);
+
+       elem = bp->tx_old;
+       DTX(("bigmac_tx: tx_old[%d] ", elem));
+       while (elem != bp->tx_new) {
+               struct sk_buff *skb;
+               struct be_txd *this = &txbase[elem];
+
+               DTX(("this(%p) [flags(%08x)addr(%08x)]",
+                    this, this->tx_flags, this->tx_addr));
+
+               /* Descriptor still owned by the chip: stop here. */
+               if (this->tx_flags & TXD_OWN)
+                       break;
+               skb = bp->tx_skbs[elem];
+               bp->enet_stats.tx_packets++;
+               bp->enet_stats.tx_bytes += skb->len;
+               dma_unmap_single(&bp->bigmac_op->dev,
+                                this->tx_addr, skb->len,
+                                DMA_TO_DEVICE);
+
+               DTX(("skb(%p) ", skb));
+               bp->tx_skbs[elem] = NULL;
+               dev_kfree_skb_irq(skb);
+
+               elem = NEXT_TX(elem);
+       }
+       DTX((" DONE, tx_old=%d\n", elem));
+       bp->tx_old = elem;
+
+       if (netif_queue_stopped(dev) &&
+           TX_BUFFS_AVAIL(bp) > 0)
+               netif_wake_queue(bp->dev);
+
+       spin_unlock(&bp->lock);
+}
+
+/* BigMAC receive complete service routines. */
+/* Service descriptors the chip has released (RXD_OWN clear).  Frames
+ * larger than RX_COPY_THRESHOLD give their ring buffer to the stack
+ * and get a freshly allocated replacement mapped in; smaller frames
+ * are copied into a new skb so the ring buffer can be reused.  Runt
+ * frames (< ETH_ZLEN) and allocation failures return the slot to
+ * the chip and count as drops.
+ */
+static void bigmac_rx(struct bigmac *bp)
+{
+       struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0];
+       struct be_rxd *this;
+       int elem = bp->rx_new, drops = 0;
+       u32 flags;
+
+       this = &rxbase[elem];
+       while (!((flags = this->rx_flags) & RXD_OWN)) {
+               struct sk_buff *skb;
+               int len = (flags & RXD_LENGTH); /* FCS not included */
+
+               /* Check for errors. */
+               if (len < ETH_ZLEN) {
+                       bp->enet_stats.rx_errors++;
+                       bp->enet_stats.rx_length_errors++;
+
+       drop_it:
+                       /* Return it to the BigMAC. */
+                       bp->enet_stats.rx_dropped++;
+                       this->rx_flags =
+                               (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
+                       goto next;
+               }
+               skb = bp->rx_skbs[elem];
+               if (len > RX_COPY_THRESHOLD) {
+                       struct sk_buff *new_skb;
+
+                       /* Now refill the entry, if we can. */
+                       new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+                       if (new_skb == NULL) {
+                               drops++;
+                               goto drop_it;
+                       }
+                       dma_unmap_single(&bp->bigmac_op->dev,
+                                        this->rx_addr,
+                                        RX_BUF_ALLOC_SIZE - 34,
+                                        DMA_FROM_DEVICE);
+                       bp->rx_skbs[elem] = new_skb;
+                       new_skb->dev = bp->dev;
+                       skb_put(new_skb, ETH_FRAME_LEN);
+                       skb_reserve(new_skb, 34);
+                       this->rx_addr =
+                               dma_map_single(&bp->bigmac_op->dev,
+                                              new_skb->data,
+                                              RX_BUF_ALLOC_SIZE - 34,
+                                              DMA_FROM_DEVICE);
+                       this->rx_flags =
+                               (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
+
+                       /* Trim the original skb for the netif. */
+                       skb_trim(skb, len);
+               } else {
+                       struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+
+                       if (copy_skb == NULL) {
+                               drops++;
+                               goto drop_it;
+                       }
+                       /* 2-byte reserve keeps the IP header aligned. */
+                       skb_reserve(copy_skb, 2);
+                       skb_put(copy_skb, len);
+                       dma_sync_single_for_cpu(&bp->bigmac_op->dev,
+                                               this->rx_addr, len,
+                                               DMA_FROM_DEVICE);
+                       skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
+                       dma_sync_single_for_device(&bp->bigmac_op->dev,
+                                                  this->rx_addr, len,
+                                                  DMA_FROM_DEVICE);
+
+                       /* Reuse original ring buffer. */
+                       this->rx_flags =
+                               (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
+
+                       skb = copy_skb;
+               }
+
+               /* No checksums done by the BigMAC ;-( */
+               skb->protocol = eth_type_trans(skb, bp->dev);
+               netif_rx(skb);
+               bp->enet_stats.rx_packets++;
+               bp->enet_stats.rx_bytes += len;
+       next:
+               elem = NEXT_RX(elem);
+               this = &rxbase[elem];
+       }
+       bp->rx_new = elem;
+       if (drops)
+               printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name);
+}
+
+/* Shared interrupt handler: read the BigMAC channel and QEC global
+ * status words, then dispatch error, TX-complete and RX service as
+ * indicated.
+ * NOTE(review): always returns IRQ_HANDLED, even when no status bit
+ * was set for us -- acceptable on this hardware, but it prevents
+ * spurious-IRQ detection on the shared line.
+ */
+static irqreturn_t bigmac_interrupt(int irq, void *dev_id)
+{
+       struct bigmac *bp = (struct bigmac *) dev_id;
+       u32 qec_status, bmac_status;
+
+       DIRQ(("bigmac_interrupt: "));
+
+       /* Latch status registers now. */
+       bmac_status = sbus_readl(bp->creg + CREG_STAT);
+       qec_status = sbus_readl(bp->gregs + GLOB_STAT);
+
+       DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status));
+       if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) ||
+          (bmac_status & CREG_STAT_ERRORS))
+               bigmac_is_medium_rare(bp, qec_status, bmac_status);
+
+       if (bmac_status & CREG_STAT_TXIRQ)
+               bigmac_tx(bp);
+
+       if (bmac_status & CREG_STAT_RXIRQ)
+               bigmac_rx(bp);
+
+       return IRQ_HANDLED;
+}
+
+/* ndo_open: grab the (shared) interrupt line, then bring the whole
+ * chip up.  The IRQ is released again if hardware init fails.
+ */
+static int bigmac_open(struct net_device *dev)
+{
+       struct bigmac *bp = netdev_priv(dev);
+       int err;
+
+       err = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp);
+       if (err) {
+               printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
+               return err;
+       }
+
+       init_timer(&bp->bigmac_timer);
+
+       err = bigmac_init_hw(bp, 0);
+       if (err)
+               free_irq(dev->irq, bp);
+       return err;
+}
+
+/* ndo_stop: quiesce the link timer, halt the chip, drop all ring
+ * buffers and release the interrupt line.
+ */
+static int bigmac_close(struct net_device *dev)
+{
+       struct bigmac *bp = netdev_priv(dev);
+
+       del_timer(&bp->bigmac_timer);
+       bp->timer_ticks = 0;
+       bp->timer_state = asleep;
+
+       bigmac_stop(bp);
+       bigmac_clean_rings(bp);
+       free_irq(dev->irq, bp);
+       return 0;
+}
+
+/* ndo_tx_timeout: the transmitter wedged; re-initialize the whole
+ * chip and let the stack resume queueing.
+ */
+static void bigmac_tx_timeout(struct net_device *dev)
+{
+       bigmac_init_hw(netdev_priv(dev), 0);
+       netif_wake_queue(dev);
+}
+
+/* Put a packet on the wire. */
+/* ndo_start_xmit: map the skb for DMA, fill the next TX descriptor,
+ * hand it to the chip and poke the TX wakeup register.  Stops the
+ * queue when the ring is out of free slots.
+ */
+static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct bigmac *bp = netdev_priv(dev);
+       int len, entry;
+       u32 mapping;
+
+       len = skb->len;
+       mapping = dma_map_single(&bp->bigmac_op->dev, skb->data,
+                                len, DMA_TO_DEVICE);
+
+       /* Avoid a race... */
+       spin_lock_irq(&bp->lock);
+       entry = bp->tx_new;
+       DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry));
+       /* NOTE(review): flags are first set to TXD_UPDATE and only
+        * replaced with the final OWN|SOP|EOP word after tx_addr is
+        * in place -- presumably to keep the chip off the slot while
+        * it is being filled in; confirm against the QEC docs.
+        */
+       bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE;
+       bp->tx_skbs[entry] = skb;
+       bp->bmac_block->be_txd[entry].tx_addr = mapping;
+       bp->bmac_block->be_txd[entry].tx_flags =
+               (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
+       bp->tx_new = NEXT_TX(entry);
+       if (TX_BUFFS_AVAIL(bp) <= 0)
+               netif_stop_queue(dev);
+       spin_unlock_irq(&bp->lock);
+
+       /* Get it going. */
+       sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);
+
+
+       return NETDEV_TX_OK;
+}
+
+/* ndo_get_stats: fold the current hardware counters into the software
+ * statistics, then hand the accumulated stats back to the caller.
+ */
+static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
+{
+       struct bigmac *bp = netdev_priv(dev);
+
+       /* Latch (and clear) the hardware counters first. */
+       bigmac_get_counters(bp, bp->bregs);
+
+       return &bp->enet_stats;
+}
+
+/* ndo_set_multicast_list: reprogram the 64-bit multicast hash
+ * filter.  The receiver is disabled (polled until the enable bit
+ * self-clears) while the hash table or promiscuous bit is updated,
+ * then re-enabled.
+ */
+static void bigmac_set_multicast(struct net_device *dev)
+{
+       struct bigmac *bp = netdev_priv(dev);
+       void __iomem *bregs = bp->bregs;
+       struct netdev_hw_addr *ha;
+       int i;
+       u32 tmp, crc;
+
+       /* Disable the receiver.  The bit self-clears when
+        * the operation is complete.
+        */
+       tmp = sbus_readl(bregs + BMAC_RXCFG);
+       tmp &= ~(BIGMAC_RXCFG_ENABLE);
+       sbus_writel(tmp, bregs + BMAC_RXCFG);
+       while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
+               udelay(20);
+
+       /* Too many groups (or all-multi requested): accept everything
+        * by filling the hash table.
+        */
+       if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
+               sbus_writel(0xffff, bregs + BMAC_HTABLE0);
+               sbus_writel(0xffff, bregs + BMAC_HTABLE1);
+               sbus_writel(0xffff, bregs + BMAC_HTABLE2);
+               sbus_writel(0xffff, bregs + BMAC_HTABLE3);
+       } else if (dev->flags & IFF_PROMISC) {
+               tmp = sbus_readl(bregs + BMAC_RXCFG);
+               tmp |= BIGMAC_RXCFG_PMISC;
+               sbus_writel(tmp, bregs + BMAC_RXCFG);
+       } else {
+               u16 hash_table[4];
+
+               for (i = 0; i < 4; i++)
+                       hash_table[i] = 0;
+
+               /* Top 6 bits of the little-endian CRC select one of
+                * the 64 hash table bits.
+                */
+               netdev_for_each_mc_addr(ha, dev) {
+                       crc = ether_crc_le(6, ha->addr);
+                       crc >>= 26;
+                       hash_table[crc >> 4] |= 1 << (crc & 0xf);
+               }
+               sbus_writel(hash_table[0], bregs + BMAC_HTABLE0);
+               sbus_writel(hash_table[1], bregs + BMAC_HTABLE1);
+               sbus_writel(hash_table[2], bregs + BMAC_HTABLE2);
+               sbus_writel(hash_table[3], bregs + BMAC_HTABLE3);
+       }
+
+       /* Re-enable the receiver. */
+       tmp = sbus_readl(bregs + BMAC_RXCFG);
+       tmp |= BIGMAC_RXCFG_ENABLE;
+       sbus_writel(tmp, bregs + BMAC_RXCFG);
+}
+
+/* Ethtool support... */
+/* ethtool get_drvinfo: report the driver name and version.  Use
+ * strlcpy() bounded by the fixed-size ethtool_drvinfo fields instead
+ * of an unbounded strcpy().
+ */
+static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, "sunbmac", sizeof(info->driver));
+       strlcpy(info->version, "2.0", sizeof(info->version));
+}
+
+/* ethtool get_link: refresh the cached BMSR copy under the driver
+ * lock and report the PHY link-status bit.
+ */
+static u32 bigmac_get_link(struct net_device *dev)
+{
+       struct bigmac *bp = netdev_priv(dev);
+       u32 link;
+
+       spin_lock_irq(&bp->lock);
+       bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, BIGMAC_BMSR);
+       link = bp->sw_bmsr & BMSR_LSTATUS;
+       spin_unlock_irq(&bp->lock);
+
+       return link;
+}
+
+/* Minimal ethtool support: driver identity and link status only. */
+static const struct ethtool_ops bigmac_ethtool_ops = {
+       .get_drvinfo            = bigmac_get_drvinfo,
+       .get_link               = bigmac_get_link,
+};
+
+/* netdev entry points; MTU/MAC-address/addr-validation handling is
+ * delegated to the generic ethernet helpers.
+ */
+static const struct net_device_ops bigmac_ops = {
+       .ndo_open               = bigmac_open,
+       .ndo_stop               = bigmac_close,
+       .ndo_start_xmit         = bigmac_start_xmit,
+       .ndo_get_stats          = bigmac_get_stats,
+       .ndo_set_multicast_list = bigmac_set_multicast,
+       .ndo_tx_timeout         = bigmac_tx_timeout,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
/* One-time probe/init of a BigMAC interface.
 *
 * @op:     OF device for the BigMAC node itself.
 * @qec_op: OF device for the parent QEC controller.
 *
 * Maps the four register banks (QEC global, QEC channel, BigMAC core,
 * transceiver), verifies/resets the QEC, allocates the RX/TX descriptor
 * block, and registers the net device.
 *
 * Returns 0 on success, -ENOMEM if the netdev cannot be allocated, or
 * -ENODEV for any later failure (partial setup is undone first).
 */
static int __devinit bigmac_ether_init(struct platform_device *op,
				       struct platform_device *qec_op)
{
	static int version_printed;
	struct net_device *dev;
	u8 bsizes, bsizes_more;
	struct bigmac *bp;
	int i;

	/* Get a new device struct for this interface. */
	dev = alloc_etherdev(sizeof(struct bigmac));
	if (!dev)
		return -ENOMEM;

	/* Announce the driver once, on first probe. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* The MAC address comes from the machine's IDPROM. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = idprom->id_ethaddr[i];

	/* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */
	bp = netdev_priv(dev);
	bp->qec_op = qec_op;
	bp->bigmac_op = op;

	SET_NETDEV_DEV(dev, &op->dev);

	spin_lock_init(&bp->lock);

	/* Map in QEC global control registers. */
	bp->gregs = of_ioremap(&qec_op->resource[0], 0,
			       GLOB_REG_SIZE, "BigMAC QEC GLobal Regs");
	if (!bp->gregs) {
		printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n");
		goto fail_and_cleanup;
	}

	/* Make sure QEC is in BigMAC mode. */
	if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) {
		printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n");
		goto fail_and_cleanup;
	}

	/* Reset the QEC. */
	if (qec_global_reset(bp->gregs))
		goto fail_and_cleanup;

	/* Get supported SBUS burst sizes.
	 *
	 * NOTE(review): both reads query the QEC node's "burst-sizes"
	 * property; presumably the second read was meant to come from a
	 * parent bus node -- confirm the duplicated read is intentional.
	 */
	bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
	bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);

	bsizes &= 0xff;
	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	/* Fall back to all bursts below 32 bytes when the properties are
	 * absent (0xff) or do not advertise 16- and 32-byte bursts.
	 */
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);
	bp->bigmac_bursts = bsizes;

	/* Perform QEC initialization. */
	qec_init(bp);

	/* Map in the BigMAC channel registers. */
	bp->creg = of_ioremap(&op->resource[0], 0,
			      CREG_REG_SIZE, "BigMAC QEC Channel Regs");
	if (!bp->creg) {
		printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n");
		goto fail_and_cleanup;
	}

	/* Map in the BigMAC control registers. */
	bp->bregs = of_ioremap(&op->resource[1], 0,
			       BMAC_REG_SIZE, "BigMAC Primary Regs");
	if (!bp->bregs) {
		printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n");
		goto fail_and_cleanup;
	}

	/* Map in the BigMAC transceiver registers, this is how you poke at
	 * the BigMAC's PHY.
	 */
	bp->tregs = of_ioremap(&op->resource[2], 0,
			       TCVR_REG_SIZE, "BigMAC Transceiver Regs");
	if (!bp->tregs) {
		printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n");
		goto fail_and_cleanup;
	}

	/* Stop the BigMAC. */
	bigmac_stop(bp);

	/* Allocate transmit/receive descriptor DVMA block. */
	bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
					    PAGE_SIZE,
					    &bp->bblock_dvma, GFP_ATOMIC);
	if (bp->bmac_block == NULL || bp->bblock_dvma == 0) {
		printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
		goto fail_and_cleanup;
	}

	/* Get the board revision of this BigMAC. */
	bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
					      "board-version", 1);

	/* Init auto-negotiation timer state. */
	init_timer(&bp->bigmac_timer);
	bp->timer_state = asleep;
	bp->timer_ticks = 0;

	/* Backlink to generic net device struct. */
	bp->dev = dev;

	/* Set links to our BigMAC open and close routines. */
	dev->ethtool_ops = &bigmac_ethtool_ops;
	dev->netdev_ops = &bigmac_ops;
	dev->watchdog_timeo = 5*HZ;

	/* Finish net device registration. */
	dev->irq = bp->bigmac_op->archdata.irqs[0];
	dev->dma = 0;

	if (register_netdev(dev)) {
		printk(KERN_ERR "BIGMAC: Cannot register device.\n");
		goto fail_and_cleanup;
	}

	dev_set_drvdata(&bp->bigmac_op->dev, bp);

	printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n",
	       dev->name, dev->dev_addr);

	return 0;

fail_and_cleanup:
	/* Something went wrong, undo whatever we did so far. */
	/* Free register mappings if any.  The bp-> fields checked below
	 * are presumably zero in the fresh netdev private area, so each
	 * check only tears down what was actually set up -- TODO confirm
	 * alloc_etherdev() zeroes the private area.
	 */
	if (bp->gregs)
		of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
	if (bp->creg)
		of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
	if (bp->bregs)
		of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
	if (bp->tregs)
		of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);

	if (bp->bmac_block)
		dma_free_coherent(&bp->bigmac_op->dev,
				  PAGE_SIZE,
				  bp->bmac_block,
				  bp->bblock_dvma);

	/* This also frees the co-located private data */
	free_netdev(dev);
	return -ENODEV;
}
+
+/* QEC can be the parent of either QuadEthernet or a BigMAC.  We want
+ * the latter.
+ */
+static int __devinit bigmac_sbus_probe(struct platform_device *op)
+{
+       struct device *parent = op->dev.parent;
+       struct platform_device *qec_op;
+
+       qec_op = to_platform_device(parent);
+
+       return bigmac_ether_init(op, qec_op);
+}
+
+static int __devexit bigmac_sbus_remove(struct platform_device *op)
+{
+       struct bigmac *bp = dev_get_drvdata(&op->dev);
+       struct device *parent = op->dev.parent;
+       struct net_device *net_dev = bp->dev;
+       struct platform_device *qec_op;
+
+       qec_op = to_platform_device(parent);
+
+       unregister_netdev(net_dev);
+
+       of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
+       of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
+       of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
+       of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);
+       dma_free_coherent(&op->dev,
+                         PAGE_SIZE,
+                         bp->bmac_block,
+                         bp->bblock_dvma);
+
+       free_netdev(net_dev);
+
+       dev_set_drvdata(&op->dev, NULL);
+
+       return 0;
+}
+
/* Match on the OF node name "be" (BigMAC ethernet); the probe routine
 * assumes the parent is a QEC.
 */
static const struct of_device_id bigmac_sbus_match[] = {
	{
		.name = "be",
	},
	{},
};

MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
+
/* Platform driver glue binding the OF match table to probe/remove. */
static struct platform_driver bigmac_sbus_driver = {
	.driver = {
		.name = "sunbmac",
		.owner = THIS_MODULE,
		.of_match_table = bigmac_sbus_match,
	},
	.probe		= bigmac_sbus_probe,
	.remove		= __devexit_p(bigmac_sbus_remove),
};
+
/* Module plumbing: register/unregister the platform driver. */
static int __init bigmac_init(void)
{
	return platform_driver_register(&bigmac_sbus_driver);
}

static void __exit bigmac_exit(void)
{
	platform_driver_unregister(&bigmac_sbus_driver);
}

module_init(bigmac_init);
module_exit(bigmac_exit);
diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h
new file mode 100644 (file)
index 0000000..4943e97
--- /dev/null
@@ -0,0 +1,355 @@
+/* $Id: sunbmac.h,v 1.7 2000/07/11 22:35:22 davem Exp $
+ * sunbmac.h: Defines for the Sun "Big MAC" 100baseT ethernet cards.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#ifndef _SUNBMAC_H
+#define _SUNBMAC_H
+
+/* QEC global registers. */
+#define GLOB_CTRL      0x00UL  /* Control                  */
+#define GLOB_STAT      0x04UL  /* Status                   */
+#define GLOB_PSIZE     0x08UL  /* Packet Size              */
+#define GLOB_MSIZE     0x0cUL  /* Local-mem size (64K)     */
+#define GLOB_RSIZE     0x10UL  /* Receive partition size   */
+#define GLOB_TSIZE     0x14UL  /* Transmit partition size  */
+#define GLOB_REG_SIZE  0x18UL
+
+#define GLOB_CTRL_MMODE       0x40000000 /* MACE qec mode            */
+#define GLOB_CTRL_BMODE       0x10000000 /* BigMAC qec mode          */
+#define GLOB_CTRL_EPAR        0x00000020 /* Enable parity            */
+#define GLOB_CTRL_ACNTRL      0x00000018 /* SBUS arbitration control */
+#define GLOB_CTRL_B64         0x00000004 /* 64 byte dvma bursts      */
+#define GLOB_CTRL_B32         0x00000002 /* 32 byte dvma bursts      */
+#define GLOB_CTRL_B16         0x00000000 /* 16 byte dvma bursts      */
+#define GLOB_CTRL_RESET       0x00000001 /* Reset the QEC            */
+
+#define GLOB_STAT_TX          0x00000008 /* BigMAC Transmit IRQ      */
+#define GLOB_STAT_RX          0x00000004 /* BigMAC Receive IRQ       */
+#define GLOB_STAT_BM          0x00000002 /* BigMAC Global IRQ        */
+#define GLOB_STAT_ER          0x00000001 /* BigMAC Error IRQ         */
+
+#define GLOB_PSIZE_2048       0x00       /* 2k packet size           */
+#define GLOB_PSIZE_4096       0x01       /* 4k packet size           */
+#define GLOB_PSIZE_6144       0x10       /* 6k packet size           */
+#define GLOB_PSIZE_8192       0x11       /* 8k packet size           */
+
+/* QEC BigMAC channel registers. */
+#define CREG_CTRL      0x00UL  /* Control                   */
+#define CREG_STAT      0x04UL  /* Status                    */
+#define CREG_RXDS      0x08UL  /* RX descriptor ring ptr    */
+#define CREG_TXDS      0x0cUL  /* TX descriptor ring ptr    */
+#define CREG_RIMASK    0x10UL  /* RX Interrupt Mask         */
+#define CREG_TIMASK    0x14UL  /* TX Interrupt Mask         */
+#define CREG_QMASK     0x18UL  /* QEC Error Interrupt Mask  */
+#define CREG_BMASK     0x1cUL  /* BigMAC Error Interrupt Mask*/
+#define CREG_RXWBUFPTR 0x20UL  /* Local memory rx write ptr */
+#define CREG_RXRBUFPTR 0x24UL  /* Local memory rx read ptr  */
+#define CREG_TXWBUFPTR 0x28UL  /* Local memory tx write ptr */
+#define CREG_TXRBUFPTR 0x2cUL  /* Local memory tx read ptr  */
+#define CREG_CCNT      0x30UL  /* Collision Counter         */
+#define CREG_REG_SIZE  0x34UL
+
+#define CREG_CTRL_TWAKEUP     0x00000001  /* Transmitter Wakeup, 'go'. */
+
+#define CREG_STAT_BERROR      0x80000000  /* BigMAC error              */
+#define CREG_STAT_TXIRQ       0x00200000  /* Transmit Interrupt        */
+#define CREG_STAT_TXDERROR    0x00080000  /* TX Descriptor is bogus    */
+#define CREG_STAT_TXLERR      0x00040000  /* Late Transmit Error       */
+#define CREG_STAT_TXPERR      0x00020000  /* Transmit Parity Error     */
+#define CREG_STAT_TXSERR      0x00010000  /* Transmit SBUS error ack   */
+#define CREG_STAT_RXIRQ       0x00000020  /* Receive Interrupt         */
+#define CREG_STAT_RXDROP      0x00000010  /* Dropped a RX'd packet     */
+#define CREG_STAT_RXSMALL     0x00000008  /* Receive buffer too small  */
+#define CREG_STAT_RXLERR      0x00000004  /* Receive Late Error        */
+#define CREG_STAT_RXPERR      0x00000002  /* Receive Parity Error      */
+#define CREG_STAT_RXSERR      0x00000001  /* Receive SBUS Error ACK    */
+
+#define CREG_STAT_ERRORS      (CREG_STAT_BERROR|CREG_STAT_TXDERROR|CREG_STAT_TXLERR|   \
+                               CREG_STAT_TXPERR|CREG_STAT_TXSERR|CREG_STAT_RXDROP|     \
+                               CREG_STAT_RXSMALL|CREG_STAT_RXLERR|CREG_STAT_RXPERR|    \
+                               CREG_STAT_RXSERR)
+
+#define CREG_QMASK_TXDERROR   0x00080000  /* TXD error                 */
+#define CREG_QMASK_TXLERR     0x00040000  /* TX late error             */
+#define CREG_QMASK_TXPERR     0x00020000  /* TX parity error           */
+#define CREG_QMASK_TXSERR     0x00010000  /* TX sbus error ack         */
+#define CREG_QMASK_RXDROP     0x00000010  /* RX drop                   */
+#define CREG_QMASK_RXBERROR   0x00000008  /* RX buffer error           */
+#define CREG_QMASK_RXLEERR    0x00000004  /* RX late error             */
+#define CREG_QMASK_RXPERR     0x00000002  /* RX parity error           */
+#define CREG_QMASK_RXSERR     0x00000001  /* RX sbus error ack         */
+
+/* BIGMAC core registers */
+#define BMAC_XIFCFG    0x000UL /* XIF config register                */
+       /* 0x004-->0x0fc, reserved */
+#define BMAC_STATUS    0x100UL /* Status register, clear on read     */
+#define BMAC_IMASK     0x104UL /* Interrupt mask register            */
+       /* 0x108-->0x204, reserved */
+#define BMAC_TXSWRESET 0x208UL /* Transmitter software reset         */
+#define BMAC_TXCFG     0x20cUL /* Transmitter config register        */
+#define BMAC_IGAP1     0x210UL /* Inter-packet gap 1                 */
+#define BMAC_IGAP2     0x214UL /* Inter-packet gap 2                 */
+#define BMAC_ALIMIT    0x218UL /* Transmit attempt limit             */
+#define BMAC_STIME     0x21cUL /* Transmit slot time                 */
+#define BMAC_PLEN      0x220UL /* Size of transmit preamble          */
+#define BMAC_PPAT      0x224UL /* Pattern for transmit preamble      */
+#define BMAC_TXDELIM   0x228UL /* Transmit delimiter                 */
+#define BMAC_JSIZE     0x22cUL /* Toe jam...                         */
+#define BMAC_TXPMAX    0x230UL /* Transmit max pkt size              */
+#define BMAC_TXPMIN    0x234UL /* Transmit min pkt size              */
+#define BMAC_PATTEMPT  0x238UL /* Count of transmit peak attempts    */
+#define BMAC_DTCTR     0x23cUL /* Transmit defer timer               */
+#define BMAC_NCCTR     0x240UL /* Transmit normal-collision counter  */
+#define BMAC_FCCTR     0x244UL /* Transmit first-collision counter   */
+#define BMAC_EXCTR     0x248UL /* Transmit excess-collision counter  */
+#define BMAC_LTCTR     0x24cUL /* Transmit late-collision counter    */
+#define BMAC_RSEED     0x250UL /* Transmit random number seed        */
+#define BMAC_TXSMACHINE        0x254UL /* Transmit state machine             */
+       /* 0x258-->0x304, reserved */
+#define BMAC_RXSWRESET 0x308UL /* Receiver software reset            */
+#define BMAC_RXCFG     0x30cUL /* Receiver config register           */
+#define BMAC_RXPMAX    0x310UL /* Receive max pkt size               */
+#define BMAC_RXPMIN    0x314UL /* Receive min pkt size               */
+#define BMAC_MACADDR2  0x318UL /* Ether address register 2           */
+#define BMAC_MACADDR1  0x31cUL /* Ether address register 1           */
+#define BMAC_MACADDR0  0x320UL /* Ether address register 0           */
+#define BMAC_FRCTR     0x324UL /* Receive frame receive counter      */
+#define BMAC_GLECTR    0x328UL /* Receive giant-length error counter */
+#define BMAC_UNALECTR  0x32cUL /* Receive unaligned error counter    */
+#define BMAC_RCRCECTR  0x330UL /* Receive CRC error counter          */
+#define BMAC_RXSMACHINE        0x334UL /* Receiver state machine             */
+#define BMAC_RXCVALID  0x338UL /* Receiver code violation            */
+       /* 0x33c, reserved */
+#define BMAC_HTABLE3   0x340UL /* Hash table 3                       */
+#define BMAC_HTABLE2   0x344UL /* Hash table 2                       */
+#define BMAC_HTABLE1   0x348UL /* Hash table 1                       */
+#define BMAC_HTABLE0   0x34cUL /* Hash table 0                       */
+#define BMAC_AFILTER2  0x350UL /* Address filter 2                   */
+#define BMAC_AFILTER1  0x354UL /* Address filter 1                   */
+#define BMAC_AFILTER0  0x358UL /* Address filter 0                   */
+#define BMAC_AFMASK    0x35cUL /* Address filter mask                */
+#define BMAC_REG_SIZE  0x360UL
+
+/* BigMac XIF config register. */
+#define BIGMAC_XCFG_ODENABLE   0x00000001 /* Output driver enable                     */
+#define BIGMAC_XCFG_RESV       0x00000002 /* Reserved, write always as 1              */
+#define BIGMAC_XCFG_MLBACK     0x00000004 /* Loopback-mode MII enable                 */
+#define BIGMAC_XCFG_SMODE      0x00000008 /* Enable serial mode                       */
+
+/* BigMAC status register. */
+#define BIGMAC_STAT_GOTFRAME   0x00000001 /* Received a frame                         */
+#define BIGMAC_STAT_RCNTEXP    0x00000002 /* Receive frame counter expired            */
+#define BIGMAC_STAT_ACNTEXP    0x00000004 /* Align-error counter expired              */
+#define BIGMAC_STAT_CCNTEXP    0x00000008 /* CRC-error counter expired                */
+#define BIGMAC_STAT_LCNTEXP    0x00000010 /* Length-error counter expired             */
+#define BIGMAC_STAT_RFIFOVF    0x00000020 /* Receive FIFO overflow                    */
+#define BIGMAC_STAT_CVCNTEXP   0x00000040 /* Code-violation counter expired           */
+#define BIGMAC_STAT_SENTFRAME  0x00000100 /* Transmitted a frame                      */
+#define BIGMAC_STAT_TFIFO_UND  0x00000200 /* Transmit FIFO underrun                   */
+#define BIGMAC_STAT_MAXPKTERR  0x00000400 /* Max-packet size error                    */
+#define BIGMAC_STAT_NCNTEXP    0x00000800 /* Normal-collision counter expired         */
+#define BIGMAC_STAT_ECNTEXP    0x00001000 /* Excess-collision counter expired         */
+#define BIGMAC_STAT_LCCNTEXP   0x00002000 /* Late-collision counter expired           */
+#define BIGMAC_STAT_FCNTEXP    0x00004000 /* First-collision counter expired          */
+#define BIGMAC_STAT_DTIMEXP    0x00008000 /* Defer-timer expired                      */
+
+/* BigMAC interrupt mask register. */
+#define BIGMAC_IMASK_GOTFRAME  0x00000001 /* Received a frame                         */
+#define BIGMAC_IMASK_RCNTEXP   0x00000002 /* Receive frame counter expired            */
+#define BIGMAC_IMASK_ACNTEXP   0x00000004 /* Align-error counter expired              */
+#define BIGMAC_IMASK_CCNTEXP   0x00000008 /* CRC-error counter expired                */
+#define BIGMAC_IMASK_LCNTEXP   0x00000010 /* Length-error counter expired             */
+#define BIGMAC_IMASK_RFIFOVF   0x00000020 /* Receive FIFO overflow                    */
+#define BIGMAC_IMASK_CVCNTEXP  0x00000040 /* Code-violation counter expired           */
+#define BIGMAC_IMASK_SENTFRAME 0x00000100 /* Transmitted a frame                      */
+#define BIGMAC_IMASK_TFIFO_UND 0x00000200 /* Transmit FIFO underrun                   */
+#define BIGMAC_IMASK_MAXPKTERR 0x00000400 /* Max-packet size error                    */
+#define BIGMAC_IMASK_NCNTEXP   0x00000800 /* Normal-collision counter expired         */
+#define BIGMAC_IMASK_ECNTEXP   0x00001000 /* Excess-collision counter expired         */
+#define BIGMAC_IMASK_LCCNTEXP  0x00002000 /* Late-collision counter expired           */
+#define BIGMAC_IMASK_FCNTEXP   0x00004000 /* First-collision counter expired          */
+#define BIGMAC_IMASK_DTIMEXP   0x00008000 /* Defer-timer expired                      */
+
+/* BigMac transmit config register. */
+#define BIGMAC_TXCFG_ENABLE    0x00000001 /* Enable the transmitter                   */
+#define BIGMAC_TXCFG_FIFO      0x00000010 /* Default tx fthresh...                    */
+#define BIGMAC_TXCFG_SMODE     0x00000020 /* Enable slow transmit mode                */
+#define BIGMAC_TXCFG_CIGN      0x00000040 /* Ignore transmit collisions               */
+#define BIGMAC_TXCFG_FCSOFF    0x00000080 /* Do not emit FCS                          */
+#define BIGMAC_TXCFG_DBACKOFF  0x00000100 /* Disable backoff                          */
+#define BIGMAC_TXCFG_FULLDPLX  0x00000200 /* Enable full-duplex                       */
+
+/* BigMac receive config register. */
+#define BIGMAC_RXCFG_ENABLE    0x00000001 /* Enable the receiver                      */
+#define BIGMAC_RXCFG_FIFO      0x0000000e /* Default rx fthresh...                    */
+#define BIGMAC_RXCFG_PSTRIP    0x00000020 /* Pad byte strip enable                    */
+#define BIGMAC_RXCFG_PMISC     0x00000040 /* Enable promiscuous mode                   */
+#define BIGMAC_RXCFG_DERR      0x00000080 /* Disable error checking                   */
+#define BIGMAC_RXCFG_DCRCS     0x00000100 /* Disable CRC stripping                    */
+#define BIGMAC_RXCFG_ME        0x00000200 /* Receive packets addressed to me          */
+#define BIGMAC_RXCFG_PGRP      0x00000400 /* Enable promisc group mode                */
+#define BIGMAC_RXCFG_HENABLE   0x00000800 /* Enable the hash filter                   */
+#define BIGMAC_RXCFG_AENABLE   0x00001000 /* Enable the address filter                */
+
+/* The BigMAC PHY transceiver.  Not nearly as sophisticated as the happy meal
+ * one.  But it does have the "bit banger", oh baby.
+ */
+#define TCVR_TPAL      0x00UL
+#define TCVR_MPAL      0x04UL
+#define TCVR_REG_SIZE  0x08UL
+
+/* Frame commands. */
+#define FRAME_WRITE           0x50020000
+#define FRAME_READ            0x60020000
+
+/* Tranceiver registers. */
+#define TCVR_PAL_SERIAL       0x00000001 /* Enable serial mode              */
+#define TCVR_PAL_EXTLBACK     0x00000002 /* Enable external loopback        */
+#define TCVR_PAL_MSENSE       0x00000004 /* Media sense                     */
+#define TCVR_PAL_LTENABLE     0x00000008 /* Link test enable                */
+#define TCVR_PAL_LTSTATUS     0x00000010 /* Link test status  (P1 only)     */
+
+/* Management PAL. */
+#define MGMT_PAL_DCLOCK       0x00000001 /* Data clock                      */
+#define MGMT_PAL_OENAB        0x00000002 /* Output enabler                  */
+#define MGMT_PAL_MDIO         0x00000004 /* MDIO Data/attached              */
+#define MGMT_PAL_TIMEO        0x00000008 /* Transmit enable timeout error   */
+#define MGMT_PAL_EXT_MDIO     MGMT_PAL_MDIO
+#define MGMT_PAL_INT_MDIO     MGMT_PAL_TIMEO
+
+/* Here are some PHY addresses. */
+#define BIGMAC_PHY_EXTERNAL   0 /* External transceiver */
+#define BIGMAC_PHY_INTERNAL   1 /* Internal transceiver */
+
+/* PHY registers */
+#define BIGMAC_BMCR           0x00 /* Basic mode control register      */
+#define BIGMAC_BMSR           0x01 /* Basic mode status register       */
+
+/* BMCR bits */
+#define BMCR_ISOLATE            0x0400  /* Disconnect DP83840 from MII */
+#define BMCR_PDOWN              0x0800  /* Powerdown the DP83840       */
+#define BMCR_ANENABLE           0x1000  /* Enable auto negotiation     */
+#define BMCR_SPEED100           0x2000  /* Select 100Mbps              */
+#define BMCR_LOOPBACK           0x4000  /* TXD loopback bits           */
+#define BMCR_RESET              0x8000  /* Reset the DP83840           */
+
+/* BMSR bits */
+#define BMSR_ERCAP              0x0001  /* Ext-reg capability          */
+#define BMSR_JCD                0x0002  /* Jabber detected             */
+#define BMSR_LSTATUS            0x0004  /* Link status                 */
+
+/* Ring descriptors and such, same as Quad Ethernet. */
/* Hardware RX descriptor: status/flags word plus DVMA buffer address. */
struct be_rxd {
	u32 rx_flags;	/* RXD_* ownership/status bits and length */
	u32 rx_addr;	/* DVMA address of the receive buffer */
};

#define RXD_OWN      0x80000000 /* Ownership.      */
#define RXD_UPDATE   0x10000000 /* Being Updated?  */
#define RXD_LENGTH   0x000007ff /* Packet Length.  */

/* Hardware TX descriptor: control/flags word plus DVMA buffer address. */
struct be_txd {
	u32 tx_flags;	/* TXD_* ownership/SOP/EOP bits and length */
	u32 tx_addr;	/* DVMA address of the transmit buffer */
};

#define TXD_OWN      0x80000000 /* Ownership.      */
#define TXD_SOP      0x40000000 /* Start Of Packet */
#define TXD_EOP      0x20000000 /* End Of Packet   */
#define TXD_UPDATE   0x10000000 /* Being Updated?  */
#define TXD_LENGTH   0x000007ff /* Packet Length.  */
+
+#define TX_RING_MAXSIZE   256
+#define RX_RING_MAXSIZE   256
+
+#define TX_RING_SIZE      256
+#define RX_RING_SIZE      256
+
+#define NEXT_RX(num)       (((num) + 1) & (RX_RING_SIZE - 1))
+#define NEXT_TX(num)       (((num) + 1) & (TX_RING_SIZE - 1))
+#define PREV_RX(num)       (((num) - 1) & (RX_RING_SIZE - 1))
+#define PREV_TX(num)       (((num) - 1) & (TX_RING_SIZE - 1))
+
+#define TX_BUFFS_AVAIL(bp)                                    \
+        (((bp)->tx_old <= (bp)->tx_new) ?                     \
+         (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new :  \
+                           (bp)->tx_old - (bp)->tx_new - 1)
+
+
+#define RX_COPY_THRESHOLD  256
+#define RX_BUF_ALLOC_SIZE  (ETH_FRAME_LEN + (64 * 3))
+
+struct bmac_init_block {
+       struct be_rxd be_rxd[RX_RING_MAXSIZE];
+       struct be_txd be_txd[TX_RING_MAXSIZE];
+};
+
+#define bib_offset(mem, elem) \
+((__u32)((unsigned long)(&(((struct bmac_init_block *)0)->mem[elem]))))
+
+/* Now software state stuff. */
+enum bigmac_transceiver {
+       external = 0,
+       internal = 1,
+       none     = 2,
+};
+
+/* Timer state engine. */
+enum bigmac_timer_state {
+       ltrywait = 1,  /* Forcing try of all modes, from fastest to slowest. */
+       asleep   = 2,  /* Timer inactive.                                    */
+};
+
/* Per-interface driver state, co-located with the net_device. */
struct bigmac {
	void __iomem	*gregs;	/* QEC Global Registers               */
	void __iomem	*creg;	/* QEC BigMAC Channel Registers       */
	void __iomem	*bregs;	/* BigMAC Registers                   */
	void __iomem	*tregs;	/* BigMAC Transceiver                 */
	struct bmac_init_block	*bmac_block;	/* RX and TX descriptors */
	__u32			 bblock_dvma;	/* RX and TX descriptors */

	spinlock_t		lock;	/* Guards ring state and hw access */

	struct sk_buff		*rx_skbs[RX_RING_SIZE];	/* skb per RX slot */
	struct sk_buff		*tx_skbs[TX_RING_SIZE];	/* skb per TX slot */

	int rx_new, tx_new, rx_old, tx_old;	/* Ring producer/consumer indices */

	int board_rev;				/* BigMAC board revision.             */

	enum bigmac_transceiver tcvr_type;	/* external/internal/none */
	unsigned int		bigmac_bursts;	/* Supported SBUS DMA burst sizes */
	unsigned int		paddr;		/* presumably active PHY address -- confirm */
	unsigned short		sw_bmsr;         /* SW copy of PHY BMSR               */
	unsigned short		sw_bmcr;         /* SW copy of PHY BMCR               */
	struct timer_list	bigmac_timer;	/* Auto-negotiation poll timer */
	enum bigmac_timer_state	timer_state;	/* ltrywait/asleep, see enum above */
	unsigned int		timer_ticks;	/* Ticks elapsed in current state */

	struct net_device_stats	enet_stats;	/* Accumulated interface statistics */
	struct platform_device	*qec_op;	/* Parent QEC controller device */
	struct platform_device	*bigmac_op;	/* This BigMAC's OF device */
	struct net_device	*dev;		/* Backpointer to the netdev */
};
+
+/* We use this to acquire receive skb's that we can DMA directly into. */
+#define ALIGNED_RX_SKB_ADDR(addr) \
+        ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
+
+static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, gfp_t gfp_flags)
+{
+       struct sk_buff *skb;
+
+       skb = alloc_skb(length + 64, gfp_flags);
+       if(skb) {
+               int offset = ALIGNED_RX_SKB_ADDR(skb->data);
+
+               if(offset)
+                       skb_reserve(skb, offset);
+       }
+       return skb;
+}
+
+#endif /* !(_SUNBMAC_H) */
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
new file mode 100644 (file)
index 0000000..ade35dd
--- /dev/null
@@ -0,0 +1,3049 @@
+/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
+ * sungem.c: Sun GEM ethernet driver.
+ *
+ * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
+ *
+ * Support for Apple GMAC and assorted PHYs, WOL, Power Management
+ * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org)
+ * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp.
+ *
+ * NAPI and NETPOLL support
+ * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+
+#ifdef CONFIG_PPC_PMAC
+#include <asm/pci-bridge.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+#endif
+
+#include "sungem_phy.h"
+#include "sungem.h"
+
+/* Stripping FCS is causing problems, disabled for now */
+#undef STRIP_FCS
+
+#define DEFAULT_MSG    (NETIF_MSG_DRV          | \
+                        NETIF_MSG_PROBE        | \
+                        NETIF_MSG_LINK)
+
+#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
+                        SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
+                        SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
+                        SUPPORTED_Pause | SUPPORTED_Autoneg)
+
+#define DRV_NAME       "sungem"
+#define DRV_VERSION    "1.0"
+#define DRV_AUTHOR     "David S. Miller <davem@redhat.com>"
+
+static char version[] __devinitdata =
+        DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define GEM_MODULE_NAME        "gem"
+
+static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
+       { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+
+       /* These models only differ from the original GEM in
+        * that their tx/rx fifos are of a different size and
+        * they only support 10/100 speeds. -DaveM
+        *
+        * Apple's GMAC does support gigabit on machines with
+        * the BCM54xx PHYs. -BenH
+        */
+       { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+       {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
+
/* Issue one MIF (MII management) read frame and poll for completion.
 * Returns the 16-bit register value, or 0xffff (as from a missing PHY)
 * if the frame does not complete within the polling budget.
 */
static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);	/* frame start */
	cmd |= (2 << 28);	/* read opcode (write path uses 1 << 28) */
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	/* Hardware sets the turnaround LSB when the frame completes. */
	while (--limit) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}
+
/* mii-lib style read accessor: resolve the softc from the netdev and
 * delegate to __phy_read().
 */
static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = netdev_priv(dev);

	return __phy_read(gp, mii_id, reg);
}
+
/* Read @reg from the PHY address recorded for this chip. */
static inline u16 phy_read(struct gem *gp, int reg)
{
	return __phy_read(gp, gp->mii_phy_addr, reg);
}
+
/* Issue one MIF (MII management) write frame and poll for completion.
 *
 * NOTE(review): a poll timeout is silently ignored (void return), and
 * the post-decrement loop here allows one more iteration than
 * __phy_read()'s pre-decrement -- harmless, but inconsistent.
 */
static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);	/* frame start */
	cmd |= (1 << 28);	/* write opcode (read path uses 2 << 28) */
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	/* Hardware sets the turnaround LSB when the frame completes. */
	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}
+
/* mii-lib style write accessor: resolve the softc from the netdev,
 * truncate to 16 bits, and delegate to __phy_write().
 */
static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	__phy_write(netdev_priv(dev), mii_id, reg, val & 0xffff);
}
+
/* Write @val to @reg at the PHY address recorded for this chip. */
static inline void phy_write(struct gem *gp, int reg, u16 val)
{
	__phy_write(gp, gp->mii_phy_addr, reg, val);
}
+
/* Unmask all interrupt sources except TXDONE (TX completion is polled
 * elsewhere rather than interrupt-driven).
 */
static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}
+
/* Mask every interrupt source; the read-back forces write posting so
 * the mask takes effect before the caller proceeds.
 */
static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
	(void)readl(gp->regs + GREG_IMASK); /* write posting */
}
+
/* Take a reference on the chip's clock cell.  On PowerMac the first
 * reference actually powers the cell up via the platform feature call;
 * elsewhere only the refcount is maintained.
 */
static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}
+
+/* Turn off the chip's clock: drop a reference on the clock "cell".
+ * On PowerMac the last reference powers the GMAC cell down.
+ */
+static void gem_put_cell(struct gem *gp)
+{
+       BUG_ON(gp->cell_enabled <= 0);
+       gp->cell_enabled--;
+#ifdef CONFIG_PPC_PMAC
+       if (gp->cell_enabled == 0) {
+               mb();
+               pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
+               udelay(10);
+       }
+#endif /* CONFIG_PPC_PMAC */
+}
+
+/* Quiesce the netif side: refresh trans_start so the TX watchdog does
+ * not fire while we are stopped, then disable NAPI polling and TX.
+ */
+static inline void gem_netif_stop(struct gem *gp)
+{
+       gp->dev->trans_start = jiffies; /* prevent tx timeout */
+       napi_disable(&gp->napi);
+       netif_tx_disable(gp->dev);
+}
+
+/* Re-enable TX and NAPI polling after gem_netif_stop(). */
+static inline void gem_netif_start(struct gem *gp)
+{
+       /* NOTE: unconditional netif_wake_queue is only
+        * appropriate so long as all callers are assured to
+        * have free tx slots.
+        */
+       netif_wake_queue(gp->dev);
+       napi_enable(&gp->napi);
+}
+
+/* Request an asynchronous chip reset: flag it as pending and queue the
+ * reset work item.
+ */
+static void gem_schedule_reset(struct gem *gp)
+{
+       gp->reset_task_pending = 1;
+       schedule_work(&gp->reset_task);
+}
+
+/* Handle a decoded MIF event.  Currently only logs it when interrupt
+ * debugging is enabled; @reg_val and @changed_bits are unused.
+ */
+static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
+{
+       if (!netif_msg_intr(gp))
+               return;
+
+       printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
+}
+
+/* Service a PCS (SERDES) interrupt: track link-state transitions and
+ * report autonegotiation results.  Returns 1 to request a chip reset
+ * when the link went down with no link timer pending to renegotiate,
+ * 0 otherwise.
+ */
+static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+       u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
+       u32 pcs_miistat;
+
+       if (netif_msg_intr(gp))
+               printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
+                       gp->dev->name, pcs_istat);
+
+       /* Link-status-change is the only PCS event we service. */
+       if (!(pcs_istat & PCS_ISTAT_LSC)) {
+               netdev_err(dev, "PCS irq but no link status change???\n");
+               return 0;
+       }
+
+       /* The link status bit latches on zero, so you must
+        * read it twice in such a case to see a transition
+        * to the link being up.
+        */
+       pcs_miistat = readl(gp->regs + PCS_MIISTAT);
+       if (!(pcs_miistat & PCS_MIISTAT_LS))
+               pcs_miistat |=
+                       (readl(gp->regs + PCS_MIISTAT) &
+                        PCS_MIISTAT_LS);
+
+       if (pcs_miistat & PCS_MIISTAT_ANC) {
+               /* The remote-fault indication is only valid
+                * when autoneg has completed.
+                */
+               if (pcs_miistat & PCS_MIISTAT_RF)
+                       netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
+               else
+                       netdev_info(dev, "PCS AutoNEG complete\n");
+       }
+
+       if (pcs_miistat & PCS_MIISTAT_LS) {
+               netdev_info(dev, "PCS link is now up\n");
+               netif_carrier_on(gp->dev);
+       } else {
+               netdev_info(dev, "PCS link is now down\n");
+               netif_carrier_off(gp->dev);
+               /* If this happens and the link timer is not running,
+                * reset so we re-negotiate.
+                */
+               if (!timer_pending(&gp->link_timer))
+                       return 1;
+       }
+
+       return 0;
+}
+
+/* Service TX MAC status interrupts: log fatal conditions (underrun,
+ * oversize packet) and fold the chip's 16-bit rollover counters into
+ * the netdev statistics.  Always returns 0 — none of these require a
+ * chip reset.
+ */
+static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+       u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);
+
+       if (netif_msg_intr(gp))
+               printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
+                       gp->dev->name, txmac_stat);
+
+       /* Defer timer expiration is quite normal,
+        * don't even log the event.
+        */
+       if ((txmac_stat & MAC_TXSTAT_DTE) &&
+           !(txmac_stat & ~MAC_TXSTAT_DTE))
+               return 0;
+
+       if (txmac_stat & MAC_TXSTAT_URUN) {
+               netdev_err(dev, "TX MAC xmit underrun\n");
+               dev->stats.tx_fifo_errors++;
+       }
+
+       if (txmac_stat & MAC_TXSTAT_MPE) {
+               netdev_err(dev, "TX MAC max packet size error\n");
+               dev->stats.tx_errors++;
+       }
+
+       /* The rest are all cases of one of the 16-bit TX
+        * counters expiring.
+        */
+       if (txmac_stat & MAC_TXSTAT_NCE)
+               dev->stats.collisions += 0x10000;
+
+       if (txmac_stat & MAC_TXSTAT_ECE) {
+               dev->stats.tx_aborted_errors += 0x10000;
+               dev->stats.collisions += 0x10000;
+       }
+
+       if (txmac_stat & MAC_TXSTAT_LCE) {
+               dev->stats.tx_aborted_errors += 0x10000;
+               dev->stats.collisions += 0x10000;
+       }
+
+       /* We do not keep track of MAC_TXSTAT_FCE and
+        * MAC_TXSTAT_PCE events.
+        */
+       return 0;
+}
+
+/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
+ * so we do the following: reset and disable the RX MAC, stop RX DMA,
+ * issue the RX unit reset, refresh the whole RX ring and reprogram the
+ * RX DMA/MAC registers from scratch.
+ *
+ * If any part of the reset goes wrong, we return 1 and that causes the
+ * whole chip to be reset.
+ */
+static int gem_rxmac_reset(struct gem *gp)
+{
+       struct net_device *dev = gp->dev;
+       int limit, i;
+       u64 desc_dma;
+       u32 val;
+
+       /* First, reset & disable MAC RX. */
+       writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
+       for (limit = 0; limit < 5000; limit++) {
+               if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
+                       break;
+               udelay(10);
+       }
+       if (limit == 5000) {
+               netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
+               return 1;
+       }
+
+       writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
+              gp->regs + MAC_RXCFG);
+       for (limit = 0; limit < 5000; limit++) {
+               if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
+                       break;
+               udelay(10);
+       }
+       if (limit == 5000) {
+               netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
+               return 1;
+       }
+
+       /* Second, disable RX DMA. */
+       writel(0, gp->regs + RXDMA_CFG);
+       for (limit = 0; limit < 5000; limit++) {
+               if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
+                       break;
+               udelay(10);
+       }
+       if (limit == 5000) {
+               netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
+               return 1;
+       }
+
+       /* Settle delay before issuing the RX unit reset (5ms busy-wait;
+        * presumably lets in-flight DMA drain — predates this move).
+        */
+       udelay(5000);
+
+       /* Execute RX reset command. */
+       writel(gp->swrst_base | GREG_SWRST_RXRST,
+              gp->regs + GREG_SWRST);
+       for (limit = 0; limit < 5000; limit++) {
+               if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
+                       break;
+               udelay(10);
+       }
+       if (limit == 5000) {
+               netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
+               return 1;
+       }
+
+       /* Refresh the RX ring: hand every descriptor back to the chip. */
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               struct gem_rxd *rxd = &gp->init_block->rxd[i];
+
+               if (gp->rx_skbs[i] == NULL) {
+                       netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
+                       return 1;
+               }
+
+               rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
+       }
+       gp->rx_new = gp->rx_old = 0;
+
+       /* Now we must reprogram the rest of RX unit. */
+       desc_dma = (u64) gp->gblock_dvma;
+       desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
+       writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
+       writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
+       writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
+       val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
+              ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+       writel(val, gp->regs + RXDMA_CFG);
+       if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
+               writel(((5 & RXDMA_BLANK_IPKTS) |
+                       ((8 << 12) & RXDMA_BLANK_ITIME)),
+                      gp->regs + RXDMA_BLANK);
+       else
+               writel(((5 & RXDMA_BLANK_IPKTS) |
+                       ((4 << 12) & RXDMA_BLANK_ITIME)),
+                      gp->regs + RXDMA_BLANK);
+       val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
+       val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
+       writel(val, gp->regs + RXDMA_PTHRESH);
+       val = readl(gp->regs + RXDMA_CFG);
+       writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
+       writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
+       val = readl(gp->regs + MAC_RXCFG);
+       writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+
+       return 0;
+}
+
+/* Service RX MAC status interrupts.  A FIFO overflow triggers an
+ * RX-side reset via gem_rxmac_reset(); the chip's 16-bit rollover
+ * counters are folded into the netdev statistics.  Returns non-zero
+ * when the RX reset failed and the whole chip must be reset.
+ */
+static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+       u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
+       int ret = 0;
+
+       if (netif_msg_intr(gp))
+               printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
+                       gp->dev->name, rxmac_stat);
+
+       if (rxmac_stat & MAC_RXSTAT_OFLW) {
+               u32 smac = readl(gp->regs + MAC_SMACHINE);
+
+               netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
+               dev->stats.rx_over_errors++;
+               dev->stats.rx_fifo_errors++;
+
+               ret = gem_rxmac_reset(gp);
+       }
+
+       /* 16-bit hardware counters rolled over. */
+       if (rxmac_stat & MAC_RXSTAT_ACE)
+               dev->stats.rx_frame_errors += 0x10000;
+
+       if (rxmac_stat & MAC_RXSTAT_CCE)
+               dev->stats.rx_crc_errors += 0x10000;
+
+       if (rxmac_stat & MAC_RXSTAT_LCE)
+               dev->stats.rx_length_errors += 0x10000;
+
+       /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
+        * events.
+        */
+       return ret;
+}
+
+/* Service MAC control-status interrupts (pause frame accounting only).
+ * Always returns 0 (never requires a reset).
+ */
+static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+       u32 mac_cstat = readl(gp->regs + MAC_CSTAT);
+
+       if (netif_msg_intr(gp))
+               printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
+                       gp->dev->name, mac_cstat);
+
+       /* This interrupt is just for pause frame and pause
+        * tracking.  It is useful for diagnostics and debug
+        * but probably by default we will mask these events.
+        */
+       if (mac_cstat & MAC_CSTAT_PS)
+               gp->pause_entered++;
+
+       if (mac_cstat & MAC_CSTAT_PRCV)
+               gp->pause_last_time_recvd = (mac_cstat >> 16);
+
+       return 0;
+}
+
+/* Service a MIF interrupt: decode the polled register value and the
+ * changed-bits mask from MIF_STATUS and hand them to
+ * gem_handle_mif_event().  Always returns 0.
+ */
+static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+       u32 mif_status = readl(gp->regs + MIF_STATUS);
+       u32 reg_val, changed_bits;
+
+       reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
+       changed_bits = (mif_status & MIF_STATUS_STAT);
+
+       gem_handle_mif_event(gp, reg_val, changed_bits);
+
+       return 0;
+}
+
+/* Service a PCI error interrupt: decode GREG_PCIESTAT (fully decoded
+ * only on the Sun GEM variant), then interrogate PCI config-space
+ * status for "other" errors and write the error bits back to clear
+ * them.  Always returns 1 — any PCI error warrants a full chip reset.
+ */
+static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+       u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);
+
+       if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
+           gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+               netdev_err(dev, "PCI error [%04x]", pci_estat);
+
+               if (pci_estat & GREG_PCIESTAT_BADACK)
+                       pr_cont(" <No ACK64# during ABS64 cycle>");
+               if (pci_estat & GREG_PCIESTAT_DTRTO)
+                       pr_cont(" <Delayed transaction timeout>");
+               if (pci_estat & GREG_PCIESTAT_OTHER)
+                       pr_cont(" <other>");
+               pr_cont("\n");
+       } else {
+               /* Non-GEM variants: force the config-space interrogation
+                * below, since the estat decode above does not apply.
+                */
+               pci_estat |= GREG_PCIESTAT_OTHER;
+               netdev_err(dev, "PCI error\n");
+       }
+
+       if (pci_estat & GREG_PCIESTAT_OTHER) {
+               u16 pci_cfg_stat;
+
+               /* Interrogate PCI config space for the
+                * true cause.
+                */
+               pci_read_config_word(gp->pdev, PCI_STATUS,
+                                    &pci_cfg_stat);
+               netdev_err(dev, "Read PCI cfg space status [%04x]\n",
+                          pci_cfg_stat);
+               if (pci_cfg_stat & PCI_STATUS_PARITY)
+                       netdev_err(dev, "PCI parity error detected\n");
+               if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
+                       netdev_err(dev, "PCI target abort\n");
+               if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
+                       netdev_err(dev, "PCI master acks target abort\n");
+               if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
+                       netdev_err(dev, "PCI master abort\n");
+               if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
+                       netdev_err(dev, "PCI system error SERR#\n");
+               if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
+                       netdev_err(dev, "PCI parity error\n");
+
+               /* Write the error bits back to clear them. */
+               pci_cfg_stat &= (PCI_STATUS_PARITY |
+                                PCI_STATUS_SIG_TARGET_ABORT |
+                                PCI_STATUS_REC_TARGET_ABORT |
+                                PCI_STATUS_REC_MASTER_ABORT |
+                                PCI_STATUS_SIG_SYSTEM_ERROR |
+                                PCI_STATUS_DETECTED_PARITY);
+               pci_write_config_word(gp->pdev,
+                                     PCI_STATUS, pci_cfg_stat);
+       }
+
+       /* For all PCI errors, we should reset the chip. */
+       return 1;
+}
+
+/* All non-normal interrupt conditions get serviced here.
+ * Returns non-zero if we should just exit the interrupt
+ * handler right now (ie. if we reset the card which invalidates
+ * all of the other original irq status bits).
+ */
+static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+       if (gem_status & GREG_STAT_RXNOBUF) {
+               /* Frame arrived, no free RX buffers available. */
+               if (netif_msg_rx_err(gp))
+                       printk(KERN_DEBUG "%s: no buffer for rx frame\n",
+                               gp->dev->name);
+               dev->stats.rx_dropped++;
+       }
+
+       if (gem_status & GREG_STAT_RXTAGERR) {
+               /* corrupt RX tag framing */
+               if (netif_msg_rx_err(gp))
+                       printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
+                               gp->dev->name);
+               dev->stats.rx_errors++;
+
+               return 1;
+       }
+
+       /* Dispatch to the per-unit handlers; each returns non-zero
+        * when a full chip reset is required.
+        */
+       if (gem_status & GREG_STAT_PCS) {
+               if (gem_pcs_interrupt(dev, gp, gem_status))
+                       return 1;
+       }
+
+       if (gem_status & GREG_STAT_TXMAC) {
+               if (gem_txmac_interrupt(dev, gp, gem_status))
+                       return 1;
+       }
+
+       if (gem_status & GREG_STAT_RXMAC) {
+               if (gem_rxmac_interrupt(dev, gp, gem_status))
+                       return 1;
+       }
+
+       if (gem_status & GREG_STAT_MAC) {
+               if (gem_mac_interrupt(dev, gp, gem_status))
+                       return 1;
+       }
+
+       if (gem_status & GREG_STAT_MIF) {
+               if (gem_mif_interrupt(dev, gp, gem_status))
+                       return 1;
+       }
+
+       if (gem_status & GREG_STAT_PCIERR) {
+               if (gem_pci_interrupt(dev, gp, gem_status))
+                       return 1;
+       }
+
+       /* Everything was handled without needing a chip reset. */
+       return 0;
+}
+
+/* Reclaim completed TX descriptors up to the chip's completion index
+ * (carried in @gem_status), unmap their DMA buffers, free the skbs and
+ * wake the TX queue if it was stopped and enough slots became free.
+ * Called from NAPI poll.
+ */
+static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
+{
+       int entry, limit;
+
+       entry = gp->tx_old;
+       limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
+       while (entry != limit) {
+               struct sk_buff *skb;
+               struct gem_txd *txd;
+               dma_addr_t dma_addr;
+               u32 dma_len;
+               int frag;
+
+               if (netif_msg_tx_done(gp))
+                       printk(KERN_DEBUG "%s: tx done, slot %d\n",
+                               gp->dev->name, entry);
+               skb = gp->tx_skbs[entry];
+               /* A fragmented skb spans several descriptors; only
+                * reclaim it once its last fragment has completed.
+                */
+               if (skb_shinfo(skb)->nr_frags) {
+                       int last = entry + skb_shinfo(skb)->nr_frags;
+                       int walk = entry;
+                       int incomplete = 0;
+
+                       last &= (TX_RING_SIZE - 1);
+                       for (;;) {
+                               walk = NEXT_TX(walk);
+                               if (walk == limit)
+                                       incomplete = 1;
+                               if (walk == last)
+                                       break;
+                       }
+                       if (incomplete)
+                               break;
+               }
+               gp->tx_skbs[entry] = NULL;
+               dev->stats.tx_bytes += skb->len;
+
+               for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+                       txd = &gp->init_block->txd[entry];
+
+                       dma_addr = le64_to_cpu(txd->buffer);
+                       dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;
+
+                       pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
+                       entry = NEXT_TX(entry);
+               }
+
+               dev->stats.tx_packets++;
+               dev_kfree_skb(skb);
+       }
+       gp->tx_old = entry;
+
+       /* Need to make the tx_old update visible to gem_start_xmit()
+        * before checking for netif_queue_stopped().  Without the
+        * memory barrier, there is a small possibility that gem_start_xmit()
+        * will miss it and cause the queue to be stopped forever.
+        */
+       smp_mb();
+
+       if (unlikely(netif_queue_stopped(dev) &&
+                    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
+               struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+
+               __netif_tx_lock(txq, smp_processor_id());
+               if (netif_queue_stopped(dev) &&
+                   TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
+                       netif_wake_queue(dev);
+               __netif_tx_unlock(txq);
+       }
+}
+
+/* Return RX descriptors up to (not including) @limit to the chip.
+ * Ownership is handed back in aligned clusters of 4 descriptors; the
+ * RX kick register is written once at the end with the index of the
+ * last fully refreshed cluster.
+ */
+static __inline__ void gem_post_rxds(struct gem *gp, int limit)
+{
+       int cluster_start, curr, count, kick;
+
+       cluster_start = curr = (gp->rx_new & ~(4 - 1));
+       count = 0;
+       kick = -1;
+       wmb();
+       while (curr != limit) {
+               curr = NEXT_RX(curr);
+               if (++count == 4) {
+                       struct gem_rxd *rxd =
+                               &gp->init_block->rxd[cluster_start];
+                       for (;;) {
+                               rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
+                               rxd++;
+                               cluster_start = NEXT_RX(cluster_start);
+                               if (cluster_start == curr)
+                                       break;
+                       }
+                       kick = curr;
+                       count = 0;
+               }
+       }
+       if (kick >= 0) {
+               /* Descriptor writes must be visible before the kick. */
+               mb();
+               writel(kick, gp->regs + RXDMA_KICK);
+       }
+}
+
+/* Bytes needed to advance @addr to the next 64-byte boundary. */
+#define ALIGNED_RX_SKB_ADDR(addr) \
+        ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
+/* Allocate an RX skb with 64 bytes of slack and reserve just enough to
+ * align skb->data to a 64-byte boundary (presumably an RX DMA
+ * alignment requirement of the chip — predates this move).
+ */
+static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
+                                               gfp_t gfp_flags)
+{
+       struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
+
+       if (likely(skb)) {
+               unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
+               skb_reserve(skb, offset);
+               skb->dev = dev;
+       }
+       return skb;
+}
+
+/* Process received frames from the RX ring, at most @work_to_do of
+ * them.  Frames larger than RX_COPY_THRESHOLD are handed up in their
+ * original DMA buffer (which is replaced with a freshly mapped skb);
+ * smaller frames are copied so the ring buffer can be reused in place.
+ * Consumed descriptors are returned to the chip via gem_post_rxds().
+ * Returns the number of frames processed.  Called from NAPI poll.
+ */
+static int gem_rx(struct gem *gp, int work_to_do)
+{
+       struct net_device *dev = gp->dev;
+       int entry, drops, work_done = 0;
+       u32 done;
+       __sum16 csum;
+
+       if (netif_msg_rx_status(gp))
+               printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
+                       gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);
+
+       entry = gp->rx_new;
+       drops = 0;
+       done = readl(gp->regs + RXDMA_DONE);
+       for (;;) {
+               struct gem_rxd *rxd = &gp->init_block->rxd[entry];
+               struct sk_buff *skb;
+               u64 status = le64_to_cpu(rxd->status_word);
+               dma_addr_t dma_addr;
+               int len;
+
+               if ((status & RXDCTRL_OWN) != 0)
+                       break;
+
+               if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
+                       break;
+
+               /* When writing back RX descriptor, GEM writes status
+                * then buffer address, possibly in separate transactions.
+                * If we don't wait for the chip to write both, we could
+                * post a new buffer to this descriptor then have GEM spam
+                * on the buffer address.  We sync on the RX completion
+                * register to prevent this from happening.
+                */
+               if (entry == done) {
+                       done = readl(gp->regs + RXDMA_DONE);
+                       if (entry == done)
+                               break;
+               }
+
+               /* We can now account for the work we're about to do */
+               work_done++;
+
+               skb = gp->rx_skbs[entry];
+
+               len = (status & RXDCTRL_BUFSZ) >> 16;
+               if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
+                       dev->stats.rx_errors++;
+                       if (len < ETH_ZLEN)
+                               dev->stats.rx_length_errors++;
+                       /* Test the status word, not len: RXDCTRL_BAD lies
+                        * above the extracted length bits, so the former
+                        * "len & RXDCTRL_BAD" could never be true and CRC
+                        * errors went uncounted.
+                        */
+                       if (status & RXDCTRL_BAD)
+                               dev->stats.rx_crc_errors++;
+
+                       /* We'll just return it to GEM. */
+               drop_it:
+                       dev->stats.rx_dropped++;
+                       goto next;
+               }
+
+               dma_addr = le64_to_cpu(rxd->buffer);
+               if (len > RX_COPY_THRESHOLD) {
+                       struct sk_buff *new_skb;
+
+                       new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
+                       if (new_skb == NULL) {
+                               drops++;
+                               goto drop_it;
+                       }
+                       pci_unmap_page(gp->pdev, dma_addr,
+                                      RX_BUF_ALLOC_SIZE(gp),
+                                      PCI_DMA_FROMDEVICE);
+                       gp->rx_skbs[entry] = new_skb;
+                       skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
+                       rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
+                                                              virt_to_page(new_skb->data),
+                                                              offset_in_page(new_skb->data),
+                                                              RX_BUF_ALLOC_SIZE(gp),
+                                                              PCI_DMA_FROMDEVICE));
+                       skb_reserve(new_skb, RX_OFFSET);
+
+                       /* Trim the original skb for the netif. */
+                       skb_trim(skb, len);
+               } else {
+                       struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
+
+                       if (copy_skb == NULL) {
+                               drops++;
+                               goto drop_it;
+                       }
+
+                       skb_reserve(copy_skb, 2);
+                       skb_put(copy_skb, len);
+                       pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+                       skb_copy_from_linear_data(skb, copy_skb->data, len);
+                       pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+
+                       /* We'll reuse the original ring buffer. */
+                       skb = copy_skb;
+               }
+
+               /* Hardware provides the ones-complement TCP checksum. */
+               csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+               skb->csum = csum_unfold(csum);
+               skb->ip_summed = CHECKSUM_COMPLETE;
+               skb->protocol = eth_type_trans(skb, gp->dev);
+
+               napi_gro_receive(&gp->napi, skb);
+
+               dev->stats.rx_packets++;
+               dev->stats.rx_bytes += len;
+
+       next:
+               entry = NEXT_RX(entry);
+       }
+
+       gem_post_rxds(gp, entry);
+
+       gp->rx_new = entry;
+
+       if (drops)
+               netdev_info(gp->dev, "Memory squeeze, deferring packet\n");
+
+       return work_done;
+}
+
+/* NAPI poll callback: handle abnormal chip status (under the TX queue
+ * lock, since the RX reset path must not race the link timer), reclaim
+ * TX completions, then process RX up to @budget.  Loops while the chip
+ * reports further NAPI-relevant status and re-enables interrupts once
+ * fully drained.
+ */
+static int gem_poll(struct napi_struct *napi, int budget)
+{
+       struct gem *gp = container_of(napi, struct gem, napi);
+       struct net_device *dev = gp->dev;
+       int work_done;
+
+       work_done = 0;
+       do {
+               /* Handle anomalies */
+               if (unlikely(gp->status & GREG_STAT_ABNORMAL)) {
+                       struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+                       int reset;
+
+                       /* We run the abnormal interrupt handling code with
+                        * the Tx lock. It only resets the Rx portion of the
+                        * chip, but we need to guard it against DMA being
+                        * restarted by the link poll timer
+                        */
+                       __netif_tx_lock(txq, smp_processor_id());
+                       reset = gem_abnormal_irq(dev, gp, gp->status);
+                       __netif_tx_unlock(txq);
+                       if (reset) {
+                               gem_schedule_reset(gp);
+                               napi_complete(napi);
+                               return work_done;
+                       }
+               }
+
+               /* Run TX completion thread */
+               gem_tx(dev, gp, gp->status);
+
+               /* Run RX thread. We don't use any locking here,
+                * code willing to do bad things - like cleaning the
+                * rx ring - must call napi_disable(), which
+                * schedule_timeout()'s if polling is already disabled.
+                */
+               work_done += gem_rx(gp, budget - work_done);
+
+               /* Budget exhausted: stay scheduled, keep IRQs masked. */
+               if (work_done >= budget)
+                       return work_done;
+
+               gp->status = readl(gp->regs + GREG_STAT);
+       } while (gp->status & GREG_STAT_NAPI);
+
+       napi_complete(napi);
+       gem_enable_ints(gp);
+
+       return work_done;
+}
+
+/* Hard interrupt handler: latch the chip status, mask further chip
+ * interrupts and hand processing off to NAPI.  A zero status means the
+ * interrupt was not ours: undo the NAPI scheduling (napi_enable clears
+ * what napi_schedule_prep set) and report IRQ_NONE.
+ */
+static irqreturn_t gem_interrupt(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct gem *gp = netdev_priv(dev);
+
+       if (napi_schedule_prep(&gp->napi)) {
+               u32 gem_status = readl(gp->regs + GREG_STAT);
+
+               if (unlikely(gem_status == 0)) {
+                       napi_enable(&gp->napi);
+                       return IRQ_NONE;
+               }
+               if (netif_msg_intr(gp))
+                       printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n",
+                              gp->dev->name, gem_status);
+
+               gp->status = gem_status;
+               gem_disable_ints(gp);
+               __napi_schedule(&gp->napi);
+       }
+
+       /* If polling was disabled at the time we received that
+        * interrupt, we may return IRQ_HANDLED here while we
+        * should return IRQ_NONE. No big deal...
+        */
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Netpoll hook: run the interrupt handler with the IRQ line disabled
+ * so it cannot re-enter.
+ */
+static void gem_poll_controller(struct net_device *dev)
+{
+       struct gem *gp = netdev_priv(dev);
+
+       disable_irq(gp->pdev->irq);
+       gem_interrupt(gp->pdev->irq, dev);
+       enable_irq(gp->pdev->irq);
+}
+#endif
+
+/* TX watchdog callback: dump the TX/RX DMA and MAC register state for
+ * diagnosis, then schedule a full chip reset.
+ */
+static void gem_tx_timeout(struct net_device *dev)
+{
+       struct gem *gp = netdev_priv(dev);
+
+       netdev_err(dev, "transmit timed out, resetting\n");
+
+       netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
+                  readl(gp->regs + TXDMA_CFG),
+                  readl(gp->regs + MAC_TXSTAT),
+                  readl(gp->regs + MAC_TXCFG));
+       netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+                  readl(gp->regs + RXDMA_CFG),
+                  readl(gp->regs + MAC_RXSTAT),
+                  readl(gp->regs + MAC_RXCFG));
+
+       gem_schedule_reset(gp);
+}
+
+/* Decide whether the descriptor at @entry should request a TX-done
+ * interrupt: one interrupt per TX_RING_SIZE/2 descriptors.
+ */
+static __inline__ int gem_intme(int entry)
+{
+       return (entry & ((TX_RING_SIZE >> 1) - 1)) == 0;
+}
+
+/* Queue an skb for transmission.  Builds checksum-offload control bits
+ * for CHECKSUM_PARTIAL skbs, fills a single descriptor for linear skbs
+ * or a chain for fragmented ones (the first descriptor is handed to
+ * the chip last to avoid racing the DMA engine), stops the queue when
+ * nearly full, and finally kicks the TX DMA engine.
+ */
+static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
+                                 struct net_device *dev)
+{
+       struct gem *gp = netdev_priv(dev);
+       int entry;
+       u64 ctrl;
+
+       ctrl = 0;
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               const u64 csum_start_off = skb_checksum_start_offset(skb);
+               const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
+
+               ctrl = (TXDCTRL_CENAB |
+                       (csum_start_off << 15) |
+                       (csum_stuff_off << 21));
+       }
+
+       if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
+               /* This is a hard error, log it. */
+               if (!netif_queue_stopped(dev)) {
+                       netif_stop_queue(dev);
+                       netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+               }
+               return NETDEV_TX_BUSY;
+       }
+
+       entry = gp->tx_new;
+       gp->tx_skbs[entry] = skb;
+
+       if (skb_shinfo(skb)->nr_frags == 0) {
+               struct gem_txd *txd = &gp->init_block->txd[entry];
+               dma_addr_t mapping;
+               u32 len;
+
+               len = skb->len;
+               mapping = pci_map_page(gp->pdev,
+                                      virt_to_page(skb->data),
+                                      offset_in_page(skb->data),
+                                      len, PCI_DMA_TODEVICE);
+               ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
+               if (gem_intme(entry))
+                       ctrl |= TXDCTRL_INTME;
+               txd->buffer = cpu_to_le64(mapping);
+               /* Buffer address must be visible before ownership flips. */
+               wmb();
+               txd->control_word = cpu_to_le64(ctrl);
+               entry = NEXT_TX(entry);
+       } else {
+               struct gem_txd *txd;
+               u32 first_len;
+               u64 intme;
+               dma_addr_t first_mapping;
+               int frag, first_entry = entry;
+
+               intme = 0;
+               if (gem_intme(entry))
+                       intme |= TXDCTRL_INTME;
+
+               /* We must give this initial chunk to the device last.
+                * Otherwise we could race with the device.
+                */
+               first_len = skb_headlen(skb);
+               first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
+                                            offset_in_page(skb->data),
+                                            first_len, PCI_DMA_TODEVICE);
+               entry = NEXT_TX(entry);
+
+               for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+                       skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+                       u32 len;
+                       dma_addr_t mapping;
+                       u64 this_ctrl;
+
+                       len = this_frag->size;
+                       mapping = pci_map_page(gp->pdev,
+                                              this_frag->page,
+                                              this_frag->page_offset,
+                                              len, PCI_DMA_TODEVICE);
+                       this_ctrl = ctrl;
+                       if (frag == skb_shinfo(skb)->nr_frags - 1)
+                               this_ctrl |= TXDCTRL_EOF;
+
+                       txd = &gp->init_block->txd[entry];
+                       txd->buffer = cpu_to_le64(mapping);
+                       wmb();
+                       txd->control_word = cpu_to_le64(this_ctrl | len);
+
+                       if (gem_intme(entry))
+                               intme |= TXDCTRL_INTME;
+
+                       entry = NEXT_TX(entry);
+               }
+               txd = &gp->init_block->txd[first_entry];
+               txd->buffer = cpu_to_le64(first_mapping);
+               wmb();
+               txd->control_word =
+                       cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
+       }
+
+       gp->tx_new = entry;
+       if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
+               netif_stop_queue(dev);
+
+               /* netif_stop_queue() must be done before checking
+                * checking tx index in TX_BUFFS_AVAIL() below, because
+                * in gem_tx(), we update tx_old before checking for
+                * netif_queue_stopped().
+                */
+               smp_mb();
+               if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
+                       netif_wake_queue(dev);
+       }
+       if (netif_msg_tx_queued(gp))
+               printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
+                      dev->name, entry, skb->len);
+       mb();
+       writel(gp->tx_new, gp->regs + TXDMA_KICK);
+
+       return NETDEV_TX_OK;
+}
+
+/* Reset the PCS (Physical Coding Sublayer) unit and wait for the
+ * self-clearing reset bit to drop (up to 32 * 100us).
+ */
+static void gem_pcs_reset(struct gem *gp)
+{
+       int limit;
+       u32 val;
+
+       /* Reset PCS unit. */
+       val = readl(gp->regs + PCS_MIICTRL);
+       val |= PCS_MIICTRL_RST;
+       writel(val, gp->regs + PCS_MIICTRL);
+
+       /* Poll for the hardware to clear the reset bit. */
+       limit = 32;
+       while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
+               udelay(100);
+               if (limit-- <= 0)
+                       break;
+       }
+       /* limit goes to -1 only when the loop above timed out */
+       if (limit < 0)
+               netdev_warn(gp->dev, "PCS reset bit would not clear\n");
+}
+
+/* Re-program the PCS auto-negotiation advertisement registers and
+ * re-enable the unit.  Used on Serialink/SERDES (fiber) configurations.
+ */
+static void gem_pcs_reinit_adv(struct gem *gp)
+{
+       u32 val;
+
+       /* Make sure PCS is disabled while changing advertisement
+        * configuration.
+        */
+       val = readl(gp->regs + PCS_CFG);
+       val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
+       writel(val, gp->regs + PCS_CFG);
+
+       /* Advertise all capabilities except asymmetric
+        * pause.
+        */
+       val = readl(gp->regs + PCS_MIIADV);
+       val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
+               PCS_MIIADV_SP | PCS_MIIADV_AP);
+       writel(val, gp->regs + PCS_MIIADV);
+
+       /* Enable and restart auto-negotiation, disable wrapback/loopback,
+        * and re-enable PCS.
+        */
+       val = readl(gp->regs + PCS_MIICTRL);
+       val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
+       val &= ~PCS_MIICTRL_WB;
+       writel(val, gp->regs + PCS_MIICTRL);
+
+       val = readl(gp->regs + PCS_CFG);
+       val |= PCS_CFG_ENABLE;
+       writel(val, gp->regs + PCS_CFG);
+
+       /* Make sure serialink loopback is off.  The meaning
+        * of this bit is logically inverted based upon whether
+        * you are in Serialink or SERDES mode.
+        */
+       val = readl(gp->regs + PCS_SCTRL);
+       if (gp->phy_type == phy_serialink)
+               val &= ~PCS_SCTRL_LOOP;
+       else
+               val |= PCS_SCTRL_LOOP;
+       writel(val, gp->regs + PCS_SCTRL);
+}
+
+#define STOP_TRIES 32
+
+/* Perform a global software reset of the chip (TX and RX blocks).
+ * On fiber links the PCS must be re-initialized afterwards since
+ * the reset wipes its configuration.
+ */
+static void gem_reset(struct gem *gp)
+{
+       int limit;
+       u32 val;
+
+       /* Make sure we won't get any more interrupts */
+       writel(0xffffffff, gp->regs + GREG_IMASK);
+
+       /* Reset the chip */
+       writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
+              gp->regs + GREG_SWRST);
+
+       limit = STOP_TRIES;
+
+       /* The TXRST/RXRST bits self-clear once the reset completes. */
+       do {
+               udelay(20);
+               val = readl(gp->regs + GREG_SWRST);
+               if (limit-- <= 0)
+                       break;
+       } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
+
+       if (limit < 0)
+               netdev_err(gp->dev, "SW reset is ghetto\n");
+
+       if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
+               gem_pcs_reinit_adv(gp);
+}
+
+/* Enable the TX/RX DMA engines and the MAC TX/RX paths, unmask
+ * interrupts, and kick the RX ring so the chip starts receiving.
+ */
+static void gem_start_dma(struct gem *gp)
+{
+       u32 val;
+
+       /* We are ready to rock, turn everything on. */
+       val = readl(gp->regs + TXDMA_CFG);
+       writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
+       val = readl(gp->regs + RXDMA_CFG);
+       writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
+       val = readl(gp->regs + MAC_TXCFG);
+       writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
+       val = readl(gp->regs + MAC_RXCFG);
+       writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+
+       /* Read back to flush the posted writes before the delay */
+       (void) readl(gp->regs + MAC_RXCFG);
+       udelay(100);
+
+       gem_enable_ints(gp);
+
+       writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
+}
+
+/* Disable the TX/RX DMA engines and the MAC TX/RX paths.
+ * DMA won't be actually stopped before about 4ms tho ...
+ */
+static void gem_stop_dma(struct gem *gp)
+{
+       u32 val;
+
+       /* We are done rocking, turn everything off. */
+       val = readl(gp->regs + TXDMA_CFG);
+       writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
+       val = readl(gp->regs + RXDMA_CFG);
+       writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
+       val = readl(gp->regs + MAC_TXCFG);
+       writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
+       val = readl(gp->regs + MAC_RXCFG);
+       writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+
+       /* Read back to flush the posted writes */
+       (void) readl(gp->regs + MAC_RXCFG);
+
+       /* Need to wait a bit ... done by the caller */
+}
+
+
+// XXX dbl check what that function should do when called on PCS PHY
+/* Start (or restart) link negotiation.  @ep, when non-NULL, carries
+ * user-requested settings from ethtool; otherwise current/default
+ * parameters are used.  Settings are sanitized against the PHY's
+ * capabilities, then either aneg or forced mode is programmed and
+ * the link poll timer is re-armed.
+ */
+static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
+{
+       u32 advertise, features;
+       int autoneg;
+       int speed;
+       int duplex;
+
+       /* PCS (fiber) PHYs skip all the MII setup below */
+       if (gp->phy_type != phy_mii_mdio0 &&
+           gp->phy_type != phy_mii_mdio1)
+               goto non_mii;
+
+       /* Setup advertise */
+       if (found_mii_phy(gp))
+               features = gp->phy_mii.def->features;
+       else
+               features = 0;
+
+       advertise = features & ADVERTISE_MASK;
+       if (gp->phy_mii.advertising != 0)
+               advertise &= gp->phy_mii.advertising;
+
+       autoneg = gp->want_autoneg;
+       speed = gp->phy_mii.speed;
+       duplex = gp->phy_mii.duplex;
+
+       /* Setup link parameters */
+       if (!ep)
+               goto start_aneg;
+       if (ep->autoneg == AUTONEG_ENABLE) {
+               advertise = ep->advertising;
+               autoneg = 1;
+       } else {
+               autoneg = 0;
+               speed = ethtool_cmd_speed(ep);
+               duplex = ep->duplex;
+       }
+
+start_aneg:
+       /* Sanitize settings based on PHY capabilities */
+       if ((features & SUPPORTED_Autoneg) == 0)
+               autoneg = 0;
+       /* Downgrade unsupported speeds step by step: 1000 -> 100 -> 10 */
+       if (speed == SPEED_1000 &&
+           !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
+               speed = SPEED_100;
+       if (speed == SPEED_100 &&
+           !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
+               speed = SPEED_10;
+       if (duplex == DUPLEX_FULL &&
+           !(features & (SUPPORTED_1000baseT_Full |
+                         SUPPORTED_100baseT_Full |
+                         SUPPORTED_10baseT_Full)))
+               duplex = DUPLEX_HALF;
+       if (speed == 0)
+               speed = SPEED_10;
+
+       /* If we are asleep, we don't try to actually setup the PHY, we
+        * just store the settings
+        */
+       if (!netif_device_present(gp->dev)) {
+               gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
+               gp->phy_mii.speed = speed;
+               gp->phy_mii.duplex = duplex;
+               return;
+       }
+
+       /* Configure PHY & start aneg */
+       gp->want_autoneg = autoneg;
+       if (autoneg) {
+               if (found_mii_phy(gp))
+                       gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
+               gp->lstate = link_aneg;
+       } else {
+               if (found_mii_phy(gp))
+                       gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
+               gp->lstate = link_force_ok;
+       }
+
+non_mii:
+       /* Re-arm the link poll timer (fires in 1.2s) */
+       gp->timer_ticks = 0;
+       mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
+}
+
+/* A link-up condition has occurred, initialize and enable the
+ * rest of the chip.
+ *
+ * Reads the negotiated speed/duplex/pause from the MII PHY or the
+ * PCS, programs the MAC/XIF/pause registers accordingly, and starts
+ * the DMA engines.  Returns 1 if the PHY link read failed (caller
+ * should restart auto-negotiation), 0 on success.
+ */
+static int gem_set_link_modes(struct gem *gp)
+{
+       struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0);
+       int full_duplex, speed, pause;
+       u32 val;
+
+       full_duplex = 0;
+       speed = SPEED_10;
+       pause = 0;
+
+       if (found_mii_phy(gp)) {
+               if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
+                       return 1;
+               full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
+               speed = gp->phy_mii.speed;
+               pause = gp->phy_mii.pause;
+       } else if (gp->phy_type == phy_serialink ||
+                  gp->phy_type == phy_serdes) {
+               u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
+
+               /* Fiber is always gigabit; SERDES is always full duplex */
+               if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
+                       full_duplex = 1;
+               speed = SPEED_1000;
+       }
+
+       netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
+                  speed, (full_duplex ? "full" : "half"));
+
+
+       /* We take the tx queue lock to avoid collisions between
+        * this code, the tx path and the NAPI-driven error path
+        */
+       __netif_tx_lock(txq, smp_processor_id());
+
+       val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
+       if (full_duplex) {
+               val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
+       } else {
+               /* MAC_TXCFG_NBO must be zero. */
+       }
+       writel(val, gp->regs + MAC_TXCFG);
+
+       val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
+       if (!full_duplex &&
+           (gp->phy_type == phy_mii_mdio0 ||
+            gp->phy_type == phy_mii_mdio1)) {
+               val |= MAC_XIFCFG_DISE;
+       } else if (full_duplex) {
+               val |= MAC_XIFCFG_FLED;
+       }
+
+       if (speed == SPEED_1000)
+               val |= (MAC_XIFCFG_GMII);
+
+       writel(val, gp->regs + MAC_XIFCFG);
+
+       /* If gigabit and half-duplex, enable carrier extension
+        * mode.  Else, disable it.
+        */
+       if (speed == SPEED_1000 && !full_duplex) {
+               val = readl(gp->regs + MAC_TXCFG);
+               writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
+
+               val = readl(gp->regs + MAC_RXCFG);
+               writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
+       } else {
+               val = readl(gp->regs + MAC_TXCFG);
+               writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
+
+               val = readl(gp->regs + MAC_RXCFG);
+               writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
+       }
+
+       /* On fiber, honor the link partner's pause advertisement */
+       if (gp->phy_type == phy_serialink ||
+           gp->phy_type == phy_serdes) {
+               u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
+
+               if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
+                       pause = 1;
+       }
+
+       if (!full_duplex)
+               writel(512, gp->regs + MAC_STIME);
+       else
+               writel(64, gp->regs + MAC_STIME);
+       val = readl(gp->regs + MAC_MCCFG);
+       if (pause)
+               val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
+       else
+               val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
+       writel(val, gp->regs + MAC_MCCFG);
+
+       gem_start_dma(gp);
+
+       __netif_tx_unlock(txq);
+
+       if (netif_msg_link(gp)) {
+               if (pause) {
+                       netdev_info(gp->dev,
+                                   "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+                                   gp->rx_fifo_sz,
+                                   gp->rx_pause_off,
+                                   gp->rx_pause_on);
+               } else {
+                       netdev_info(gp->dev, "Pause is disabled\n");
+               }
+       }
+
+       return 0;
+}
+
+/* Link-down handling for MII PHYs, driven from the link timer.
+ * Steps the fallback state machine: failed aneg -> forced 100bt ->
+ * forced 10bt.  Returns 1 when the caller should restart
+ * auto-negotiation, 0 when a forced mode was (re)programmed.
+ */
+static int gem_mdio_link_not_up(struct gem *gp)
+{
+       switch (gp->lstate) {
+       case link_force_ret:
+               netif_info(gp, link, gp->dev,
+                          "Autoneg failed again, keeping forced mode\n");
+               gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
+                       gp->last_forced_speed, DUPLEX_HALF);
+               gp->timer_ticks = 5;
+               gp->lstate = link_force_ok;
+               return 0;
+       case link_aneg:
+               /* We try forced modes after a failed aneg only on PHYs that don't
+                * have "magic_aneg" bit set, which means they internally do the
+                * whole forced-mode thingy. On these, we just restart aneg
+                */
+               if (gp->phy_mii.def->magic_aneg)
+                       return 1;
+               netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
+               /* Try forced modes. */
+               gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
+                       DUPLEX_HALF);
+               gp->timer_ticks = 5;
+               gp->lstate = link_force_try;
+               return 0;
+       case link_force_try:
+               /* Downgrade from 100 to 10 Mbps if necessary.
+                * If already at 10Mbps, warn user about the
+                * situation every 10 ticks.
+                */
+               if (gp->phy_mii.speed == SPEED_100) {
+                       gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
+                               DUPLEX_HALF);
+                       gp->timer_ticks = 5;
+                       netif_info(gp, link, gp->dev,
+                                  "switching to forced 10bt\n");
+                       return 0;
+               } else
+                       return 1;
+       default:
+               return 0;
+       }
+}
+
+/* Periodic link poll timer (re-armed every 1.2s).  Tracks link
+ * up/down transitions for both PCS (fiber) and MII PHYs, drives the
+ * aneg fallback state machine, and schedules a chip reset when an
+ * established link goes down.
+ */
+static void gem_link_timer(unsigned long data)
+{
+       struct gem *gp = (struct gem *) data;
+       struct net_device *dev = gp->dev;
+       int restart_aneg = 0;
+
+       /* There's no point doing anything if we're going to be reset */
+       if (gp->reset_task_pending)
+               return;
+
+       if (gp->phy_type == phy_serialink ||
+           gp->phy_type == phy_serdes) {
+               u32 val = readl(gp->regs + PCS_MIISTAT);
+
+               /* Link-status bit latches low; read twice to get the
+                * current state
+                */
+               if (!(val & PCS_MIISTAT_LS))
+                       val = readl(gp->regs + PCS_MIISTAT);
+
+               if ((val & PCS_MIISTAT_LS) != 0) {
+                       if (gp->lstate == link_up)
+                               goto restart;
+
+                       gp->lstate = link_up;
+                       netif_carrier_on(dev);
+                       (void)gem_set_link_modes(gp);
+               }
+               goto restart;
+       }
+       if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
+               /* Ok, here we got a link. If we had it due to a forced
+                * fallback, and we were configured for autoneg, we do
+                * retry a short autoneg pass. If you know your hub is
+                * broken, use ethtool ;)
+                */
+               if (gp->lstate == link_force_try && gp->want_autoneg) {
+                       gp->lstate = link_force_ret;
+                       gp->last_forced_speed = gp->phy_mii.speed;
+                       gp->timer_ticks = 5;
+                       if (netif_msg_link(gp))
+                               netdev_info(dev,
+                                           "Got link after fallback, retrying autoneg once...\n");
+                       gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
+               } else if (gp->lstate != link_up) {
+                       gp->lstate = link_up;
+                       netif_carrier_on(dev);
+                       if (gem_set_link_modes(gp))
+                               restart_aneg = 1;
+               }
+       } else {
+               /* If the link was previously up, we restart the
+                * whole process
+                */
+               if (gp->lstate == link_up) {
+                       gp->lstate = link_down;
+                       netif_info(gp, link, dev, "Link down\n");
+                       netif_carrier_off(dev);
+                       gem_schedule_reset(gp);
+                       /* The reset task will restart the timer */
+                       return;
+               } else if (++gp->timer_ticks > 10) {
+                       if (found_mii_phy(gp))
+                               restart_aneg = gem_mdio_link_not_up(gp);
+                       else
+                               restart_aneg = 1;
+               }
+       }
+       if (restart_aneg) {
+               /* gem_begin_auto_negotiation() re-arms the timer itself */
+               gem_begin_auto_negotiation(gp, NULL);
+               return;
+       }
+restart:
+       mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
+}
+
+/* Release every skb still attached to the RX and TX descriptor rings,
+ * unmapping its DMA buffer(s), and zero the descriptors.  Called with
+ * the chip's DMA stopped.
+ */
+static void gem_clean_rings(struct gem *gp)
+{
+       struct gem_init_block *gb = gp->init_block;
+       struct sk_buff *skb;
+       int i;
+       dma_addr_t dma_addr;
+
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               struct gem_rxd *rxd;
+
+               rxd = &gb->rxd[i];
+               if (gp->rx_skbs[i] != NULL) {
+                       skb = gp->rx_skbs[i];
+                       dma_addr = le64_to_cpu(rxd->buffer);
+                       pci_unmap_page(gp->pdev, dma_addr,
+                                      RX_BUF_ALLOC_SIZE(gp),
+                                      PCI_DMA_FROMDEVICE);
+                       dev_kfree_skb_any(skb);
+                       gp->rx_skbs[i] = NULL;
+               }
+               /* Clear status before the buffer pointer (wmb orders it)
+                * so the chip never sees a stale "owned" descriptor
+                */
+               rxd->status_word = 0;
+               wmb();
+               rxd->buffer = 0;
+       }
+
+       for (i = 0; i < TX_RING_SIZE; i++) {
+               if (gp->tx_skbs[i] != NULL) {
+                       struct gem_txd *txd;
+                       int frag;
+
+                       skb = gp->tx_skbs[i];
+                       gp->tx_skbs[i] = NULL;
+
+                       /* One descriptor for the head plus one per fragment */
+                       for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+                               int ent = i & (TX_RING_SIZE - 1);
+
+                               txd = &gb->txd[ent];
+                               dma_addr = le64_to_cpu(txd->buffer);
+                               pci_unmap_page(gp->pdev, dma_addr,
+                                              le64_to_cpu(txd->control_word) &
+                                              TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);
+
+                               if (frag != skb_shinfo(skb)->nr_frags)
+                                       i++;
+                       }
+                       dev_kfree_skb_any(skb);
+               }
+       }
+}
+
+/* (Re)populate the RX ring with freshly allocated, DMA-mapped skbs and
+ * zero out the TX ring.  Any previously attached buffers are released
+ * first via gem_clean_rings().
+ */
+static void gem_init_rings(struct gem *gp)
+{
+       struct gem_init_block *gb = gp->init_block;
+       struct net_device *dev = gp->dev;
+       int i;
+       dma_addr_t dma_addr;
+
+       gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;
+
+       gem_clean_rings(gp);
+
+       gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
+                           (unsigned)VLAN_ETH_FRAME_LEN);
+
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               struct sk_buff *skb;
+               struct gem_rxd *rxd = &gb->rxd[i];
+
+               /* On allocation failure leave an empty slot; the chip
+                * simply skips descriptors with a zero status word
+                */
+               skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
+               if (!skb) {
+                       rxd->buffer = 0;
+                       rxd->status_word = 0;
+                       continue;
+               }
+
+               gp->rx_skbs[i] = skb;
+               skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
+               dma_addr = pci_map_page(gp->pdev,
+                                       virt_to_page(skb->data),
+                                       offset_in_page(skb->data),
+                                       RX_BUF_ALLOC_SIZE(gp),
+                                       PCI_DMA_FROMDEVICE);
+               /* Publish the buffer before handing the descriptor to
+                * the chip (wmb orders buffer vs. status_word)
+                */
+               rxd->buffer = cpu_to_le64(dma_addr);
+               wmb();
+               rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
+               skb_reserve(skb, RX_OFFSET);
+       }
+
+       for (i = 0; i < TX_RING_SIZE; i++) {
+               struct gem_txd *txd = &gb->txd[i];
+
+               txd->control_word = 0;
+               wmb();
+               txd->buffer = 0;
+       }
+       wmb();
+}
+
+/* Init PHY interface and start link poll state machine */
+static void gem_init_phy(struct gem *gp)
+{
+       u32 mifcfg;
+
+       /* Revert MIF CFG setting done on stop_phy */
+       mifcfg = readl(gp->regs + MIF_CFG);
+       mifcfg &= ~MIF_CFG_BBMODE;
+       writel(mifcfg, gp->regs + MIF_CFG);
+
+       if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
+               int i;
+
+               /* Those delays suck, the HW seems to love them though, I'll
+                * seriously consider breaking some locks here to be able
+                * to schedule instead
+                */
+               for (i = 0; i < 3; i++) {
+#ifdef CONFIG_PPC_PMAC
+                       pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
+                       msleep(20);
+#endif
+                       /* Some PHYs used by apple have problem getting back to us,
+                        * we do an additional reset here
+                        */
+                       phy_write(gp, MII_BMCR, BMCR_RESET);
+                       msleep(20);
+                       /* 0xffff means the PHY isn't answering on MDIO */
+                       if (phy_read(gp, MII_BMCR) != 0xffff)
+                               break;
+                       if (i == 2)
+                               netdev_warn(gp->dev, "GMAC PHY not responding !\n");
+               }
+       }
+
+       if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
+           gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+               u32 val;
+
+               /* Init datapath mode register. */
+               if (gp->phy_type == phy_mii_mdio0 ||
+                   gp->phy_type == phy_mii_mdio1) {
+                       val = PCS_DMODE_MGM;
+               } else if (gp->phy_type == phy_serialink) {
+                       val = PCS_DMODE_SM | PCS_DMODE_GMOE;
+               } else {
+                       val = PCS_DMODE_ESM;
+               }
+
+               writel(val, gp->regs + PCS_DMODE);
+       }
+
+       if (gp->phy_type == phy_mii_mdio0 ||
+           gp->phy_type == phy_mii_mdio1) {
+               /* Reset and detect MII PHY */
+               mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
+
+               /* Init PHY */
+               if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
+                       gp->phy_mii.def->ops->init(&gp->phy_mii);
+       } else {
+               gem_pcs_reset(gp);
+               gem_pcs_reinit_adv(gp);
+       }
+
+       /* Default aneg parameters */
+       gp->timer_ticks = 0;
+       gp->lstate = link_down;
+       netif_carrier_off(gp->dev);
+
+       /* Print things out */
+       if (gp->phy_type == phy_mii_mdio0 ||
+           gp->phy_type == phy_mii_mdio1)
+               netdev_info(gp->dev, "Found %s PHY\n",
+                           gp->phy_mii.def ? gp->phy_mii.def->name : "no");
+
+       gem_begin_auto_negotiation(gp, NULL);
+}
+
+/* Program the TX and RX DMA engines: descriptor ring base addresses,
+ * burst/threshold configuration, pause thresholds and interrupt
+ * coalescing (but do not enable them yet - see gem_start_dma()).
+ */
+static void gem_init_dma(struct gem *gp)
+{
+       u64 desc_dma = (u64) gp->gblock_dvma;
+       u32 val;
+
+       val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
+       writel(val, gp->regs + TXDMA_CFG);
+
+       writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
+       writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
+       /* RX descriptors follow the TX ring inside the init block */
+       desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
+
+       writel(0, gp->regs + TXDMA_KICK);
+
+       val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
+              ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+       writel(val, gp->regs + RXDMA_CFG);
+
+       writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
+       writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
+
+       writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
+
+       val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
+       val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
+       writel(val, gp->regs + RXDMA_PTHRESH);
+
+       /* Interrupt blanking: tighter time at 66MHz bus clock */
+       if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
+               writel(((5 & RXDMA_BLANK_IPKTS) |
+                       ((8 << 12) & RXDMA_BLANK_ITIME)),
+                      gp->regs + RXDMA_BLANK);
+       else
+               writel(((5 & RXDMA_BLANK_IPKTS) |
+                       ((4 << 12) & RXDMA_BLANK_ITIME)),
+                      gp->regs + RXDMA_BLANK);
+}
+
+/* Program the MAC hash filter from the device's multicast list and
+ * return the MAC_RXCFG filter bits (hash-filter enable / promiscuous)
+ * the caller should merge into MAC_RXCFG.
+ */
+static u32 gem_setup_multicast(struct gem *gp)
+{
+       u32 rxcfg = 0;
+       int i;
+
+       if ((gp->dev->flags & IFF_ALLMULTI) ||
+           (netdev_mc_count(gp->dev) > 256)) {
+               /* Too many addresses: accept all multicast by filling
+                * the hash table with ones
+                */
+               for (i=0; i<16; i++)
+                       writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
+               rxcfg |= MAC_RXCFG_HFE;
+       } else if (gp->dev->flags & IFF_PROMISC) {
+               rxcfg |= MAC_RXCFG_PROM;
+       } else {
+               u16 hash_table[16];
+               u32 crc;
+               struct netdev_hw_addr *ha;
+               int i;
+
+               /* 256-bin hash: top 8 bits of the little-endian CRC
+                * of the MAC address select the bit to set
+                */
+               memset(hash_table, 0, sizeof(hash_table));
+               netdev_for_each_mc_addr(ha, gp->dev) {
+                       crc = ether_crc_le(6, ha->addr);
+                       crc >>= 24;
+                       hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+               }
+               for (i=0; i<16; i++)
+                       writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
+               rxcfg |= MAC_RXCFG_HFE;
+       }
+
+       return rxcfg;
+}
+
+/* Program the MAC block: frame size limits, station and filter
+ * addresses, multicast hash, statistics counters and interrupt masks.
+ * TX/XIF configs are left cleared until a link is established.
+ */
+static void gem_init_mac(struct gem *gp)
+{
+       unsigned char *e = &gp->dev->dev_addr[0];
+
+       writel(0x1bf0, gp->regs + MAC_SNDPAUSE);
+
+       writel(0x00, gp->regs + MAC_IPG0);
+       writel(0x08, gp->regs + MAC_IPG1);
+       writel(0x04, gp->regs + MAC_IPG2);
+       writel(0x40, gp->regs + MAC_STIME);
+       writel(0x40, gp->regs + MAC_MINFSZ);
+
+       /* Ethernet payload + header + FCS + optional VLAN tag. */
+       writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);
+
+       writel(0x07, gp->regs + MAC_PASIZE);
+       writel(0x04, gp->regs + MAC_JAMSIZE);
+       writel(0x10, gp->regs + MAC_ATTLIM);
+       writel(0x8808, gp->regs + MAC_MCTYPE);
+
+       /* Backoff random seed derived from the low MAC address bytes */
+       writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);
+
+       /* Station address, 16 bits per register, most significant last */
+       writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
+       writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
+       writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
+
+       writel(0, gp->regs + MAC_ADDR3);
+       writel(0, gp->regs + MAC_ADDR4);
+       writel(0, gp->regs + MAC_ADDR5);
+
+       /* 01:80:c2:00:00:01 - the 802.3x PAUSE multicast address */
+       writel(0x0001, gp->regs + MAC_ADDR6);
+       writel(0xc200, gp->regs + MAC_ADDR7);
+       writel(0x0180, gp->regs + MAC_ADDR8);
+
+       writel(0, gp->regs + MAC_AFILT0);
+       writel(0, gp->regs + MAC_AFILT1);
+       writel(0, gp->regs + MAC_AFILT2);
+       writel(0, gp->regs + MAC_AF21MSK);
+       writel(0, gp->regs + MAC_AF0MSK);
+
+       gp->mac_rx_cfg = gem_setup_multicast(gp);
+#ifdef STRIP_FCS
+       gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
+#endif
+       /* Clear all statistics counters */
+       writel(0, gp->regs + MAC_NCOLL);
+       writel(0, gp->regs + MAC_FASUCC);
+       writel(0, gp->regs + MAC_ECOLL);
+       writel(0, gp->regs + MAC_LCOLL);
+       writel(0, gp->regs + MAC_DTIMER);
+       writel(0, gp->regs + MAC_PATMPS);
+       writel(0, gp->regs + MAC_RFCTR);
+       writel(0, gp->regs + MAC_LERR);
+       writel(0, gp->regs + MAC_AERR);
+       writel(0, gp->regs + MAC_FCSERR);
+       writel(0, gp->regs + MAC_RXCVERR);
+
+       /* Clear RX/TX/MAC/XIF config, we will set these up and enable
+        * them once a link is established.
+        */
+       writel(0, gp->regs + MAC_TXCFG);
+       writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
+       writel(0, gp->regs + MAC_MCCFG);
+       writel(0, gp->regs + MAC_XIFCFG);
+
+       /* Setup MAC interrupts.  We want to get all of the interesting
+        * counter expiration events, but we do not want to hear about
+        * normal rx/tx as the DMA engine tells us that.
+        */
+       writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
+       writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
+
+       /* Don't enable even the PAUSE interrupts for now, we
+        * make no use of those events other than to record them.
+        */
+       writel(0xffffffff, gp->regs + MAC_MCMASK);
+
+       /* Don't enable GEM's WOL in normal operations
+        */
+       if (gp->has_wol)
+               writel(0, gp->regs + WOL_WAKECSR);
+}
+
+static void gem_init_pause_thresholds(struct gem *gp)
+{
+               u32 cfg;
+
+       /* Calculate pause thresholds.  Setting the OFF threshold to the
+        * full RX fifo size effectively disables PAUSE generation which
+        * is what we do for 10/100 only GEMs which have FIFOs too small
+        * to make real gains from PAUSE.
+        */
+       if (gp->rx_fifo_sz <= (2 * 1024)) {
+               gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
+       } else {
+               int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
+               int off = (gp->rx_fifo_sz - (max_frame * 2));
+               int on = off - max_frame;
+
+               gp->rx_pause_off = off;
+               gp->rx_pause_on = on;
+       }
+
+
+       /* Configure the chip "burst" DMA mode & enable some
+        * HW bug fixes on Apple version
+        */
+               cfg  = 0;
+               if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
+               cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
+#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
+               cfg |= GREG_CFG_IBURST;
+#endif
+               cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
+               cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
+               writel(cfg, gp->regs + GREG_CFG);
+
+       /* If Infinite Burst didn't stick, then use different
+        * thresholds (and Apple bug fixes don't exist)
+        */
+       if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
+               cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
+               cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
+               writel(cfg, gp->regs + GREG_CFG);
+       }
+}
+
+/* Probe-time sanity checks: determine the PHY interface type, the MII
+ * PHY address, and the TX/RX FIFO sizes, and validate them against the
+ * known Sun GEM / RIO GEM variants.  Returns 0 on success, -1 if the
+ * hardware does not look like a supported chip.
+ */
+static int gem_check_invariants(struct gem *gp)
+{
+       struct pci_dev *pdev = gp->pdev;
+       u32 mif_cfg;
+
+       /* On Apple's sungem, we can't rely on registers as the chip
+        * has been powered down by the firmware. The PHY is looked
+        * up later on.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
+               gp->phy_type = phy_mii_mdio0;
+               gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
+               gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
+               gp->swrst_base = 0;
+
+               mif_cfg = readl(gp->regs + MIF_CFG);
+               mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
+               mif_cfg |= MIF_CFG_MDI0;
+               writel(mif_cfg, gp->regs + MIF_CFG);
+               writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
+               writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
+
+               /* We hard-code the PHY address so we can properly bring it out of
+                * reset later on, we can't really probe it at this point, though
+                * that isn't an issue.
+                */
+               if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
+                       gp->mii_phy_addr = 1;
+               else
+                       gp->mii_phy_addr = 0;
+
+               return 0;
+       }
+
+       mif_cfg = readl(gp->regs + MIF_CFG);
+
+       if (pdev->vendor == PCI_VENDOR_ID_SUN &&
+           pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
+               /* One of the MII PHYs _must_ be present
+                * as this chip has no gigabit PHY.
+                */
+               if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
+                       pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
+                              mif_cfg);
+                       return -1;
+               }
+       }
+
+       /* Determine initial PHY interface type guess.  MDIO1 is the
+        * external PHY and thus takes precedence over MDIO0.
+        */
+
+       if (mif_cfg & MIF_CFG_MDI1) {
+               gp->phy_type = phy_mii_mdio1;
+               mif_cfg |= MIF_CFG_PSELECT;
+               writel(mif_cfg, gp->regs + MIF_CFG);
+       } else if (mif_cfg & MIF_CFG_MDI0) {
+               gp->phy_type = phy_mii_mdio0;
+               mif_cfg &= ~MIF_CFG_PSELECT;
+               writel(mif_cfg, gp->regs + MIF_CFG);
+       } else {
+#ifdef CONFIG_SPARC
+               const char *p;
+
+               /* No MDIO pins: check the OF device-tree for a serdes
+                * (Fibre) connector before assuming serialink.
+                */
+               p = of_get_property(gp->of_node, "shared-pins", NULL);
+               if (p && !strcmp(p, "serdes"))
+                       gp->phy_type = phy_serdes;
+               else
+#endif
+                       gp->phy_type = phy_serialink;
+       }
+       if (gp->phy_type == phy_mii_mdio1 ||
+           gp->phy_type == phy_mii_mdio0) {
+               int i;
+
+               /* Scan all 32 MII addresses; a present PHY reads BMCR
+                * as something other than all-ones.
+                */
+               for (i = 0; i < 32; i++) {
+                       gp->mii_phy_addr = i;
+                       if (phy_read(gp, MII_BMCR) != 0xffff)
+                               break;
+               }
+               if (i == 32) {
+                       if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
+                               pr_err("RIO MII phy will not respond\n");
+                               return -1;
+                       }
+                       gp->phy_type = phy_serdes;
+               }
+       }
+
+       /* Fetch the FIFO configurations now too. */
+       gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
+       gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
+
+       if (pdev->vendor == PCI_VENDOR_ID_SUN) {
+               if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
+                       /* Sun GEM: expect 9K TX / 20K RX FIFOs */
+                       if (gp->tx_fifo_sz != (9 * 1024) ||
+                           gp->rx_fifo_sz != (20 * 1024)) {
+                               pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+                                      gp->tx_fifo_sz, gp->rx_fifo_sz);
+                               return -1;
+                       }
+                       gp->swrst_base = 0;
+               } else {
+                       /* RIO GEM: expect 2K TX / 2K RX FIFOs */
+                       if (gp->tx_fifo_sz != (2 * 1024) ||
+                           gp->rx_fifo_sz != (2 * 1024)) {
+                               pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+                                      gp->tx_fifo_sz, gp->rx_fifo_sz);
+                               return -1;
+                       }
+                       gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
+               }
+       }
+
+       return 0;
+}
+
+/* Full chip re-initialization: reset, mask interrupts, rebuild the
+ * descriptor rings and reprogram DMA/MAC.  Used on open, MTU change
+ * and from the reset task.
+ */
+static void gem_reinit_chip(struct gem *gp)
+{
+       /* Reset the chip */
+       gem_reset(gp);
+
+       /* Make sure ints are disabled */
+       gem_disable_ints(gp);
+
+       /* Allocate & setup ring buffers */
+       gem_init_rings(gp);
+
+       /* Configure pause thresholds */
+       gem_init_pause_thresholds(gp);
+
+       /* Init DMA & MAC engines */
+       gem_init_dma(gp);
+       gem_init_mac(gp);
+}
+
+
+/* Quiesce the PHY and MAC for power-down.  If @wol is set (and the
+ * chip supports it) the wake-on-LAN magic-packet matcher is armed
+ * instead of fully resetting the chip.
+ */
+static void gem_stop_phy(struct gem *gp, int wol)
+{
+       u32 mifcfg;
+
+       /* Let the chip settle down a bit, it seems that helps
+        * for sleep mode on some models
+        */
+       msleep(10);
+
+       /* Make sure we aren't polling PHY status change. We
+        * don't currently use that feature though
+        */
+       mifcfg = readl(gp->regs + MIF_CFG);
+       mifcfg &= ~MIF_CFG_POLL;
+       writel(mifcfg, gp->regs + MIF_CFG);
+
+       if (wol && gp->has_wol) {
+               unsigned char *e = &gp->dev->dev_addr[0];
+               u32 csr;
+
+               /* Setup wake-on-lan for MAGIC packet */
+               writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
+                      gp->regs + MAC_RXCFG);
+               writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
+               writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
+               writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
+
+               writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
+               csr = WOL_WAKECSR_ENABLE;
+               if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
+                       csr |= WOL_WAKECSR_MII;
+               writel(csr, gp->regs + WOL_WAKECSR);
+       } else {
+               writel(0, gp->regs + MAC_RXCFG);
+               (void)readl(gp->regs + MAC_RXCFG);
+               /* Machine sleep will die in strange ways if we
+                * don't wait a bit here, looks like the chip takes
+                * some time to really shut down
+                */
+               msleep(10);
+       }
+
+       writel(0, gp->regs + MAC_TXCFG);
+       writel(0, gp->regs + MAC_XIFCFG);
+       writel(0, gp->regs + TXDMA_CFG);
+       writel(0, gp->regs + RXDMA_CFG);
+
+       if (!wol) {
+               gem_reset(gp);
+               writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
+               writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
+
+               if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
+                       gp->phy_mii.def->ops->suspend(&gp->phy_mii);
+
+               /* According to Apple, we must set the MDIO pins to this benign
+                * state or we may 1) eat more current, 2) damage some PHYs
+                */
+               writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
+               writel(0, gp->regs + MIF_BBCLK);
+               writel(0, gp->regs + MIF_BBDATA);
+               writel(0, gp->regs + MIF_BBOENAB);
+               writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
+               (void) readl(gp->regs + MAC_XIFCFG);
+       }
+}
+
+/* Bring the interface up: power the cell, enable the PCI device,
+ * reinit the chip, grab the IRQ and kick off PHY autonegotiation.
+ * Called from ndo_open and from resume.  Returns 0 or -errno.
+ */
+static int gem_do_start(struct net_device *dev)
+{
+       struct gem *gp = netdev_priv(dev);
+       int rc;
+
+       /* Enable the cell */
+       gem_get_cell(gp);
+
+       /* Make sure PCI access and bus master are enabled */
+       rc = pci_enable_device(gp->pdev);
+       if (rc) {
+               netdev_err(dev, "Failed to enable chip on PCI bus !\n");
+
+               /* Put cell and forget it for now, it will be considered as
+                * still asleep, a new sleep cycle may bring it back
+                */
+               gem_put_cell(gp);
+               return -ENXIO;
+       }
+       pci_set_master(gp->pdev);
+
+       /* Init & setup chip hardware */
+       gem_reinit_chip(gp);
+
+       /* An interrupt might come in handy */
+       rc = request_irq(gp->pdev->irq, gem_interrupt,
+                        IRQF_SHARED, dev->name, (void *)dev);
+       if (rc) {
+               netdev_err(dev, "failed to request irq !\n");
+
+               gem_reset(gp);
+               gem_clean_rings(gp);
+               gem_put_cell(gp);
+               return rc;
+       }
+
+       /* Mark us as attached again if we come from resume(), this has
+        * no effect if we weren't detached and needs to be done now.
+        */
+       netif_device_attach(dev);
+
+       /* Restart NAPI & queues */
+       gem_netif_start(gp);
+
+       /* Detect & init PHY, start autoneg etc... this will
+        * eventually result in starting DMA operations when
+        * the link is up
+        */
+       gem_init_phy(gp);
+
+       return 0;
+}
+
+/* Take the interface down: stop NAPI/TX, disable interrupts, tear
+ * down DMA and rings, release the IRQ and power the PHY down.  When
+ * @wol is set the chip is left armed for wake-on-LAN and the cell is
+ * kept powered.  Caller holds rtnl_lock.
+ */
+static void gem_do_stop(struct net_device *dev, int wol)
+{
+       struct gem *gp = netdev_priv(dev);
+
+       /* Stop NAPI and stop tx queue */
+       gem_netif_stop(gp);
+
+       /* Make sure ints are disabled. We don't care about
+        * synchronizing as NAPI is disabled, thus a stray
+        * interrupt will do nothing bad (our irq handler
+        * just schedules NAPI)
+        */
+       gem_disable_ints(gp);
+
+       /* Stop the link timer */
+       del_timer_sync(&gp->link_timer);
+
+       /* We cannot cancel the reset task while holding the
+        * rtnl lock, we'd get an A->B / B->A deadlock situation
+        * if we did. This is not an issue however as the reset
+        * task is synchronized vs. us (rtnl_lock) and will do
+        * nothing if the device is down or suspended. We do
+        * still clear reset_task_pending to avoid a spurious
+        * reset later on in case we do resume before it gets
+        * scheduled.
+        */
+       gp->reset_task_pending = 0;
+
+       /* If we are going to sleep with WOL */
+       gem_stop_dma(gp);
+       msleep(10);
+       if (!wol)
+               gem_reset(gp);
+       msleep(10);
+
+       /* Get rid of rings */
+       gem_clean_rings(gp);
+
+       /* No irq needed anymore */
+       free_irq(gp->pdev->irq, (void *) dev);
+
+       /* Shut the PHY down eventually and setup WOL */
+       gem_stop_phy(gp, wol);
+
+       /* Make sure bus master is disabled */
+       pci_disable_device(gp->pdev);
+
+       /* Cell not needed neither if no WOL */
+       if (!wol)
+               gem_put_cell(gp);
+}
+
+/* Deferred full-chip reset, scheduled from the interrupt/error paths.
+ * Serializes against open/close/suspend via rtnl_lock and bails out if
+ * the device went away or the reset was cancelled in the meantime.
+ */
+static void gem_reset_task(struct work_struct *work)
+{
+       struct gem *gp = container_of(work, struct gem, reset_task);
+
+       /* Lock out the network stack (essentially shield ourselves
+        * against a racing open, close, control call, or suspend
+        */
+       rtnl_lock();
+
+       /* Skip the reset task if suspended or closed, or if it's
+        * been cancelled by gem_do_stop (see comment there)
+        */
+       if (!netif_device_present(gp->dev) ||
+           !netif_running(gp->dev) ||
+           !gp->reset_task_pending) {
+               rtnl_unlock();
+               return;
+       }
+
+       /* Stop the link timer */
+       del_timer_sync(&gp->link_timer);
+
+       /* Stop NAPI and tx */
+       gem_netif_stop(gp);
+
+       /* Reset the chip & rings */
+       gem_reinit_chip(gp);
+       if (gp->lstate == link_up)
+               gem_set_link_modes(gp);
+
+       /* Restart NAPI and Tx */
+       gem_netif_start(gp);
+
+       /* We are back ! */
+       gp->reset_task_pending = 0;
+
+       /* If the link is not up, restart autoneg, else restart the
+        * polling timer
+        */
+       if (gp->lstate != link_up)
+               gem_begin_auto_negotiation(gp, NULL);
+       else
+               mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
+
+       rtnl_unlock();
+}
+
+/* ndo_open: start the interface unless we are suspended. */
+static int gem_open(struct net_device *dev)
+{
+       /* We allow open while suspended, we just do nothing,
+        * the chip will be initialized in resume()
+        */
+       if (netif_device_present(dev))
+               return gem_do_start(dev);
+       return 0;
+}
+
+/* ndo_stop: stop the interface (without WOL) unless suspended. */
+static int gem_close(struct net_device *dev)
+{
+       if (netif_device_present(dev))
+               gem_do_stop(dev, 0);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+/* PCI suspend hook: detach the netdev and shut the chip down,
+ * remembering whether WOL was requested so resume can rebalance
+ * the cell-clock refcount.
+ */
+static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct gem *gp = netdev_priv(dev);
+
+       /* Lock the network stack first to avoid racing with open/close,
+        * reset task and setting calls
+        */
+       rtnl_lock();
+
+       /* Not running, mark ourselves non-present, no need for
+        * a lock here
+        */
+       if (!netif_running(dev)) {
+               netif_device_detach(dev);
+               rtnl_unlock();
+               return 0;
+       }
+       netdev_info(dev, "suspending, WakeOnLan %s\n",
+                   (gp->wake_on_lan && netif_running(dev)) ?
+                   "enabled" : "disabled");
+
+       /* Tell the network stack we're gone. gem_do_stop() below will
+        * synchronize with TX, stop NAPI etc...
+        */
+       netif_device_detach(dev);
+
+       /* Switch off chip, remember WOL setting */
+       gp->asleep_wol = gp->wake_on_lan;
+       gem_do_stop(dev, gp->asleep_wol);
+
+       /* Unlock the network stack */
+       rtnl_unlock();
+
+       return 0;
+}
+
+/* PCI resume hook: reattach the netdev and restart the chip if the
+ * interface was running when we suspended.
+ */
+static int gem_resume(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct gem *gp = netdev_priv(dev);
+
+       /* See locking comment in gem_suspend */
+       rtnl_lock();
+
+       /* Not running, mark ourselves present, no need for
+        * a lock here
+        */
+       if (!netif_running(dev)) {
+               netif_device_attach(dev);
+               rtnl_unlock();
+               return 0;
+       }
+
+       /* Restart chip. If that fails there isn't much we can do, we
+        * leave things stopped.
+        */
+       gem_do_start(dev);
+
+       /* If we had WOL enabled, the cell clock was never turned off during
+        * sleep, so we end up being unbalanced. Fix that here
+        */
+       if (gp->asleep_wol)
+               gem_put_cell(gp);
+
+       /* Unlock the network stack */
+       rtnl_unlock();
+
+       return 0;
+}
+#endif /* CONFIG_PM */
+
+/* ndo_get_stats: fold the hardware error counters into dev->stats and
+ * clear them.  Skips the register reads when the device is down,
+ * detached or the power cell is off.
+ */
+static struct net_device_stats *gem_get_stats(struct net_device *dev)
+{
+       struct gem *gp = netdev_priv(dev);
+
+       /* I have seen this being called while the PM was in progress,
+        * so we shield against this. Let's also not poke at registers
+        * while the reset task is going on.
+        *
+        * TODO: Move stats collection elsewhere (link timer ?) and
+        * make this a nop to avoid all those synchro issues
+        */
+       if (!netif_device_present(dev) || !netif_running(dev))
+               goto bail;
+
+       /* Better safe than sorry... */
+       if (WARN_ON(!gp->cell_enabled))
+               goto bail;
+
+       dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
+       writel(0, gp->regs + MAC_FCSERR);
+
+       dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
+       writel(0, gp->regs + MAC_AERR);
+
+       dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
+       writel(0, gp->regs + MAC_LERR);
+
+       /* MAC_ECOLL is read twice before it is cleared: excess
+        * collisions count both as aborted TX and as collisions.
+        */
+       dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
+       dev->stats.collisions +=
+               (readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
+       writel(0, gp->regs + MAC_ECOLL);
+       writel(0, gp->regs + MAC_LCOLL);
+ bail:
+       return &dev->stats;
+}
+
+/* ndo_set_mac_address: store the new address and, if the chip is up
+ * and powered, program it into the MAC address registers (three
+ * 16-bit halves, most significant last).
+ */
+static int gem_set_mac_address(struct net_device *dev, void *addr)
+{
+       struct sockaddr *macaddr = (struct sockaddr *) addr;
+       struct gem *gp = netdev_priv(dev);
+       unsigned char *e = &dev->dev_addr[0];
+
+       if (!is_valid_ether_addr(macaddr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
+
+       /* We'll just catch it later when the device is up'd or resumed */
+       if (!netif_running(dev) || !netif_device_present(dev))
+               return 0;
+
+       /* Better safe than sorry... */
+       if (WARN_ON(!gp->cell_enabled))
+               return 0;
+
+       writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
+       writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
+       writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
+
+       return 0;
+}
+
+/* ndo_set_multicast_list: recompute the RX filter configuration,
+ * temporarily disabling the receiver (polling until the enable bit
+ * clears) before writing the new config.
+ */
+static void gem_set_multicast(struct net_device *dev)
+{
+       struct gem *gp = netdev_priv(dev);
+       u32 rxcfg, rxcfg_new;
+       int limit = 10000;
+
+       if (!netif_running(dev) || !netif_device_present(dev))
+               return;
+
+       /* Better safe than sorry... */
+       if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled))
+               return;
+
+       rxcfg = readl(gp->regs + MAC_RXCFG);
+       rxcfg_new = gem_setup_multicast(gp);
+#ifdef STRIP_FCS
+       rxcfg_new |= MAC_RXCFG_SFCS;
+#endif
+       gp->mac_rx_cfg = rxcfg_new;
+
+       /* Disable RX and wait for it to actually stop before changing
+        * the filter bits.
+        */
+       writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
+       while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
+               if (!limit--)
+                       break;
+               udelay(10);
+       }
+
+       rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
+       rxcfg |= rxcfg_new;
+
+       writel(rxcfg, gp->regs + MAC_RXCFG);
+}
+
+/* Jumbo-grams don't seem to work :-( */
+#define GEM_MIN_MTU    68
+#if 1
+#define GEM_MAX_MTU    1500
+#else
+#define GEM_MAX_MTU    9000
+#endif
+
+/* ndo_change_mtu: validate the new MTU and, if the interface is up
+ * and powered, reinit the chip so the new size takes effect.
+ */
+static int gem_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct gem *gp = netdev_priv(dev);
+
+       if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
+               return -EINVAL;
+
+       dev->mtu = new_mtu;
+
+       /* We'll just catch it later when the device is up'd or resumed */
+       if (!netif_running(dev) || !netif_device_present(dev))
+               return 0;
+
+       /* Better safe than sorry... */
+       if (WARN_ON(!gp->cell_enabled))
+               return 0;
+
+       gem_netif_stop(gp);
+       gem_reinit_chip(gp);
+       if (gp->lstate == link_up)
+               gem_set_link_modes(gp);
+       gem_netif_start(gp);
+
+       return 0;
+}
+
+/* ethtool get_drvinfo: report driver name, version and PCI bus id.
+ * NOTE(review): plain strcpy into the fixed-size ethtool_drvinfo
+ * fields relies on DRV_NAME/DRV_VERSION/pci_name() being short
+ * enough — strlcpy would be safer; confirm the lengths.
+ */
+static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+       struct gem *gp = netdev_priv(dev);
+
+       strcpy(info->driver, DRV_NAME);
+       strcpy(info->version, DRV_VERSION);
+       strcpy(info->bus_info, pci_name(gp->pdev));
+}
+
+/* ethtool get_settings: report link capabilities and current mode.
+ * MII PHYs report their real feature set; the PCS/serdes paths return
+ * mostly fixed values.
+ */
+static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct gem *gp = netdev_priv(dev);
+
+       if (gp->phy_type == phy_mii_mdio0 ||
+           gp->phy_type == phy_mii_mdio1) {
+               if (gp->phy_mii.def)
+                       cmd->supported = gp->phy_mii.def->features;
+               else
+                       cmd->supported = (SUPPORTED_10baseT_Half |
+                                         SUPPORTED_10baseT_Full);
+
+               /* XXX hardcoded stuff for now */
+               cmd->port = PORT_MII;
+               cmd->transceiver = XCVR_EXTERNAL;
+               cmd->phy_address = 0; /* XXX fixed PHYAD */
+
+               /* Return current PHY settings */
+               cmd->autoneg = gp->want_autoneg;
+               ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
+               cmd->duplex = gp->phy_mii.duplex;
+               cmd->advertising = gp->phy_mii.advertising;
+
+               /* If we started with a forced mode, we don't have a default
+                * advertise set, we need to return something sensible so
+                * userland can re-enable autoneg properly.
+                */
+               if (cmd->advertising == 0)
+                       cmd->advertising = cmd->supported;
+       } else { // XXX PCS ?
+               cmd->supported =
+                       (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+                        SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+                        SUPPORTED_Autoneg);
+               cmd->advertising = cmd->supported;
+               ethtool_cmd_speed_set(cmd, 0);
+               cmd->duplex = cmd->port = cmd->phy_address =
+                       cmd->transceiver = cmd->autoneg = 0;
+
+               /* serdes means usually a Fibre connector, with most fixed */
+               if (gp->phy_type == phy_serdes) {
+                       cmd->port = PORT_FIBRE;
+                       cmd->supported = (SUPPORTED_1000baseT_Half |
+                               SUPPORTED_1000baseT_Full |
+                               SUPPORTED_FIBRE | SUPPORTED_Autoneg |
+                               SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+                       cmd->advertising = cmd->supported;
+                       cmd->transceiver = XCVR_INTERNAL;
+                       if (gp->lstate == link_up)
+                               ethtool_cmd_speed_set(cmd, SPEED_1000);
+                       cmd->duplex = DUPLEX_FULL;
+                       cmd->autoneg = 1;
+               }
+       }
+       cmd->maxtxpkt = cmd->maxrxpkt = 0;
+
+       return 0;
+}
+
+/* ethtool set_settings: validate the requested autoneg/speed/duplex
+ * combination and restart link negotiation with the new parameters.
+ */
+static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct gem *gp = netdev_priv(dev);
+       u32 speed = ethtool_cmd_speed(cmd);
+
+       /* Verify the settings we care about. */
+       if (cmd->autoneg != AUTONEG_ENABLE &&
+           cmd->autoneg != AUTONEG_DISABLE)
+               return -EINVAL;
+
+       if (cmd->autoneg == AUTONEG_ENABLE &&
+           cmd->advertising == 0)
+               return -EINVAL;
+
+       if (cmd->autoneg == AUTONEG_DISABLE &&
+           ((speed != SPEED_1000 &&
+             speed != SPEED_100 &&
+             speed != SPEED_10) ||
+            (cmd->duplex != DUPLEX_HALF &&
+             cmd->duplex != DUPLEX_FULL)))
+               return -EINVAL;
+
+       /* Apply settings and restart link process. */
+       if (netif_device_present(gp->dev)) {
+               del_timer_sync(&gp->link_timer);
+               gem_begin_auto_negotiation(gp, cmd);
+       }
+
+       return 0;
+}
+
+/* ethtool nway_reset: restart autonegotiation; only valid when
+ * autoneg is the configured mode.
+ */
+static int gem_nway_reset(struct net_device *dev)
+{
+       struct gem *gp = netdev_priv(dev);
+
+       if (!gp->want_autoneg)
+               return -EINVAL;
+
+       /* Restart link process  */
+       if (netif_device_present(gp->dev)) {
+               del_timer_sync(&gp->link_timer);
+               gem_begin_auto_negotiation(gp, NULL);
+       }
+
+       return 0;
+}
+
+/* ethtool get_msglevel: return the driver's message-enable bitmask. */
+static u32 gem_get_msglevel(struct net_device *dev)
+{
+       struct gem *gp = netdev_priv(dev);
+       return gp->msg_enable;
+}
+
+/* ethtool set_msglevel: set the driver's message-enable bitmask. */
+static void gem_set_msglevel(struct net_device *dev, u32 value)
+{
+       struct gem *gp = netdev_priv(dev);
+       gp->msg_enable = value;
+}
+
+
+/* Add more when I understand how to program the chip */
+/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */
+
+#define WOL_SUPPORTED_MASK     (WAKE_MAGIC)
+
+/* ethtool get_wol: report WOL capability (magic packet only) and the
+ * currently enabled options.
+ */
+static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct gem *gp = netdev_priv(dev);
+
+       /* Add more when I understand how to program the chip */
+       if (gp->has_wol) {
+               wol->supported = WOL_SUPPORTED_MASK;
+               wol->wolopts = gp->wake_on_lan;
+       } else {
+               wol->supported = 0;
+               wol->wolopts = 0;
+       }
+}
+
+/* ethtool set_wol: record the requested WOL options, masked to what
+ * the chip supports.  Applied on the next suspend.
+ */
+static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct gem *gp = netdev_priv(dev);
+
+       if (!gp->has_wol)
+               return -EOPNOTSUPP;
+       gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
+       return 0;
+}
+
+/* ethtool operations table */
+static const struct ethtool_ops gem_ethtool_ops = {
+       .get_drvinfo            = gem_get_drvinfo,
+       .get_link               = ethtool_op_get_link,
+       .get_settings           = gem_get_settings,
+       .set_settings           = gem_set_settings,
+       .nway_reset             = gem_nway_reset,
+       .get_msglevel           = gem_get_msglevel,
+       .set_msglevel           = gem_set_msglevel,
+       .get_wol                = gem_get_wol,
+       .set_wol                = gem_set_wol,
+};
+
+/* ndo_do_ioctl: handle the standard MII ioctls (get PHY address,
+ * read/write PHY register).  Anything else returns -EOPNOTSUPP.
+ */
+static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+       struct gem *gp = netdev_priv(dev);
+       struct mii_ioctl_data *data = if_mii(ifr);
+       int rc = -EOPNOTSUPP;
+
+       /* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that
+        * netif_device_present() is true and holds rtnl_lock for us
+        * so we have nothing to worry about
+        */
+
+       switch (cmd) {
+       case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
+               data->phy_id = gp->mii_phy_addr;
+               /* Fallthrough... */
+
+       case SIOCGMIIREG:               /* Read MII PHY register. */
+               data->val_out = __phy_read(gp, data->phy_id & 0x1f,
+                                          data->reg_num & 0x1f);
+               rc = 0;
+               break;
+
+       case SIOCSMIIREG:               /* Write MII PHY register. */
+               __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
+                           data->val_in);
+               rc = 0;
+               break;
+       }
+       return rc;
+}
+
+#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
+/* Fetch MAC address from vital product data of PCI ROM. */
+static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
+{
+       int this_offset;
+
+       for (this_offset = 0x20; this_offset < len; this_offset++) {
+               void __iomem *p = rom_base + this_offset;
+               int i;
+
+               if (readb(p + 0) != 0x90 ||
+                   readb(p + 1) != 0x00 ||
+                   readb(p + 2) != 0x09 ||
+                   readb(p + 3) != 0x4e ||
+                   readb(p + 4) != 0x41 ||
+                   readb(p + 5) != 0x06)
+                       continue;
+
+               this_offset += 6;
+               p += 6;
+
+               for (i = 0; i < 6; i++)
+                       dev_addr[i] = readb(p + i);
+               return 1;
+       }
+       return 0;
+}
+
+/* Obtain the MAC address on platforms without an Open Firmware
+ * device tree (neither SPARC nor PPC).  Try the PCI expansion ROM's
+ * vital product data first (valid ROMs start with 0x55 0xaa); fall
+ * back to a random address with the Sun OUI (08:00:20) prefix.
+ */
+static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
+{
+       size_t size;
+       void __iomem *p = pci_map_rom(pdev, &size);
+
+       if (p) {
+               int found;
+
+               found = readb(p) == 0x55 &&
+                       readb(p + 1) == 0xaa &&
+                       find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
+               pci_unmap_rom(pdev, p);
+               if (found)
+                       return;
+       }
+
+       /* Sun MAC prefix then 3 random bytes. */
+       dev_addr[0] = 0x08;
+       dev_addr[1] = 0x00;
+       dev_addr[2] = 0x20;
+       get_random_bytes(dev_addr + 3, 3);
+}
+#endif /* not Sparc and not PPC */
+
+/* Determine the device MAC address: from the OF "local-mac-address"
+ * property on SPARC/PPC (falling back to the SPARC IDPROM), or from
+ * the PCI ROM / a random Sun-prefixed address elsewhere.  Returns 0
+ * on success, -1 if no address could be obtained.
+ */
+static int __devinit gem_get_device_address(struct gem *gp)
+{
+#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
+       struct net_device *dev = gp->dev;
+       const unsigned char *addr;
+
+       addr = of_get_property(gp->of_node, "local-mac-address", NULL);
+       if (addr == NULL) {
+#ifdef CONFIG_SPARC
+               addr = idprom->id_ethaddr;
+#else
+               printk("\n");
+               pr_err("%s: can't get mac-address\n", dev->name);
+               return -1;
+#endif
+       }
+       memcpy(dev->dev_addr, addr, 6);
+#else
+       get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
+#endif
+       return 0;
+}
+
+/* PCI remove hook: unregister the netdev, cancel the deferred reset
+ * work and release all resources acquired in gem_init_one().
+ */
+static void gem_remove_one(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+
+       if (dev) {
+               struct gem *gp = netdev_priv(dev);
+
+               unregister_netdev(dev);
+
+               /* Ensure reset task is truly gone */
+               cancel_work_sync(&gp->reset_task);
+
+               /* Free resources */
+               pci_free_consistent(pdev,
+                                   sizeof(struct gem_init_block),
+                                   gp->init_block,
+                                   gp->gblock_dvma);
+               iounmap(gp->regs);
+               pci_release_regions(pdev);
+               free_netdev(dev);
+
+               pci_set_drvdata(pdev, NULL);
+       }
+}
+
+/* net_device operations table */
+static const struct net_device_ops gem_netdev_ops = {
+       .ndo_open               = gem_open,
+       .ndo_stop               = gem_close,
+       .ndo_start_xmit         = gem_start_xmit,
+       .ndo_get_stats          = gem_get_stats,
+       .ndo_set_multicast_list = gem_set_multicast,
+       .ndo_do_ioctl           = gem_ioctl,
+       .ndo_tx_timeout         = gem_tx_timeout,
+       .ndo_change_mtu         = gem_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = gem_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = gem_poll_controller,
+#endif
+};
+
+/* PCI probe: enable the device, set up DMA masks, map registers,
+ * allocate and initialize the netdev/private state, validate the
+ * hardware, fetch the MAC address and register with the net core.
+ * Returns 0 on success or a negative errno.
+ */
+static int __devinit gem_init_one(struct pci_dev *pdev,
+                                 const struct pci_device_id *ent)
+{
+       unsigned long gemreg_base, gemreg_len;
+       struct net_device *dev;
+       struct gem *gp;
+       int err, pci_using_dac;
+
+       printk_once(KERN_INFO "%s", version);
+
+       /* Apple gmac note: during probe, the chip is powered up by
+        * the arch code to allow the code below to work (and to let
+        * the chip be probed on the config space. It won't stay powered
+        * up until the interface is brought up however, so we can't rely
+        * on register configuration done at this point.
+        */
+       err = pci_enable_device(pdev);
+       if (err) {
+               pr_err("Cannot enable MMIO operation, aborting\n");
+               return err;
+       }
+       pci_set_master(pdev);
+
+       /* Configure DMA attributes. */
+
+       /* All of the GEM documentation states that 64-bit DMA addressing
+        * is fully supported and should work just fine.  However the
+        * front end for RIO based GEMs is different and only supports
+        * 32-bit addressing.
+        *
+        * For now we assume the various PPC GEMs are 32-bit only as well.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_SUN &&
+           pdev->device == PCI_DEVICE_ID_SUN_GEM &&
+           !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+               pci_using_dac = 1;
+       } else {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err) {
+                       pr_err("No usable DMA configuration, aborting\n");
+                       goto err_disable_device;
+               }
+               pci_using_dac = 0;
+       }
+
+       gemreg_base = pci_resource_start(pdev, 0);
+       gemreg_len = pci_resource_len(pdev, 0);
+
+       /* BAR 0 must be MMIO, not an I/O port range */
+       if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
+               pr_err("Cannot find proper PCI device base address, aborting\n");
+               err = -ENODEV;
+               goto err_disable_device;
+       }
+
+       dev = alloc_etherdev(sizeof(*gp));
+       if (!dev) {
+               pr_err("Etherdev alloc failed, aborting\n");
+               err = -ENOMEM;
+               goto err_disable_device;
+       }
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       gp = netdev_priv(dev);
+
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err) {
+               pr_err("Cannot obtain PCI resources, aborting\n");
+               goto err_out_free_netdev;
+       }
+
+       gp->pdev = pdev;
+       dev->base_addr = (long) pdev;
+       gp->dev = dev;
+
+       gp->msg_enable = DEFAULT_MSG;
+
+       init_timer(&gp->link_timer);
+       gp->link_timer.function = gem_link_timer;
+       gp->link_timer.data = (unsigned long) gp;
+
+       INIT_WORK(&gp->reset_task, gem_reset_task);
+
+       gp->lstate = link_down;
+       gp->timer_ticks = 0;
+       netif_carrier_off(dev);
+
+       gp->regs = ioremap(gemreg_base, gemreg_len);
+       if (!gp->regs) {
+               pr_err("Cannot map device registers, aborting\n");
+               err = -EIO;
+               goto err_out_free_res;
+       }
+
+       /* On Apple, we want a reference to the Open Firmware device-tree
+        * node. We use it for clock control.
+        */
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
+       gp->of_node = pci_device_to_OF_node(pdev);
+#endif
+
+       /* Only Apple version supports WOL afaik */
+       if (pdev->vendor == PCI_VENDOR_ID_APPLE)
+               gp->has_wol = 1;
+
+       /* Make sure cell is enabled */
+       gem_get_cell(gp);
+
+       /* Make sure everything is stopped and in init state */
+       gem_reset(gp);
+
+       /* Fill up the mii_phy structure (even if we won't use it) */
+       gp->phy_mii.dev = dev;
+       gp->phy_mii.mdio_read = _phy_read;
+       gp->phy_mii.mdio_write = _phy_write;
+#ifdef CONFIG_PPC_PMAC
+       gp->phy_mii.platform_data = gp->of_node;
+#endif
+       /* By default, we start with autoneg */
+       gp->want_autoneg = 1;
+
+       /* Check fifo sizes, PHY type, etc... */
+       if (gem_check_invariants(gp)) {
+               err = -ENODEV;
+               goto err_out_iounmap;
+       }
+
+       /* It is guaranteed that the returned buffer will be at least
+        * PAGE_SIZE aligned.
+        */
+       gp->init_block = (struct gem_init_block *)
+               pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
+                                    &gp->gblock_dvma);
+       if (!gp->init_block) {
+               pr_err("Cannot allocate init block, aborting\n");
+               err = -ENOMEM;
+               goto err_out_iounmap;
+       }
+
+       if (gem_get_device_address(gp))
+               goto err_out_free_consistent;
+
+       dev->netdev_ops = &gem_netdev_ops;
+       netif_napi_add(dev, &gp->napi, gem_poll, 64);
+       dev->ethtool_ops = &gem_ethtool_ops;
+       dev->watchdog_timeo = 5 * HZ;
+       dev->irq = pdev->irq;
+       dev->dma = 0;
+
+       /* Set that now, in case PM kicks in now */
+       pci_set_drvdata(pdev, dev);
+
+       /* We can do scatter/gather and HW checksum */
+       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+       dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+       if (pci_using_dac)
+               dev->features |= NETIF_F_HIGHDMA;
+
+       /* Register with kernel */
+       if (register_netdev(dev)) {
+               pr_err("Cannot register net device, aborting\n");
+               err = -ENOMEM;
+               goto err_out_free_consistent;
+       }
+
+       /* Undo the get_cell with appropriate locking (we could use
+        * ndo_init/uninit but that would be even more clumsy imho)
+        */
+       rtnl_lock();
+       gem_put_cell(gp);
+       rtnl_unlock();
+
+       netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
+                   dev->dev_addr);
+       return 0;
+
+/* NOTE(review): gem_remove_one() frees the netdev and unmaps the
+ * registers, yet control falls through into the labels below which
+ * touch gp/dev again (iounmap, free_netdev) — on the register_netdev
+ * failure path this looks like a double free / use-after-free;
+ * confirm the intended label targets.
+ */
+err_out_free_consistent:
+       gem_remove_one(pdev);
+err_out_iounmap:
+       gem_put_cell(gp);
+       iounmap(gp->regs);
+
+err_out_free_res:
+       pci_release_regions(pdev);
+
+err_out_free_netdev:
+       free_netdev(dev);
+err_disable_device:
+       pci_disable_device(pdev);
+       return err;
+
+}
+
+
+/* PCI driver registration */
+static struct pci_driver gem_driver = {
+       .name           = GEM_MODULE_NAME,
+       .id_table       = gem_pci_tbl,
+       .probe          = gem_init_one,
+       .remove         = gem_remove_one,
+#ifdef CONFIG_PM
+       .suspend        = gem_suspend,
+       .resume         = gem_resume,
+#endif /* CONFIG_PM */
+};
+
+/* Module entry point: register the PCI driver. */
+static int __init gem_init(void)
+{
+       return pci_register_driver(&gem_driver);
+}
+
+/* Module exit point: unregister the PCI driver. */
+static void __exit gem_cleanup(void)
+{
+       pci_unregister_driver(&gem_driver);
+}
+
+module_init(gem_init);
+module_exit(gem_cleanup);
diff --git a/drivers/net/ethernet/sun/sungem.h b/drivers/net/ethernet/sun/sungem.h
new file mode 100644 (file)
index 0000000..835ce1b
--- /dev/null
@@ -0,0 +1,1027 @@
+/* $Id: sungem.h,v 1.10.2.4 2002/03/11 08:54:48 davem Exp $
+ * sungem.h: Definitions for Sun GEM ethernet driver.
+ *
+ * Copyright (C) 2000 David S. Miller (davem@redhat.com)
+ */
+
+#ifndef _SUNGEM_H
+#define _SUNGEM_H
+
+/* Global Registers */
+#define GREG_SEBSTATE  0x0000UL        /* SEB State Register           */
+#define GREG_CFG       0x0004UL        /* Configuration Register       */
+#define GREG_STAT      0x000CUL        /* Status Register              */
+#define GREG_IMASK     0x0010UL        /* Interrupt Mask Register      */
+#define GREG_IACK      0x0014UL        /* Interrupt ACK Register       */
+#define GREG_STAT2     0x001CUL        /* Alias of GREG_STAT           */
+#define GREG_PCIESTAT  0x1000UL        /* PCI Error Status Register    */
+#define GREG_PCIEMASK  0x1004UL        /* PCI Error Mask Register      */
+#define GREG_BIFCFG    0x1008UL        /* BIF Configuration Register   */
+#define GREG_BIFDIAG   0x100CUL        /* BIF Diagnostics Register     */
+#define GREG_SWRST     0x1010UL        /* Software Reset Register      */
+
+/* Global SEB State Register */
+#define GREG_SEBSTATE_ARB      0x00000003      /* State of Arbiter             */
+#define GREG_SEBSTATE_RXWON    0x00000004      /* RX won internal arbitration  */
+
+/* Global Configuration Register */
+#define GREG_CFG_IBURST                0x00000001      /* Infinite Burst               */
+#define GREG_CFG_TXDMALIM      0x0000003e      /* TX DMA grant limit           */
+#define GREG_CFG_RXDMALIM      0x000007c0      /* RX DMA grant limit           */
+#define GREG_CFG_RONPAULBIT    0x00000800      /* Use mem read multiple for PCI read
+                                                * after infinite burst (Apple) */
+#define GREG_CFG_ENBUG2FIX     0x00001000      /* Fix Rx hang after overflow */
+
+/* Global Interrupt Status Register.
+ *
+ * Reading this register automatically clears bits 0 through 6.
+ * This auto-clearing does not occur when the alias at GREG_STAT2
+ * is read instead.  The rest of the interrupt bits only clear when
+ * the secondary interrupt status register corresponding to that
+ * bit is read (ie. if GREG_STAT_PCS is set, it will be cleared by
+ * reading PCS_ISTAT).
+ */
+#define GREG_STAT_TXINTME      0x00000001      /* TX INTME frame transferred   */
+#define GREG_STAT_TXALL                0x00000002      /* All TX frames transferred    */
+#define GREG_STAT_TXDONE       0x00000004      /* One TX frame transferred     */
+#define GREG_STAT_RXDONE       0x00000010      /* One RX frame arrived         */
+#define GREG_STAT_RXNOBUF      0x00000020      /* No free RX buffers available */
+#define GREG_STAT_RXTAGERR     0x00000040      /* RX tag framing is corrupt    */
+#define GREG_STAT_PCS          0x00002000      /* PCS signalled interrupt      */
+#define GREG_STAT_TXMAC                0x00004000      /* TX MAC signalled interrupt   */
+#define GREG_STAT_RXMAC                0x00008000      /* RX MAC signalled interrupt   */
+#define GREG_STAT_MAC          0x00010000      /* MAC Control signalled irq    */
+#define GREG_STAT_MIF          0x00020000      /* MIF signalled interrupt      */
+#define GREG_STAT_PCIERR       0x00040000      /* PCI Error interrupt          */
+#define GREG_STAT_TXNR         0xfff80000      /* == TXDMA_TXDONE reg val      */
+#define GREG_STAT_TXNR_SHIFT   19
+
+#define GREG_STAT_ABNORMAL     (GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR | \
+                                GREG_STAT_PCS | GREG_STAT_TXMAC | GREG_STAT_RXMAC | \
+                                GREG_STAT_MAC | GREG_STAT_MIF | GREG_STAT_PCIERR)
+
+#define GREG_STAT_NAPI         (GREG_STAT_TXALL  | GREG_STAT_TXINTME | \
+                                GREG_STAT_RXDONE | GREG_STAT_ABNORMAL)
+
+/* The layout of GREG_IMASK and GREG_IACK is identical to GREG_STAT.
+ * Bits set in GREG_IMASK will prevent that interrupt type from being
+ * signalled to the cpu.  GREG_IACK can be used to clear specific top-level
+ * interrupt conditions in GREG_STAT, ie. it only works for bits 0 through 6.
+ * Setting the bit will clear that interrupt, clear bits will have no effect
+ * on GREG_STAT.
+ */
+
+/* Global PCI Error Status Register */
+#define GREG_PCIESTAT_BADACK   0x00000001      /* No ACK64# during ABS64 cycle */
+#define GREG_PCIESTAT_DTRTO    0x00000002      /* Delayed transaction timeout  */
+#define GREG_PCIESTAT_OTHER    0x00000004      /* Other PCI error, check cfg space */
+
+/* The layout of the GREG_PCIEMASK is identical to that of GREG_PCIESTAT.
+ * Bits set in GREG_PCIEMASK will prevent that interrupt type from being
+ * signalled to the cpu.
+ */
+
+/* Global BIF Configuration Register */
+#define GREG_BIFCFG_SLOWCLK    0x00000001      /* Set if PCI runs < 25Mhz      */
+#define GREG_BIFCFG_B64DIS     0x00000002      /* Disable 64bit wide data cycle*/
+#define GREG_BIFCFG_M66EN      0x00000004      /* Set if on 66Mhz PCI segment  */
+
+/* Global BIF Diagnostics Register */
+#define GREG_BIFDIAG_BURSTSM   0x007f0000      /* PCI Burst state machine      */
+#define GREG_BIFDIAG_BIFSM     0xff000000      /* BIF state machine            */
+
+/* Global Software Reset Register.
+ *
+ * This register is used to perform a global reset of the RX and TX portions
+ * of the GEM asic.  Setting the RX or TX reset bit will start the reset.
+ * The driver _MUST_ poll these bits until they clear.  One may not attempt
+ * to program any other part of GEM until the bits clear.
+ */
+#define GREG_SWRST_TXRST       0x00000001      /* TX Software Reset            */
+#define GREG_SWRST_RXRST       0x00000002      /* RX Software Reset            */
+#define GREG_SWRST_RSTOUT      0x00000004      /* Force RST# pin active        */
+#define GREG_SWRST_CACHESIZE   0x00ff0000      /* RIO only: cache line size    */
+#define GREG_SWRST_CACHE_SHIFT 16
+
+/* TX DMA Registers */
+#define TXDMA_KICK     0x2000UL        /* TX Kick Register             */
+#define TXDMA_CFG      0x2004UL        /* TX Configuration Register    */
+#define TXDMA_DBLOW    0x2008UL        /* TX Desc. Base Low            */
+#define TXDMA_DBHI     0x200CUL        /* TX Desc. Base High           */
+#define TXDMA_FWPTR    0x2014UL        /* TX FIFO Write Pointer        */
+#define TXDMA_FSWPTR   0x2018UL        /* TX FIFO Shadow Write Pointer */
+#define TXDMA_FRPTR    0x201CUL        /* TX FIFO Read Pointer         */
+#define TXDMA_FSRPTR   0x2020UL        /* TX FIFO Shadow Read Pointer  */
+#define TXDMA_PCNT     0x2024UL        /* TX FIFO Packet Counter       */
+#define TXDMA_SMACHINE 0x2028UL        /* TX State Machine Register    */
+#define TXDMA_DPLOW    0x2030UL        /* TX Data Pointer Low          */
+#define TXDMA_DPHI     0x2034UL        /* TX Data Pointer High         */
+#define TXDMA_TXDONE   0x2100UL        /* TX Completion Register       */
+#define TXDMA_FADDR    0x2104UL        /* TX FIFO Address              */
+#define TXDMA_FTAG     0x2108UL        /* TX FIFO Tag                  */
+#define TXDMA_DLOW     0x210CUL        /* TX FIFO Data Low             */
+#define TXDMA_DHIT1    0x2110UL        /* TX FIFO Data HighT1          */
+#define TXDMA_DHIT0    0x2114UL        /* TX FIFO Data HighT0          */
+#define TXDMA_FSZ      0x2118UL        /* TX FIFO Size                 */
+
+/* TX Kick Register.
+ *
+ * This 13-bit register is programmed by the driver to hold the descriptor
+ * entry index which follows the last valid transmit descriptor.
+ */
+
+/* TX Completion Register.
+ *
+ * This 13-bit register is updated by GEM to hold the descriptor entry index
+ * which follows the last descriptor already processed by GEM.  Note that
+ * this value is mirrored in GREG_STAT which eliminates the need to even
+ * access this register in the driver during interrupt processing.
+ */
+
+/* TX Configuration Register.
+ *
+ * Note that TXDMA_CFG_FTHRESH, the TX FIFO Threshold, is an obsolete feature
+ * that was meant to be used with jumbo packets.  It should be set to the
+ * maximum value of 0x4ff, else one risks getting TX MAC Underrun errors.
+ */
+#define TXDMA_CFG_ENABLE       0x00000001      /* Enable TX DMA channel        */
+#define TXDMA_CFG_RINGSZ       0x0000001e      /* TX descriptor ring size      */
+#define TXDMA_CFG_RINGSZ_32    0x00000000      /* 32 TX descriptors            */
+#define TXDMA_CFG_RINGSZ_64    0x00000002      /* 64 TX descriptors            */
+#define TXDMA_CFG_RINGSZ_128   0x00000004      /* 128 TX descriptors           */
+#define TXDMA_CFG_RINGSZ_256   0x00000006      /* 256 TX descriptors           */
+#define TXDMA_CFG_RINGSZ_512   0x00000008      /* 512 TX descriptors           */
+#define TXDMA_CFG_RINGSZ_1K    0x0000000a      /* 1024 TX descriptors          */
+#define TXDMA_CFG_RINGSZ_2K    0x0000000c      /* 2048 TX descriptors          */
+#define TXDMA_CFG_RINGSZ_4K    0x0000000e      /* 4096 TX descriptors          */
+#define TXDMA_CFG_RINGSZ_8K    0x00000010      /* 8192 TX descriptors          */
+#define TXDMA_CFG_PIOSEL       0x00000020      /* Enable TX FIFO PIO from cpu  */
+#define TXDMA_CFG_FTHRESH      0x001ffc00      /* TX FIFO Threshold, obsolete  */
+#define TXDMA_CFG_PMODE                0x00200000      /* TXALL irq means TX FIFO empty*/
+
+/* TX Descriptor Base Low/High.
+ *
+ * These two registers store the 53 most significant bits of the base address
+ * of the TX descriptor table.  The 11 least significant bits are always
+ * zero.  As a result, the TX descriptor table must be 2K aligned.
+ */
+
+/* The rest of the TXDMA_* registers are for diagnostics and debug, I will document
+ * them later. -DaveM
+ */
+
+/* WakeOnLan Registers */
+#define WOL_MATCH0     0x3000UL
+#define WOL_MATCH1     0x3004UL
+#define WOL_MATCH2     0x3008UL
+#define WOL_MCOUNT     0x300CUL
+#define WOL_WAKECSR    0x3010UL
+
+/* WOL Match count register
+ */
+#define WOL_MCOUNT_N           0x00000010
+#define WOL_MCOUNT_M           0x00000000 /* 0 << 8 */
+
+#define WOL_WAKECSR_ENABLE     0x00000001
+#define WOL_WAKECSR_MII                0x00000002
+#define WOL_WAKECSR_SEEN       0x00000004
+#define WOL_WAKECSR_FILT_UCAST 0x00000008
+#define WOL_WAKECSR_FILT_MCAST 0x00000010
+#define WOL_WAKECSR_FILT_BCAST 0x00000020
+#define WOL_WAKECSR_FILT_SEEN  0x00000040
+
+
+/* Receive DMA Registers */
+#define RXDMA_CFG      0x4000UL        /* RX Configuration Register    */
+#define RXDMA_DBLOW    0x4004UL        /* RX Descriptor Base Low       */
+#define RXDMA_DBHI     0x4008UL        /* RX Descriptor Base High      */
+#define RXDMA_FWPTR    0x400CUL        /* RX FIFO Write Pointer        */
+#define RXDMA_FSWPTR   0x4010UL        /* RX FIFO Shadow Write Pointer */
+#define RXDMA_FRPTR    0x4014UL        /* RX FIFO Read Pointer         */
+#define RXDMA_PCNT     0x4018UL        /* RX FIFO Packet Counter       */
+#define RXDMA_SMACHINE 0x401CUL        /* RX State Machine Register    */
+#define RXDMA_PTHRESH  0x4020UL        /* Pause Thresholds             */
+#define RXDMA_DPLOW    0x4024UL        /* RX Data Pointer Low          */
+#define RXDMA_DPHI     0x4028UL        /* RX Data Pointer High         */
+#define RXDMA_KICK     0x4100UL        /* RX Kick Register             */
+#define RXDMA_DONE     0x4104UL        /* RX Completion Register       */
+#define RXDMA_BLANK    0x4108UL        /* RX Blanking Register         */
+#define RXDMA_FADDR    0x410CUL        /* RX FIFO Address              */
+#define RXDMA_FTAG     0x4110UL        /* RX FIFO Tag                  */
+#define RXDMA_DLOW     0x4114UL        /* RX FIFO Data Low             */
+#define RXDMA_DHIT1    0x4118UL        /* RX FIFO Data HighT1          */
+#define RXDMA_DHIT0    0x411CUL        /* RX FIFO Data HighT0          */
+#define RXDMA_FSZ      0x4120UL        /* RX FIFO Size                 */
+
+/* RX Configuration Register. */
+#define RXDMA_CFG_ENABLE       0x00000001      /* Enable RX DMA channel        */
+#define RXDMA_CFG_RINGSZ       0x0000001e      /* RX descriptor ring size      */
+#define RXDMA_CFG_RINGSZ_32    0x00000000      /* - 32   entries               */
+#define RXDMA_CFG_RINGSZ_64    0x00000002      /* - 64   entries               */
+#define RXDMA_CFG_RINGSZ_128   0x00000004      /* - 128  entries               */
+#define RXDMA_CFG_RINGSZ_256   0x00000006      /* - 256  entries               */
+#define RXDMA_CFG_RINGSZ_512   0x00000008      /* - 512  entries               */
+#define RXDMA_CFG_RINGSZ_1K    0x0000000a      /* - 1024 entries               */
+#define RXDMA_CFG_RINGSZ_2K    0x0000000c      /* - 2048 entries               */
+#define RXDMA_CFG_RINGSZ_4K    0x0000000e      /* - 4096 entries               */
+#define RXDMA_CFG_RINGSZ_8K    0x00000010      /* - 8192 entries               */
+#define RXDMA_CFG_RINGSZ_BDISAB        0x00000020      /* Disable RX desc batching     */
+#define RXDMA_CFG_FBOFF                0x00001c00      /* Offset of first data byte    */
+#define RXDMA_CFG_CSUMOFF      0x000fe000      /* Skip bytes before csum calc  */
+#define RXDMA_CFG_FTHRESH      0x07000000      /* RX FIFO dma start threshold  */
+#define RXDMA_CFG_FTHRESH_64   0x00000000      /* - 64   bytes                 */
+#define RXDMA_CFG_FTHRESH_128  0x01000000      /* - 128  bytes                 */
+#define RXDMA_CFG_FTHRESH_256  0x02000000      /* - 256  bytes                 */
+#define RXDMA_CFG_FTHRESH_512  0x03000000      /* - 512  bytes                 */
+#define RXDMA_CFG_FTHRESH_1K   0x04000000      /* - 1024 bytes                 */
+#define RXDMA_CFG_FTHRESH_2K   0x05000000      /* - 2048 bytes                 */
+
+/* RX Descriptor Base Low/High.
+ *
+ * These two registers store the 53 most significant bits of the base address
+ * of the RX descriptor table.  The 11 least significant bits are always
+ * zero.  As a result, the RX descriptor table must be 2K aligned.
+ */
+
+/* RX PAUSE Thresholds.
+ *
+ * These values determine when XOFF and XON PAUSE frames are emitted by
+ * GEM.  The thresholds measure RX FIFO occupancy in units of 64 bytes.
+ */
+#define RXDMA_PTHRESH_OFF      0x000001ff      /* XOFF emitted w/FIFO > this   */
+#define RXDMA_PTHRESH_ON       0x001ff000      /* XON emitted w/FIFO < this    */
+
+/* RX Kick Register.
+ *
+ * This 13-bit register is written by the host CPU and holds the last
+ * valid RX descriptor number plus one.  That is, if 'N' is written to
+ * this register, it means that all RX descriptors up to but excluding
+ * 'N' are valid.
+ *
+ * The hardware requires that RX descriptors are posted in increments
+ * of 4.  This means 'N' must be a multiple of four.  For the best
+ * performance, the first new descriptor being posted should be (PCI)
+ * cache line aligned.
+ */
+
+/* RX Completion Register.
+ *
+ * This 13-bit register is updated by GEM to indicate which RX descriptors
+ * have already been used for receive frames.  All descriptors up to but
+ * excluding the value in this register are ready to be processed.  GEM
+ * updates this register value after the RX FIFO empties completely into
+ * the RX descriptor's buffer, but before the RX_DONE bit is set in the
+ * interrupt status register.
+ */
+
+/* RX Blanking Register. */
+#define RXDMA_BLANK_IPKTS      0x000001ff      /* RX_DONE asserted after this
+                                                * many packets received since
+                                                * previous RX_DONE.
+                                                */
+#define RXDMA_BLANK_ITIME      0x000ff000      /* RX_DONE asserted after this
+                                                * many clocks (measured in 2048
+                                                * PCI clocks) were counted since
+                                                * the previous RX_DONE.
+                                                */
+
+/* RX FIFO Size.
+ *
+ * This 11-bit read-only register indicates how large, in units of 64-bytes,
+ * the RX FIFO is.  The driver uses this to properly configure the RX PAUSE
+ * thresholds.
+ */
+
+/* The rest of the RXDMA_* registers are for diagnostics and debug, I will document
+ * them later. -DaveM
+ */
+
+/* MAC Registers */
+#define MAC_TXRST      0x6000UL        /* TX MAC Software Reset Command*/
+#define MAC_RXRST      0x6004UL        /* RX MAC Software Reset Command*/
+#define MAC_SNDPAUSE   0x6008UL        /* Send Pause Command Register  */
+#define MAC_TXSTAT     0x6010UL        /* TX MAC Status Register       */
+#define MAC_RXSTAT     0x6014UL        /* RX MAC Status Register       */
+#define MAC_CSTAT      0x6018UL        /* MAC Control Status Register  */
+#define MAC_TXMASK     0x6020UL        /* TX MAC Mask Register         */
+#define MAC_RXMASK     0x6024UL        /* RX MAC Mask Register         */
+#define MAC_MCMASK     0x6028UL        /* MAC Control Mask Register    */
+#define MAC_TXCFG      0x6030UL        /* TX MAC Configuration Register*/
+#define MAC_RXCFG      0x6034UL        /* RX MAC Configuration Register*/
+#define MAC_MCCFG      0x6038UL        /* MAC Control Config Register  */
+#define MAC_XIFCFG     0x603CUL        /* XIF Configuration Register   */
+#define MAC_IPG0       0x6040UL        /* InterPacketGap0 Register     */
+#define MAC_IPG1       0x6044UL        /* InterPacketGap1 Register     */
+#define MAC_IPG2       0x6048UL        /* InterPacketGap2 Register     */
+#define MAC_STIME      0x604CUL        /* SlotTime Register            */
+#define MAC_MINFSZ     0x6050UL        /* MinFrameSize Register        */
+#define MAC_MAXFSZ     0x6054UL        /* MaxFrameSize Register        */
+#define MAC_PASIZE     0x6058UL        /* PA Size Register             */
+#define MAC_JAMSIZE    0x605CUL        /* JamSize Register             */
+#define MAC_ATTLIM     0x6060UL        /* Attempt Limit Register       */
+#define MAC_MCTYPE     0x6064UL        /* MAC Control Type Register    */
+#define MAC_ADDR0      0x6080UL        /* MAC Address 0 Register       */
+#define MAC_ADDR1      0x6084UL        /* MAC Address 1 Register       */
+#define MAC_ADDR2      0x6088UL        /* MAC Address 2 Register       */
+#define MAC_ADDR3      0x608CUL        /* MAC Address 3 Register       */
+#define MAC_ADDR4      0x6090UL        /* MAC Address 4 Register       */
+#define MAC_ADDR5      0x6094UL        /* MAC Address 5 Register       */
+#define MAC_ADDR6      0x6098UL        /* MAC Address 6 Register       */
+#define MAC_ADDR7      0x609CUL        /* MAC Address 7 Register       */
+#define MAC_ADDR8      0x60A0UL        /* MAC Address 8 Register       */
+#define MAC_AFILT0     0x60A4UL        /* Address Filter 0 Register    */
+#define MAC_AFILT1     0x60A8UL        /* Address Filter 1 Register    */
+#define MAC_AFILT2     0x60ACUL        /* Address Filter 2 Register    */
+#define MAC_AF21MSK    0x60B0UL        /* Address Filter 2&1 Mask Reg  */
+#define MAC_AF0MSK     0x60B4UL        /* Address Filter 0 Mask Reg    */
+#define MAC_HASH0      0x60C0UL        /* Hash Table 0 Register        */
+#define MAC_HASH1      0x60C4UL        /* Hash Table 1 Register        */
+#define MAC_HASH2      0x60C8UL        /* Hash Table 2 Register        */
+#define MAC_HASH3      0x60CCUL        /* Hash Table 3 Register        */
+#define MAC_HASH4      0x60D0UL        /* Hash Table 4 Register        */
+#define MAC_HASH5      0x60D4UL        /* Hash Table 5 Register        */
+#define MAC_HASH6      0x60D8UL        /* Hash Table 6 Register        */
+#define MAC_HASH7      0x60DCUL        /* Hash Table 7 Register        */
+#define MAC_HASH8      0x60E0UL        /* Hash Table 8 Register        */
+#define MAC_HASH9      0x60E4UL        /* Hash Table 9 Register        */
+#define MAC_HASH10     0x60E8UL        /* Hash Table 10 Register       */
+#define MAC_HASH11     0x60ECUL        /* Hash Table 11 Register       */
+#define MAC_HASH12     0x60F0UL        /* Hash Table 12 Register       */
+#define MAC_HASH13     0x60F4UL        /* Hash Table 13 Register       */
+#define MAC_HASH14     0x60F8UL        /* Hash Table 14 Register       */
+#define MAC_HASH15     0x60FCUL        /* Hash Table 15 Register       */
+#define MAC_NCOLL      0x6100UL        /* Normal Collision Counter     */
+#define MAC_FASUCC     0x6104UL        /* First Attmpt. Succ Coll Ctr. */
+#define MAC_ECOLL      0x6108UL        /* Excessive Collision Counter  */
+#define MAC_LCOLL      0x610CUL        /* Late Collision Counter       */
+#define MAC_DTIMER     0x6110UL        /* Defer Timer                  */
+#define MAC_PATMPS     0x6114UL        /* Peak Attempts Register       */
+#define MAC_RFCTR      0x6118UL        /* Receive Frame Counter        */
+#define MAC_LERR       0x611CUL        /* Length Error Counter         */
+#define MAC_AERR       0x6120UL        /* Alignment Error Counter      */
+#define MAC_FCSERR     0x6124UL        /* FCS Error Counter            */
+#define MAC_RXCVERR    0x6128UL        /* RX code Violation Error Ctr  */
+#define MAC_RANDSEED   0x6130UL        /* Random Number Seed Register  */
+#define MAC_SMACHINE   0x6134UL        /* State Machine Register       */
+
+/* TX MAC Software Reset Command. */
+#define MAC_TXRST_CMD  0x00000001      /* Start sw reset, self-clears  */
+
+/* RX MAC Software Reset Command. */
+#define MAC_RXRST_CMD  0x00000001      /* Start sw reset, self-clears  */
+
+/* Send Pause Command. */
+#define MAC_SNDPAUSE_TS        0x0000ffff      /* The pause_time operand used in
+                                        * Send_Pause and flow-control
+                                        * handshakes.
+                                        */
+#define MAC_SNDPAUSE_SP        0x00010000      /* Setting this bit instructs the MAC
+                                        * to send a Pause Flow Control
+                                        * frame onto the network.
+                                        */
+
+/* TX MAC Status Register. */
+#define MAC_TXSTAT_XMIT        0x00000001      /* Frame Transmitted            */
+#define MAC_TXSTAT_URUN        0x00000002      /* TX Underrun                  */
+#define MAC_TXSTAT_MPE 0x00000004      /* Max Packet Size Error        */
+#define MAC_TXSTAT_NCE 0x00000008      /* Normal Collision Cntr Expire */
+#define MAC_TXSTAT_ECE 0x00000010      /* Excess Collision Cntr Expire */
+#define MAC_TXSTAT_LCE 0x00000020      /* Late Collision Cntr Expire   */
+#define MAC_TXSTAT_FCE 0x00000040      /* First Collision Cntr Expire  */
+#define MAC_TXSTAT_DTE 0x00000080      /* Defer Timer Expire           */
+#define MAC_TXSTAT_PCE 0x00000100      /* Peak Attempts Cntr Expire    */
+
+/* RX MAC Status Register. */
+#define MAC_RXSTAT_RCV 0x00000001      /* Frame Received               */
+#define MAC_RXSTAT_OFLW        0x00000002      /* Receive Overflow             */
+#define MAC_RXSTAT_FCE 0x00000004      /* Frame Cntr Expire            */
+#define MAC_RXSTAT_ACE 0x00000008      /* Align Error Cntr Expire      */
+#define MAC_RXSTAT_CCE 0x00000010      /* CRC Error Cntr Expire        */
+#define MAC_RXSTAT_LCE 0x00000020      /* Length Error Cntr Expire     */
+#define MAC_RXSTAT_VCE 0x00000040      /* Code Violation Cntr Expire   */
+
+/* MAC Control Status Register. */
+#define MAC_CSTAT_PRCV 0x00000001      /* Pause Received               */
+#define MAC_CSTAT_PS   0x00000002      /* Paused State                 */
+#define MAC_CSTAT_NPS  0x00000004      /* Not Paused State             */
+#define MAC_CSTAT_PTR  0xffff0000      /* Pause Time Received          */
+
+/* The layout of the MAC_{TX,RX,C}MASK registers is identical to that
+ * of MAC_{TX,RX,C}STAT.  Bits set in MAC_{TX,RX,C}MASK will prevent
+ * that interrupt type from being signalled to front end of GEM.  For
+ * the interrupt to actually get sent to the cpu, it is necessary to
+ * properly set the appropriate GREG_IMASK_{TX,RX,}MAC bits as well.
+ */
+
+/* TX MAC Configuration Register.
+ *
+ * NOTE: The TX MAC Enable bit must be cleared and polled until
+ *      zero before any other bits in this register are changed.
+ *
+ *      Also, enabling the Carrier Extension feature of GEM is
+ *      a 3 step process 1) Set TX Carrier Extension 2) Set
+ *      RX Carrier Extension 3) Set Slot Time to 0x200.  This
+ *      mode must be enabled when in half-duplex at 1Gbps, else
+ *      it must be disabled.
+ */
+#define MAC_TXCFG_ENAB 0x00000001      /* TX MAC Enable                */
+#define MAC_TXCFG_ICS  0x00000002      /* Ignore Carrier Sense         */
+#define MAC_TXCFG_ICOLL        0x00000004      /* Ignore Collisions            */
+#define MAC_TXCFG_EIPG0        0x00000008      /* Enable IPG0                  */
+#define MAC_TXCFG_NGU  0x00000010      /* Never Give Up                */
+#define MAC_TXCFG_NGUL 0x00000020      /* Never Give Up Limit          */
+#define MAC_TXCFG_NBO  0x00000040      /* No Backoff                   */
+#define MAC_TXCFG_SD   0x00000080      /* Slow Down                    */
+#define MAC_TXCFG_NFCS 0x00000100      /* No FCS                       */
+#define MAC_TXCFG_TCE  0x00000200      /* TX Carrier Extension         */
+
+/* RX MAC Configuration Register.
+ *
+ * NOTE: The RX MAC Enable bit must be cleared and polled until
+ *      zero before any other bits in this register are changed.
+ *
+ *      Similar rules apply to the Hash Filter Enable bit when
+ *      programming the hash table registers, and the Address Filter
+ *      Enable bit when programming the address filter registers.
+ */
+#define MAC_RXCFG_ENAB 0x00000001      /* RX MAC Enable                */
+#define MAC_RXCFG_SPAD 0x00000002      /* Strip Pad                    */
+#define MAC_RXCFG_SFCS 0x00000004      /* Strip FCS                    */
+#define MAC_RXCFG_PROM 0x00000008      /* Promiscuous Mode             */
+#define MAC_RXCFG_PGRP 0x00000010      /* Promiscuous Group            */
+#define MAC_RXCFG_HFE  0x00000020      /* Hash Filter Enable           */
+#define MAC_RXCFG_AFE  0x00000040      /* Address Filter Enable        */
+#define MAC_RXCFG_DDE  0x00000080      /* Disable Discard on Error     */
+#define MAC_RXCFG_RCE  0x00000100      /* RX Carrier Extension         */
+
+/* MAC Control Config Register. */
+#define MAC_MCCFG_SPE  0x00000001      /* Send Pause Enable            */
+#define MAC_MCCFG_RPE  0x00000002      /* Receive Pause Enable         */
+#define MAC_MCCFG_PMC  0x00000004      /* Pass MAC Control             */
+
+/* XIF Configuration Register.
+ *
+ * NOTE: When leaving or entering loopback mode, a global hardware
+ *       init of GEM should be performed.
+ */
+#define MAC_XIFCFG_OE  0x00000001      /* MII TX Output Driver Enable  */
+#define MAC_XIFCFG_LBCK        0x00000002      /* Loopback TX to RX            */
+#define MAC_XIFCFG_DISE        0x00000004      /* Disable RX path during TX    */
+#define MAC_XIFCFG_GMII        0x00000008      /* Use GMII clocks + datapath   */
+#define MAC_XIFCFG_MBOE        0x00000010      /* Controls MII_BUF_EN pin      */
+#define MAC_XIFCFG_LLED        0x00000020      /* Force LINKLED# active (low)  */
+#define MAC_XIFCFG_FLED        0x00000040      /* Force FDPLXLED# active (low) */
+
+/* InterPacketGap0 Register.  This 8-bit value is used as an extension
+ * to the InterPacketGap1 Register.  Specifically it contributes to the
+ * timing of the RX-to-TX IPG.  This value is ignored and presumed to
+ * be zero for TX-to-TX IPG calculations and/or when the Enable IPG0 bit
+ * is cleared in the TX MAC Configuration Register.
+ *
+ * This value in this register in terms of media byte time.
+ *
+ * Recommended value: 0x00
+ */
+
+/* InterPacketGap1 Register.  This 8-bit value defines the first 2/3
+ * portion of the Inter Packet Gap.
+ *
+ * This value in this register in terms of media byte time.
+ *
+ * Recommended value: 0x08
+ */
+
+/* InterPacketGap2 Register.  This 8-bit value defines the second 1/3
+ * portion of the Inter Packet Gap.
+ *
+ * This value in this register in terms of media byte time.
+ *
+ * Recommended value: 0x04
+ */
+
+/* Slot Time Register.  This 10-bit value specifies the slot time
+ * parameter in units of media byte time.  It determines the physical
+ * span of the network.
+ *
+ * Recommended value: 0x40
+ */
+
+/* Minimum Frame Size Register.  This 10-bit register specifies the
+ * smallest sized frame the TXMAC will send onto the medium, and the
+ * RXMAC will receive from the medium.
+ *
+ * Recommended value: 0x40
+ */
+
+/* Maximum Frame and Burst Size Register.
+ *
+ * This register specifies two things.  First it specifies the maximum
+ * sized frame the TXMAC will send and the RXMAC will recognize as
+ * valid.  Second, it specifies the maximum run length of a burst of
+ * packets sent in half-duplex gigabit modes.
+ *
+ * Recommended value: 0x200005ee
+ */
+#define MAC_MAXFSZ_MFS 0x00007fff      /* Max Frame Size               */
+#define MAC_MAXFSZ_MBS 0x7fff0000      /* Max Burst Size               */
+
+/* PA Size Register.  This 10-bit register specifies the number of preamble
+ * bytes which will be transmitted at the beginning of each frame.  A
+ * value of two or greater should be programmed here.
+ *
+ * Recommended value: 0x07
+ */
+
+/* Jam Size Register.  This 4-bit register specifies the duration of
+ * the jam in units of media byte time.
+ *
+ * Recommended value: 0x04
+ */
+
+/* Attempts Limit Register.  This 8-bit register specifies the number
+ * of attempts that the TXMAC will make to transmit a frame, before it
+ * resets its Attempts Counter.  After reaching the Attempts Limit the
+ * TXMAC may or may not drop the frame, as determined by the NGU
+ * (Never Give Up) and NGUL (Never Give Up Limit) bits in the TXMAC
+ * Configuration Register.
+ *
+ * Recommended value: 0x10
+ */
+
+/* MAX Control Type Register.  This 16-bit register specifies the
+ * "type" field of a MAC Control frame.  The TXMAC uses this field to
+ * encapsulate the MAC Control frame for transmission, and the RXMAC
+ * uses it for decoding valid MAC Control frames received from the
+ * network.
+ *
+ * Recommended value: 0x8808
+ */
+
+/* MAC Address Registers.  Each of these registers specify the
+ * ethernet MAC of the interface, 16-bits at a time.  Register
+ * 0 specifies bits [47:32], register 1 bits [31:16], and register
+ * 2 bits [15:0].
+ *
+ * Registers 3 through and including 5 specify an alternate
+ * MAC address for the interface.
+ *
+ * Registers 6 through and including 8 specify the MAC Control
+ * Address, which must be the reserved multicast address for MAC
+ * Control frames.
+ *
+ * Example: To program primary station address a:b:c:d:e:f into
+ *         the chip.
+ *             MAC_Address_2 = (a << 8) | b
+ *             MAC_Address_1 = (c << 8) | d
+ *             MAC_Address_0 = (e << 8) | f
+ */
+
+/* Address Filter Registers.  Registers 0 through 2 specify bit
+ * fields [47:32] through [15:0], respectively, of the address
+ * filter.  The Address Filter 2&1 Mask Register denotes the 8-bit
+ * nibble mask for Address Filter Registers 2 and 1.  The Address
+ * Filter 0 Mask Register denotes the 16-bit mask for the Address
+ * Filter Register 0.
+ */
+
+/* Hash Table Registers.  Registers 0 through 15 specify bit fields
+ * [255:240] through [15:0], respectively, of the hash table.
+ */
+
+/* Statistics Registers.  All of these registers are 16-bits and
+ * track occurrences of a specific event.  GEM can be configured
+ * to interrupt the host cpu when any of these counters overflow.
+ * They should all be explicitly initialized to zero when the interface
+ * is brought up.
+ */
+
+/* Random Number Seed Register.  This 10-bit value is used as the
+ * RNG seed inside GEM for the CSMA/CD backoff algorithm.  It is
+ * recommended to program this register to the 10 LSB of the
+ * interfaces MAC address.
+ */
+
+/* Pause Timer, read-only.  This 16-bit timer is used to time the pause
+ * interval as indicated by a received pause flow control frame.
+ * A non-zero value in this timer indicates that the MAC is currently in
+ * the paused state.
+ */
+
+/* MIF Registers */
+#define MIF_BBCLK      0x6200UL        /* MIF Bit-Bang Clock           */
+#define MIF_BBDATA     0x6204UL        /* MIF Bit-Bang Data            */
+#define MIF_BBOENAB    0x6208UL        /* MIF Bit-Bang Output Enable   */
+#define MIF_FRAME      0x620CUL        /* MIF Frame/Output Register    */
+#define MIF_CFG                0x6210UL        /* MIF Configuration Register   */
+#define MIF_MASK       0x6214UL        /* MIF Mask Register            */
+#define MIF_STATUS     0x6218UL        /* MIF Status Register          */
+#define MIF_SMACHINE   0x621CUL        /* MIF State Machine Register   */
+
+/* MIF Bit-Bang Clock.  This 1-bit register is used to generate the
+ * MDC clock waveform on the MII Management Interface when the MIF is
+ * programmed in the "Bit-Bang" mode.  Writing a '1' after a '0' into
+ * this register will create a rising edge on the MDC, while writing
+ * a '0' after a '1' will create a falling edge.  For every bit that
+ * is transferred on the management interface, both edges have to be
+ * generated.
+ */
+
+/* MIF Bit-Bang Data.  This 1-bit register is used to generate the
+ * outgoing data (MDO) on the MII Management Interface when the MIF
+ * is programmed in the "Bit-Bang" mode.  The data will be steered to the
+ * appropriate MDIO based on the state of the PHY_Select bit in the MIF
+ * Configuration Register.
+ */
+
+/* MIF Bit-Bang Output Enable.  This 1-bit register is used to enable
+ * ('1') or disable ('0') the bi-directional driver on the MII when the
+ * MIF is programmed in the "Bit-Bang" mode.  The MDIO should be enabled
+ * when data bits are transferred from the MIF to the transceiver, and it
+ * should be disabled when the interface is idle or when data bits are
+ * transferred from the transceiver to the MIF (data portion of a read
+ * instruction).  Only one MDIO will be enabled at a given time, depending
+ * on the state of the PHY_Select bit in the MIF Configuration Register.
+ */
+
+/* MIF Configuration Register.  This 15-bit register controls the operation
+ * of the MIF.
+ */
+#define MIF_CFG_PSELECT        0x00000001      /* Xcvr slct: 0=mdio0 1=mdio1   */
+#define MIF_CFG_POLL   0x00000002      /* Enable polling mechanism     */
+#define MIF_CFG_BBMODE 0x00000004      /* 1=bit-bang 0=frame mode      */
+#define MIF_CFG_PRADDR 0x000000f8      /* Xcvr poll register address   */
+#define MIF_CFG_MDI0   0x00000100      /* MDIO_0 present or read-bit   */
+#define MIF_CFG_MDI1   0x00000200      /* MDIO_1 present or read-bit   */
+#define MIF_CFG_PPADDR 0x00007c00      /* Xcvr poll PHY address        */
+
+/* MIF Frame/Output Register.  This 32-bit register allows the host to
+ * communicate with a transceiver in frame mode (as opposed to bit-bang
+ * mode).  Writes by the host specify an instruction.  After being issued
+ * the host must poll this register for completion.  Also, after
+ * completion this register holds the data returned by the transceiver
+ * if applicable.
+ */
+#define MIF_FRAME_ST   0xc0000000      /* STart of frame               */
+#define MIF_FRAME_OP   0x30000000      /* OPcode                       */
+#define MIF_FRAME_PHYAD        0x0f800000      /* PHY ADdress                  */
+#define MIF_FRAME_REGAD        0x007c0000      /* REGister ADdress             */
+#define MIF_FRAME_TAMSB        0x00020000      /* Turn Around MSB              */
+#define MIF_FRAME_TALSB        0x00010000      /* Turn Around LSB              */
+#define MIF_FRAME_DATA 0x0000ffff      /* Instruction Payload          */
+
+/* MIF Status Register.  This register reports status when the MIF is
+ * operating in the poll mode.  The poll status field is auto-clearing
+ * on read.
+ */
+#define MIF_STATUS_DATA        0xffff0000      /* Live image of XCVR reg       */
+#define MIF_STATUS_STAT        0x0000ffff      /* Which bits have changed      */
+
+/* MIF Mask Register.  This 16-bit register is used when in poll mode
+ * to say which bits of the polled register will cause an interrupt
+ * when changed.
+ */
+
+/* PCS/Serialink Registers */
+#define PCS_MIICTRL    0x9000UL        /* PCS MII Control Register     */
+#define PCS_MIISTAT    0x9004UL        /* PCS MII Status Register      */
+#define PCS_MIIADV     0x9008UL        /* PCS MII Advertisement Reg    */
+#define PCS_MIILP      0x900CUL        /* PCS MII Link Partner Ability */
+#define PCS_CFG                0x9010UL        /* PCS Configuration Register   */
+#define PCS_SMACHINE   0x9014UL        /* PCS State Machine Register   */
+#define PCS_ISTAT      0x9018UL        /* PCS Interrupt Status Reg     */
+#define PCS_DMODE      0x9050UL        /* Datapath Mode Register       */
+#define PCS_SCTRL      0x9054UL        /* Serialink Control Register   */
+#define PCS_SOS                0x9058UL        /* Shared Output Select Reg     */
+#define PCS_SSTATE     0x905CUL        /* Serialink State Register     */
+
+/* PCD MII Control Register. */
+#define PCS_MIICTRL_SPD        0x00000040      /* Read as one, writes ignored  */
+#define PCS_MIICTRL_CT 0x00000080      /* Force COL signal active      */
+#define PCS_MIICTRL_DM 0x00000100      /* Duplex mode, forced low      */
+#define PCS_MIICTRL_RAN        0x00000200      /* Restart auto-neg, self clear */
+#define PCS_MIICTRL_ISO        0x00000400      /* Read as zero, writes ignored */
+#define PCS_MIICTRL_PD 0x00000800      /* Read as zero, writes ignored */
+#define PCS_MIICTRL_ANE        0x00001000      /* Auto-neg enable              */
+#define PCS_MIICTRL_SS 0x00002000      /* Read as zero, writes ignored */
+#define PCS_MIICTRL_WB 0x00004000      /* Wrapback, loopback at 10-bit
+                                        * input side of Serialink
+                                        */
+#define PCS_MIICTRL_RST        0x00008000      /* Resets PCS, self clearing    */
+
+/* PCS MII Status Register. */
+#define PCS_MIISTAT_EC 0x00000001      /* Ext Capability: Read as zero */
+#define PCS_MIISTAT_JD 0x00000002      /* Jabber Detect: Read as zero  */
+#define PCS_MIISTAT_LS 0x00000004      /* Link Status: 1=up 0=down     */
+#define PCS_MIISTAT_ANA        0x00000008      /* Auto-neg Ability, always 1   */
+#define PCS_MIISTAT_RF 0x00000010      /* Remote Fault                 */
+#define PCS_MIISTAT_ANC        0x00000020      /* Auto-neg complete            */
+#define PCS_MIISTAT_ES 0x00000100      /* Extended Status, always 1    */
+
+/* PCS MII Advertisement Register. */
+#define PCS_MIIADV_FD  0x00000020      /* Advertise Full Duplex        */
+#define PCS_MIIADV_HD  0x00000040      /* Advertise Half Duplex        */
+#define PCS_MIIADV_SP  0x00000080      /* Advertise Symmetric Pause    */
+#define PCS_MIIADV_AP  0x00000100      /* Advertise Asymmetric Pause   */
+#define PCS_MIIADV_RF  0x00003000      /* Remote Fault                 */
+#define PCS_MIIADV_ACK 0x00004000      /* Read-only                    */
+#define PCS_MIIADV_NP  0x00008000      /* Next-page, forced low        */
+
+/* PCS MII Link Partner Ability Register.   This register is equivalent
+ * to the Link Partner Ability Register of the standard MII register set.
+ * Its layout corresponds to the PCS MII Advertisement Register.
+ */
+
+/* PCS Configuration Register. */
+#define PCS_CFG_ENABLE 0x00000001      /* Must be zero while changing
+                                        * PCS MII advertisement reg.
+                                        */
+#define PCS_CFG_SDO    0x00000002      /* Signal detect override       */
+#define PCS_CFG_SDL    0x00000004      /* Signal detect active low     */
+#define PCS_CFG_JS     0x00000018      /* Jitter-study:
+                                        * 0 = normal operation
+                                        * 1 = high-frequency test pattern
+                                        * 2 = low-frequency test pattern
+                                        * 3 = reserved
+                                        */
+#define PCS_CFG_TO     0x00000020      /* 10ms auto-neg timer override */
+
+/* PCS Interrupt Status Register.  This register is self-clearing
+ * when read.
+ */
+#define PCS_ISTAT_LSC  0x00000004      /* Link Status Change           */
+
+/* Datapath Mode Register. */
+#define PCS_DMODE_SM   0x00000001      /* 1 = use internal Serialink   */
+#define PCS_DMODE_ESM  0x00000002      /* External SERDES mode         */
+#define PCS_DMODE_MGM  0x00000004      /* MII/GMII mode                */
+#define PCS_DMODE_GMOE 0x00000008      /* GMII Output Enable           */
+
+/* Serialink Control Register.
+ *
+ * NOTE: When in SERDES mode, the loopback bit has inverse logic.
+ */
+#define PCS_SCTRL_LOOP 0x00000001      /* Loopback enable              */
+#define PCS_SCTRL_ESCD 0x00000002      /* Enable sync char detection   */
+#define PCS_SCTRL_LOCK 0x00000004      /* Lock to reference clock      */
+#define PCS_SCTRL_EMP  0x00000018      /* Output driver emphasis       */
+#define PCS_SCTRL_STEST        0x000001c0      /* Self test patterns           */
+#define PCS_SCTRL_PDWN 0x00000200      /* Software power-down          */
+#define PCS_SCTRL_RXZ  0x00000c00      /* PLL input to Serialink       */
+#define PCS_SCTRL_RXP  0x00003000      /* PLL input to Serialink       */
+#define PCS_SCTRL_TXZ  0x0000c000      /* PLL input to Serialink       */
+#define PCS_SCTRL_TXP  0x00030000      /* PLL input to Serialink       */
+
+/* Shared Output Select Register.  For test and debug, allows multiplexing
+ * test outputs into the PROM address pins.  Set to zero for normal
+ * operation.
+ */
+#define PCS_SOS_PADDR  0x00000003      /* PROM Address                 */
+
+/* PROM Image Space */
+#define PROM_START     0x100000UL      /* Expansion ROM run time access*/
+#define PROM_SIZE      0x0fffffUL      /* Size of ROM                  */
+#define PROM_END       0x200000UL      /* End of ROM                   */
+
+/* MII definitions missing from mii.h */
+
+#define BMCR_SPD2      0x0040          /* Gigabit enable? (bcm5411)    */
+#define LPA_PAUSE      0x0400
+
+/* More PHY registers (specific to Broadcom models) */
+
+/* MII BCM5201 MULTIPHY interrupt register */
+#define MII_BCM5201_INTERRUPT                  0x1A
+#define MII_BCM5201_INTERRUPT_INTENABLE                0x4000
+
+#define MII_BCM5201_AUXMODE2                   0x1B
+#define MII_BCM5201_AUXMODE2_LOWPOWER          0x0008
+
+#define MII_BCM5201_MULTIPHY                    0x1E
+
+/* MII BCM5201 MULTIPHY register bits */
+#define MII_BCM5201_MULTIPHY_SERIALMODE         0x0002
+#define MII_BCM5201_MULTIPHY_SUPERISOLATE       0x0008
+
+/* MII BCM5400 1000-BASET Control register */
+#define MII_BCM5400_GB_CONTROL                 0x09
+#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP   0x0200
+
+/* MII BCM5400 AUXCONTROL register */
+#define MII_BCM5400_AUXCONTROL                  0x18
+#define MII_BCM5400_AUXCONTROL_PWR10BASET       0x0004
+
+/* MII BCM5400 AUXSTATUS register */
+#define MII_BCM5400_AUXSTATUS                   0x19
+#define MII_BCM5400_AUXSTATUS_LINKMODE_MASK     0x0700
+#define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT    8
+
+/* When it can, GEM internally caches 4 aligned TX descriptors
+ * at a time, so that it can use full cacheline DMA reads.
+ *
+ * Note that unlike HME, there is no ownership bit in the descriptor
+ * control word.  The same functionality is obtained via the TX-Kick
+ * and TX-Complete registers.  As a result, GEM need not write back
+ * updated values to the TX descriptor ring, it only performs reads.
+ *
+ * Since TX descriptors are never modified by GEM, the driver can
+ * use the buffer DMA address as a place to keep track of allocated
+ * DMA mappings for a transmitted packet.
+ */
+struct gem_txd {
+       __le64  control_word;
+       __le64  buffer;
+};
+
+#define TXDCTRL_BUFSZ  0x0000000000007fffULL   /* Buffer Size          */
+#define TXDCTRL_CSTART 0x00000000001f8000ULL   /* CSUM Start Offset    */
+#define TXDCTRL_COFF   0x000000001fe00000ULL   /* CSUM Stuff Offset    */
+#define TXDCTRL_CENAB  0x0000000020000000ULL   /* CSUM Enable          */
+#define TXDCTRL_EOF    0x0000000040000000ULL   /* End of Frame         */
+#define TXDCTRL_SOF    0x0000000080000000ULL   /* Start of Frame       */
+#define TXDCTRL_INTME  0x0000000100000000ULL   /* "Interrupt Me"       */
+#define TXDCTRL_NOCRC  0x0000000200000000ULL   /* No CRC Present       */
+
+/* GEM requires that RX descriptors are provided four at a time,
+ * aligned.  Also, the RX ring may not wrap around.  This means that
+ * there will be at least 4 unused descriptor entries in the middle
+ * of the RX ring at all times.
+ *
+ * Similar to HME, GEM assumes that it can write garbage bytes before
+ * the beginning of the buffer and right after the end in order to DMA
+ * whole cachelines.
+ *
+ * Unlike for TX, GEM does update the status word in the RX descriptors
+ * when packets arrive.  Therefore an ownership bit does exist in the
+ * RX descriptors.  It is advisory, GEM clears it but does not check
+ * it in any way.  So when buffers are posted to the RX ring (via the
+ * RX Kick register) by the driver it must make sure the buffers are
+ * truly ready and that the ownership bits are set properly.
+ *
+ * Even though GEM modifies the RX descriptors, it guarantees that the
+ * buffer DMA address field will stay the same when it performs these
+ * updates.  Therefore it can be used to keep track of DMA mappings
+ * by the host driver just as in the TX descriptor case above.
+ */
+struct gem_rxd {
+       __le64  status_word;
+       __le64  buffer;
+};
+
+#define RXDCTRL_TCPCSUM        0x000000000000ffffULL   /* TCP Pseudo-CSUM      */
+#define RXDCTRL_BUFSZ  0x000000007fff0000ULL   /* Buffer Size          */
+#define RXDCTRL_OWN    0x0000000080000000ULL   /* GEM owns this entry  */
+#define RXDCTRL_HASHVAL        0x0ffff00000000000ULL   /* Hash Value           */
+#define RXDCTRL_HPASS  0x1000000000000000ULL   /* Passed Hash Filter   */
+#define RXDCTRL_ALTMAC 0x2000000000000000ULL   /* Matched ALT MAC      */
+#define RXDCTRL_BAD    0x4000000000000000ULL   /* Frame has bad CRC    */
+
+#define RXDCTRL_FRESH(gp)      \
+       ((((RX_BUF_ALLOC_SIZE(gp) - RX_OFFSET) << 16) & RXDCTRL_BUFSZ) | \
+        RXDCTRL_OWN)
+
+#define TX_RING_SIZE 128
+#define RX_RING_SIZE 128
+
+#if TX_RING_SIZE == 32
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_32
+#elif TX_RING_SIZE == 64
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_64
+#elif TX_RING_SIZE == 128
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_128
+#elif TX_RING_SIZE == 256
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_256
+#elif TX_RING_SIZE == 512
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_512
+#elif TX_RING_SIZE == 1024
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_1K
+#elif TX_RING_SIZE == 2048
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_2K
+#elif TX_RING_SIZE == 4096
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_4K
+#elif TX_RING_SIZE == 8192
+#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_8K
+#else
+#error TX_RING_SIZE value is illegal...
+#endif
+
+#if RX_RING_SIZE == 32
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_32
+#elif RX_RING_SIZE == 64
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_64
+#elif RX_RING_SIZE == 128
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_128
+#elif RX_RING_SIZE == 256
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_256
+#elif RX_RING_SIZE == 512
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_512
+#elif RX_RING_SIZE == 1024
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_1K
+#elif RX_RING_SIZE == 2048
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_2K
+#elif RX_RING_SIZE == 4096
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_4K
+#elif RX_RING_SIZE == 8192
+#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_8K
+#else
+#error RX_RING_SIZE is illegal...
+#endif
+
+#define NEXT_TX(N)     (((N) + 1) & (TX_RING_SIZE - 1))
+#define NEXT_RX(N)     (((N) + 1) & (RX_RING_SIZE - 1))
+
+#define TX_BUFFS_AVAIL(GP)                                     \
+       (((GP)->tx_old <= (GP)->tx_new) ?                       \
+         (GP)->tx_old + (TX_RING_SIZE - 1) - (GP)->tx_new :    \
+         (GP)->tx_old - (GP)->tx_new - 1)
+
+#define RX_OFFSET          2
+#define RX_BUF_ALLOC_SIZE(gp)  ((gp)->rx_buf_sz + 28 + RX_OFFSET + 64)
+
+#define RX_COPY_THRESHOLD  256
+
+#if TX_RING_SIZE < 128
+#define INIT_BLOCK_TX_RING_SIZE                128
+#else
+#define INIT_BLOCK_TX_RING_SIZE                TX_RING_SIZE
+#endif
+
+#if RX_RING_SIZE < 128
+#define INIT_BLOCK_RX_RING_SIZE                128
+#else
+#define INIT_BLOCK_RX_RING_SIZE                RX_RING_SIZE
+#endif
+
+struct gem_init_block {
+       struct gem_txd  txd[INIT_BLOCK_TX_RING_SIZE];
+       struct gem_rxd  rxd[INIT_BLOCK_RX_RING_SIZE];
+};
+
+enum gem_phy_type {
+       phy_mii_mdio0,
+       phy_mii_mdio1,
+       phy_serialink,
+       phy_serdes,
+};
+
+enum link_state {
+       link_down = 0,  /* No link, will retry */
+       link_aneg,      /* Autoneg in progress */
+       link_force_try, /* Try Forced link speed */
+       link_force_ret, /* Forced mode worked, retrying autoneg */
+       link_force_ok,  /* Stay in forced mode */
+       link_up         /* Link is up */
+};
+
+struct gem {
+       void __iomem            *regs;
+       int                     rx_new, rx_old;
+       int                     tx_new, tx_old;
+
+       unsigned int has_wol : 1;       /* chip supports wake-on-lan */
+       unsigned int asleep_wol : 1;    /* was asleep with WOL enabled */
+
+       int                     cell_enabled;
+       u32                     msg_enable;
+       u32                     status;
+
+       struct napi_struct      napi;
+
+       int                     tx_fifo_sz;
+       int                     rx_fifo_sz;
+       int                     rx_pause_off;
+       int                     rx_pause_on;
+       int                     rx_buf_sz;
+       u64                     pause_entered;
+       u16                     pause_last_time_recvd;
+       u32                     mac_rx_cfg;
+       u32                     swrst_base;
+
+       int                     want_autoneg;
+       int                     last_forced_speed;
+       enum link_state         lstate;
+       struct timer_list       link_timer;
+       int                     timer_ticks;
+       int                     wake_on_lan;
+       struct work_struct      reset_task;
+       volatile int            reset_task_pending;
+
+       enum gem_phy_type       phy_type;
+       struct mii_phy          phy_mii;
+       int                     mii_phy_addr;
+
+       struct gem_init_block   *init_block;
+       struct sk_buff          *rx_skbs[RX_RING_SIZE];
+       struct sk_buff          *tx_skbs[TX_RING_SIZE];
+       dma_addr_t              gblock_dvma;
+
+       struct pci_dev          *pdev;
+       struct net_device       *dev;
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
+       struct device_node      *of_node;
+#endif
+};
+
+#define found_mii_phy(gp) ((gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) && \
+                          gp->phy_mii.def && gp->phy_mii.def->ops)
+
+#endif /* _SUNGEM_H */
diff --git a/drivers/net/ethernet/sun/sungem_phy.c b/drivers/net/ethernet/sun/sungem_phy.c
new file mode 100644 (file)
index 0000000..d16880d
--- /dev/null
@@ -0,0 +1,1200 @@
+/*
+ * PHY drivers for the sungem ethernet driver.
+ *
+ * This file could be shared with other drivers.
+ *
+ * (c) 2002-2007, Benjamin Herrenscmidt (benh@kernel.crashing.org)
+ *
+ * TODO:
+ *  - Add support for PHYs that provide an IRQ line
+ *  - Eventually move the entire polling state machine in
+ *    there (out of the eth driver), so that it can easily be
+ *    skipped on PHYs that implement it in hardware.
+ *  - On LXT971 & BCM5201, Apple uses some chip specific regs
+ *    to read the link status. Figure out why and if it makes
+ *    sense to do the same (magic aneg ?)
+ *  - Apple has some additional power management code for some
+ *    Broadcom PHYs that they "hide" from the OpenSource version
+ *    of darwin, still need to reverse engineer that
+ */
+
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+
+#ifdef CONFIG_PPC_PMAC
+#include <asm/prom.h>
+#endif
+
+#include "sungem_phy.h"
+
+/* Link modes of the BCM5400 PHY */
+static const int phy_BCM5400_link_table[8][3] = {
+       { 0, 0, 0 },    /* No link */
+       { 0, 0, 0 },    /* 10BT Half Duplex */
+       { 1, 0, 0 },    /* 10BT Full Duplex */
+       { 0, 1, 0 },    /* 100BT Half Duplex */
+       { 0, 1, 0 },    /* 100BT Half Duplex */
+       { 1, 1, 0 },    /* 100BT Full Duplex*/
+       { 1, 0, 1 },    /* 1000BT */
+       { 1, 0, 1 },    /* 1000BT */
+};
+
+static inline int __phy_read(struct mii_phy* phy, int id, int reg)
+{
+       return phy->mdio_read(phy->dev, id, reg);
+}
+
+static inline void __phy_write(struct mii_phy* phy, int id, int reg, int val)
+{
+       phy->mdio_write(phy->dev, id, reg, val);
+}
+
+static inline int phy_read(struct mii_phy* phy, int reg)
+{
+       return phy->mdio_read(phy->dev, phy->mii_id, reg);
+}
+
+static inline void phy_write(struct mii_phy* phy, int reg, int val)
+{
+       phy->mdio_write(phy->dev, phy->mii_id, reg, val);
+}
+
+static int reset_one_mii_phy(struct mii_phy* phy, int phy_id)
+{
+       u16 val;
+       int limit = 10000;
+
+       val = __phy_read(phy, phy_id, MII_BMCR);
+       val &= ~(BMCR_ISOLATE | BMCR_PDOWN);
+       val |= BMCR_RESET;
+       __phy_write(phy, phy_id, MII_BMCR, val);
+
+       udelay(100);
+
+       while (--limit) {
+               val = __phy_read(phy, phy_id, MII_BMCR);
+               if ((val & BMCR_RESET) == 0)
+                       break;
+               udelay(10);
+       }
+       if ((val & BMCR_ISOLATE) && limit > 0)
+               __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE);
+
+       return limit <= 0;
+}
+
+static int bcm5201_init(struct mii_phy* phy)
+{
+       u16 data;
+
+       data = phy_read(phy, MII_BCM5201_MULTIPHY);
+       data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE;
+       phy_write(phy, MII_BCM5201_MULTIPHY, data);
+
+       phy_write(phy, MII_BCM5201_INTERRUPT, 0);
+
+       return 0;
+}
+
+static int bcm5201_suspend(struct mii_phy* phy)
+{
+       phy_write(phy, MII_BCM5201_INTERRUPT, 0);
+       phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE);
+
+       return 0;
+}
+
+static int bcm5221_init(struct mii_phy* phy)
+{
+       u16 data;
+
+       data = phy_read(phy, MII_BCM5221_TEST);
+       phy_write(phy, MII_BCM5221_TEST,
+               data | MII_BCM5221_TEST_ENABLE_SHADOWS);
+
+       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
+       phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
+               data | MII_BCM5221_SHDOW_AUX_STAT2_APD);
+
+       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
+       phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
+               data | MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR);
+
+       data = phy_read(phy, MII_BCM5221_TEST);
+       phy_write(phy, MII_BCM5221_TEST,
+               data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);
+
+       return 0;
+}
+
+static int bcm5221_suspend(struct mii_phy* phy)
+{
+       u16 data;
+
+       data = phy_read(phy, MII_BCM5221_TEST);
+       phy_write(phy, MII_BCM5221_TEST,
+               data | MII_BCM5221_TEST_ENABLE_SHADOWS);
+
+       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
+       phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
+                 data | MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE);
+
+       return 0;
+}
+
+static int bcm5241_init(struct mii_phy* phy)
+{
+       u16 data;
+
+       data = phy_read(phy, MII_BCM5221_TEST);
+       phy_write(phy, MII_BCM5221_TEST,
+               data | MII_BCM5221_TEST_ENABLE_SHADOWS);
+
+       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
+       phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
+               data | MII_BCM5221_SHDOW_AUX_STAT2_APD);
+
+       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
+       phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
+               data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);
+
+       data = phy_read(phy, MII_BCM5221_TEST);
+       phy_write(phy, MII_BCM5221_TEST,
+               data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);
+
+       return 0;
+}
+
+static int bcm5241_suspend(struct mii_phy* phy)
+{
+       u16 data;
+
+       data = phy_read(phy, MII_BCM5221_TEST);
+       phy_write(phy, MII_BCM5221_TEST,
+               data | MII_BCM5221_TEST_ENABLE_SHADOWS);
+
+       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
+       phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
+                 data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);
+
+       return 0;
+}
+
+static int bcm5400_init(struct mii_phy* phy)
+{
+       u16 data;
+
+       /* Configure for gigabit full duplex */
+       data = phy_read(phy, MII_BCM5400_AUXCONTROL);
+       data |= MII_BCM5400_AUXCONTROL_PWR10BASET;
+       phy_write(phy, MII_BCM5400_AUXCONTROL, data);
+
+       data = phy_read(phy, MII_BCM5400_GB_CONTROL);
+       data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
+       phy_write(phy, MII_BCM5400_GB_CONTROL, data);
+
+       udelay(100);
+
+       /* Reset and configure cascaded 10/100 PHY */
+       (void)reset_one_mii_phy(phy, 0x1f);
+
+       data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY);
+       data |= MII_BCM5201_MULTIPHY_SERIALMODE;
+       __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data);
+
+       data = phy_read(phy, MII_BCM5400_AUXCONTROL);
+       data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET;
+       phy_write(phy, MII_BCM5400_AUXCONTROL, data);
+
+       return 0;
+}
+
+static int bcm5400_suspend(struct mii_phy* phy)
+{
+#if 0 /* Commented out in Darwin... someone has those dawn docs ? */
+       phy_write(phy, MII_BMCR, BMCR_PDOWN);
+#endif
+       return 0;
+}
+
+static int bcm5401_init(struct mii_phy* phy)
+{
+       u16 data;
+       int rev;
+
+       rev = phy_read(phy, MII_PHYSID2) & 0x000f;
+       if (rev == 0 || rev == 3) {
+               /* Some revisions of 5401 appear to need this
+                * initialisation sequence to disable, according
+                * to OF, "tap power management"
+                *
+                * WARNING ! OF and Darwin don't agree on the
+                * register addresses. OF seem to interpret the
+                * register numbers below as decimal
+                *
+                * Note: This should (and does) match tg3_init_5401phy_dsp
+                *       in the tg3.c driver. -DaveM
+                */
+               phy_write(phy, 0x18, 0x0c20);
+               phy_write(phy, 0x17, 0x0012);
+               phy_write(phy, 0x15, 0x1804);
+               phy_write(phy, 0x17, 0x0013);
+               phy_write(phy, 0x15, 0x1204);
+               phy_write(phy, 0x17, 0x8006);
+               phy_write(phy, 0x15, 0x0132);
+               phy_write(phy, 0x17, 0x8006);
+               phy_write(phy, 0x15, 0x0232);
+               phy_write(phy, 0x17, 0x201f);
+               phy_write(phy, 0x15, 0x0a20);
+       }
+
+       /* Configure for gigabit full duplex */
+       data = phy_read(phy, MII_BCM5400_GB_CONTROL);
+       data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
+       phy_write(phy, MII_BCM5400_GB_CONTROL, data);
+
+       udelay(10);
+
+       /* Reset and configure cascaded 10/100 PHY */
+       (void)reset_one_mii_phy(phy, 0x1f);
+
+       data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY);
+       data |= MII_BCM5201_MULTIPHY_SERIALMODE;
+       __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data);
+
+       return 0;
+}
+
+static int bcm5401_suspend(struct mii_phy* phy)
+{
+#if 0 /* Commented out in Darwin... someone has those dawn docs ? */
+       phy_write(phy, MII_BMCR, BMCR_PDOWN);
+#endif
+       return 0;
+}
+
+static int bcm5411_init(struct mii_phy* phy)
+{
+       u16 data;
+
+       /* Here's some more Apple black magic to setup
+        * some voltage stuffs.
+        */
+       phy_write(phy, 0x1c, 0x8c23);
+       phy_write(phy, 0x1c, 0x8ca3);
+       phy_write(phy, 0x1c, 0x8c23);
+
+       /* Here, Apple seems to want to reset it, do
+        * it as well
+        */
+       phy_write(phy, MII_BMCR, BMCR_RESET);
+       phy_write(phy, MII_BMCR, 0x1340);
+
+       data = phy_read(phy, MII_BCM5400_GB_CONTROL);
+       data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
+       phy_write(phy, MII_BCM5400_GB_CONTROL, data);
+
+       udelay(10);
+
+       /* Reset and configure cascaded 10/100 PHY */
+       (void)reset_one_mii_phy(phy, 0x1f);
+
+       return 0;
+}
+
+static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
+{
+       u16 ctl, adv;
+
+       phy->autoneg = 1;
+       phy->speed = SPEED_10;
+       phy->duplex = DUPLEX_HALF;
+       phy->pause = 0;
+       phy->advertising = advertise;
+
+       /* Setup standard advertise */
+       adv = phy_read(phy, MII_ADVERTISE);
+       adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+       if (advertise & ADVERTISED_10baseT_Half)
+               adv |= ADVERTISE_10HALF;
+       if (advertise & ADVERTISED_10baseT_Full)
+               adv |= ADVERTISE_10FULL;
+       if (advertise & ADVERTISED_100baseT_Half)
+               adv |= ADVERTISE_100HALF;
+       if (advertise & ADVERTISED_100baseT_Full)
+               adv |= ADVERTISE_100FULL;
+       phy_write(phy, MII_ADVERTISE, adv);
+
+       /* Start/Restart aneg */
+       ctl = phy_read(phy, MII_BMCR);
+       ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+       phy_write(phy, MII_BMCR, ctl);
+
+       return 0;
+}
+
+static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
+{
+       u16 ctl;
+
+       phy->autoneg = 0;
+       phy->speed = speed;
+       phy->duplex = fd;
+       phy->pause = 0;
+
+       ctl = phy_read(phy, MII_BMCR);
+       ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
+
+       /* First reset the PHY */
+       phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
+
+       /* Select speed & duplex */
+       switch(speed) {
+       case SPEED_10:
+               break;
+       case SPEED_100:
+               ctl |= BMCR_SPEED100;
+               break;
+       case SPEED_1000:
+       default:
+               return -EINVAL;
+       }
+       if (fd == DUPLEX_FULL)
+               ctl |= BMCR_FULLDPLX;
+       phy_write(phy, MII_BMCR, ctl);
+
+       return 0;
+}
+
+static int genmii_poll_link(struct mii_phy *phy)
+{
+       u16 status;
+
+       (void)phy_read(phy, MII_BMSR);
+       status = phy_read(phy, MII_BMSR);
+       if ((status & BMSR_LSTATUS) == 0)
+               return 0;
+       if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
+               return 0;
+       return 1;
+}
+
+static int genmii_read_link(struct mii_phy *phy)
+{
+       u16 lpa;
+
+       if (phy->autoneg) {
+               lpa = phy_read(phy, MII_LPA);
+
+               if (lpa & (LPA_10FULL | LPA_100FULL))
+                       phy->duplex = DUPLEX_FULL;
+               else
+                       phy->duplex = DUPLEX_HALF;
+               if (lpa & (LPA_100FULL | LPA_100HALF))
+                       phy->speed = SPEED_100;
+               else
+                       phy->speed = SPEED_10;
+               phy->pause = 0;
+       }
+       /* On non-aneg, we assume what we put in BMCR is the speed,
+        * though magic-aneg shouldn't prevent this case from occurring
+        */
+
+        return 0;
+}
+
+static int generic_suspend(struct mii_phy* phy)
+{
+       phy_write(phy, MII_BMCR, BMCR_PDOWN);
+
+       return 0;
+}
+
+/* Chip init for BCM5421 (also reused by 5421-K2/5461/5462V tables):
+ * applies rev-0 fixups, K2 init, and (on PowerMac) automatic
+ * low-power mode where the firmware allows it.  The raw register
+ * writes below are magic sequences taken from MacOS / Open Firmware;
+ * keep their order intact.
+ */
+static int bcm5421_init(struct mii_phy* phy)
+{
+       u16 data;
+       unsigned int id;
+
+       id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2));
+
+       /* Revision 0 of 5421 needs some fixups */
+       if (id == 0x002060e0) {
+               /* This is borrowed from MacOS
+                */
+               phy_write(phy, 0x18, 0x1007);
+               data = phy_read(phy, 0x18);
+               phy_write(phy, 0x18, data | 0x0400);
+               phy_write(phy, 0x18, 0x0007);
+               data = phy_read(phy, 0x18);
+               phy_write(phy, 0x18, data | 0x0800);
+               phy_write(phy, 0x17, 0x000a);
+               data = phy_read(phy, 0x15);
+               phy_write(phy, 0x15, data | 0x0200);
+       }
+
+       /* Pick up some init code from OF for K2 version */
+       if ((id & 0xfffffff0) == 0x002062e0) {
+               phy_write(phy, 4, 0x01e1);
+               phy_write(phy, 9, 0x0300);
+       }
+
+       /* Check if we can enable automatic low power */
+#ifdef CONFIG_PPC_PMAC
+       if (phy->platform_data) {
+               /* Honour a "no-autolowpower" property on the parent
+                * device-tree node, if present.
+                */
+               struct device_node *np = of_get_parent(phy->platform_data);
+               int can_low_power = 1;
+               if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
+                       can_low_power = 0;
+               if (can_low_power) {
+                       /* Enable automatic low-power */
+                       phy_write(phy, 0x1c, 0x9002);
+                       phy_write(phy, 0x1c, 0xa821);
+                       phy_write(phy, 0x1c, 0x941d);
+               }
+       }
+#endif /* CONFIG_PPC_PMAC */
+
+       return 0;
+}
+
+/* Program and (re)start autonegotiation on BCM54xx: 10/100 abilities
+ * go into MII_ADVERTISE, gigabit abilities into the 1000BASE-T
+ * control register, then BMCR restarts aneg.  The cached mii_phy
+ * fields are reset to the lowest mode until a result is read back.
+ */
+static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
+{
+       u16 ctl, adv;
+
+       phy->autoneg = 1;
+       phy->speed = SPEED_10;
+       phy->duplex = DUPLEX_HALF;
+       phy->pause = 0;
+       phy->advertising = advertise;
+
+       /* Setup standard advertise */
+       adv = phy_read(phy, MII_ADVERTISE);
+       adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+       if (advertise & ADVERTISED_10baseT_Half)
+               adv |= ADVERTISE_10HALF;
+       if (advertise & ADVERTISED_10baseT_Full)
+               adv |= ADVERTISE_10FULL;
+       if (advertise & ADVERTISED_100baseT_Half)
+               adv |= ADVERTISE_100HALF;
+       if (advertise & ADVERTISED_100baseT_Full)
+               adv |= ADVERTISE_100FULL;
+       if (advertise & ADVERTISED_Pause)
+               adv |= ADVERTISE_PAUSE_CAP;
+       if (advertise & ADVERTISED_Asym_Pause)
+               adv |= ADVERTISE_PAUSE_ASYM;
+       phy_write(phy, MII_ADVERTISE, adv);
+
+       /* Setup 1000BT advertise.  NOTE(review): this tests
+        * SUPPORTED_* bits against an advertise mask built from
+        * ADVERTISED_* values; the two ethtool bit sets share values
+        * so this works, but ADVERTISED_* would be clearer -- confirm.
+        */
+       adv = phy_read(phy, MII_1000BASETCONTROL);
+       adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP|MII_1000BASETCONTROL_HALFDUPLEXCAP);
+       if (advertise & SUPPORTED_1000baseT_Half)
+               adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
+       if (advertise & SUPPORTED_1000baseT_Full)
+               adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
+       phy_write(phy, MII_1000BASETCONTROL, adv);
+
+       /* Start/Restart aneg */
+       ctl = phy_read(phy, MII_BMCR);
+       ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+       phy_write(phy, MII_BMCR, ctl);
+
+       return 0;
+}
+
+/* Force speed/duplex on BCM54xx: reset the PHY with autoneg cleared,
+ * then program BMCR for the requested mode.  Unlike the generic
+ * helper this accepts SPEED_1000 (BMCR_SPD2).
+ */
+static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd)
+{
+       u16 ctl;
+
+       phy->autoneg = 0;
+       phy->speed = speed;
+       phy->duplex = fd;
+       phy->pause = 0;
+
+       ctl = phy_read(phy, MII_BMCR);
+       ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE);
+
+       /* First reset the PHY */
+       phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
+
+       /* Select speed & duplex */
+       switch(speed) {
+       case SPEED_10:
+               break;
+       case SPEED_100:
+               ctl |= BMCR_SPEED100;
+               break;
+       case SPEED_1000:
+               ctl |= BMCR_SPD2;
+               /* last case: falls out of the switch */
+       }
+       if (fd == DUPLEX_FULL)
+               ctl |= BMCR_FULLDPLX;
+
+       // XXX Should we set the sungem to GII now on 1000BT ?
+
+       phy_write(phy, MII_BMCR, ctl);
+
+       return 0;
+}
+
+/* Read back the negotiated link parameters from the BCM54xx aux
+ * status register.  link_mode indexes phy_BCM5400_link_table
+ * (defined earlier in this file), whose columns are used as
+ * { full-duplex?, 100Mb?, 1000Mb? }.
+ */
+static int bcm54xx_read_link(struct mii_phy *phy)
+{
+       int link_mode;
+       u16 val;
+
+       if (phy->autoneg) {
+               val = phy_read(phy, MII_BCM5400_AUXSTATUS);
+               link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >>
+                            MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT);
+               phy->duplex = phy_BCM5400_link_table[link_mode][0] ?
+                       DUPLEX_FULL : DUPLEX_HALF;
+               phy->speed = phy_BCM5400_link_table[link_mode][2] ?
+                               SPEED_1000 :
+                               (phy_BCM5400_link_table[link_mode][1] ?
+                                SPEED_100 : SPEED_10);
+               /* Pause only applies on full duplex links */
+               val = phy_read(phy, MII_LPA);
+               phy->pause = (phy->duplex == DUPLEX_FULL) &&
+                       ((val & LPA_PAUSE) != 0);
+       }
+       /* On non-aneg, we assume what we put in BMCR is the speed,
+        * though magic-aneg shouldn't prevent this case from occurring
+        */
+
+       return 0;
+}
+
+/* 88E1111 chip init: magic indirect-register sequence (writes via
+ * regs 0x1d/0x1e) required on rev-0 parts only.  Keep the write
+ * order intact.
+ */
+static int marvell88e1111_init(struct mii_phy* phy)
+{
+       u16 rev;
+
+       /* magic init sequence for rev 0 */
+       rev = phy_read(phy, MII_PHYSID2) & 0x000f;
+       if (rev == 0) {
+               phy_write(phy, 0x1d, 0x000a);
+               phy_write(phy, 0x1e, 0x0821);
+
+               phy_write(phy, 0x1d, 0x0006);
+               phy_write(phy, 0x1e, 0x8600);
+
+               phy_write(phy, 0x1d, 0x000b);
+               phy_write(phy, 0x1e, 0x0100);
+
+               phy_write(phy, 0x1d, 0x0004);
+               phy_write(phy, 0x1e, 0x4850);
+       }
+       return 0;
+}
+
+#define BCM5421_MODE_MASK      (1 << 5)
+
+/* Poll link on BCM5421: copper mode defers to the generic MII poll;
+ * fiber mode reads link state from the NCONFIG shadow register.
+ */
+static int bcm5421_poll_link(struct mii_phy* phy)
+{
+       u32 phy_reg;
+       int mode;
+
+       /* find out in what mode we are */
+       phy_write(phy, MII_NCONFIG, 0x1000);
+       phy_reg = phy_read(phy, MII_NCONFIG);
+
+       mode = (phy_reg & BCM5421_MODE_MASK) >> 5;
+
+       if ( mode == BCM54XX_COPPER)
+               return genmii_poll_link(phy);
+
+       /* try to find out whether we have a link */
+       phy_write(phy, MII_NCONFIG, 0x2000);
+       phy_reg = phy_read(phy, MII_NCONFIG);
+
+       /* bit 5 set means no link */
+       if (phy_reg & 0x0020)
+               return 0;
+       else
+               return 1;
+}
+
+/* Read link parameters on BCM5421: copper mode defers to the BCM54xx
+ * helper; fiber mode is always 1000Mb, with duplex taken from the
+ * NCONFIG shadow register.
+ */
+static int bcm5421_read_link(struct mii_phy* phy)
+{
+       u32 phy_reg;
+       int mode;
+
+       /* find out in what mode we are */
+       phy_write(phy, MII_NCONFIG, 0x1000);
+       phy_reg = phy_read(phy, MII_NCONFIG);
+
+       mode = (phy_reg & BCM5421_MODE_MASK ) >> 5;
+
+       if ( mode == BCM54XX_COPPER)
+               return bcm54xx_read_link(phy);
+
+       phy->speed = SPEED_1000;
+
+       /* find out whether we are running half- or full duplex */
+       phy_write(phy, MII_NCONFIG, 0x2000);
+       phy_reg = phy_read(phy, MII_NCONFIG);
+
+       /* NOTE(review): duplex is OR-ed rather than assigned; with
+        * the standard DUPLEX_HALF == 0 the half-duplex branch leaves
+        * any stale full-duplex value in place -- confirm '=' was not
+        * intended.
+        */
+       if ( (phy_reg & 0x0080) >> 7)
+               phy->duplex |=  DUPLEX_HALF;
+       else
+               phy->duplex |=  DUPLEX_FULL;
+
+       return 0;
+}
+
+/* Switch the BCM5421 into fiber mode via NCONFIG shadow writes,
+ * optionally turning off fiber autonegotiation.
+ */
+static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg)
+{
+       /* enable fiber mode */
+       phy_write(phy, MII_NCONFIG, 0x9020);
+       /* LEDs active in both modes, autosense prio = fiber */
+       phy_write(phy, MII_NCONFIG, 0x945f);
+
+       if (!autoneg) {
+               /* switch off fibre autoneg */
+               phy_write(phy, MII_NCONFIG, 0xfc01);
+               phy_write(phy, 0x0b, 0x0004);
+       }
+
+       phy->autoneg = autoneg;
+
+       return 0;
+}
+
+#define BCM5461_FIBER_LINK     (1 << 2)
+#define BCM5461_MODE_MASK      (3 << 1)
+
+/* Poll link on BCM5461: copper mode defers to the generic MII poll;
+ * fiber mode reads link state from the NCONFIG shadow register.
+ */
+static int bcm5461_poll_link(struct mii_phy* phy)
+{
+       u32 phy_reg;
+       int mode;
+
+       /* find out in what mode we are */
+       phy_write(phy, MII_NCONFIG, 0x7c00);
+       phy_reg = phy_read(phy, MII_NCONFIG);
+
+       mode = (phy_reg & BCM5461_MODE_MASK ) >> 1;
+
+       if ( mode == BCM54XX_COPPER)
+               return genmii_poll_link(phy);
+
+       /* find out whether we have a link */
+       phy_write(phy, MII_NCONFIG, 0x7000);
+       phy_reg = phy_read(phy, MII_NCONFIG);
+
+       if (phy_reg & BCM5461_FIBER_LINK)
+               return 1;
+       else
+               return 0;
+}
+
+#define BCM5461_FIBER_DUPLEX   (1 << 3)
+
+/* Read link parameters on BCM5461: copper mode defers to the BCM54xx
+ * helper; fiber mode is always 1000Mb, with duplex taken from the
+ * NCONFIG shadow register.
+ */
+static int bcm5461_read_link(struct mii_phy* phy)
+{
+       u32 phy_reg;
+       int mode;
+
+       /* find out in what mode we are */
+       phy_write(phy, MII_NCONFIG, 0x7c00);
+       phy_reg = phy_read(phy, MII_NCONFIG);
+
+       mode = (phy_reg & BCM5461_MODE_MASK ) >> 1;
+
+       if ( mode == BCM54XX_COPPER) {
+               return bcm54xx_read_link(phy);
+       }
+
+       phy->speed = SPEED_1000;
+
+       /* find out whether we are running half- or full duplex */
+       phy_write(phy, MII_NCONFIG, 0x7000);
+       phy_reg = phy_read(phy, MII_NCONFIG);
+
+       /* NOTE(review): duplex is OR-ed rather than assigned; with
+        * DUPLEX_HALF == 0 the half-duplex branch leaves any stale
+        * full-duplex value in place -- confirm '=' was not intended.
+        */
+       if (phy_reg & BCM5461_FIBER_DUPLEX)
+               phy->duplex |=  DUPLEX_FULL;
+       else
+               phy->duplex |=  DUPLEX_HALF;
+
+       return 0;
+}
+
+/* Switch the BCM5461 into fiber (1000base-X) mode.  Note: the two
+ * branch comments were previously swapped -- 0x1140 sets
+ * BMCR_ANENABLE (aneg on), 0x0140 does not.
+ */
+static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg)
+{
+       /* select fiber mode, enable 1000 base-X registers */
+       phy_write(phy, MII_NCONFIG, 0xfc0b);
+
+       if (autoneg) {
+               /* enable fiber with autonegotiation: advertise
+                * 1000base-X abilities, BMCR 0x1140 = aneg enable +
+                * full duplex + 1000Mb
+                */
+               phy_write(phy, MII_ADVERTISE, 0x01e0);
+               phy_write(phy, MII_BMCR, 0x1140);
+       } else {
+               /* enable fiber with no autonegotiation: BMCR 0x0140 =
+                * full duplex + 1000Mb, aneg off
+                */
+               phy_write(phy, MII_BMCR, 0x0140);
+       }
+
+       phy->autoneg = autoneg;
+
+       return 0;
+}
+
+/* Program and (re)start autonegotiation on Marvell 88E11xx PHYs:
+ * 10/100 abilities via MII_ADVERTISE, gigabit abilities plus
+ * auto-MDIX via the PHY-specific control path, then restart aneg.
+ */
+static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
+{
+       u16 ctl, adv;
+
+       phy->autoneg = 1;
+       phy->speed = SPEED_10;
+       phy->duplex = DUPLEX_HALF;
+       phy->pause = 0;
+       phy->advertising = advertise;
+
+       /* Setup standard advertise */
+       adv = phy_read(phy, MII_ADVERTISE);
+       adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+       if (advertise & ADVERTISED_10baseT_Half)
+               adv |= ADVERTISE_10HALF;
+       if (advertise & ADVERTISED_10baseT_Full)
+               adv |= ADVERTISE_10FULL;
+       if (advertise & ADVERTISED_100baseT_Half)
+               adv |= ADVERTISE_100HALF;
+       if (advertise & ADVERTISED_100baseT_Full)
+               adv |= ADVERTISE_100FULL;
+       if (advertise & ADVERTISED_Pause)
+               adv |= ADVERTISE_PAUSE_CAP;
+       if (advertise & ADVERTISED_Asym_Pause)
+               adv |= ADVERTISE_PAUSE_ASYM;
+       phy_write(phy, MII_ADVERTISE, adv);
+
+       /* Setup 1000BT advertise & enable crossover detect
+        * XXX How do we advertise 1000BT ? Darwin source is
+        * confusing here, they read from specific control and
+        * write to control... Someone has specs for those
+        * beasts ?
+        */
+       adv = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL);
+       adv |= MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX;
+       adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
+                       MII_1000BASETCONTROL_HALFDUPLEXCAP);
+       if (advertise & SUPPORTED_1000baseT_Half)
+               adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
+       if (advertise & SUPPORTED_1000baseT_Full)
+               adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
+       phy_write(phy, MII_1000BASETCONTROL, adv);
+
+       /* Start/Restart aneg */
+       ctl = phy_read(phy, MII_BMCR);
+       ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+       phy_write(phy, MII_BMCR, ctl);
+
+       return 0;
+}
+
+/* Force speed/duplex on Marvell 88E11xx: reset the PHY with the
+ * requested BMCR mode, disable crossover detection, and mirror the
+ * gigabit duplex capability into the 1000BASE-T control register.
+ */
+static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd)
+{
+       u16 ctl, ctl2;
+
+       phy->autoneg = 0;
+       phy->speed = speed;
+       phy->duplex = fd;
+       phy->pause = 0;
+
+       ctl = phy_read(phy, MII_BMCR);
+       ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE);
+       ctl |= BMCR_RESET;
+
+       /* Select speed & duplex */
+       switch(speed) {
+       case SPEED_10:
+               break;
+       case SPEED_100:
+               ctl |= BMCR_SPEED100;
+               break;
+       /* I'm not sure about the one below, again, Darwin source is
+        * quite confusing and I lack chip specs
+        */
+       case SPEED_1000:
+               ctl |= BMCR_SPD2;
+               /* last case: falls out of the switch */
+       }
+       if (fd == DUPLEX_FULL)
+               ctl |= BMCR_FULLDPLX;
+
+       /* Disable crossover. Again, the way Apple does it is strange,
+        * though I don't assume they are wrong ;)
+        */
+       ctl2 = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL);
+       ctl2 &= ~(MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX |
+               MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX |
+               MII_1000BASETCONTROL_FULLDUPLEXCAP |
+               MII_1000BASETCONTROL_HALFDUPLEXCAP);
+       if (speed == SPEED_1000)
+               ctl2 |= (fd == DUPLEX_FULL) ?
+                       MII_1000BASETCONTROL_FULLDUPLEXCAP :
+                       MII_1000BASETCONTROL_HALFDUPLEXCAP;
+       phy_write(phy, MII_1000BASETCONTROL, ctl2);
+
+       // XXX Should we set the sungem to GII now on 1000BT ?
+
+       phy_write(phy, MII_BMCR, ctl);
+
+       return 0;
+}
+
+/* Read back the resolved link parameters from the Marvell
+ * PHY-specific status register.  Returns -EAGAIN while aneg has not
+ * resolved yet, 0 otherwise.
+ */
+static int marvell_read_link(struct mii_phy *phy)
+{
+       u16 status;
+
+       /* On non-aneg, we assume what we put in BMCR is the speed,
+        * though magic-aneg shouldn't prevent this case from occurring
+        */
+       if (!phy->autoneg)
+               return 0;
+
+       status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS);
+       if (!(status & MII_M1011_PHY_SPEC_STATUS_RESOLVED))
+               return -EAGAIN;
+
+       if (status & MII_M1011_PHY_SPEC_STATUS_1000)
+               phy->speed = SPEED_1000;
+       else if (status & MII_M1011_PHY_SPEC_STATUS_100)
+               phy->speed = SPEED_100;
+       else
+               phy->speed = SPEED_10;
+
+       phy->duplex = (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX) ?
+               DUPLEX_FULL : DUPLEX_HALF;
+
+       /* Pause is on only when both directions were resolved */
+       phy->pause = (status & (MII_M1011_PHY_SPEC_STATUS_TX_PAUSE |
+                               MII_M1011_PHY_SPEC_STATUS_RX_PAUSE)) ==
+               (MII_M1011_PHY_SPEC_STATUS_TX_PAUSE |
+                MII_M1011_PHY_SPEC_STATUS_RX_PAUSE);
+
+       return 0;
+}
+
+/* ethtool feature masks shared by the mii_phy_def tables below */
+#define MII_BASIC_FEATURES \
+       (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |      \
+        SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |    \
+        SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |     \
+        SUPPORTED_Pause)
+
+/* On gigabit capable PHYs, we advertise Pause support but not asym pause
+ * support for now as I'm not sure it's supported and Darwin doesn't do
+ * it either. --BenH.
+ */
+#define MII_GBIT_FEATURES \
+       (MII_BASIC_FEATURES |   \
+        SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
+
+/* Per-PHY dispatch tables.  The ops tables are never written at
+ * runtime and mii_phy_def.ops is declared const, so they can be
+ * const and live in read-only memory.  The mii_phy_def entries stay
+ * non-const because mii_phy_table and mii_phy.def use plain pointers.
+ */
+
+/* Broadcom BCM 5201 */
+static const struct mii_phy_ops bcm5201_phy_ops = {
+       .init           = bcm5201_init,
+       .suspend        = bcm5201_suspend,
+       .setup_aneg     = genmii_setup_aneg,
+       .setup_forced   = genmii_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = genmii_read_link,
+};
+
+static struct mii_phy_def bcm5201_phy_def = {
+       .phy_id         = 0x00406210,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "BCM5201",
+       .features       = MII_BASIC_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &bcm5201_phy_ops
+};
+
+/* Broadcom BCM 5221 */
+static const struct mii_phy_ops bcm5221_phy_ops = {
+       .suspend        = bcm5221_suspend,
+       .init           = bcm5221_init,
+       .setup_aneg     = genmii_setup_aneg,
+       .setup_forced   = genmii_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = genmii_read_link,
+};
+
+static struct mii_phy_def bcm5221_phy_def = {
+       .phy_id         = 0x004061e0,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "BCM5221",
+       .features       = MII_BASIC_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &bcm5221_phy_ops
+};
+
+/* Broadcom BCM 5241 */
+static const struct mii_phy_ops bcm5241_phy_ops = {
+       .suspend        = bcm5241_suspend,
+       .init           = bcm5241_init,
+       .setup_aneg     = genmii_setup_aneg,
+       .setup_forced   = genmii_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = genmii_read_link,
+};
+static struct mii_phy_def bcm5241_phy_def = {
+       .phy_id         = 0x0143bc30,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "BCM5241",
+       .features       = MII_BASIC_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &bcm5241_phy_ops
+};
+
+/* Broadcom BCM 5400 */
+static const struct mii_phy_ops bcm5400_phy_ops = {
+       .init           = bcm5400_init,
+       .suspend        = bcm5400_suspend,
+       .setup_aneg     = bcm54xx_setup_aneg,
+       .setup_forced   = bcm54xx_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = bcm54xx_read_link,
+};
+
+static struct mii_phy_def bcm5400_phy_def = {
+       .phy_id         = 0x00206040,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "BCM5400",
+       .features       = MII_GBIT_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &bcm5400_phy_ops
+};
+
+/* Broadcom BCM 5401 */
+static const struct mii_phy_ops bcm5401_phy_ops = {
+       .init           = bcm5401_init,
+       .suspend        = bcm5401_suspend,
+       .setup_aneg     = bcm54xx_setup_aneg,
+       .setup_forced   = bcm54xx_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = bcm54xx_read_link,
+};
+
+static struct mii_phy_def bcm5401_phy_def = {
+       .phy_id         = 0x00206050,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "BCM5401",
+       .features       = MII_GBIT_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &bcm5401_phy_ops
+};
+
+/* Broadcom BCM 5411 */
+static const struct mii_phy_ops bcm5411_phy_ops = {
+       .init           = bcm5411_init,
+       .suspend        = generic_suspend,
+       .setup_aneg     = bcm54xx_setup_aneg,
+       .setup_forced   = bcm54xx_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = bcm54xx_read_link,
+};
+
+static struct mii_phy_def bcm5411_phy_def = {
+       .phy_id         = 0x00206070,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "BCM5411",
+       .features       = MII_GBIT_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &bcm5411_phy_ops
+};
+
+/* Broadcom BCM 5421 */
+static const struct mii_phy_ops bcm5421_phy_ops = {
+       .init           = bcm5421_init,
+       .suspend        = generic_suspend,
+       .setup_aneg     = bcm54xx_setup_aneg,
+       .setup_forced   = bcm54xx_setup_forced,
+       .poll_link      = bcm5421_poll_link,
+       .read_link      = bcm5421_read_link,
+       .enable_fiber   = bcm5421_enable_fiber,
+};
+
+static struct mii_phy_def bcm5421_phy_def = {
+       .phy_id         = 0x002060e0,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "BCM5421",
+       .features       = MII_GBIT_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &bcm5421_phy_ops
+};
+
+/* Broadcom BCM 5421 built-in K2 */
+static const struct mii_phy_ops bcm5421k2_phy_ops = {
+       .init           = bcm5421_init,
+       .suspend        = generic_suspend,
+       .setup_aneg     = bcm54xx_setup_aneg,
+       .setup_forced   = bcm54xx_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = bcm54xx_read_link,
+};
+
+static struct mii_phy_def bcm5421k2_phy_def = {
+       .phy_id         = 0x002062e0,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "BCM5421-K2",
+       .features       = MII_GBIT_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &bcm5421k2_phy_ops
+};
+
+static const struct mii_phy_ops bcm5461_phy_ops = {
+       .init           = bcm5421_init,
+       .suspend        = generic_suspend,
+       .setup_aneg     = bcm54xx_setup_aneg,
+       .setup_forced   = bcm54xx_setup_forced,
+       .poll_link      = bcm5461_poll_link,
+       .read_link      = bcm5461_read_link,
+       .enable_fiber   = bcm5461_enable_fiber,
+};
+
+static struct mii_phy_def bcm5461_phy_def = {
+       .phy_id         = 0x002060c0,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "BCM5461",
+       .features       = MII_GBIT_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &bcm5461_phy_ops
+};
+
+/* Broadcom BCM 5462 built-in Vesta */
+static const struct mii_phy_ops bcm5462V_phy_ops = {
+       .init           = bcm5421_init,
+       .suspend        = generic_suspend,
+       .setup_aneg     = bcm54xx_setup_aneg,
+       .setup_forced   = bcm54xx_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = bcm54xx_read_link,
+};
+
+static struct mii_phy_def bcm5462V_phy_def = {
+       .phy_id         = 0x002060d0,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "BCM5462-Vesta",
+       .features       = MII_GBIT_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &bcm5462V_phy_ops
+};
+
+/* Marvell 88E1101 amd 88E1111 */
+static const struct mii_phy_ops marvell88e1101_phy_ops = {
+       .suspend        = generic_suspend,
+       .setup_aneg     = marvell_setup_aneg,
+       .setup_forced   = marvell_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = marvell_read_link
+};
+
+static const struct mii_phy_ops marvell88e1111_phy_ops = {
+       .init           = marvell88e1111_init,
+       .suspend        = generic_suspend,
+       .setup_aneg     = marvell_setup_aneg,
+       .setup_forced   = marvell_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = marvell_read_link
+};
+
+/* two revs in darwin for the 88e1101 ... I could use a datasheet
+ * to get the proper names...
+ */
+static struct mii_phy_def marvell88e1101v1_phy_def = {
+       .phy_id         = 0x01410c20,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Marvell 88E1101v1",
+       .features       = MII_GBIT_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &marvell88e1101_phy_ops
+};
+static struct mii_phy_def marvell88e1101v2_phy_def = {
+       .phy_id         = 0x01410c60,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Marvell 88E1101v2",
+       .features       = MII_GBIT_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &marvell88e1101_phy_ops
+};
+static struct mii_phy_def marvell88e1111_phy_def = {
+       .phy_id         = 0x01410cc0,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Marvell 88E1111",
+       .features       = MII_GBIT_FEATURES,
+       .magic_aneg     = 1,
+       .ops            = &marvell88e1111_phy_ops
+};
+
+/* Generic implementation for most 10/100 PHYs */
+static const struct mii_phy_ops generic_phy_ops = {
+       .setup_aneg     = genmii_setup_aneg,
+       .setup_forced   = genmii_setup_forced,
+       .poll_link      = genmii_poll_link,
+       .read_link      = genmii_read_link
+};
+
+static struct mii_phy_def genmii_phy_def = {
+       .phy_id         = 0x00000000,
+       .phy_id_mask    = 0x00000000,
+       .name           = "Generic MII",
+       .features       = MII_BASIC_FEATURES,
+       .magic_aneg     = 0,
+       .ops            = &generic_phy_ops
+};
+
+/* Probe order matters: the catch-all generic entry must stay last. */
+static struct mii_phy_def* mii_phy_table[] = {
+       &bcm5201_phy_def,
+       &bcm5221_phy_def,
+       &bcm5241_phy_def,
+       &bcm5400_phy_def,
+       &bcm5401_phy_def,
+       &bcm5411_phy_def,
+       &bcm5421_phy_def,
+       &bcm5421k2_phy_def,
+       &bcm5461_phy_def,
+       &bcm5462V_phy_def,
+       &marvell88e1101v1_phy_def,
+       &marvell88e1101v2_phy_def,
+       &marvell88e1111_phy_def,
+       &genmii_phy_def,
+       NULL
+};
+
+/* Identify the PHY at @mii_id and bind the matching mii_phy_def.
+ * Returns 0 on success, -ENODEV if the PHY cannot be reset or no
+ * table entry matches (the generic entry matches any ID, so the
+ * latter should not happen in practice).
+ */
+int mii_phy_probe(struct mii_phy *phy, int mii_id)
+{
+       int rc;
+       u32 id;
+       struct mii_phy_def* def;
+       int i;
+
+       /* We do not reset the mii_phy structure as the driver
+        * may re-probe the PHY regularly
+        */
+       phy->mii_id = mii_id;
+
+       /* Take PHY out of isolate mode and reset it. */
+       rc = reset_one_mii_phy(phy, mii_id);
+       if (rc)
+               goto fail;
+
+       /* Read ID and find matching entry */
+       id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2));
+       printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n",
+              id, mii_id);
+       for (i=0; (def = mii_phy_table[i]) != NULL; i++)
+               if ((id & def->phy_id_mask) == def->phy_id)
+                       break;
+       /* Should never be NULL (we have a generic entry), but... */
+       if (def == NULL)
+               goto fail;
+
+       phy->def = def;
+
+       return 0;
+fail:
+       phy->speed = 0;
+       phy->duplex = 0;
+       phy->pause = 0;
+       phy->advertising = 0;
+       return -ENODEV;
+}
+
+EXPORT_SYMBOL(mii_phy_probe);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/ethernet/sun/sungem_phy.h b/drivers/net/ethernet/sun/sungem_phy.h
new file mode 100644 (file)
index 0000000..af02f94
--- /dev/null
@@ -0,0 +1,132 @@
+#ifndef __SUNGEM_PHY_H__
+#define __SUNGEM_PHY_H__
+
+struct mii_phy;
+
+/* Operations supported by any kind of PHY.  Hooks a given PHY does
+ * not need (e.g. init/suspend/enable_fiber in some of the tables in
+ * sungem_phy.c) are left NULL.
+ */
+struct mii_phy_ops
+{
+       int             (*init)(struct mii_phy *phy);
+       int             (*suspend)(struct mii_phy *phy);
+       int             (*setup_aneg)(struct mii_phy *phy, u32 advertise);
+       int             (*setup_forced)(struct mii_phy *phy, int speed, int fd);
+       int             (*poll_link)(struct mii_phy *phy);      /* 1 = link usable */
+       int             (*read_link)(struct mii_phy *phy);
+       int             (*enable_fiber)(struct mii_phy *phy, int autoneg);
+};
+
+/* Structure used to statically define an mii/gii based PHY */
+struct mii_phy_def
+{
+       u32                             phy_id;         /* Concatenated ID1 << 16 | ID2 */
+       u32                             phy_id_mask;    /* Significant bits */
+       u32                             features;       /* Ethtool SUPPORTED_* defines */
+       int                             magic_aneg;     /* Autoneg does all speed test for us */
+       const char*                     name;           /* Human-readable model name */
+       const struct mii_phy_ops*       ops;            /* Read-only dispatch table */
+};
+
+/* BCM54xx operating modes, as decoded from the NCONFIG shadow
+ * register by the bcm5421/bcm5461 poll_link/read_link helpers.
+ */
+enum {
+       BCM54XX_COPPER,
+       BCM54XX_FIBER,
+       BCM54XX_GBIC,
+       BCM54XX_SGMII,
+       BCM54XX_UNKNOWN,
+};
+
+/* An instance of a PHY, partially borrowed from mii_if_info */
+struct mii_phy
+{
+       struct mii_phy_def*     def;    /* Entry matched by mii_phy_probe() */
+       u32                     advertising;    /* Last advertised ethtool bits */
+       int                     mii_id;
+
+       /* 1: autoneg enabled, 0: disabled */
+       int                     autoneg;
+
+       /* forced speed & duplex (no autoneg)
+        * partner speed & duplex & pause (autoneg)
+        */
+       int                     speed;
+       int                     duplex;
+       int                     pause;
+
+       /* Provided by host chip */
+       struct net_device       *dev;
+       int (*mdio_read) (struct net_device *dev, int mii_id, int reg);
+       void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val);
+       void                    *platform_data; /* device-tree node on PPC_PMAC (see bcm5421_init) */
+};
+
+/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
+ * filled, the remaining fields will be filled on return
+ */
+extern int mii_phy_probe(struct mii_phy *phy, int mii_id);
+
+
+/* MII definitions missing from mii.h */
+
+#define BMCR_SPD2      0x0040          /* Gigabit enable (bcm54xx)     */
+#define LPA_PAUSE      0x0400
+
+/* More PHY registers (model specific) */
+
+/* MII BCM5201 MULTIPHY interrupt register */
+#define MII_BCM5201_INTERRUPT                  0x1A
+#define MII_BCM5201_INTERRUPT_INTENABLE                0x4000
+
+#define MII_BCM5201_AUXMODE2                   0x1B
+#define MII_BCM5201_AUXMODE2_LOWPOWER          0x0008
+
+#define MII_BCM5201_MULTIPHY                    0x1E
+
+/* MII BCM5201 MULTIPHY register bits */
+#define MII_BCM5201_MULTIPHY_SERIALMODE         0x0002
+#define MII_BCM5201_MULTIPHY_SUPERISOLATE       0x0008
+
+/* MII BCM5221 Additional registers */
+#define MII_BCM5221_TEST                       0x1f
+#define MII_BCM5221_TEST_ENABLE_SHADOWS                0x0080
+#define MII_BCM5221_SHDOW_AUX_STAT2            0x1b
+#define MII_BCM5221_SHDOW_AUX_STAT2_APD                0x0020
+#define MII_BCM5221_SHDOW_AUX_MODE4            0x1a
+#define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE   0x0001
+#define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR   0x0004
+
+/* MII BCM5241 Additional registers */
+#define MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR 0x0008
+
+/* MII BCM5400 1000-BASET Control register */
+#define MII_BCM5400_GB_CONTROL                 0x09
+#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP   0x0200
+
+/* MII BCM5400 AUXCONTROL register */
+#define MII_BCM5400_AUXCONTROL                  0x18
+#define MII_BCM5400_AUXCONTROL_PWR10BASET       0x0004
+
+/* MII BCM5400 AUXSTATUS register */
+#define MII_BCM5400_AUXSTATUS                   0x19
+#define MII_BCM5400_AUXSTATUS_LINKMODE_MASK     0x0700
+#define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT    8
+
+/* 1000BT control (Marvell & BCM54xx at least) */
+#define MII_1000BASETCONTROL                   0x09
+#define MII_1000BASETCONTROL_FULLDUPLEXCAP     0x0200
+#define MII_1000BASETCONTROL_HALFDUPLEXCAP     0x0100
+
+/* Marvell 88E1011 PHY control */
+#define MII_M1011_PHY_SPEC_CONTROL             0x10
+#define MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX 0x20
+#define MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX   0x40
+
+/* Marvell 88E1011 PHY status */
+#define MII_M1011_PHY_SPEC_STATUS              0x11
+#define MII_M1011_PHY_SPEC_STATUS_1000         0x8000
+#define MII_M1011_PHY_SPEC_STATUS_100          0x4000
+#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK     0xc000
+#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX   0x2000
+#define MII_M1011_PHY_SPEC_STATUS_RESOLVED     0x0800
+#define MII_M1011_PHY_SPEC_STATUS_TX_PAUSE     0x0008
+#define MII_M1011_PHY_SPEC_STATUS_RX_PAUSE     0x0004
+
+#endif /* __SUNGEM_PHY_H__ */
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
new file mode 100644 (file)
index 0000000..856e05b
--- /dev/null
@@ -0,0 +1,3360 @@
+/* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
+ *           auto carrier detecting ethernet driver.  Also known as the
+ *           "Happy Meal Ethernet" found on SunSwift SBUS cards.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2002, 2003,
+ *             2006, 2008 David S. Miller (davem@davemloft.net)
+ *
+ * Changes :
+ * 2000/11/11 Willy Tarreau <willy AT meta-x.org>
+ *   - port to non-sparc architectures. Tested only on x86 and
+ *     only currently works with QFE PCI cards.
+ *   - ability to specify the MAC address at module load time by passing this
+ *     argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+
+#ifdef CONFIG_SPARC
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <asm/idprom.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/prom.h>
+#include <asm/auxio.h>
+#endif
+#include <asm/uaccess.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#endif
+
+#include "sunhme.h"
+
+#define DRV_NAME       "sunhme"
+#define DRV_VERSION    "3.10"
+#define DRV_RELDATE    "August 26, 2008"
+#define DRV_AUTHOR     "David S. Miller (davem@davemloft.net)"
+
+static char version[] =
+       DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
+MODULE_LICENSE("GPL");
+
+static int macaddr[6];
+
+/* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
+module_param_array(macaddr, int, NULL, 0);
+MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
+
+#ifdef CONFIG_SBUS
+static struct quattro *qfe_sbus_list;
+#endif
+
+#ifdef CONFIG_PCI
+static struct quattro *qfe_pci_list;
+#endif
+
+#undef HMEDEBUG
+#undef SXDEBUG
+#undef RXDEBUG
+#undef TXDEBUG
+#undef TXLOGGING
+
+#ifdef TXLOGGING
/* One entry of the (normally compiled-out, see TXLOGGING above) TX
 * debug log ring.
 */
struct hme_tx_logent {
	unsigned int tstamp;	/* jiffies when the event was logged */
	int tx_new, tx_old;	/* TX ring producer/consumer at log time */
	unsigned int action;	/* TXLOG_ACTION_* event bits */
#define TXLOG_ACTION_IRQ	0x01
#define TXLOG_ACTION_TXMIT	0x02
#define TXLOG_ACTION_TBUSY	0x04
#define TXLOG_ACTION_NBUFS	0x08
	unsigned int status;	/* event-specific status word */
};
/* Must stay a power of two: the cursor wraps with "& (TX_LOG_LEN - 1)". */
#define TX_LOG_LEN	128
static struct hme_tx_logent tx_log[TX_LOG_LEN];
static int txlog_cur_entry;
+static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
+{
+       struct hme_tx_logent *tlp;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       tlp = &tx_log[txlog_cur_entry];
+       tlp->tstamp = (unsigned int)jiffies;
+       tlp->tx_new = hp->tx_new;
+       tlp->tx_old = hp->tx_old;
+       tlp->action = a;
+       tlp->status = s;
+       txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
+       local_irq_restore(flags);
+}
+static __inline__ void tx_dump_log(void)
+{
+       int i, this;
+
+       this = txlog_cur_entry;
+       for (i = 0; i < TX_LOG_LEN; i++) {
+               printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
+                      tx_log[this].tstamp,
+                      tx_log[this].tx_new, tx_log[this].tx_old,
+                      tx_log[this].action, tx_log[this].status);
+               this = (this + 1) & (TX_LOG_LEN - 1);
+       }
+}
/* Dump the entire TX descriptor ring, four descriptors per output line.
 * Debug-only (TXLOGGING).
 * NOTE(review): the "[%d..%d]" label prints i and i+4 while the entries
 * shown are i..i+3 -- reads as a half-open range; confirm intent.
 */
static __inline__ void tx_dump_ring(struct happy_meal *hp)
{
	struct hmeal_init_block *hb = hp->happy_block;
	struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
	int i;

	for (i = 0; i < TX_RING_SIZE; i+=4) {
		printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
		       i, i + 4,
		       le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
		       le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
		       le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
		       le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
	}
}
+#else
+#define tx_add_log(hp, a, s)           do { } while(0)
+#define tx_dump_log()                  do { } while(0)
+#define tx_dump_ring(hp)               do { } while(0)
+#endif
+
+#ifdef HMEDEBUG
+#define HMD(x)  printk x
+#else
+#define HMD(x)
+#endif
+
+/* #define AUTO_SWITCH_DEBUG */
+
+#ifdef AUTO_SWITCH_DEBUG
+#define ASD(x)  printk x
+#else
+#define ASD(x)
+#endif
+
+#define DEFAULT_IPG0      16 /* For lance-mode only */
+#define DEFAULT_IPG1       8 /* For all modes */
+#define DEFAULT_IPG2       4 /* For all modes */
+#define DEFAULT_JAMSIZE    4 /* Toe jam */
+
+/* NOTE: In the descriptor writes one _must_ write the address
+ *      member _first_.  The card must not be allowed to see
+ *      the updated descriptor flags until the address is
+ *      correct.  I've added a write memory barrier between
+ *      the two stores so that I can sleep well at night... -DaveM
+ */
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
/* SBUS flavor of the register/descriptor accessors, installed into the
 * per-device function pointers when both SBUS and PCI support are built.
 */
static void sbus_hme_write32(void __iomem *reg, u32 val)
{
	sbus_writel(val, reg);
}

static u32 sbus_hme_read32(void __iomem *reg)
{
	return sbus_readl(reg);
}

/* Fill an RX descriptor.  The address must be visible before the flags
 * word (see the descriptor NOTE above), hence the wmb() in between.
 */
static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
{
	rxd->rx_addr = (__force hme32)addr;
	wmb();
	rxd->rx_flags = (__force hme32)flags;
}

/* Fill a TX descriptor; same address-before-flags ordering as above. */
static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
{
	txd->tx_addr = (__force hme32)addr;
	wmb();
	txd->tx_flags = (__force hme32)flags;
}

/* SBUS descriptors are stored in native byte order: plain cast, no swap. */
static u32 sbus_hme_read_desc32(hme32 *p)
{
	return (__force u32)*p;
}
+
/* PCI flavor of the register/descriptor accessors.  Descriptors are
 * kept little-endian in memory, hence the cpu_to_le32/le32_to_cpup.
 */
static void pci_hme_write32(void __iomem *reg, u32 val)
{
	writel(val, reg);
}

static u32 pci_hme_read32(void __iomem *reg)
{
	return readl(reg);
}

/* Fill an RX descriptor; address is made visible before flags (wmb()),
 * per the descriptor NOTE above.
 */
static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
{
	rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
	wmb();
	rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
}

/* Fill a TX descriptor; same address-before-flags ordering. */
static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
{
	txd->tx_addr = (__force hme32)cpu_to_le32(addr);
	wmb();
	txd->tx_flags = (__force hme32)cpu_to_le32(flags);
}

static u32 pci_hme_read_desc32(hme32 *p)
{
	return le32_to_cpup((__le32 *)p);
}
+
/* Dual (SBUS + PCI) build: every access is dispatched through the
 * per-device function pointers set up at probe time.
 */
#define hme_write32(__hp, __reg, __val) \
	((__hp)->write32((__reg), (__val)))
#define hme_read32(__hp, __reg) \
	((__hp)->read32(__reg))
#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
	((__hp)->write_rxd((__rxd), (__flags), (__addr)))
#define hme_write_txd(__hp, __txd, __flags, __addr) \
	((__hp)->write_txd((__txd), (__flags), (__addr)))
#define hme_read_desc32(__hp, __p) \
	((__hp)->read_desc32(__p))
#define hme_dma_map(__hp, __ptr, __size, __dir) \
	((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir)))
#define hme_dma_unmap(__hp, __addr, __size, __dir) \
	((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir)))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
	((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
	((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)))
+#else
+#ifdef CONFIG_SBUS
/* SBUS only compilation: accessors resolve directly to the sbus_*()
 * register helpers and native-endian descriptor stores; no per-device
 * indirection is needed.
 */
#define hme_write32(__hp, __reg, __val) \
	sbus_writel((__val), (__reg))
#define hme_read32(__hp, __reg) \
	sbus_readl(__reg)
#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
do {	(__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
	wmb(); \
	(__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
} while(0)
#define hme_write_txd(__hp, __txd, __flags, __addr) \
do {	(__txd)->tx_addr = (__force hme32)(u32)(__addr); \
	wmb(); \
	(__txd)->tx_flags = (__force hme32)(u32)(__flags); \
} while(0)
#define hme_read_desc32(__hp, __p)	((__force u32)(hme32)*(__p))
/* DMA helpers for SBUS-only builds, layered on the generic DMA API.
 * Fix: the sync helpers expanded to dma_dma_sync_single_for_{cpu,device}(),
 * which is not a kernel API (typo'd "dma_dma_" prefix) and would fail to
 * build as soon as either macro was used; the correct functions are
 * dma_sync_single_for_cpu() / dma_sync_single_for_device().
 */
#define hme_dma_map(__hp, __ptr, __size, __dir) \
	dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
#define hme_dma_unmap(__hp, __addr, __size, __dir) \
	dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
	dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
	dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
+#else
/* PCI only compilation: accessors resolve directly to readl/writel and
 * little-endian descriptor stores; no per-device indirection is needed.
 */
#define hme_write32(__hp, __reg, __val) \
	writel((__val), (__reg))
#define hme_read32(__hp, __reg) \
	readl(__reg)
#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
do {	(__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
	wmb(); \
	(__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
} while(0)
#define hme_write_txd(__hp, __txd, __flags, __addr) \
do {	(__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
	wmb(); \
	(__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
} while(0)
/* PCI-only build: descriptors are little-endian in memory. */
static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
{
	return le32_to_cpup((__le32 *)p);
}
/* PCI-only DMA helpers, mapped straight onto the pci_* DMA API. */
#define hme_dma_map(__hp, __ptr, __size, __dir) \
	pci_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
#define hme_dma_unmap(__hp, __addr, __size, __dir) \
	pci_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
	pci_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
	pci_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
+#endif
+#endif
+
+
+/* Oh yes, the MIF BitBang is mighty fun to program.  BitBucket is more like it. */
/* Clock one bit out on the MIF bit-bang interface: drive the data line,
 * then pulse the clock low -> high so the PHY latches the bit.
 */
static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
{
	hme_write32(hp, tregs + TCVR_BBDATA, bit);
	hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
	hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
}
+
+#if 0
/* Unused bit-bang input variant, kept under #if 0.  BB_GET_BIT2 below
 * is the one actually used; unlike this one it samples while the clock
 * is still low (after a settle delay).
 */
static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
{
	u32 ret;

	hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
	hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
	ret = hme_read32(hp, tregs + TCVR_CFG);
	if (internal)
		ret &= TCV_CFG_MDIO0;
	else
		ret &= TCV_CFG_MDIO1;

	return ret;
}
+#endif
+
+static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
+{
+       u32 retval;
+
+       hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
+       udelay(1);
+       retval = hme_read32(hp, tregs + TCVR_CFG);
+       if (internal)
+               retval &= TCV_CFG_MDIO0;
+       else
+               retval &= TCV_CFG_MDIO1;
+       hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
+
+       return retval;
+}
+
+#define TCVR_FAILURE      0x80000000     /* Impossible MIF read value */
+
/* Read a PHY register over the MIF bit-bang interface; used when frame
 * mode (HFLAG_FENABLE) is unavailable.
 * NOTE(review): the data loop ORs BB_GET_BIT2()'s masked result in
 * without shifting by the bit position, so all 16 reads land on the
 * same bit -- looks like a missing "<< i"; confirm whether bit-bang
 * reads are actually relied upon.
 */
static int happy_meal_bb_read(struct happy_meal *hp,
			      void __iomem *tregs, int reg)
{
	u32 tmp;
	int retval = 0;
	int i;

	ASD(("happy_meal_bb_read: reg=%d ", reg));

	/* Enable the MIF BitBang outputs. */
	hme_write32(hp, tregs + TCVR_BBOENAB, 1);

	/* Force BitBang into the idle state (32 one bits of preamble). */
	for (i = 0; i < 32; i++)
		BB_PUT_BIT(hp, tregs, 1);

	/* Give it the read sequence (start + read opcode bits 0,1,1,0). */
	BB_PUT_BIT(hp, tregs, 0);
	BB_PUT_BIT(hp, tregs, 1);
	BB_PUT_BIT(hp, tregs, 1);
	BB_PUT_BIT(hp, tregs, 0);

	/* Give it the PHY address (5 bits, MSB first). */
	tmp = hp->paddr & 0xff;
	for (i = 4; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));

	/* Tell it what register we want to read (5 bits, MSB first). */
	tmp = (reg & 0xff);
	for (i = 4; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));

	/* Close down the MIF BitBang outputs. */
	hme_write32(hp, tregs + TCVR_BBOENAB, 0);

	/* Now read in the value: one throw-away clock, 16 data clocks,
	 * then a few trailing clocks to idle the bus.
	 */
	(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
	for (i = 15; i >= 0; i--)
		retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
	(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
	(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
	(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
	ASD(("value=%x\n", retval));
	return retval;
}
+
/* Write a 16-bit value to a PHY register over the MIF bit-bang
 * interface.  Mirror of happy_meal_bb_read() with the write opcode;
 * there is no completion status, so failures cannot be reported.
 */
static void happy_meal_bb_write(struct happy_meal *hp,
				void __iomem *tregs, int reg,
				unsigned short value)
{
	u32 tmp;
	int i;

	ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));

	/* Enable the MIF BitBang outputs. */
	hme_write32(hp, tregs + TCVR_BBOENAB, 1);

	/* Force BitBang into the idle state (32 one bits of preamble). */
	for (i = 0; i < 32; i++)
		BB_PUT_BIT(hp, tregs, 1);

	/* Give it write sequence (start + write opcode bits 0,1,0,1). */
	BB_PUT_BIT(hp, tregs, 0);
	BB_PUT_BIT(hp, tregs, 1);
	BB_PUT_BIT(hp, tregs, 0);
	BB_PUT_BIT(hp, tregs, 1);

	/* Give it the PHY address (5 bits, MSB first). */
	tmp = (hp->paddr & 0xff);
	for (i = 4; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));

	/* Tell it what register we will be writing (5 bits, MSB first). */
	tmp = (reg & 0xff);
	for (i = 4; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));

	/* Tell it to become ready for the bits (drive the "10" turnaround). */
	BB_PUT_BIT(hp, tregs, 1);
	BB_PUT_BIT(hp, tregs, 0);

	/* Shift out the 16 data bits, MSB first. */
	for (i = 15; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((value >> i) & 1));

	/* Close down the MIF BitBang outputs. */
	hme_write32(hp, tregs + TCVR_BBOENAB, 0);
}
+
+#define TCVR_READ_TRIES   16
+
/* Read a PHY register via the MIF.  Uses frame mode when HFLAG_FENABLE
 * is set, otherwise falls back to bit-banging.  Returns the 16-bit
 * register value, or TCVR_FAILURE (an impossible 16-bit value) when no
 * transceiver is configured or the MIF never signals completion.
 */
static int happy_meal_tcvr_read(struct happy_meal *hp,
				void __iomem *tregs, int reg)
{
	int tries = TCVR_READ_TRIES;
	int retval;

	ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
	if (hp->tcvr_type == none) {
		ASD(("no transceiver, value=TCVR_FAILURE\n"));
		return TCVR_FAILURE;
	}

	if (!(hp->happy_flags & HFLAG_FENABLE)) {
		ASD(("doing bit bang\n"));
		return happy_meal_bb_read(hp, tregs, reg);
	}

	/* Launch the frame, then poll bit 16 of TCVR_FRAME for completion
	 * (up to TCVR_READ_TRIES * 20us).
	 */
	hme_write32(hp, tregs + TCVR_FRAME,
		    (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
	while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
		udelay(20);
	if (!tries) {
		printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
		return TCVR_FAILURE;
	}
	retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
	ASD(("value=%04x\n", retval));
	return retval;
}
+
+#define TCVR_WRITE_TRIES  16
+
/* Write a PHY register via the MIF.  Uses frame mode when HFLAG_FENABLE
 * is set, otherwise bit-banging.  A timed-out frame write is only
 * logged; callers get no error indication.
 */
static void happy_meal_tcvr_write(struct happy_meal *hp,
				  void __iomem *tregs, int reg,
				  unsigned short value)
{
	int tries = TCVR_WRITE_TRIES;

	ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));

	/* Welcome to Sun Microsystems, can I take your order please? */
	if (!(hp->happy_flags & HFLAG_FENABLE)) {
		happy_meal_bb_write(hp, tregs, reg, value);
		return;
	}

	/* Would you like fries with that?  Launch the frame and poll
	 * bit 16 for completion, up to TCVR_WRITE_TRIES * 20us.
	 */
	hme_write32(hp, tregs + TCVR_FRAME,
		    (FRAME_WRITE | (hp->paddr << 23) |
		     ((reg & 0xff) << 18) | (value & 0xffff)));
	while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
		udelay(20);

	/* Anything else? */
	if (!tries)
		printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");

	/* Fifty-two cents is your change, have a nice day. */
}
+
+/* Auto negotiation.  The scheme is very simple.  We have a timer routine
+ * that keeps watching the auto negotiation process as it progresses.
+ * The DP83840 is first told to start doing it's thing, we set up the time
+ * and place the timer state machine in it's initial state.
+ *
+ * Here the timer peeks at the DP83840 status registers at each click to see
+ * if the auto negotiation has completed, we assume here that the DP83840 PHY
+ * will time out at some point and just tell us what (didn't) happen.  For
+ * complete coverage we only allow so many of the ticks at this level to run,
+ * when this has expired we print a warning message and try another strategy.
+ * This "other" strategy is to force the interface into various speed/duplex
+ * configurations and we stop when we see a link-up condition before the
+ * maximum number of "peek" ticks have occurred.
+ *
+ * Once a valid link status has been detected we configure the BigMAC and
+ * the rest of the Happy Meal to speak the most efficient protocol we could
+ * get a clean link for.  The priority for link configurations, highest first
+ * is:
+ *                 100 Base-T Full Duplex
+ *                 100 Base-T Half Duplex
+ *                 10 Base-T Full Duplex
+ *                 10 Base-T Half Duplex
+ *
+ * We start a new timer now, after a successful auto negotiation status has
+ * been detected.  This timer just waits for the link-up bit to get set in
+ * the BMCR of the DP83840.  When this occurs we print a kernel log message
+ * describing the link type in use and the fact that it is up.
+ *
+ * If a fatal error of some sort is signalled and detected in the interrupt
+ * service routine, and the chip is reset, or the link is ifconfig'd down
+ * and then back up, this entire process repeats itself all over again.
+ */
+static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
+{
+       hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+
+       /* Downgrade from full to half duplex.  Only possible
+        * via ethtool.
+        */
+       if (hp->sw_bmcr & BMCR_FULLDPLX) {
+               hp->sw_bmcr &= ~(BMCR_FULLDPLX);
+               happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+               return 0;
+       }
+
+       /* Downgrade from 100 to 10. */
+       if (hp->sw_bmcr & BMCR_SPEED100) {
+               hp->sw_bmcr &= ~(BMCR_SPEED100);
+               happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+               return 0;
+       }
+
+       /* We've tried everything. */
+       return -1;
+}
+
+static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
+{
+       printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
+       if (hp->tcvr_type == external)
+               printk("external ");
+       else
+               printk("internal ");
+       printk("transceiver at ");
+       hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
+       if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
+               if (hp->sw_lpa & LPA_100FULL)
+                       printk("100Mb/s, Full Duplex.\n");
+               else
+                       printk("100Mb/s, Half Duplex.\n");
+       } else {
+               if (hp->sw_lpa & LPA_10FULL)
+                       printk("10Mb/s, Full Duplex.\n");
+               else
+                       printk("10Mb/s, Half Duplex.\n");
+       }
+}
+
+static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
+{
+       printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
+       if (hp->tcvr_type == external)
+               printk("external ");
+       else
+               printk("internal ");
+       printk("transceiver at ");
+       hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+       if (hp->sw_bmcr & BMCR_SPEED100)
+               printk("100Mb/s, ");
+       else
+               printk("10Mb/s, ");
+       if (hp->sw_bmcr & BMCR_FULLDPLX)
+               printk("Full Duplex.\n");
+       else
+               printk("Half Duplex.\n");
+}
+
/* Program the BigMAC TX configuration with the duplex setting that was
 * either negotiated (timer_state == arbwait: use the link partner
 * ability bits) or forced (use our own BMCR).  Returns 0 on success,
 * 1 if the link partner advertised no usable mode.
 * Caller holds hp->happy_lock.
 */
static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
{
	int full;

	/* All we care about is making sure the bigmac tx_cfg has a
	 * proper duplex setting.
	 */
	if (hp->timer_state == arbwait) {
		hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
		if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
			goto no_response;
		/* Pick duplex from the best ability the partner offers,
		 * 100 Mb/s modes taking priority over 10 Mb/s ones.
		 */
		if (hp->sw_lpa & LPA_100FULL)
			full = 1;
		else if (hp->sw_lpa & LPA_100HALF)
			full = 0;
		else if (hp->sw_lpa & LPA_10FULL)
			full = 1;
		else
			full = 0;
	} else {
		/* Forcing a link mode. */
		hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
		if (hp->sw_bmcr & BMCR_FULLDPLX)
			full = 1;
		else
			full = 0;
	}

	/* Before changing other bits in the tx_cfg register, and in
	 * general any of the other TX config registers too, you
	 * must:
	 * 1) Clear Enable
	 * 2) Poll with reads until that bit reads back as zero
	 * 3) Make TX configuration changes
	 * 4) Set Enable once more
	 */
	hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
		    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
		    ~(BIGMAC_TXCFG_ENABLE));
	while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
		barrier();
	if (full) {
		hp->happy_flags |= HFLAG_FULL;
		hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
			    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
			    BIGMAC_TXCFG_FULLDPLX);
	} else {
		hp->happy_flags &= ~(HFLAG_FULL);
		hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
			    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
			    ~(BIGMAC_TXCFG_FULLDPLX));
	}
	hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
		    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
		    BIGMAC_TXCFG_ENABLE);
	return 0;
no_response:
	return 1;
}
+
+static int happy_meal_init(struct happy_meal *hp);
+
+static int is_lucent_phy(struct happy_meal *hp)
+{
+       void __iomem *tregs = hp->tcvregs;
+       unsigned short mr2, mr3;
+       int ret = 0;
+
+       mr2 = happy_meal_tcvr_read(hp, tregs, 2);
+       mr3 = happy_meal_tcvr_read(hp, tregs, 3);
+       if ((mr2 & 0xffff) == 0x0180 &&
+           ((mr3 & 0xffff) >> 10) == 0x1d)
+               ret = 1;
+
+       return ret;
+}
+
/* Link negotiation state machine, driven by hp->happy_timer at roughly
 * 1.2 second ticks.  States: arbwait (waiting for auto-negotiation) ->
 * lupwait (waiting for link-up) -> asleep (done), with ltrywait as the
 * fallback that forces fixed speed/duplex permutations when
 * auto-negotiation fails.  Takes hp->happy_lock itself.
 */
static void happy_meal_timer(unsigned long data)
{
	struct happy_meal *hp = (struct happy_meal *) data;
	void __iomem *tregs = hp->tcvregs;
	int restart_timer = 0;

	spin_lock_irq(&hp->happy_lock);

	hp->timer_ticks++;
	switch(hp->timer_state) {
	case arbwait:
		/* Give auto-negotiation up to 10 ticks (~12 seconds at
		 * 1.2s/tick) before falling back to forced link modes.
		 */
		if (hp->timer_ticks >= 10) {
			/* Enter force mode. */
	do_force_mode:
			hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
			printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
			       hp->dev->name);
			/* First permutation: 100Mb/s, half duplex. */
			hp->sw_bmcr = BMCR_SPEED100;
			happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

			if (!is_lucent_phy(hp)) {
				/* Clear the transceiver-disable bit so that
				 * ltrywait can sample a valid link state.
				 * NOTE(review): the original comment spoke of
				 * disabling the transceiver, but the code
				 * clears CSCONFIG_TCVDISAB -- confirm intent.
				 */
				hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
				hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
				happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
			}
			hp->timer_state = ltrywait;
			hp->timer_ticks = 0;
			restart_timer = 1;
		} else {
			/* Anything interesting happen? */
			hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
			if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
				int ret;

				/* Just what we've been waiting for... */
				ret = set_happy_link_modes(hp, tregs);
				if (ret) {
					/* Ooops, something bad happened, go to force
					 * mode.
					 *
					 * XXX Broken hubs which don't support 802.3u
					 * XXX auto-negotiation make this happen as well.
					 */
					goto do_force_mode;
				}

				/* Success, at least so far, advance our state engine. */
				hp->timer_state = lupwait;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case lupwait:
		/* Auto negotiation was successful and we are awaiting a
		 * link up status.  I have decided to let this timer run
		 * forever until some sort of error is signalled, reporting
		 * a message to the user at 10 second intervals.
		 */
		hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
		if (hp->sw_bmsr & BMSR_LSTATUS) {
			/* Wheee, it's up, display the link mode in use and put
			 * the timer to sleep.
			 */
			display_link_mode(hp, tregs);
			hp->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (hp->timer_ticks >= 10) {
				printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
				       "not completely up.\n", hp->dev->name);
				hp->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case ltrywait:
		/* Making the timeout here too long can make it take
		 * annoyingly long to attempt all of the link mode
		 * permutations, but then again this is essentially
		 * error recovery code for the most part.
		 */
		hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
		hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
		if (hp->timer_ticks == 1) {
			if (!is_lucent_phy(hp)) {
				/* Tick 1: set the transceiver-disable bit;
				 * tick 2 below clears it again, and the link
				 * state is then checked on later ticks.
				 * (The original comment said "Re-enable",
				 * but this tick sets CSCONFIG_TCVDISAB.)
				 */
				hp->sw_csconfig |= CSCONFIG_TCVDISAB;
				happy_meal_tcvr_write(hp, tregs,
						      DP83840_CSCONFIG, hp->sw_csconfig);
			}
			restart_timer = 1;
			break;
		}
		if (hp->timer_ticks == 2) {
			if (!is_lucent_phy(hp)) {
				/* Tick 2: clear the disable bit again. */
				hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
				happy_meal_tcvr_write(hp, tregs,
						      DP83840_CSCONFIG, hp->sw_csconfig);
			}
			restart_timer = 1;
			break;
		}
		if (hp->sw_bmsr & BMSR_LSTATUS) {
			/* Force mode selection success. */
			display_forced_link_mode(hp, tregs);
			set_happy_link_modes(hp, tregs); /* XXX error? then what? */
			hp->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (hp->timer_ticks >= 4) { /* 6 seconds or so... */
				int ret;

				ret = try_next_permutation(hp, tregs);
				if (ret == -1) {
					/* Aieee, tried them all, reset the
					 * chip and try all over again.
					 */

					/* Let the user know... */
					printk(KERN_NOTICE "%s: Link down, cable problem?\n",
					       hp->dev->name);

					ret = happy_meal_init(hp);
					if (ret) {
						/* ho hum... */
						printk(KERN_ERR "%s: Error, cannot re-init the "
						       "Happy Meal.\n", hp->dev->name);
					}
					goto out;
				}
				if (!is_lucent_phy(hp)) {
					/* Disable the transceiver again for the
					 * tick-1/tick-2 dance above before
					 * probing the new forced mode.
					 */
					hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
									       DP83840_CSCONFIG);
					hp->sw_csconfig |= CSCONFIG_TCVDISAB;
					happy_meal_tcvr_write(hp, tregs,
							      DP83840_CSCONFIG, hp->sw_csconfig);
				}
				hp->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case asleep:
	default:
		/* Cannot happen: the timer is not re-armed once asleep. */
		printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
		       hp->dev->name);
		restart_timer = 0;
		hp->timer_ticks = 0;
		hp->timer_state = asleep; /* foo on you */
		break;
	}

	if (restart_timer) {
		hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
		add_timer(&hp->happy_timer);
	}

out:
	spin_unlock_irq(&hp->happy_lock);
}
+
+#define TX_RESET_TRIES     32
+#define RX_RESET_TRIES     32
+
+/* Software-reset the BigMac transmit path: kick the TX reset register
+ * and poll (up to TX_RESET_TRIES polls, 20us apart) for bit 0 to clear.
+ * Logs an error if the hardware never acknowledges the reset.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
+{
+       int tries = TX_RESET_TRIES;
+
+       HMD(("happy_meal_tx_reset: reset, "));
+
+       /* Would you like to try our SMCC Delux? */
+       hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
+       /* Wait for the reset bit to self-clear. */
+       while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
+               udelay(20);
+
+       /* Lettuce, tomato, buggy hardware (no extra charge)? */
+       if (!tries)
+               printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");
+
+       /* Take care. */
+       HMD(("done\n"));
+}
+
+/* Software-reset the BigMac receive path: kick the RX reset register
+ * and poll (up to RX_RESET_TRIES polls, 20us apart) for bit 0 to clear.
+ * Logs an error if the hardware never acknowledges the reset.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
+{
+       int tries = RX_RESET_TRIES;
+
+       HMD(("happy_meal_rx_reset: reset, "));
+
+       /* We have a special on GNU/Viking hardware bugs today. */
+       hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
+       /* Wait for the reset bit to self-clear. */
+       while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
+               udelay(20);
+
+       /* Will that be all? */
+       if (!tries)
+               printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");
+
+       /* Don't forget your vik_1137125_wa.  Have a nice day. */
+       HMD(("done\n"));
+}
+
+#define STOP_TRIES         16
+
+/* Global software reset of the whole Happy Meal: write GREG_RESET_ALL
+ * to the global reset register and poll (up to STOP_TRIES polls, 20us
+ * apart) until it reads back zero.  Logs an error if the chip never
+ * comes out of reset.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
+{
+       int tries = STOP_TRIES;
+
+       HMD(("happy_meal_stop: reset, "));
+
+       /* We're consolidating our STB products, it's your lucky day. */
+       hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
+       while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
+               udelay(20);
+
+       /* Come back next week when we are "Sun Microelectronics". */
+       if (!tries)
+               printk(KERN_ERR "happy meal: Fry guys.");
+
+       /* Remember: "Different name, same old buggy as shit hardware." */
+       HMD(("done\n"));
+}
+
+/* Fold the BigMac hardware error counters into hp->net_stats and clear
+ * the hardware registers so the next call accumulates fresh deltas.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
+{
+       struct net_device_stats *stats = &hp->net_stats;
+
+       stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
+       hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
+
+       stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
+       hme_write32(hp, bregs + BMAC_UNALECTR, 0);
+
+       stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
+       hme_write32(hp, bregs + BMAC_GLECTR, 0);
+
+       /* BMAC_EXCTR is read twice -- once for tx_aborted_errors, once as
+        * part of the collision total -- before being cleared below.
+        */
+       stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
+
+       stats->collisions +=
+               (hme_read32(hp, bregs + BMAC_EXCTR) +
+                hme_read32(hp, bregs + BMAC_LTCTR));
+       hme_write32(hp, bregs + BMAC_EXCTR, 0);
+       hme_write32(hp, bregs + BMAC_LTCTR, 0);
+}
+
+/* Take the MIF out of polling mode.  No-op unless both HFLAG_POLLENABLE
+ * and HFLAG_POLL are currently set; otherwise the MIF interrupts are
+ * masked, the poll-enable bit in TCVR_CFG is cleared, HFLAG_POLL is
+ * dropped and we wait 200us for the hardware to settle.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
+{
+       ASD(("happy_meal_poll_stop: "));
+
+       /* If polling disabled or not polling already, nothing to do. */
+       if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
+          (HFLAG_POLLENABLE | HFLAG_POLL)) {
+               HMD(("not polling, return\n"));
+               return;
+       }
+
+       /* Shut up the MIF. */
+       ASD(("were polling, mif ints off, "));
+       hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
+
+       /* Turn off polling. */
+       ASD(("polling off, "));
+       hme_write32(hp, tregs + TCVR_CFG,
+                   hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
+
+       /* We are no longer polling. */
+       hp->happy_flags &= ~(HFLAG_POLL);
+
+       /* Let the bits set. */
+       udelay(200);
+       ASD(("done\n"));
+}
+
+/* Only Sun can take such nice parts and fuck up the programming interface
+ * like this.  Good job guys...
+ */
+#define TCVR_RESET_TRIES       16 /* It should reset quickly        */
+#define TCVR_UNISOLATE_TRIES   32 /* Dis-isolation can take longer. */
+
+/* Reset the PHY via MII BMCR_RESET and bring it out of isolation.
+ *
+ * First the MIF port-select is flipped so that the transceiver indicated
+ * by the MDIO attach bits is addressed (the other one is isolated via
+ * BMCR loopback/power-down/isolate), then BMCR_RESET is issued and
+ * polled for self-clear, fresh copies of BMSR/PHYSID/ADVERTISE are
+ * cached in *hp, and finally BMCR_ISOLATE is cleared and polled away.
+ * Returns 0 on success, -1 on any MII read failure or poll timeout.
+ *
+ * hp->happy_lock must be held
+ */
+static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
+{
+       u32 tconfig;
+       int result, tries = TCVR_RESET_TRIES;
+
+       tconfig = hme_read32(hp, tregs + TCVR_CFG);
+       /* tconfig is u32: use %08x (the old %08lx mismatched the argument
+        * width when AUTO_SWITCH_DEBUG was enabled).
+        */
+       ASD(("happy_meal_tcvr_reset: tcfg<%08x> ", tconfig));
+       if (hp->tcvr_type == external) {
+               ASD(("external<"));
+               hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
+               hp->tcvr_type = internal;
+               hp->paddr = TCV_PADDR_ITX;
+               ASD(("ISOLATE,"));
+               happy_meal_tcvr_write(hp, tregs, MII_BMCR,
+                                     (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
+               result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+               if (result == TCVR_FAILURE) {
+                       ASD(("phyread_fail>\n"));
+                       return -1;
+               }
+               ASD(("phyread_ok,PSELECT>"));
+               hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
+               hp->tcvr_type = external;
+               hp->paddr = TCV_PADDR_ETX;
+       } else {
+               if (tconfig & TCV_CFG_MDIO1) {
+                       ASD(("internal<PSELECT,"));
+                       hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
+                       ASD(("ISOLATE,"));
+                       happy_meal_tcvr_write(hp, tregs, MII_BMCR,
+                                             (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
+                       result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+                       if (result == TCVR_FAILURE) {
+                               ASD(("phyread_fail>\n"));
+                               return -1;
+                       }
+                       ASD(("phyread_ok,~PSELECT>"));
+                       hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
+                       hp->tcvr_type = internal;
+                       hp->paddr = TCV_PADDR_ITX;
+               }
+       }
+
+       ASD(("BMCR_RESET "));
+       happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
+
+       /* Poll for BMCR_RESET to self-clear. */
+       while (--tries) {
+               result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+               if (result == TCVR_FAILURE)
+                       return -1;
+               hp->sw_bmcr = result;
+               if (!(result & BMCR_RESET))
+                       break;
+               udelay(20);
+       }
+       if (!tries) {
+               ASD(("BMCR RESET FAILED!\n"));
+               return -1;
+       }
+       ASD(("RESET_OK\n"));
+
+       /* Get fresh copies of the PHY registers. */
+       hp->sw_bmsr      = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+       hp->sw_physid1   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
+       hp->sw_physid2   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
+       hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
+
+       ASD(("UNISOLATE"));
+       hp->sw_bmcr &= ~(BMCR_ISOLATE);
+       happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+       /* Dis-isolation can take longer than the reset itself. */
+       tries = TCVR_UNISOLATE_TRIES;
+       while (--tries) {
+               result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+               if (result == TCVR_FAILURE)
+                       return -1;
+               if (!(result & BMCR_ISOLATE))
+                       break;
+               udelay(20);
+       }
+       if (!tries) {
+               ASD((" FAILED!\n"));
+               return -1;
+       }
+       ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
+       if (!is_lucent_phy(hp)) {
+               result = happy_meal_tcvr_read(hp, tregs,
+                                             DP83840_CSCONFIG);
+               happy_meal_tcvr_write(hp, tregs,
+                                     DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
+       }
+       return 0;
+}
+
+/* Figure out whether we have an internal or external transceiver.
+ *
+ * If MIF polling is active it must be stopped before switching, so the
+ * polling branch only changes type when the MDIO attach bits (or a dead
+ * poll status) say the current choice is wrong.  Otherwise the MDIO0/
+ * MDIO1 bits in TCVR_CFG are consulted directly.  Updates hp->tcvr_type,
+ * hp->paddr and the TCVR_CFG port-select bit to match what was found.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
+{
+       unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
+
+       ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
+       if (hp->happy_flags & HFLAG_POLL) {
+               /* If we are polling, we must stop to get the transceiver type. */
+               ASD(("<polling> "));
+               if (hp->tcvr_type == internal) {
+                       if (tconfig & TCV_CFG_MDIO1) {
+                               /* External PHY attached: switch over to it. */
+                               ASD(("<internal> <poll stop> "));
+                               happy_meal_poll_stop(hp, tregs);
+                               hp->paddr = TCV_PADDR_ETX;
+                               hp->tcvr_type = external;
+                               ASD(("<external>\n"));
+                               tconfig &= ~(TCV_CFG_PENABLE);
+                               tconfig |= TCV_CFG_PSELECT;
+                               hme_write32(hp, tregs + TCVR_CFG, tconfig);
+                       }
+               } else {
+                       if (hp->tcvr_type == external) {
+                               ASD(("<external> "));
+                               /* A zero poll status means the external PHY
+                                * is gone: fall back to the internal one.
+                                */
+                               if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
+                                       ASD(("<poll stop> "));
+                                       happy_meal_poll_stop(hp, tregs);
+                                       hp->paddr = TCV_PADDR_ITX;
+                                       hp->tcvr_type = internal;
+                                       ASD(("<internal>\n"));
+                                       hme_write32(hp, tregs + TCVR_CFG,
+                                                   hme_read32(hp, tregs + TCVR_CFG) &
+                                                   ~(TCV_CFG_PSELECT));
+                               }
+                               ASD(("\n"));
+                       } else {
+                               ASD(("<none>\n"));
+                       }
+               }
+       } else {
+               u32 reread = hme_read32(hp, tregs + TCVR_CFG);
+
+               /* Else we can just work off of the MDIO bits. */
+               ASD(("<not polling> "));
+               if (reread & TCV_CFG_MDIO1) {
+                       hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
+                       hp->paddr = TCV_PADDR_ETX;
+                       hp->tcvr_type = external;
+                       ASD(("<external>\n"));
+               } else {
+                       if (reread & TCV_CFG_MDIO0) {
+                               hme_write32(hp, tregs + TCVR_CFG,
+                                           tconfig & ~(TCV_CFG_PSELECT));
+                               hp->paddr = TCV_PADDR_ITX;
+                               hp->tcvr_type = internal;
+                               ASD(("<internal>\n"));
+                       } else {
+                               printk(KERN_ERR "happy meal: Transceiver and a coke please.");
+                               hp->tcvr_type = none; /* Grrr... */
+                               ASD(("<none>\n"));
+                       }
+               }
+       }
+}
+
+/* The receive ring buffers are a bit tricky to get right.  Here goes...
+ *
+ * The buffers we dma into must be 64 byte aligned.  So we use a special
+ * alloc_skb() routine for the happy meal to allocate 64 bytes more than
+ * we really need.
+ *
+ * We use skb_reserve() to align the data block we get in the skb.  We
+ * also program the etxregs->cfg register to use an offset of 2.  This
+ * empirical constant plus the ethernet header size will always leave
+ * us with a nicely aligned ip header once we pass things up to the
+ * protocol layers.
+ *
+ * The numbers work out to:
+ *
+ *         Max ethernet frame size         1518
+ *         Ethernet header size              14
+ *         Happy Meal base offset             2
+ *
+ * Say a skb data area is at 0xf001b010, and the size allocated is
+ * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
+ *
+ * First our alloc_skb() routine aligns the data base to a 64 byte
+ * boundary.  We now have 0xf001b040 as our skb data address.  We
+ * plug this into the receive descriptor address.
+ *
+ * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
+ * So now the data we will end up looking at starts at 0xf001b042.  When
+ * the packet arrives, we will check out the size received and subtract
+ * this from the skb->length.  Then we just pass the packet up to the
+ * protocols as is, and allocate a new skb to replace this slot we have
+ * just received from.
+ *
+ * The ethernet layer will strip the ether header from the front of the
+ * skb we just sent to it, this leaves us with the ip header sitting
+ * nicely aligned at 0xf001b050.  Also, for tcp and udp packets the
+ * Happy Meal has even checksummed the tcp/udp data for us.  The 16
+ * bit checksum is obtained from the low bits of the receive descriptor
+ * flags, thus:
+ *
+ *     skb->csum = rxd->rx_flags & 0xffff;
+ *     skb->ip_summed = CHECKSUM_COMPLETE;
+ *
+ * before sending off the skb to the protocols, and we are good as gold.
+ */
+/* Release every skb still held by the RX and TX rings, unmapping the
+ * DMA buffers first.  A TX skb may occupy nr_frags + 1 consecutive
+ * descriptors: the head was mapped with dma_map_single() and each page
+ * fragment with dma_map_page(), so they are unmapped accordingly, and
+ * the outer ring index is advanced past the extra descriptors.
+ */
+static void happy_meal_clean_rings(struct happy_meal *hp)
+{
+       int i;
+
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               if (hp->rx_skbs[i] != NULL) {
+                       struct sk_buff *skb = hp->rx_skbs[i];
+                       struct happy_meal_rxd *rxd;
+                       u32 dma_addr;
+
+                       rxd = &hp->happy_block->happy_meal_rxd[i];
+                       dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
+                       dma_unmap_single(hp->dma_dev, dma_addr,
+                                        RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
+                       dev_kfree_skb_any(skb);
+                       hp->rx_skbs[i] = NULL;
+               }
+       }
+
+       for (i = 0; i < TX_RING_SIZE; i++) {
+               if (hp->tx_skbs[i] != NULL) {
+                       struct sk_buff *skb = hp->tx_skbs[i];
+                       struct happy_meal_txd *txd;
+                       u32 dma_addr;
+                       int frag;
+
+                       hp->tx_skbs[i] = NULL;
+
+                       /* Walk the head fragment plus every page fragment;
+                        * the buffer length is held in the descriptor flags.
+                        */
+                       for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+                               txd = &hp->happy_block->happy_meal_txd[i];
+                               dma_addr = hme_read_desc32(hp, &txd->tx_addr);
+                               if (!frag)
+                                       dma_unmap_single(hp->dma_dev, dma_addr,
+                                                        (hme_read_desc32(hp, &txd->tx_flags)
+                                                         & TXFLAG_SIZE),
+                                                        DMA_TO_DEVICE);
+                               else
+                                       dma_unmap_page(hp->dma_dev, dma_addr,
+                                                        (hme_read_desc32(hp, &txd->tx_flags)
+                                                         & TXFLAG_SIZE),
+                                                        DMA_TO_DEVICE);
+
+                               /* Skip the descriptors this skb consumed. */
+                               if (frag != skb_shinfo(skb)->nr_frags)
+                                       i++;
+                       }
+
+                       dev_kfree_skb_any(skb);
+               }
+       }
+}
+
+/* Reset the ring indices, free any leftover buffers, then populate the
+ * RX ring with freshly allocated DMA-mapped skbs (slots whose skb
+ * allocation fails are left as zeroed descriptors, i.e. without
+ * RXFLAG_OWN) and zero out every TX descriptor.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_init_rings(struct happy_meal *hp)
+{
+       struct hmeal_init_block *hb = hp->happy_block;
+       struct net_device *dev = hp->dev;
+       int i;
+
+       HMD(("happy_meal_init_rings: counters to zero, "));
+       hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
+
+       /* Free any skippy bufs left around in the rings. */
+       HMD(("clean, "));
+       happy_meal_clean_rings(hp);
+
+       /* Now get new skippy bufs for the receive ring. */
+       HMD(("init rxring, "));
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               struct sk_buff *skb;
+
+               skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+               if (!skb) {
+                       hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
+                       continue;
+               }
+               hp->rx_skbs[i] = skb;
+               skb->dev = dev;
+
+               /* Because we reserve afterwards. */
+               skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+               /* Hand the buffer to the chip: OWN bit plus the usable
+                * length in the upper half of the flags word.
+                */
+               hme_write_rxd(hp, &hb->happy_meal_rxd[i],
+                             (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
+                             dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
+                                            DMA_FROM_DEVICE));
+               skb_reserve(skb, RX_OFFSET);
+       }
+
+       HMD(("init txring, "));
+       for (i = 0; i < TX_RING_SIZE; i++)
+               hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
+
+       HMD(("done\n"));
+}
+
+/* Start link bring-up on the PHY.
+ *
+ * When autoneg is requested (ep == NULL or ep->autoneg == AUTONEG_ENABLE)
+ * the advertisement register is programmed from the 10/100 half/full
+ * capability bits in BMSR and auto-negotiation is enabled and restarted;
+ * the link timer state machine then enters `arbwait'.  If autoneg will
+ * not restart, or the caller asked for a fixed mode, BMCR is written
+ * with the forced speed/duplex instead and the state machine enters
+ * `ltrywait'.  Either way the 1.2 second link timer is armed before
+ * returning.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
+                                             void __iomem *tregs,
+                                             struct ethtool_cmd *ep)
+{
+       int timeout;
+
+       /* Read all of the registers we are interested in now. */
+       hp->sw_bmsr      = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+       hp->sw_bmcr      = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+       hp->sw_physid1   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
+       hp->sw_physid2   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
+
+       /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
+
+       hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
+       if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+               /* Advertise everything we can support. */
+               if (hp->sw_bmsr & BMSR_10HALF)
+                       hp->sw_advertise |= (ADVERTISE_10HALF);
+               else
+                       hp->sw_advertise &= ~(ADVERTISE_10HALF);
+
+               if (hp->sw_bmsr & BMSR_10FULL)
+                       hp->sw_advertise |= (ADVERTISE_10FULL);
+               else
+                       hp->sw_advertise &= ~(ADVERTISE_10FULL);
+               if (hp->sw_bmsr & BMSR_100HALF)
+                       hp->sw_advertise |= (ADVERTISE_100HALF);
+               else
+                       hp->sw_advertise &= ~(ADVERTISE_100HALF);
+               if (hp->sw_bmsr & BMSR_100FULL)
+                       hp->sw_advertise |= (ADVERTISE_100FULL);
+               else
+                       hp->sw_advertise &= ~(ADVERTISE_100FULL);
+               happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
+
+               /* XXX Currently no Happy Meal cards I know off support 100BaseT4,
+                * XXX and this is because the DP83840 does not support it, changes
+                * XXX would need to be made to the tx/rx logic in the driver as well
+                * XXX so I completely skip checking for it in the BMSR for now.
+                */
+
+#ifdef AUTO_SWITCH_DEBUG
+               ASD(("%s: Advertising [ ", hp->dev->name));
+               if (hp->sw_advertise & ADVERTISE_10HALF)
+                       ASD(("10H "));
+               if (hp->sw_advertise & ADVERTISE_10FULL)
+                       ASD(("10F "));
+               if (hp->sw_advertise & ADVERTISE_100HALF)
+                       ASD(("100H "));
+               if (hp->sw_advertise & ADVERTISE_100FULL)
+                       ASD(("100F "));
+#endif
+
+               /* Enable Auto-Negotiation, this is usually on already... */
+               hp->sw_bmcr |= BMCR_ANENABLE;
+               happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+               /* Restart it to make sure it is going. */
+               hp->sw_bmcr |= BMCR_ANRESTART;
+               happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+               /* BMCR_ANRESTART self clears when the process has begun. */
+
+               timeout = 64;  /* More than enough. */
+               while (--timeout) {
+                       hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+                       if (!(hp->sw_bmcr & BMCR_ANRESTART))
+                               break; /* got it. */
+                       udelay(10);
+               }
+               if (!timeout) {
+                       printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
+                              "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
+                       printk(KERN_NOTICE "%s: Performing force link detection.\n",
+                              hp->dev->name);
+                       goto force_link;
+               } else {
+                       hp->timer_state = arbwait;
+               }
+       } else {
+force_link:
+               /* Force the link up, trying first a particular mode.
+                * Either we are here at the request of ethtool or
+                * because the Happy Meal would not start to autoneg.
+                */
+
+               /* Disable auto-negotiation in BMCR, enable the duplex and
+                * speed setting, init the timer state machine, and fire it off.
+                */
+               if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
+                       /* Only reachable via the goto above (autoneg would
+                        * not restart): default to forcing 100Mbit.
+                        */
+                       hp->sw_bmcr = BMCR_SPEED100;
+               } else {
+                       if (ethtool_cmd_speed(ep) == SPEED_100)
+                               hp->sw_bmcr = BMCR_SPEED100;
+                       else
+                               hp->sw_bmcr = 0;
+                       if (ep->duplex == DUPLEX_FULL)
+                               hp->sw_bmcr |= BMCR_FULLDPLX;
+               }
+               happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+               if (!is_lucent_phy(hp)) {
+                       /* OK, seems we need do disable the transceiver for the first
+                        * tick to make sure we get an accurate link state at the
+                        * second tick.
+                        */
+                       hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
+                                                              DP83840_CSCONFIG);
+                       hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
+                       happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
+                                             hp->sw_csconfig);
+               }
+               hp->timer_state = ltrywait;
+       }
+
+       hp->timer_ticks = 0;
+       hp->happy_timer.expires = jiffies + (12 * HZ)/10;  /* 1.2 sec. */
+       hp->happy_timer.data = (unsigned long) hp;
+       hp->happy_timer.function = happy_meal_timer;
+       add_timer(&hp->happy_timer);
+}
+
+/* hp->happy_lock must be held */
+static int happy_meal_init(struct happy_meal *hp)
+{
+       void __iomem *gregs        = hp->gregs;
+       void __iomem *etxregs      = hp->etxregs;
+       void __iomem *erxregs      = hp->erxregs;
+       void __iomem *bregs        = hp->bigmacregs;
+       void __iomem *tregs        = hp->tcvregs;
+       u32 regtmp, rxcfg;
+       unsigned char *e = &hp->dev->dev_addr[0];
+
+       /* If auto-negotiation timer is running, kill it. */
+       del_timer(&hp->happy_timer);
+
+       HMD(("happy_meal_init: happy_flags[%08x] ",
+            hp->happy_flags));
+       if (!(hp->happy_flags & HFLAG_INIT)) {
+               HMD(("set HFLAG_INIT, "));
+               hp->happy_flags |= HFLAG_INIT;
+               happy_meal_get_counters(hp, bregs);
+       }
+
+       /* Stop polling. */
+       HMD(("to happy_meal_poll_stop\n"));
+       happy_meal_poll_stop(hp, tregs);
+
+       /* Stop transmitter and receiver. */
+       HMD(("happy_meal_init: to happy_meal_stop\n"));
+       happy_meal_stop(hp, gregs);
+
+       /* Alloc and reset the tx/rx descriptor chains. */
+       HMD(("happy_meal_init: to happy_meal_init_rings\n"));
+       happy_meal_init_rings(hp);
+
+       /* Shut up the MIF. */
+       HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
+            hme_read32(hp, tregs + TCVR_IMASK)));
+       hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
+
+       /* See if we can enable the MIF frame on this card to speak to the DP83840. */
+       if (hp->happy_flags & HFLAG_FENABLE) {
+               HMD(("use frame old[%08x], ",
+                    hme_read32(hp, tregs + TCVR_CFG)));
+               hme_write32(hp, tregs + TCVR_CFG,
+                           hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
+       } else {
+               HMD(("use bitbang old[%08x], ",
+                    hme_read32(hp, tregs + TCVR_CFG)));
+               hme_write32(hp, tregs + TCVR_CFG,
+                           hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
+       }
+
+       /* Check the state of the transceiver. */
+       HMD(("to happy_meal_transceiver_check\n"));
+       happy_meal_transceiver_check(hp, tregs);
+
+       /* Put the Big Mac into a sane state. */
+       HMD(("happy_meal_init: "));
+       switch(hp->tcvr_type) {
+       case none:
+               /* Cannot operate if we don't know the transceiver type! */
+               HMD(("AAIEEE no transceiver type, EAGAIN"));
+               return -EAGAIN;
+
+       case internal:
+               /* Using the MII buffers. */
+               HMD(("internal, using MII, "));
+               hme_write32(hp, bregs + BMAC_XIFCFG, 0);
+               break;
+
+       case external:
+               /* Not using the MII, disable it. */
+               HMD(("external, disable MII, "));
+               hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
+               break;
+       }
+
+       if (happy_meal_tcvr_reset(hp, tregs))
+               return -EAGAIN;
+
+       /* Reset the Happy Meal Big Mac transceiver and the receiver. */
+       HMD(("tx/rx reset, "));
+       happy_meal_tx_reset(hp, bregs);
+       happy_meal_rx_reset(hp, bregs);
+
+       /* Set jam size and inter-packet gaps to reasonable defaults. */
+       HMD(("jsize/ipg1/ipg2, "));
+       hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
+       hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
+       hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
+
+       /* Load up the MAC address and random seed. */
+       HMD(("rseed/macaddr, "));
+
+       /* The docs recommend to use the 10LSB of our MAC here. */
+       hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
+
+       hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
+       hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
+       hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
+
+       HMD(("htable, "));
+       if ((hp->dev->flags & IFF_ALLMULTI) ||
+           (netdev_mc_count(hp->dev) > 64)) {
+               hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
+               hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
+               hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
+               hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
+       } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
+               u16 hash_table[4];
+               struct netdev_hw_addr *ha;
+               u32 crc;
+
+               memset(hash_table, 0, sizeof(hash_table));
+               netdev_for_each_mc_addr(ha, hp->dev) {
+                       crc = ether_crc_le(6, ha->addr);
+                       crc >>= 26;
+                       hash_table[crc >> 4] |= 1 << (crc & 0xf);
+               }
+               hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
+               hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
+               hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
+               hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
+       } else {
+               hme_write32(hp, bregs + BMAC_HTABLE3, 0);
+               hme_write32(hp, bregs + BMAC_HTABLE2, 0);
+               hme_write32(hp, bregs + BMAC_HTABLE1, 0);
+               hme_write32(hp, bregs + BMAC_HTABLE0, 0);
+       }
+
+       /* Set the RX and TX ring ptrs. */
+       HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
+            ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
+            ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
+       hme_write32(hp, erxregs + ERX_RING,
+                   ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
+       hme_write32(hp, etxregs + ETX_RING,
+                   ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
+
+       /* Parity issues in the ERX unit of some HME revisions can cause some
+        * registers to not be written unless their parity is even.  Detect such
+        * lost writes and simply rewrite with a low bit set (which will be ignored
+        * since the rxring needs to be 2K aligned).
+        */
+       if (hme_read32(hp, erxregs + ERX_RING) !=
+           ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
+               hme_write32(hp, erxregs + ERX_RING,
+                           ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
+                           | 0x4);
+
+       /* Set the supported burst sizes. */
+       HMD(("happy_meal_init: old[%08x] bursts<",
+            hme_read32(hp, gregs + GREG_CFG)));
+
+#ifndef CONFIG_SPARC
+       /* It is always PCI and can handle 64byte bursts. */
+       hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
+#else
+       if ((hp->happy_bursts & DMA_BURST64) &&
+           ((hp->happy_flags & HFLAG_PCI) != 0
+#ifdef CONFIG_SBUS
+            || sbus_can_burst64()
+#endif
+            || 0)) {
+               u32 gcfg = GREG_CFG_BURST64;
+
+               /* I have no idea if I should set the extended
+                * transfer mode bit for Cheerio, so for now I
+                * do not.  -DaveM
+                */
+#ifdef CONFIG_SBUS
+               if ((hp->happy_flags & HFLAG_PCI) == 0) {
+                       struct platform_device *op = hp->happy_dev;
+                       if (sbus_can_dma_64bit()) {
+                               sbus_set_sbus64(&op->dev,
+                                               hp->happy_bursts);
+                               gcfg |= GREG_CFG_64BIT;
+                       }
+               }
+#endif
+
+               HMD(("64>"));
+               hme_write32(hp, gregs + GREG_CFG, gcfg);
+       } else if (hp->happy_bursts & DMA_BURST32) {
+               HMD(("32>"));
+               hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
+       } else if (hp->happy_bursts & DMA_BURST16) {
+               HMD(("16>"));
+               hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
+       } else {
+               HMD(("XXX>"));
+               hme_write32(hp, gregs + GREG_CFG, 0);
+       }
+#endif /* CONFIG_SPARC */
+
+       /* Turn off interrupts we do not want to hear. */
+       HMD((", enable global interrupts, "));
+       hme_write32(hp, gregs + GREG_IMASK,
+                   (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
+                    GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
+
+       /* Set the transmit ring buffer size. */
+       HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
+            hme_read32(hp, etxregs + ETX_RSIZE)));
+       hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
+
+       /* Enable transmitter DVMA. */
+       HMD(("tx dma enable old[%08x], ",
+            hme_read32(hp, etxregs + ETX_CFG)));
+       hme_write32(hp, etxregs + ETX_CFG,
+                   hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
+
+       /* This chip really rots, for the receiver sometimes when you
+        * write to its control registers not all the bits get there
+        * properly.  I cannot think of a sane way to provide complete
+        * coverage for this hardware bug yet.
+        */
+       HMD(("erx regs bug old[%08x]\n",
+            hme_read32(hp, erxregs + ERX_CFG)));
+       hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
+       regtmp = hme_read32(hp, erxregs + ERX_CFG);
+       hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
+       if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
+               printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
+               printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
+                      ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
+               /* XXX Should return failure here... */
+       }
+
+       /* Enable Big Mac hash table filter. */
+       HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
+            hme_read32(hp, bregs + BMAC_RXCFG)));
+       rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
+       if (hp->dev->flags & IFF_PROMISC)
+               rxcfg |= BIGMAC_RXCFG_PMISC;
+       hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
+
+       /* Let the bits settle in the chip. */
+       udelay(10);
+
+       /* Ok, configure the Big Mac transmitter. */
+       HMD(("BIGMAC init, "));
+       regtmp = 0;
+       if (hp->happy_flags & HFLAG_FULL)
+               regtmp |= BIGMAC_TXCFG_FULLDPLX;
+
+       /* Don't turn on the "don't give up" bit for now.  It could cause hme
+        * to deadlock with the PHY if a Jabber occurs.
+        */
+       hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);
+
+       /* Give up after 16 TX attempts. */
+       hme_write32(hp, bregs + BMAC_ALIMIT, 16);
+
+       /* Enable the output drivers no matter what. */
+       regtmp = BIGMAC_XCFG_ODENABLE;
+
+       /* If card can do lance mode, enable it. */
+       if (hp->happy_flags & HFLAG_LANCE)
+               regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
+
+       /* Disable the MII buffers if using external transceiver. */
+       if (hp->tcvr_type == external)
+               regtmp |= BIGMAC_XCFG_MIIDISAB;
+
+       HMD(("XIF config old[%08x], ",
+            hme_read32(hp, bregs + BMAC_XIFCFG)));
+       hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
+
+       /* Start things up. */
+       HMD(("tx old[%08x] and rx [%08x] ON!\n",
+            hme_read32(hp, bregs + BMAC_TXCFG),
+            hme_read32(hp, bregs + BMAC_RXCFG)));
+
+       /* Set larger TX/RX size to allow for 802.1q */
+       hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
+       hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
+
+       hme_write32(hp, bregs + BMAC_TXCFG,
+                   hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
+       hme_write32(hp, bregs + BMAC_RXCFG,
+                   hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
+
+       /* Get the autonegotiation started, and the watch timer ticking. */
+       happy_meal_begin_auto_negotiation(hp, tregs, NULL);
+
+       /* Success. */
+       return 0;
+}
+
+/* Prime the PHY's autonegotiation advertisement register with every
+ * speed/duplex mode the link partner reports we can support, without
+ * actually (re)starting autonegotiation.  Called once at probe time.
+ *
+ * hp->happy_lock must be held */
+static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
+{
+       void __iomem *tregs     = hp->tcvregs;
+       void __iomem *bregs     = hp->bigmacregs;
+       void __iomem *gregs     = hp->gregs;
+
+       /* Quiesce the chip and mask all transceiver interrupts before
+        * poking at the MIF configuration.
+        */
+       happy_meal_stop(hp, gregs);
+       hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
+       /* Select serial (BENABLE clear) vs. bit-bang MIF access depending
+        * on whether frame mode is enabled for this board.
+        */
+       if (hp->happy_flags & HFLAG_FENABLE)
+               hme_write32(hp, tregs + TCVR_CFG,
+                           hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
+       else
+               hme_write32(hp, tregs + TCVR_CFG,
+                           hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
+       /* Probe for internal/external transceiver, then route the XIF
+        * (MII buffers) accordingly; bail out if no PHY was found.
+        */
+       happy_meal_transceiver_check(hp, tregs);
+       switch(hp->tcvr_type) {
+       case none:
+               return;
+       case internal:
+               hme_write32(hp, bregs + BMAC_XIFCFG, 0);
+               break;
+       case external:
+               hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
+               break;
+       }
+       if (happy_meal_tcvr_reset(hp, tregs))
+               return;
+
+       /* Latch PHY registers as of now. */
+       hp->sw_bmsr      = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
+       hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
+
+       /* Advertise everything we can support. */
+       if (hp->sw_bmsr & BMSR_10HALF)
+               hp->sw_advertise |= (ADVERTISE_10HALF);
+       else
+               hp->sw_advertise &= ~(ADVERTISE_10HALF);
+
+       if (hp->sw_bmsr & BMSR_10FULL)
+               hp->sw_advertise |= (ADVERTISE_10FULL);
+       else
+               hp->sw_advertise &= ~(ADVERTISE_10FULL);
+       if (hp->sw_bmsr & BMSR_100HALF)
+               hp->sw_advertise |= (ADVERTISE_100HALF);
+       else
+               hp->sw_advertise &= ~(ADVERTISE_100HALF);
+       if (hp->sw_bmsr & BMSR_100FULL)
+               hp->sw_advertise |= (ADVERTISE_100FULL);
+       else
+               hp->sw_advertise &= ~(ADVERTISE_100FULL);
+
+       /* Update the PHY advertisement register. */
+       happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
+}
+
+/* Once status is latched (by happy_meal_interrupt) it is cleared by
+ * the hardware, so we cannot re-read it and get a correct value.
+ *
+ * hp->happy_lock must be held
+ */
+static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
+{
+       int reset = 0;
+
+       /* Only print messages for non-counter related interrupts. */
+       if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
+                     GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
+                     GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
+                     GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
+                     GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
+                     GREG_STAT_SLVPERR))
+               printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
+                      hp->dev->name, status);
+
+       if (status & GREG_STAT_RFIFOVF) {
+               /* Receive FIFO overflow is harmless and the hardware will take
+                  care of it, just some packets are lost. Who cares. */
+               printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
+       }
+
+       if (status & GREG_STAT_STSTERR) {
+               /* BigMAC SQE link test failed. */
+               printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
+               reset = 1;
+       }
+
+       if (status & GREG_STAT_TFIFO_UND) {
+               /* Transmit FIFO underrun, again DMA error likely. */
+               printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
+                      hp->dev->name);
+               reset = 1;
+       }
+
+       if (status & GREG_STAT_MAXPKTERR) {
+               /* Driver error, tried to transmit something larger
+                * than ethernet max mtu.
+                */
+               printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
+               reset = 1;
+       }
+
+       if (status & GREG_STAT_NORXD) {
+               /* This is harmless, it just means the system is
+                * quite loaded and the incoming packet rate was
+                * faster than the interrupt handler could keep up
+                * with.
+                */
+               printk(KERN_INFO "%s: Happy Meal out of receive "
+                      "descriptors, packet dropped.\n",
+                      hp->dev->name);
+       }
+
+       if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
+               /* All sorts of DMA receive errors. */
+               printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
+               if (status & GREG_STAT_RXERR)
+                       printk("GenericError ");
+               if (status & GREG_STAT_RXPERR)
+                       printk("ParityError ");
+               if (status & GREG_STAT_RXTERR)
+                       printk("RxTagBotch ");
+               printk("]\n");
+               reset = 1;
+       }
+
+       if (status & GREG_STAT_EOPERR) {
+               /* Driver bug, didn't set EOP bit in tx descriptor given
+                * to the happy meal.
+                */
+               printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
+                      hp->dev->name);
+               reset = 1;
+       }
+
+       if (status & GREG_STAT_MIFIRQ) {
+               /* MIF signalled an interrupt, were we polling it? */
+               printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
+       }
+
+       if (status &
+           (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
+               /* All sorts of transmit DMA errors. */
+               printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
+               if (status & GREG_STAT_TXEACK)
+                       printk("GenericError ");
+               if (status & GREG_STAT_TXLERR)
+                       printk("LateError ");
+               if (status & GREG_STAT_TXPERR)
+                       printk("ParityErro ");
+               if (status & GREG_STAT_TXTERR)
+                       printk("TagBotch ");
+               printk("]\n");
+               reset = 1;
+       }
+
+       if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
+               /* Bus or parity error when cpu accessed happy meal registers
+                * or it's internal FIFO's.  Should never see this.
+                */
+               printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
+                      hp->dev->name,
+                      (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
+               reset = 1;
+       }
+
+       if (reset) {
+               printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
+               happy_meal_init(hp);
+               return 1;
+       }
+       return 0;
+}
+
+/* Handle a MIF (link status change) interrupt: latch the negotiated
+ * link partner abilities and reprogram the PHY for the fastest mode
+ * both ends support, then stop MIF polling.
+ *
+ * hp->happy_lock must be held */
+static void happy_meal_mif_interrupt(struct happy_meal *hp)
+{
+       void __iomem *tregs = hp->tcvregs;
+
+       printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
+       hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
+       hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
+
+       /* Use the fastest transmission protocol possible. */
+       if (hp->sw_lpa & LPA_100FULL) {
+               printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name);
+               hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
+       } else if (hp->sw_lpa & LPA_100HALF) {
+               printk(KERN_INFO "%s: Switching to 100MBps at half duplex.", hp->dev->name);
+               hp->sw_bmcr |= BMCR_SPEED100;
+       } else if (hp->sw_lpa & LPA_10FULL) {
+               printk(KERN_INFO "%s: Switching to 10MBps at full duplex.", hp->dev->name);
+               hp->sw_bmcr |= BMCR_FULLDPLX;
+       } else {
+               printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name);
+       }
+       happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
+
+       /* Finally stop polling and shut up the MIF. */
+       happy_meal_poll_stop(hp, tregs);
+}
+
+/* TX-path debug trace: compiles away to nothing unless TXDEBUG is set. */
+#ifdef TXDEBUG
+#define TXD(x) printk x
+#else
+#define TXD(x)
+#endif
+
+/* Reclaim completed transmit descriptors: walk the ring from tx_old to
+ * tx_new, stop at the first descriptor the chip still owns, unmap each
+ * completed buffer, free its skb, and bump the tx counters.  Wakes the
+ * queue when enough ring slots have been freed.
+ *
+ * hp->happy_lock must be held */
+static void happy_meal_tx(struct happy_meal *hp)
+{
+       struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
+       struct happy_meal_txd *this;
+       struct net_device *dev = hp->dev;
+       int elem;
+
+       elem = hp->tx_old;
+       TXD(("TX<"));
+       while (elem != hp->tx_new) {
+               struct sk_buff *skb;
+               u32 flags, dma_addr, dma_len;
+               int frag;
+
+               TXD(("[%d]", elem));
+               this = &txbase[elem];
+               flags = hme_read_desc32(hp, &this->tx_flags);
+               if (flags & TXFLAG_OWN)
+                       break;
+               skb = hp->tx_skbs[elem];
+               if (skb_shinfo(skb)->nr_frags) {
+                       int last;
+
+                       /* Multi-fragment skb: only reclaim once the LAST
+                        * fragment's descriptor has been released by the
+                        * chip, otherwise part of the skb is still in DMA.
+                        */
+                       last = elem + skb_shinfo(skb)->nr_frags;
+                       last &= (TX_RING_SIZE - 1);
+                       flags = hme_read_desc32(hp, &txbase[last].tx_flags);
+                       if (flags & TXFLAG_OWN)
+                               break;
+               }
+               hp->tx_skbs[elem] = NULL;
+               hp->net_stats.tx_bytes += skb->len;
+
+               /* Unmap the head (frag 0, mapped with dma_map_single) and
+                * every page fragment (mapped with dma_map_page).
+                */
+               for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
+                       dma_addr = hme_read_desc32(hp, &this->tx_addr);
+                       dma_len = hme_read_desc32(hp, &this->tx_flags);
+
+                       dma_len &= TXFLAG_SIZE;
+                       if (!frag)
+                               dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
+                       else
+                               dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
+
+                       elem = NEXT_TX(elem);
+                       this = &txbase[elem];
+               }
+
+               dev_kfree_skb_irq(skb);
+               hp->net_stats.tx_packets++;
+       }
+       hp->tx_old = elem;
+       TXD((">"));
+
+       if (netif_queue_stopped(dev) &&
+           TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
+               netif_wake_queue(dev);
+}
+
+/* RX-path debug trace: compiles away to nothing unless RXDEBUG is set. */
+#ifdef RXDEBUG
+#define RXD(x) printk x
+#else
+#define RXD(x)
+#endif
+
+/* Originally I used to handle the allocation failure by just giving back just
+ * that one ring buffer to the happy meal.  Problem is that usually when that
+ * condition is triggered, the happy meal expects you to do something reasonable
+ * with all of the packets it has DMA'd in.  So now I just drop the entire
+ * ring when we cannot get a new skb and give them all back to the happy meal,
+ * maybe things will be "happier" now.
+ *
+ * hp->happy_lock must be held
+ */
+static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
+{
+       struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
+       struct happy_meal_rxd *this;
+       int elem = hp->rx_new, drops = 0;
+       u32 flags;
+
+       RXD(("RX<"));
+       this = &rxbase[elem];
+       /* Process descriptors until we hit one the chip still owns. */
+       while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
+               struct sk_buff *skb;
+               /* Upper 16 bits of rx_flags hold the frame length, low bits
+                * hold the hardware checksum.
+                */
+               int len = flags >> 16;
+               u16 csum = flags & RXFLAG_CSUM;
+               u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
+
+               RXD(("[%d ", elem));
+
+               /* Check for errors. */
+               if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
+                       RXD(("ERR(%08x)]", flags));
+                       hp->net_stats.rx_errors++;
+                       if (len < ETH_ZLEN)
+                               hp->net_stats.rx_length_errors++;
+                       if (len & (RXFLAG_OVERFLOW >> 16)) {
+                               hp->net_stats.rx_over_errors++;
+                               hp->net_stats.rx_fifo_errors++;
+                       }
+
+                       /* Return it to the Happy meal. */
+       drop_it:
+                       hp->net_stats.rx_dropped++;
+                       hme_write_rxd(hp, this,
+                                     (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
+                                     dma_addr);
+                       goto next;
+               }
+               skb = hp->rx_skbs[elem];
+               if (len > RX_COPY_THRESHOLD) {
+                       /* Large frame: hand the ring skb up the stack and
+                        * replace the ring slot with a freshly allocated one.
+                        */
+                       struct sk_buff *new_skb;
+
+                       /* Now refill the entry, if we can. */
+                       new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+                       if (new_skb == NULL) {
+                               drops++;
+                               goto drop_it;
+                       }
+                       dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
+                       hp->rx_skbs[elem] = new_skb;
+                       new_skb->dev = dev;
+                       skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+                       hme_write_rxd(hp, this,
+                                     (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
+                                     dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
+                                                    DMA_FROM_DEVICE));
+                       skb_reserve(new_skb, RX_OFFSET);
+
+                       /* Trim the original skb for the netif. */
+                       skb_trim(skb, len);
+               } else {
+                       /* Small frame: copy into a fresh skb and reuse the
+                        * DMA buffer in place (cheaper than remapping).
+                        */
+                       struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
+
+                       if (copy_skb == NULL) {
+                               drops++;
+                               goto drop_it;
+                       }
+
+                       skb_reserve(copy_skb, 2);
+                       skb_put(copy_skb, len);
+                       dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+                       skb_copy_from_linear_data(skb, copy_skb->data, len);
+                       dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
+                       /* Reuse original ring buffer. */
+                       hme_write_rxd(hp, this,
+                                     (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
+                                     dma_addr);
+
+                       skb = copy_skb;
+               }
+
+               /* This card is _fucking_ hot... */
+               skb->csum = csum_unfold(~(__force __sum16)htons(csum));
+               skb->ip_summed = CHECKSUM_COMPLETE;
+
+               RXD(("len=%d csum=%4x]", len, csum));
+               skb->protocol = eth_type_trans(skb, dev);
+               netif_rx(skb);
+
+               hp->net_stats.rx_packets++;
+               hp->net_stats.rx_bytes += len;
+       next:
+               elem = NEXT_RX(elem);
+               this = &rxbase[elem];
+       }
+       hp->rx_new = elem;
+       if (drops)
+               printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
+       RXD((">"));
+}
+
+/* Top-level interrupt handler for single-port (non-Quattro) cards.
+ * Reads the latched status word once (the hardware clears it on read)
+ * and dispatches to the error/MIF/TX/RX sub-handlers under happy_lock.
+ */
+static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct happy_meal *hp  = netdev_priv(dev);
+       u32 happy_status       = hme_read32(hp, hp->gregs + GREG_STAT);
+
+       HMD(("happy_meal_interrupt: status=%08x ", happy_status));
+
+       spin_lock(&hp->happy_lock);
+
+       if (happy_status & GREG_STAT_ERRORS) {
+               HMD(("ERRORS "));
+               /* A nonzero return means the chip was reset; skip the
+                * remaining handlers this time around.
+                */
+               if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
+                       goto out;
+       }
+
+       if (happy_status & GREG_STAT_MIFIRQ) {
+               HMD(("MIFIRQ "));
+               happy_meal_mif_interrupt(hp);
+       }
+
+       if (happy_status & GREG_STAT_TXALL) {
+               HMD(("TXALL "));
+               happy_meal_tx(hp);
+       }
+
+       if (happy_status & GREG_STAT_RXTOHOST) {
+               HMD(("RXTOHOST "));
+               happy_meal_rx(hp, dev);
+       }
+
+       HMD(("done\n"));
+out:
+       spin_unlock(&hp->happy_lock);
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_SBUS
+/* Shared interrupt handler for SBUS Quattro (QFE) cards: all four ports
+ * raise the same IRQ, so poll each port's status and run the same
+ * error/MIF/TX/RX dispatch as happy_meal_interrupt() per port.
+ */
+static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
+{
+       struct quattro *qp = (struct quattro *) cookie;
+       int i;
+
+       for (i = 0; i < 4; i++) {
+               struct net_device *dev = qp->happy_meals[i];
+               struct happy_meal *hp  = netdev_priv(dev);
+               u32 happy_status       = hme_read32(hp, hp->gregs + GREG_STAT);
+
+               HMD(("quattro_interrupt: status=%08x ", happy_status));
+
+               /* Skip ports with nothing pending without taking the lock. */
+               if (!(happy_status & (GREG_STAT_ERRORS |
+                                     GREG_STAT_MIFIRQ |
+                                     GREG_STAT_TXALL |
+                                     GREG_STAT_RXTOHOST)))
+                       continue;
+
+               spin_lock(&hp->happy_lock);
+
+               if (happy_status & GREG_STAT_ERRORS) {
+                       HMD(("ERRORS "));
+                       if (happy_meal_is_not_so_happy(hp, happy_status))
+                               goto next;
+               }
+
+               if (happy_status & GREG_STAT_MIFIRQ) {
+                       HMD(("MIFIRQ "));
+                       happy_meal_mif_interrupt(hp);
+               }
+
+               if (happy_status & GREG_STAT_TXALL) {
+                       HMD(("TXALL "));
+                       happy_meal_tx(hp);
+               }
+
+               if (happy_status & GREG_STAT_RXTOHOST) {
+                       HMD(("RXTOHOST "));
+                       happy_meal_rx(hp, dev);
+               }
+
+       next:
+               spin_unlock(&hp->happy_lock);
+       }
+       HMD(("done\n"));
+
+       return IRQ_HANDLED;
+}
+#endif
+
+/* net_device_ops .ndo_open: request the IRQ (unless this is an SBUS
+ * Quattro port, whose shared IRQ was registered at probe time) and
+ * bring the chip up via happy_meal_init().
+ */
+static int happy_meal_open(struct net_device *dev)
+{
+       struct happy_meal *hp = netdev_priv(dev);
+       int res;
+
+       HMD(("happy_meal_open: "));
+
+       /* On SBUS Quattro QFE cards, all hme interrupts are concentrated
+        * into a single source which we register handling at probe time.
+        */
+       if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
+               if (request_irq(dev->irq, happy_meal_interrupt,
+                               IRQF_SHARED, dev->name, (void *)dev)) {
+                       HMD(("EAGAIN\n"));
+                       printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
+                              dev->irq);
+
+                       return -EAGAIN;
+               }
+       }
+
+       HMD(("to happy_meal_init\n"));
+
+       spin_lock_irq(&hp->happy_lock);
+       res = happy_meal_init(hp);
+       spin_unlock_irq(&hp->happy_lock);
+
+       /* Undo the request_irq above if init failed, mirroring the
+        * Quattro exception.
+        */
+       if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
+               free_irq(dev->irq, dev);
+       return res;
+}
+
+/* net_device_ops .ndo_stop: quiesce the chip, release ring buffers,
+ * stop the autonegotiation timer, and free the IRQ (except for SBUS
+ * Quattro ports, whose shared IRQ stays registered for the card's life).
+ */
+static int happy_meal_close(struct net_device *dev)
+{
+       struct happy_meal *hp = netdev_priv(dev);
+
+       spin_lock_irq(&hp->happy_lock);
+       happy_meal_stop(hp, hp->gregs);
+       happy_meal_clean_rings(hp);
+
+       /* If auto-negotiation timer is running, kill it. */
+       del_timer(&hp->happy_timer);
+
+       spin_unlock_irq(&hp->happy_lock);
+
+       /* On Quattro QFE cards, all hme interrupts are concentrated
+        * into a single source which we register handling at probe
+        * time and never unregister.
+        */
+       if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
+               free_irq(dev->irq, dev);
+
+       return 0;
+}
+
+/* Start-xmit debug trace: compiles away to nothing unless SXDEBUG is set. */
+#ifdef SXDEBUG
+#define SXD(x) printk x
+#else
+#define SXD(x)
+#endif
+
+/* net_device_ops .ndo_tx_timeout: dump diagnostic state, reset and
+ * re-initialize the chip, then let the stack resume transmission.
+ */
+static void happy_meal_tx_timeout(struct net_device *dev)
+{
+       struct happy_meal *hp = netdev_priv(dev);
+
+       printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+       tx_dump_log();
+       printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
+               hme_read32(hp, hp->gregs + GREG_STAT),
+               hme_read32(hp, hp->etxregs + ETX_CFG),
+               hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));
+
+       spin_lock_irq(&hp->happy_lock);
+       happy_meal_init(hp);
+       spin_unlock_irq(&hp->happy_lock);
+
+       netif_wake_queue(dev);
+}
+
+/* net_device_ops .ndo_start_xmit: map the skb (head plus any page
+ * fragments) for DMA, fill transmit descriptors, and kick the DMA
+ * engine.  For fragmented skbs the head descriptor is handed to the
+ * chip LAST so the device never sees a partially built chain.
+ */
+static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
+                                        struct net_device *dev)
+{
+       struct happy_meal *hp = netdev_priv(dev);
+       int entry;
+       u32 tx_flags;
+
+       /* Encode hardware checksum offload parameters into the descriptor
+        * flags when the stack asks for partial checksumming.
+        */
+       tx_flags = TXFLAG_OWN;
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               const u32 csum_start_off = skb_checksum_start_offset(skb);
+               const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
+
+               tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
+                           ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
+                           ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
+       }
+
+       spin_lock_irq(&hp->happy_lock);
+
+       if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
+               netif_stop_queue(dev);
+               spin_unlock_irq(&hp->happy_lock);
+               printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
+                      dev->name);
+               return NETDEV_TX_BUSY;
+       }
+
+       entry = hp->tx_new;
+       /* NOTE(review): `len` is not declared in this scope -- this SXD
+        * trace would fail to compile if SXDEBUG were defined.  TODO:
+        * confirm and switch to skb->len.
+        */
+       SXD(("SX<l[%d]e[%d]>", len, entry));
+       hp->tx_skbs[entry] = skb;
+
+       if (skb_shinfo(skb)->nr_frags == 0) {
+               /* Linear skb: a single SOP+EOP descriptor suffices. */
+               u32 mapping, len;
+
+               len = skb->len;
+               mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
+               tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
+               hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
+                             (tx_flags | (len & TXFLAG_SIZE)),
+                             mapping);
+               entry = NEXT_TX(entry);
+       } else {
+               u32 first_len, first_mapping;
+               int frag, first_entry = entry;
+
+               /* We must give this initial chunk to the device last.
+                * Otherwise we could race with the device.
+                */
+               first_len = skb_headlen(skb);
+               first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
+                                              DMA_TO_DEVICE);
+               entry = NEXT_TX(entry);
+
+               for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+                       skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
+                       u32 len, mapping, this_txflags;
+
+                       len = this_frag->size;
+                       mapping = dma_map_page(hp->dma_dev, this_frag->page,
+                                              this_frag->page_offset, len,
+                                              DMA_TO_DEVICE);
+                       this_txflags = tx_flags;
+                       if (frag == skb_shinfo(skb)->nr_frags - 1)
+                               this_txflags |= TXFLAG_EOP;
+                       hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
+                                     (this_txflags | (len & TXFLAG_SIZE)),
+                                     mapping);
+                       entry = NEXT_TX(entry);
+               }
+               /* Now release the head descriptor (with SOP) to the chip. */
+               hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
+                             (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
+                             first_mapping);
+       }
+
+       hp->tx_new = entry;
+
+       /* Stop the queue early when the ring cannot hold a worst-case skb. */
+       if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
+               netif_stop_queue(dev);
+
+       /* Get it going. */
+       hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);
+
+       spin_unlock_irq(&hp->happy_lock);
+
+       tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
+       return NETDEV_TX_OK;
+}
+
+/* net_device_ops .ndo_get_stats: fold the hardware error counters into
+ * hp->net_stats under the lock, then hand the struct back to the stack.
+ */
+static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
+{
+       struct happy_meal *hp = netdev_priv(dev);
+
+       spin_lock_irq(&hp->happy_lock);
+       happy_meal_get_counters(hp, hp->bigmacregs);
+       spin_unlock_irq(&hp->happy_lock);
+
+       return &hp->net_stats;
+}
+
+/* net_device_ops .ndo_set_rx_mode: program the BigMAC receive filter.
+ * ALLMULTI or more than 64 groups opens the hash filter completely;
+ * PROMISC sets the promiscuous bit; otherwise build a 64-bit CRC hash
+ * table from the multicast list (four 16-bit register halves).
+ */
+static void happy_meal_set_multicast(struct net_device *dev)
+{
+       struct happy_meal *hp = netdev_priv(dev);
+       void __iomem *bregs = hp->bigmacregs;
+       struct netdev_hw_addr *ha;
+       u32 crc;
+
+       spin_lock_irq(&hp->happy_lock);
+
+       if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
+               hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
+               hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
+               hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
+               hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
+       } else if (dev->flags & IFF_PROMISC) {
+               hme_write32(hp, bregs + BMAC_RXCFG,
+                           hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
+       } else {
+               u16 hash_table[4];
+
+               memset(hash_table, 0, sizeof(hash_table));
+               netdev_for_each_mc_addr(ha, dev) {
+                       /* Top 6 bits of the little-endian CRC index one of
+                        * the 64 hash-table bits.
+                        */
+                       crc = ether_crc_le(6, ha->addr);
+                       crc >>= 26;
+                       hash_table[crc >> 4] |= 1 << (crc & 0xf);
+               }
+               hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
+               hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
+               hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
+               hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
+       }
+
+       spin_unlock_irq(&hp->happy_lock);
+}
+
+/* Ethtool support... */
+/* ethtool .get_settings: report supported modes and the current
+ * speed/duplex/autoneg state derived from freshly latched BMCR/LPA
+ * PHY registers.
+ */
+static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct happy_meal *hp = netdev_priv(dev);
+       u32 speed;
+
+       cmd->supported =
+               (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+                SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+                SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
+
+       /* XXX hardcoded stuff for now */
+       cmd->port = PORT_TP; /* XXX no MII support */
+       cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */
+       cmd->phy_address = 0; /* XXX fixed PHYAD */
+
+       /* Record PHY settings. */
+       spin_lock_irq(&hp->happy_lock);
+       hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
+       hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
+       spin_unlock_irq(&hp->happy_lock);
+
+       if (hp->sw_bmcr & BMCR_ANENABLE) {
+               /* Autoneg on: derive speed/duplex from the link partner
+                * ability word.
+                */
+               cmd->autoneg = AUTONEG_ENABLE;
+               speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
+                        SPEED_100 : SPEED_10);
+               if (speed == SPEED_100)
+                       cmd->duplex =
+                               (hp->sw_lpa & (LPA_100FULL)) ?
+                               DUPLEX_FULL : DUPLEX_HALF;
+               else
+                       cmd->duplex =
+                               (hp->sw_lpa & (LPA_10FULL)) ?
+                               DUPLEX_FULL : DUPLEX_HALF;
+       } else {
+               /* Forced mode: BMCR holds the configured speed/duplex. */
+               cmd->autoneg = AUTONEG_DISABLE;
+               speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
+               cmd->duplex =
+                       (hp->sw_bmcr & BMCR_FULLDPLX) ?
+                       DUPLEX_FULL : DUPLEX_HALF;
+       }
+       ethtool_cmd_speed_set(cmd, speed);
+       return 0;
+}
+
+/* ethtool .set_settings: validate the requested autoneg/speed/duplex
+ * combination, then restart link negotiation with the new parameters.
+ * Returns -EINVAL for unsupported combinations.
+ */
+static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct happy_meal *hp = netdev_priv(dev);
+
+       /* Verify the settings we care about. */
+       if (cmd->autoneg != AUTONEG_ENABLE &&
+           cmd->autoneg != AUTONEG_DISABLE)
+               return -EINVAL;
+       if (cmd->autoneg == AUTONEG_DISABLE &&
+           ((ethtool_cmd_speed(cmd) != SPEED_100 &&
+             ethtool_cmd_speed(cmd) != SPEED_10) ||
+            (cmd->duplex != DUPLEX_HALF &&
+             cmd->duplex != DUPLEX_FULL)))
+               return -EINVAL;
+
+       /* Ok, do it to it. */
+       spin_lock_irq(&hp->happy_lock);
+       del_timer(&hp->happy_timer);
+       happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
+       spin_unlock_irq(&hp->happy_lock);
+
+       return 0;
+}
+
+static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+       struct happy_meal *hp = netdev_priv(dev);
+
+       strcpy(info->driver, "sunhme");
+       strcpy(info->version, "2.02");
+       if (hp->happy_flags & HFLAG_PCI) {
+               struct pci_dev *pdev = hp->happy_dev;
+               strcpy(info->bus_info, pci_name(pdev));
+       }
+#ifdef CONFIG_SBUS
+       else {
+               const struct linux_prom_registers *regs;
+               struct platform_device *op = hp->happy_dev;
+               regs = of_get_property(op->dev.of_node, "regs", NULL);
+               if (regs)
+                       sprintf(info->bus_info, "SBUS:%d",
+                               regs->which_io);
+       }
+#endif
+}
+
+/* ethtool link-state query.  Link status lives in the BMSR, so read
+ * MII_BMSR into sw_bmsr before testing BMSR_LSTATUS.  (The previous
+ * code read MII_BMCR into sw_bmcr and then tested a stale sw_bmsr.)
+ */
+static u32 hme_get_link(struct net_device *dev)
+{
+       struct happy_meal *hp = netdev_priv(dev);
+
+       spin_lock_irq(&hp->happy_lock);
+       hp->sw_bmsr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMSR);
+       spin_unlock_irq(&hp->happy_lock);
+
+       return hp->sw_bmsr & BMSR_LSTATUS;
+}
+
+/* ethtool entry points: link-settings get/set, driver info, link state. */
+static const struct ethtool_ops hme_ethtool_ops = {
+       .get_settings           = hme_get_settings,
+       .set_settings           = hme_set_settings,
+       .get_drvinfo            = hme_get_drvinfo,
+       .get_link               = hme_get_link,
+};
+
+static int hme_version_printed;
+
+#ifdef CONFIG_SBUS
+/* Look up (or lazily create) the quattro tracking structure for the
+ * SBUS parent of @child.  One structure is shared by the four ports
+ * of a QFE card.  Returns NULL on allocation failure.
+ */
+static struct quattro * __devinit quattro_sbus_find(struct platform_device *child)
+{
+       struct platform_device *op = to_platform_device(child->dev.parent);
+       struct quattro *qp;
+       int slot;
+
+       /* Reuse the structure if the parent already carries one. */
+       qp = dev_get_drvdata(&op->dev);
+       if (qp)
+               return qp;
+
+       qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
+       if (qp == NULL)
+               return NULL;
+
+       for (slot = 0; slot < 4; slot++)
+               qp->happy_meals[slot] = NULL;
+
+       qp->quattro_dev = child;
+       qp->next = qfe_sbus_list;
+       qfe_sbus_list = qp;
+
+       dev_set_drvdata(&op->dev, qp);
+       return qp;
+}
+
+/* After all quattro cards have been probed, register the shared IRQ
+ * handler for every card whose four ports all probed successfully;
+ * cards with any failed slot are skipped entirely.
+ */
+static int __init quattro_sbus_register_irqs(void)
+{
+       struct quattro *qp;
+
+       for (qp = qfe_sbus_list; qp; qp = qp->next) {
+               struct platform_device *op = qp->quattro_dev;
+               int slot, err;
+
+               /* Only hook up cards with all four slots populated. */
+               for (slot = 0; slot < 4; slot++) {
+                       if (qp->happy_meals[slot] == NULL)
+                               break;
+               }
+               if (slot != 4)
+                       continue;
+
+               err = request_irq(op->archdata.irqs[0],
+                                 quattro_sbus_interrupt,
+                                 IRQF_SHARED, "Quattro", qp);
+               if (err) {
+                       printk(KERN_ERR "Quattro HME: IRQ registration "
+                              "error %d.\n", err);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
+/* Undo quattro_sbus_register_irqs(): release the shared IRQ of every
+ * fully-populated quattro card (partially-probed cards never had one).
+ */
+static void quattro_sbus_free_irqs(void)
+{
+       struct quattro *qp;
+
+       for (qp = qfe_sbus_list; qp; qp = qp->next) {
+               struct platform_device *op = qp->quattro_dev;
+               int slot;
+
+               for (slot = 0; slot < 4; slot++) {
+                       if (qp->happy_meals[slot] == NULL)
+                               break;
+               }
+               if (slot != 4)
+                       continue;
+
+               free_irq(op->archdata.irqs[0], qp);
+       }
+}
+#endif /* CONFIG_SBUS */
+
+#ifdef CONFIG_PCI
+/* Find (or lazily create) the quattro tracking structure keyed by the
+ * PCI bridge above @pdev.  Returns NULL if there is no parent bridge
+ * or allocation fails.
+ */
+static struct quattro * __devinit quattro_pci_find(struct pci_dev *pdev)
+{
+       struct pci_dev *bdev = pdev->bus->self;
+       struct quattro *qp;
+       int slot;
+
+       if (bdev == NULL)
+               return NULL;
+
+       /* One structure per bridge device. */
+       for (qp = qfe_pci_list; qp; qp = qp->next) {
+               struct pci_dev *qpdev = qp->quattro_dev;
+
+               if (qpdev == bdev)
+                       return qp;
+       }
+
+       qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
+       if (qp == NULL)
+               return NULL;
+
+       for (slot = 0; slot < 4; slot++)
+               qp->happy_meals[slot] = NULL;
+
+       qp->quattro_dev = bdev;
+       qp->next = qfe_pci_list;
+       qfe_pci_list = qp;
+
+       /* No range tricks necessary on PCI. */
+       qp->nranges = 0;
+       return qp;
+}
+#endif /* CONFIG_PCI */
+
+/* Standard net_device callbacks; MAC address, MTU and address
+ * validation use the generic ethernet helpers.
+ */
+static const struct net_device_ops hme_netdev_ops = {
+       .ndo_open               = happy_meal_open,
+       .ndo_stop               = happy_meal_close,
+       .ndo_start_xmit         = happy_meal_start_xmit,
+       .ndo_tx_timeout         = happy_meal_tx_timeout,
+       .ndo_get_stats          = happy_meal_get_stats,
+       .ndo_set_multicast_list = happy_meal_set_multicast,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
+#ifdef CONFIG_SBUS
+/* Probe and register one SBUS Happy Meal.  @op is the SBUS device;
+ * @is_qfe is non-zero when this is one port of a Quattro (QFE)
+ * four-port card.  Returns 0 on success or a negative errno.
+ */
+static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
+{
+       struct device_node *dp = op->dev.of_node, *sbus_dp;
+       struct quattro *qp = NULL;
+       struct happy_meal *hp;
+       struct net_device *dev;
+       int i, qfe_slot = -1;
+       int err = -ENODEV;
+
+       sbus_dp = op->dev.parent->of_node;
+
+       /* We can match PCI devices too, do not accept those here. */
+       if (strcmp(sbus_dp->name, "sbus"))
+               return err;
+
+       /* For QFE, claim the first free slot in the card's quattro. */
+       if (is_qfe) {
+               qp = quattro_sbus_find(op);
+               if (qp == NULL)
+                       goto err_out;
+               for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
+                       if (qp->happy_meals[qfe_slot] == NULL)
+                               break;
+               if (qfe_slot == 4)
+                       goto err_out;
+       }
+
+       err = -ENOMEM;
+       dev = alloc_etherdev(sizeof(struct happy_meal));
+       if (!dev)
+               goto err_out;
+       SET_NETDEV_DEV(dev, &op->dev);
+
+       if (hme_version_printed++ == 0)
+               printk(KERN_INFO "%s", version);
+
+       /* If user did not specify a MAC address specifically, use
+        * the Quattro local-mac-address property...
+        */
+       for (i = 0; i < 6; i++) {
+               if (macaddr[i] != 0)
+                       break;
+       }
+       if (i < 6) { /* a mac address was given */
+               for (i = 0; i < 6; i++)
+                       dev->dev_addr[i] = macaddr[i];
+               /* Bump so the next port probed gets a distinct address. */
+               macaddr[5]++;
+       } else {
+               const unsigned char *addr;
+               int len;
+
+               addr = of_get_property(dp, "local-mac-address", &len);
+
+               if (qfe_slot != -1 && addr && len == 6)
+                       memcpy(dev->dev_addr, addr, 6);
+               else
+                       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+       }
+
+       hp = netdev_priv(dev);
+
+       hp->happy_dev = op;
+       hp->dma_dev = &op->dev;
+
+       spin_lock_init(&hp->happy_lock);
+
+       err = -ENODEV;
+       if (qp != NULL) {
+               hp->qfe_parent = qp;
+               hp->qfe_ent = qfe_slot;
+               qp->happy_meals[qfe_slot] = dev;
+       }
+
+       /* Map the five SBUS register banks: global, TX, RX, BIGMAC, TCVR.
+        * NOTE(review): a failure mapping gregs jumps straight to
+        * err_out_free_netdev and so skips the qp->happy_meals[] clearing
+        * done below err_out_iounmap, leaving a stale netdev pointer in
+        * the quattro slot -- verify and fix separately.
+        */
+       hp->gregs = of_ioremap(&op->resource[0], 0,
+                              GREG_REG_SIZE, "HME Global Regs");
+       if (!hp->gregs) {
+               printk(KERN_ERR "happymeal: Cannot map global registers.\n");
+               goto err_out_free_netdev;
+       }
+
+       hp->etxregs = of_ioremap(&op->resource[1], 0,
+                                ETX_REG_SIZE, "HME TX Regs");
+       if (!hp->etxregs) {
+               printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n");
+               goto err_out_iounmap;
+       }
+
+       hp->erxregs = of_ioremap(&op->resource[2], 0,
+                                ERX_REG_SIZE, "HME RX Regs");
+       if (!hp->erxregs) {
+               printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n");
+               goto err_out_iounmap;
+       }
+
+       hp->bigmacregs = of_ioremap(&op->resource[3], 0,
+                                   BMAC_REG_SIZE, "HME BIGMAC Regs");
+       if (!hp->bigmacregs) {
+               printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n");
+               goto err_out_iounmap;
+       }
+
+       hp->tcvregs = of_ioremap(&op->resource[4], 0,
+                                TCVR_REG_SIZE, "HME Tranceiver Regs");
+       if (!hp->tcvregs) {
+               printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n");
+               goto err_out_iounmap;
+       }
+
+       /* Default to rev A0 when the PROM does not state a revision. */
+       hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
+       if (hp->hm_revision == 0xff)
+               hp->hm_revision = 0xa0;
+
+       /* Now enable the feature flags we can. */
+       if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
+               hp->happy_flags = HFLAG_20_21;
+       else if (hp->hm_revision != 0xa0)
+               hp->happy_flags = HFLAG_NOT_A0;
+
+       if (qp != NULL)
+               hp->happy_flags |= HFLAG_QUATTRO;
+
+       /* Get the supported DVMA burst sizes from our Happy SBUS. */
+       hp->happy_bursts = of_getintprop_default(sbus_dp,
+                                                "burst-sizes", 0x00);
+
+       /* NOTE(review): GFP_ATOMIC looks unnecessary here -- probe
+        * context may sleep; presumably historical, confirm before
+        * changing.
+        */
+       hp->happy_block = dma_alloc_coherent(hp->dma_dev,
+                                            PAGE_SIZE,
+                                            &hp->hblock_dvma,
+                                            GFP_ATOMIC);
+       err = -ENOMEM;
+       if (!hp->happy_block) {
+               printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
+               goto err_out_iounmap;
+       }
+
+       /* Force check of the link first time we are brought up. */
+       hp->linkcheck = 0;
+
+       /* Force timer state to 'asleep' with count of zero. */
+       hp->timer_state = asleep;
+       hp->timer_ticks = 0;
+
+       init_timer(&hp->happy_timer);
+
+       hp->dev = dev;
+       dev->netdev_ops = &hme_netdev_ops;
+       dev->watchdog_timeo = 5*HZ;
+       dev->ethtool_ops = &hme_ethtool_ops;
+
+       /* Happy Meal can do it all... */
+       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+       dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+
+       dev->irq = op->archdata.irqs[0];
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+       /* Hook up SBUS register/descriptor accessors. */
+       hp->read_desc32 = sbus_hme_read_desc32;
+       hp->write_txd = sbus_hme_write_txd;
+       hp->write_rxd = sbus_hme_write_rxd;
+       hp->read32 = sbus_hme_read32;
+       hp->write32 = sbus_hme_write32;
+#endif
+
+       /* Grrr, Happy Meal comes up by default not advertising
+        * full duplex 100baseT capabilities, fix this.
+        */
+       spin_lock_irq(&hp->happy_lock);
+       happy_meal_set_initial_advertisement(hp);
+       spin_unlock_irq(&hp->happy_lock);
+
+       err = register_netdev(hp->dev);
+       if (err) {
+               printk(KERN_ERR "happymeal: Cannot register net device, "
+                      "aborting.\n");
+               goto err_out_free_coherent;
+       }
+
+       dev_set_drvdata(&op->dev, hp);
+
+       if (qfe_slot != -1)
+               printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
+                      dev->name, qfe_slot);
+       else
+               printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
+                      dev->name);
+
+       printk("%pM\n", dev->dev_addr);
+
+       return 0;
+
+err_out_free_coherent:
+       dma_free_coherent(hp->dma_dev,
+                         PAGE_SIZE,
+                         hp->happy_block,
+                         hp->hblock_dvma);
+
+err_out_iounmap:
+       if (hp->gregs)
+               of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
+       if (hp->etxregs)
+               of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
+       if (hp->erxregs)
+               of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
+       if (hp->bigmacregs)
+               of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
+       if (hp->tcvregs)
+               of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
+
+       if (qp)
+               qp->happy_meals[qfe_slot] = NULL;
+
+err_out_free_netdev:
+       free_netdev(dev);
+
+err_out:
+       return err;
+}
+#endif
+
+#ifdef CONFIG_PCI
+#ifndef CONFIG_SPARC
+/* Heuristic: @pdev is one port of a Quattro card when it sits behind
+ * a DEC 21153 bridge together with exactly four Sun Happy Meals.
+ */
+static int is_quattro_p(struct pci_dev *pdev)
+{
+       struct pci_dev *busdev = pdev->bus->self;
+       struct list_head *tmp;
+       int n_hmes = 0;
+
+       if (busdev == NULL ||
+           busdev->vendor != PCI_VENDOR_ID_DEC ||
+           busdev->device != PCI_DEVICE_ID_DEC_21153)
+               return 0;
+
+       for (tmp = pdev->bus->devices.next;
+            tmp != &pdev->bus->devices;
+            tmp = tmp->next) {
+               struct pci_dev *this_pdev = pci_dev_b(tmp);
+
+               if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
+                   this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
+                       n_hmes++;
+       }
+
+       return n_hmes == 4;
+}
+
+/* Fetch MAC address from vital product data of PCI ROM.  Scans for
+ * the @index-th VPD "NA" (network address) record inside @rom_base
+ * and copies its six bytes into @dev_addr.  Returns 1 when found.
+ */
+static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
+{
+       /* VPD record header preceding a 6-byte "NA" field. */
+       static const unsigned char sig[6] = {
+               0x90, 0x00, 0x09, 0x4e, 0x41, 0x06
+       };
+       int this_offset;
+
+       for (this_offset = 0x20; this_offset < len; this_offset++) {
+               void __iomem *p = rom_base + this_offset;
+               int i, match = 1;
+
+               for (i = 0; i < 6; i++) {
+                       if (readb(p + i) != sig[i]) {
+                               match = 0;
+                               break;
+                       }
+               }
+               if (!match)
+                       continue;
+
+               this_offset += 6;
+               p += 6;
+
+               if (index-- == 0) {
+                       for (i = 0; i < 6; i++)
+                               dev_addr[i] = readb(p + i);
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+/* On non-sparc hosts there is no IDPROM: pull the MAC address out of
+ * the expansion ROM's VPD, falling back to the Sun OUI plus three
+ * random bytes when no ROM address can be found.
+ */
+static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
+{
+       void __iomem *p;
+       size_t size;
+
+       p = pci_map_rom(pdev, &size);
+       if (p) {
+               int index = is_quattro_p(pdev) ? PCI_SLOT(pdev->devfn) : 0;
+               int found;
+
+               /* Expansion ROM signature 0x55 0xaa, then scan the VPD. */
+               found = readb(p) == 0x55 &&
+                       readb(p + 1) == 0xaa &&
+                       find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
+               pci_unmap_rom(pdev, p);
+               if (found)
+                       return;
+       }
+
+       /* Sun MAC prefix then 3 random bytes. */
+       dev_addr[0] = 0x08;
+       dev_addr[1] = 0x00;
+       dev_addr[2] = 0x20;
+       get_random_bytes(&dev_addr[3], 3);
+}
+#endif /* !(CONFIG_SPARC) */
+
+/* Probe and register one PCI Happy Meal (or one port of a PCI QFE
+ * card).  Returns 0 on success or a negative errno.
+ */
+static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
+                                         const struct pci_device_id *ent)
+{
+       struct quattro *qp = NULL;
+#ifdef CONFIG_SPARC
+       struct device_node *dp;
+#endif
+       struct happy_meal *hp;
+       struct net_device *dev;
+       void __iomem *hpreg_base;
+       unsigned long hpreg_res;
+       int i, qfe_slot = -1;
+       char prom_name[64];
+       int err;
+
+       /* Now make sure pci_dev cookie is there. */
+#ifdef CONFIG_SPARC
+       dp = pci_device_to_OF_node(pdev);
+       strcpy(prom_name, dp->name);
+#else
+       if (is_quattro_p(pdev))
+               strcpy(prom_name, "SUNW,qfe");
+       else
+               strcpy(prom_name, "SUNW,hme");
+#endif
+
+       err = -ENODEV;
+
+       if (pci_enable_device(pdev))
+               goto err_out;
+       pci_set_master(pdev);
+
+       /* For QFE ports, claim the first free slot in the quattro. */
+       if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
+               qp = quattro_pci_find(pdev);
+               if (qp == NULL)
+                       goto err_out;
+               for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
+                       if (qp->happy_meals[qfe_slot] == NULL)
+                               break;
+               if (qfe_slot == 4)
+                       goto err_out;
+       }
+
+       dev = alloc_etherdev(sizeof(struct happy_meal));
+       err = -ENOMEM;
+       if (!dev)
+               goto err_out;
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       if (hme_version_printed++ == 0)
+               printk(KERN_INFO "%s", version);
+
+       dev->base_addr = (long) pdev;
+
+       hp = netdev_priv(dev);
+
+       hp->happy_dev = pdev;
+       hp->dma_dev = &pdev->dev;
+
+       spin_lock_init(&hp->happy_lock);
+
+       if (qp != NULL) {
+               hp->qfe_parent = qp;
+               hp->qfe_ent = qfe_slot;
+               qp->happy_meals[qfe_slot] = dev;
+       }
+
+       /* BAR 0 must be memory-mapped, not I/O space. */
+       hpreg_res = pci_resource_start(pdev, 0);
+       err = -ENODEV;
+       if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
+               printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
+               goto err_out_clear_quattro;
+       }
+       if (pci_request_regions(pdev, DRV_NAME)) {
+               printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
+                      "aborting.\n");
+               goto err_out_clear_quattro;
+       }
+
+       if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == NULL) {
+               printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
+               goto err_out_free_res;
+       }
+
+       /* A non-zero module-parameter MAC address takes precedence. */
+       for (i = 0; i < 6; i++) {
+               if (macaddr[i] != 0)
+                       break;
+       }
+       if (i < 6) { /* a mac address was given */
+               for (i = 0; i < 6; i++)
+                       dev->dev_addr[i] = macaddr[i];
+               macaddr[5]++;
+       } else {
+#ifdef CONFIG_SPARC
+               const unsigned char *addr;
+               int len;
+
+               if (qfe_slot != -1 &&
+                   (addr = of_get_property(dp, "local-mac-address", &len))
+                       != NULL &&
+                   len == 6) {
+                       memcpy(dev->dev_addr, addr, 6);
+               } else {
+                       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+               }
+#else
+               get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
+#endif
+       }
+
+       /* Layout registers. */
+       hp->gregs      = (hpreg_base + 0x0000UL);
+       hp->etxregs    = (hpreg_base + 0x2000UL);
+       hp->erxregs    = (hpreg_base + 0x4000UL);
+       hp->bigmacregs = (hpreg_base + 0x6000UL);
+       hp->tcvregs    = (hpreg_base + 0x7000UL);
+
+#ifdef CONFIG_SPARC
+       hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
+       if (hp->hm_revision == 0xff)
+               hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
+#else
+       /* works with this on non-sparc hosts */
+       hp->hm_revision = 0x20;
+#endif
+
+       /* Now enable the feature flags we can. */
+       if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
+               hp->happy_flags = HFLAG_20_21;
+       else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
+               hp->happy_flags = HFLAG_NOT_A0;
+
+       if (qp != NULL)
+               hp->happy_flags |= HFLAG_QUATTRO;
+
+       /* And of course, indicate this is PCI. */
+       hp->happy_flags |= HFLAG_PCI;
+
+#ifdef CONFIG_SPARC
+       /* Assume PCI happy meals can handle all burst sizes. */
+       hp->happy_bursts = DMA_BURSTBITS;
+#endif
+
+       hp->happy_block = (struct hmeal_init_block *)
+               dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL);
+
+       /* NOTE(review): -ENOMEM would describe this failure better and
+        * would match the SBUS probe; kept as-is to preserve behavior.
+        */
+       err = -ENODEV;
+       if (!hp->happy_block) {
+               printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
+               goto err_out_iounmap;
+       }
+
+       hp->linkcheck = 0;
+       hp->timer_state = asleep;
+       hp->timer_ticks = 0;
+
+       init_timer(&hp->happy_timer);
+
+       hp->dev = dev;
+       dev->netdev_ops = &hme_netdev_ops;
+       dev->watchdog_timeo = 5*HZ;
+       dev->ethtool_ops = &hme_ethtool_ops;
+       dev->irq = pdev->irq;
+       dev->dma = 0;
+
+       /* Happy Meal can do it all... */
+       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+       dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+       /* Hook up PCI register/descriptor accessors. */
+       hp->read_desc32 = pci_hme_read_desc32;
+       hp->write_txd = pci_hme_write_txd;
+       hp->write_rxd = pci_hme_write_rxd;
+       hp->read32 = pci_hme_read32;
+       hp->write32 = pci_hme_write32;
+#endif
+
+       /* Grrr, Happy Meal comes up by default not advertising
+        * full duplex 100baseT capabilities, fix this.
+        */
+       spin_lock_irq(&hp->happy_lock);
+       happy_meal_set_initial_advertisement(hp);
+       spin_unlock_irq(&hp->happy_lock);
+
+       err = register_netdev(hp->dev);
+       if (err) {
+               printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
+                      "aborting.\n");
+               goto err_out_iounmap;
+       }
+
+       dev_set_drvdata(&pdev->dev, hp);
+
+       /* Slot 0 of a quattro additionally reports the bridge type. */
+       if (!qfe_slot) {
+               struct pci_dev *qpdev = qp->quattro_dev;
+
+               prom_name[0] = 0;
+               if (!strncmp(dev->name, "eth", 3)) {
+                       int i = simple_strtoul(dev->name + 3, NULL, 10);
+                       sprintf(prom_name, "-%d", i + 3);
+               }
+               printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
+               if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
+                   qpdev->device == PCI_DEVICE_ID_DEC_21153)
+                       printk("DEC 21153 PCI Bridge\n");
+               else
+                       printk("unknown bridge %04x.%04x\n",
+                               qpdev->vendor, qpdev->device);
+       }
+
+       if (qfe_slot != -1)
+               printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
+                      dev->name, qfe_slot);
+       else
+               printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
+                      dev->name);
+
+       printk("%pM\n", dev->dev_addr);
+
+       return 0;
+
+/* NOTE(review): this error path does not dma_free_coherent() the init
+ * block (unlike the SBUS probe) and never disables the PCI device, so
+ * a register_netdev() failure leaks both -- verify and fix separately.
+ */
+err_out_iounmap:
+       iounmap(hp->gregs);
+
+err_out_free_res:
+       pci_release_regions(pdev);
+
+err_out_clear_quattro:
+       if (qp != NULL)
+               qp->happy_meals[qfe_slot] = NULL;
+
+       free_netdev(dev);
+
+err_out:
+       return err;
+}
+
+/* PCI detach: unregister the netdev first so no new traffic starts,
+ * then release the DMA init block, register mapping and PCI regions,
+ * and finally free the netdev.
+ */
+static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
+{
+       struct happy_meal *hp = dev_get_drvdata(&pdev->dev);
+       struct net_device *net_dev = hp->dev;
+
+       unregister_netdev(net_dev);
+
+       dma_free_coherent(hp->dma_dev, PAGE_SIZE,
+                         hp->happy_block, hp->hblock_dvma);
+       /* All five register banks live in the single BAR-0 mapping. */
+       iounmap(hp->gregs);
+       pci_release_regions(hp->happy_dev);
+
+       free_netdev(net_dev);
+
+       dev_set_drvdata(&pdev->dev, NULL);
+}
+
+/* All PCI Happy Meal variants share one Sun vendor/device ID. */
+static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
+       { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
+       { }                     /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
+
+/* PCI driver glue for the "hme" front end. */
+static struct pci_driver hme_pci_driver = {
+       .name           = "hme",
+       .id_table       = happymeal_pci_ids,
+       .probe          = happy_meal_pci_probe,
+       .remove         = __devexit_p(happy_meal_pci_remove),
+};
+
+/* Register the PCI front end; quattro bookkeeping is done per-probe. */
+static int __init happy_meal_pci_init(void)
+{
+       return pci_register_driver(&hme_pci_driver);
+}
+
+/* Unregister the PCI front end and tear down the quattro tracking
+ * list built up during probing.
+ */
+static void happy_meal_pci_exit(void)
+{
+       pci_unregister_driver(&hme_pci_driver);
+
+       while (qfe_pci_list != NULL) {
+               struct quattro *next = qfe_pci_list->next;
+
+               kfree(qfe_pci_list);
+               qfe_pci_list = next;
+       }
+}
+
+#endif
+
+#ifdef CONFIG_SBUS
+static const struct of_device_id hme_sbus_match[];
+/* Platform-bus probe entry: decide whether this node is a QFE port
+ * and hand off to the common SBUS probe routine.
+ */
+static int __devinit hme_sbus_probe(struct platform_device *op)
+{
+       struct device_node *dp = op->dev.of_node;
+       const char *model = of_get_property(dp, "model", NULL);
+       const struct of_device_id *match;
+       int is_qfe = 0;
+
+       match = of_match_device(hme_sbus_match, &op->dev);
+       if (!match)
+               return -EINVAL;
+
+       /* QFE is flagged via the match data or the model property. */
+       if (match->data != NULL)
+               is_qfe = 1;
+       else if (model && !strcmp(model, "SUNW,sbus-qfe"))
+               is_qfe = 1;
+
+       return happy_meal_sbus_probe_one(op, is_qfe);
+}
+
+/* SBUS detach: unregister the netdev first, then unmap the five
+ * register banks, free the DMA init block, and free the netdev.
+ */
+static int __devexit hme_sbus_remove(struct platform_device *op)
+{
+       struct happy_meal *hp = dev_get_drvdata(&op->dev);
+       struct net_device *net_dev = hp->dev;
+
+       unregister_netdev(net_dev);
+
+       /* XXX qfe parent interrupt... */
+
+       of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
+       of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
+       of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
+       of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
+       of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
+       dma_free_coherent(hp->dma_dev,
+                         PAGE_SIZE,
+                         hp->happy_block,
+                         hp->hblock_dvma);
+
+       free_netdev(net_dev);
+
+       dev_set_drvdata(&op->dev, NULL);
+
+       return 0;
+}
+
+/* Device-tree match table: entries with a non-NULL .data mark the
+ * node as one port of a Quattro (QFE) card.
+ */
+static const struct of_device_id hme_sbus_match[] = {
+       {
+               .name = "SUNW,hme",
+       },
+       {
+               .name = "SUNW,qfe",
+               .data = (void *) 1,
+       },
+       {
+               .name = "qfe",
+               .data = (void *) 1,
+       },
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, hme_sbus_match);
+
+/* SBUS (platform bus) driver glue for the "hme" front end. */
+static struct platform_driver hme_sbus_driver = {
+       .driver = {
+               .name = "hme",
+               .owner = THIS_MODULE,
+               .of_match_table = hme_sbus_match,
+       },
+       .probe          = hme_sbus_probe,
+       .remove         = __devexit_p(hme_sbus_remove),
+};
+
+/* Register the SBUS front end, then hook up the per-card shared IRQs
+ * once every quattro slot has had a chance to probe.
+ */
+static int __init happy_meal_sbus_init(void)
+{
+       int err = platform_driver_register(&hme_sbus_driver);
+
+       if (err == 0)
+               err = quattro_sbus_register_irqs();
+
+       return err;
+}
+
+/* Unregister the SBUS front end, drop the shared IRQs, and release
+ * the quattro tracking list built up during probing.
+ */
+static void happy_meal_sbus_exit(void)
+{
+       platform_driver_unregister(&hme_sbus_driver);
+       quattro_sbus_free_irqs();
+
+       while (qfe_sbus_list != NULL) {
+               struct quattro *next = qfe_sbus_list->next;
+
+               kfree(qfe_sbus_list);
+               qfe_sbus_list = next;
+       }
+}
+#endif
+
+/* Module entry: register the SBUS and/or PCI front ends depending on
+ * configuration.  If PCI registration fails after SBUS succeeded, the
+ * SBUS side is rolled back so the module load fails cleanly.
+ */
+static int __init happy_meal_probe(void)
+{
+       int err = 0;
+
+#ifdef CONFIG_SBUS
+       err = happy_meal_sbus_init();
+#endif
+#ifdef CONFIG_PCI
+       if (!err) {
+               err = happy_meal_pci_init();
+#ifdef CONFIG_SBUS
+               if (err)
+                       happy_meal_sbus_exit();
+#endif
+       }
+#endif
+
+       return err;
+}
+
+
+/* Module exit: unregister whichever front ends were registered. */
+static void __exit happy_meal_exit(void)
+{
+#ifdef CONFIG_SBUS
+       happy_meal_sbus_exit();
+#endif
+#ifdef CONFIG_PCI
+       happy_meal_pci_exit();
+#endif
+}
+
+module_init(happy_meal_probe);
+module_exit(happy_meal_exit);
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
new file mode 100644 (file)
index 0000000..64f2783
--- /dev/null
@@ -0,0 +1,512 @@
+/* $Id: sunhme.h,v 1.33 2001/08/03 06:23:04 davem Exp $
+ * sunhme.h: Definitions for Sparc HME/BigMac 10/100baseT ethernet driver.
+ *           Also known as the "Happy Meal".
+ *
+ * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com)
+ */
+
+#ifndef _SUNHME_H
+#define _SUNHME_H
+
+#include <linux/pci.h>
+
+/* Happy Meal global registers. */
+#define GREG_SWRESET   0x000UL /* Software Reset  */
+#define GREG_CFG       0x004UL /* Config Register */
+#define GREG_STAT      0x108UL /* Status          */
+#define GREG_IMASK     0x10cUL /* Interrupt Mask  */
+#define GREG_REG_SIZE  0x110UL
+
+/* Global reset register. */
+#define GREG_RESET_ETX         0x01
+#define GREG_RESET_ERX         0x02
+#define GREG_RESET_ALL         0x03
+
+/* Global config register. */
+#define GREG_CFG_BURSTMSK      0x03
+#define GREG_CFG_BURST16       0x00
+#define GREG_CFG_BURST32       0x01
+#define GREG_CFG_BURST64       0x02
+#define GREG_CFG_64BIT         0x04
+#define GREG_CFG_PARITY        0x08
+#define GREG_CFG_RESV          0x10
+
+/* Global status register. */
+#define GREG_STAT_GOTFRAME     0x00000001 /* Received a frame                         */
+#define GREG_STAT_RCNTEXP      0x00000002 /* Receive frame counter expired            */
+#define GREG_STAT_ACNTEXP      0x00000004 /* Align-error counter expired              */
+#define GREG_STAT_CCNTEXP      0x00000008 /* CRC-error counter expired                */
+#define GREG_STAT_LCNTEXP      0x00000010 /* Length-error counter expired             */
+#define GREG_STAT_RFIFOVF      0x00000020 /* Receive FIFO overflow                    */
+#define GREG_STAT_CVCNTEXP     0x00000040 /* Code-violation counter expired           */
+#define GREG_STAT_STSTERR      0x00000080 /* Test error in XIF for SQE                */
+#define GREG_STAT_SENTFRAME    0x00000100 /* Transmitted a frame                      */
+#define GREG_STAT_TFIFO_UND    0x00000200 /* Transmit FIFO underrun                   */
+#define GREG_STAT_MAXPKTERR    0x00000400 /* Max-packet size error                    */
+#define GREG_STAT_NCNTEXP      0x00000800 /* Normal-collision counter expired         */
+#define GREG_STAT_ECNTEXP      0x00001000 /* Excess-collision counter expired         */
+#define GREG_STAT_LCCNTEXP     0x00002000 /* Late-collision counter expired           */
+#define GREG_STAT_FCNTEXP      0x00004000 /* First-collision counter expired          */
+#define GREG_STAT_DTIMEXP      0x00008000 /* Defer-timer expired                      */
+#define GREG_STAT_RXTOHOST     0x00010000 /* Moved from receive-FIFO to host memory   */
+#define GREG_STAT_NORXD        0x00020000 /* No more receive descriptors              */
+#define GREG_STAT_RXERR        0x00040000 /* Error during receive dma                 */
+#define GREG_STAT_RXLATERR     0x00080000 /* Late error during receive dma            */
+#define GREG_STAT_RXPERR       0x00100000 /* Parity error during receive dma          */
+#define GREG_STAT_RXTERR       0x00200000 /* Tag error during receive dma             */
+#define GREG_STAT_EOPERR       0x00400000 /* Transmit descriptor did not have EOP set */
+#define GREG_STAT_MIFIRQ       0x00800000 /* MIF is signaling an interrupt condition  */
+#define GREG_STAT_HOSTTOTX     0x01000000 /* Moved from host memory to transmit-FIFO  */
+#define GREG_STAT_TXALL        0x02000000 /* Transmitted all packets in the tx-fifo   */
+#define GREG_STAT_TXEACK       0x04000000 /* Error during transmit dma                */
+#define GREG_STAT_TXLERR       0x08000000 /* Late error during transmit dma           */
+#define GREG_STAT_TXPERR       0x10000000 /* Parity error during transmit dma         */
+#define GREG_STAT_TXTERR       0x20000000 /* Tag error during transmit dma            */
+#define GREG_STAT_SLVERR       0x40000000 /* PIO access got an error                  */
+#define GREG_STAT_SLVPERR      0x80000000 /* PIO access got a parity error            */
+
+/* All interesting error conditions. */
+#define GREG_STAT_ERRORS       0xfc7efefc
+
+/* Global interrupt mask register. */
+#define GREG_IMASK_GOTFRAME    0x00000001 /* Received a frame                         */
+#define GREG_IMASK_RCNTEXP     0x00000002 /* Receive frame counter expired            */
+#define GREG_IMASK_ACNTEXP     0x00000004 /* Align-error counter expired              */
+#define GREG_IMASK_CCNTEXP     0x00000008 /* CRC-error counter expired                */
+#define GREG_IMASK_LCNTEXP     0x00000010 /* Length-error counter expired             */
+#define GREG_IMASK_RFIFOVF     0x00000020 /* Receive FIFO overflow                    */
+#define GREG_IMASK_CVCNTEXP    0x00000040 /* Code-violation counter expired           */
+#define GREG_IMASK_STSTERR     0x00000080 /* Test error in XIF for SQE                */
+#define GREG_IMASK_SENTFRAME   0x00000100 /* Transmitted a frame                      */
+#define GREG_IMASK_TFIFO_UND   0x00000200 /* Transmit FIFO underrun                   */
+#define GREG_IMASK_MAXPKTERR   0x00000400 /* Max-packet size error                    */
+#define GREG_IMASK_NCNTEXP     0x00000800 /* Normal-collision counter expired         */
+#define GREG_IMASK_ECNTEXP     0x00001000 /* Excess-collision counter expired         */
+#define GREG_IMASK_LCCNTEXP    0x00002000 /* Late-collision counter expired           */
+#define GREG_IMASK_FCNTEXP     0x00004000 /* First-collision counter expired          */
+#define GREG_IMASK_DTIMEXP     0x00008000 /* Defer-timer expired                      */
+#define GREG_IMASK_RXTOHOST    0x00010000 /* Moved from receive-FIFO to host memory   */
+#define GREG_IMASK_NORXD       0x00020000 /* No more receive descriptors              */
+#define GREG_IMASK_RXERR       0x00040000 /* Error during receive dma                 */
+#define GREG_IMASK_RXLATERR    0x00080000 /* Late error during receive dma            */
+#define GREG_IMASK_RXPERR      0x00100000 /* Parity error during receive dma          */
+#define GREG_IMASK_RXTERR      0x00200000 /* Tag error during receive dma             */
+#define GREG_IMASK_EOPERR      0x00400000 /* Transmit descriptor did not have EOP set */
+#define GREG_IMASK_MIFIRQ      0x00800000 /* MIF is signaling an interrupt condition  */
+#define GREG_IMASK_HOSTTOTX    0x01000000 /* Moved from host memory to transmit-FIFO  */
+#define GREG_IMASK_TXALL       0x02000000 /* Transmitted all packets in the tx-fifo   */
+#define GREG_IMASK_TXEACK      0x04000000 /* Error during transmit dma                */
+#define GREG_IMASK_TXLERR      0x08000000 /* Late error during transmit dma           */
+#define GREG_IMASK_TXPERR      0x10000000 /* Parity error during transmit dma         */
+#define GREG_IMASK_TXTERR      0x20000000 /* Tag error during transmit dma            */
+#define GREG_IMASK_SLVERR      0x40000000 /* PIO access got an error                  */
+#define GREG_IMASK_SLVPERR     0x80000000 /* PIO access got a parity error            */
+
+/* Happy Meal external transmitter registers. */
+#define ETX_PENDING    0x00UL  /* Transmit pending/wakeup register */
+#define ETX_CFG                0x04UL  /* Transmit config register         */
+#define ETX_RING       0x08UL  /* Transmit ring pointer            */
+#define ETX_BBASE      0x0cUL  /* Transmit buffer base             */
+#define ETX_BDISP      0x10UL  /* Transmit buffer displacement     */
+#define ETX_FIFOWPTR   0x14UL  /* FIFO write ptr                   */
+#define ETX_FIFOSWPTR  0x18UL  /* FIFO write ptr (shadow register) */
+#define ETX_FIFORPTR   0x1cUL  /* FIFO read ptr                    */
+#define ETX_FIFOSRPTR  0x20UL  /* FIFO read ptr (shadow register)  */
+#define ETX_FIFOPCNT   0x24UL  /* FIFO packet counter              */
+#define ETX_SMACHINE   0x28UL  /* Transmitter state machine        */
+#define ETX_RSIZE      0x2cUL  /* Ring descriptor size             */
+#define ETX_BPTR       0x30UL  /* Transmit data buffer ptr         */
+#define ETX_REG_SIZE   0x34UL
+
+/* ETX transmit pending register. */
+#define ETX_TP_DMAWAKEUP         0x00000001 /* Restart transmit dma             */
+
+/* ETX config register. */
+#define ETX_CFG_DMAENABLE        0x00000001 /* Enable transmit dma              */
+#define ETX_CFG_FIFOTHRESH       0x000003fe /* Transmit FIFO threshold          */
+#define ETX_CFG_IRQDAFTER        0x00000400 /* Interrupt after TX-FIFO drained  */
+#define ETX_CFG_IRQDBEFORE       0x00000000 /* Interrupt before TX-FIFO drained */
+
+#define ETX_RSIZE_SHIFT          4
+
+/* Happy Meal external receiver registers. */
+#define ERX_CFG                0x00UL  /* Receiver config register         */
+#define ERX_RING       0x04UL  /* Receiver ring ptr                */
+#define ERX_BPTR       0x08UL  /* Receiver buffer ptr              */
+#define ERX_FIFOWPTR   0x0cUL  /* FIFO write ptr                   */
+#define ERX_FIFOSWPTR  0x10UL  /* FIFO write ptr (shadow register) */
+#define ERX_FIFORPTR   0x14UL  /* FIFO read ptr                    */
+#define ERX_FIFOSRPTR  0x18UL  /* FIFO read ptr (shadow register)  */
+#define ERX_SMACHINE   0x1cUL  /* Receiver state machine           */
+#define ERX_REG_SIZE   0x20UL
+
+/* ERX config register. */
+#define ERX_CFG_DMAENABLE    0x00000001 /* Enable receive DMA        */
+#define ERX_CFG_RESV1        0x00000006 /* Unused...                 */
+#define ERX_CFG_BYTEOFFSET   0x00000038 /* Receive first byte offset */
+#define ERX_CFG_RESV2        0x000001c0 /* Unused...                 */
+#define ERX_CFG_SIZE32       0x00000000 /* Receive ring size == 32   */
+#define ERX_CFG_SIZE64       0x00000200 /* Receive ring size == 64   */
+#define ERX_CFG_SIZE128      0x00000400 /* Receive ring size == 128  */
+#define ERX_CFG_SIZE256      0x00000600 /* Receive ring size == 256  */
+#define ERX_CFG_RESV3        0x0000f800 /* Unused...                 */
+#define ERX_CFG_CSUMSTART    0x007f0000 /* Offset of checksum start,
+                                        * in halfwords. */
+
+/* I'd like a Big Mac, small fries, small coke, and SparcLinux please. */
+#define BMAC_XIFCFG    0x0000UL        /* XIF config register                */
+       /* 0x4-->0x204, reserved */
+#define BMAC_TXSWRESET 0x208UL /* Transmitter software reset         */
+#define BMAC_TXCFG     0x20cUL /* Transmitter config register        */
+#define BMAC_IGAP1     0x210UL /* Inter-packet gap 1                 */
+#define BMAC_IGAP2     0x214UL /* Inter-packet gap 2                 */
+#define BMAC_ALIMIT    0x218UL /* Transmit attempt limit             */
+#define BMAC_STIME     0x21cUL /* Transmit slot time                 */
+#define BMAC_PLEN      0x220UL /* Size of transmit preamble          */
+#define BMAC_PPAT      0x224UL /* Pattern for transmit preamble      */
+#define BMAC_TXSDELIM  0x228UL /* Transmit delimiter                 */
+#define BMAC_JSIZE     0x22cUL /* Jam size                           */
+#define BMAC_TXMAX     0x230UL /* Transmit max pkt size              */
+#define BMAC_TXMIN     0x234UL /* Transmit min pkt size              */
+#define BMAC_PATTEMPT  0x238UL /* Count of transmit peak attempts    */
+#define BMAC_DTCTR     0x23cUL /* Transmit defer timer               */
+#define BMAC_NCCTR     0x240UL /* Transmit normal-collision counter  */
+#define BMAC_FCCTR     0x244UL /* Transmit first-collision counter   */
+#define BMAC_EXCTR     0x248UL /* Transmit excess-collision counter  */
+#define BMAC_LTCTR     0x24cUL /* Transmit late-collision counter    */
+#define BMAC_RSEED     0x250UL /* Transmit random number seed        */
+#define BMAC_TXSMACHINE        0x254UL /* Transmit state machine             */
+       /* 0x258-->0x304, reserved */
+#define BMAC_RXSWRESET 0x308UL /* Receiver software reset            */
+#define BMAC_RXCFG     0x30cUL /* Receiver config register           */
+#define BMAC_RXMAX     0x310UL /* Receive max pkt size               */
+#define BMAC_RXMIN     0x314UL /* Receive min pkt size               */
+#define BMAC_MACADDR2  0x318UL /* Ether address register 2           */
+#define BMAC_MACADDR1  0x31cUL /* Ether address register 1           */
+#define BMAC_MACADDR0  0x320UL /* Ether address register 0           */
+#define BMAC_FRCTR     0x324UL /* Receive frame receive counter      */
+#define BMAC_GLECTR    0x328UL /* Receive giant-length error counter */
+#define BMAC_UNALECTR  0x32cUL /* Receive unaligned error counter    */
+#define BMAC_RCRCECTR  0x330UL /* Receive CRC error counter          */
+#define BMAC_RXSMACHINE        0x334UL /* Receiver state machine             */
+#define BMAC_RXCVALID  0x338UL /* Receiver code violation            */
+       /* 0x33c, reserved */
+#define BMAC_HTABLE3   0x340UL /* Hash table 3                       */
+#define BMAC_HTABLE2   0x344UL /* Hash table 2                       */
+#define BMAC_HTABLE1   0x348UL /* Hash table 1                       */
+#define BMAC_HTABLE0   0x34cUL /* Hash table 0                       */
+#define BMAC_AFILTER2  0x350UL /* Address filter 2                   */
+#define BMAC_AFILTER1  0x354UL /* Address filter 1                   */
+#define BMAC_AFILTER0  0x358UL /* Address filter 0                   */
+#define BMAC_AFMASK    0x35cUL /* Address filter mask                */
+#define BMAC_REG_SIZE  0x360UL
+
+/* BigMac XIF config register. */
+#define BIGMAC_XCFG_ODENABLE  0x00000001 /* Output driver enable         */
+#define BIGMAC_XCFG_XLBACK    0x00000002 /* Loopback-mode XIF enable     */
+#define BIGMAC_XCFG_MLBACK    0x00000004 /* Loopback-mode MII enable     */
+#define BIGMAC_XCFG_MIIDISAB  0x00000008 /* MII receive buffer disable   */
+#define BIGMAC_XCFG_SQENABLE  0x00000010 /* SQE test enable              */
+#define BIGMAC_XCFG_SQETWIN   0x000003e0 /* SQE time window              */
+#define BIGMAC_XCFG_LANCE     0x00000010 /* Lance mode enable            */
+#define BIGMAC_XCFG_LIPG0     0x000003e0 /* Lance mode IPG0              */
+
+/* BigMac transmit config register. */
+#define BIGMAC_TXCFG_ENABLE   0x00000001 /* Enable the transmitter       */
+#define BIGMAC_TXCFG_SMODE    0x00000020 /* Enable slow transmit mode    */
+#define BIGMAC_TXCFG_CIGN     0x00000040 /* Ignore transmit collisions   */
+#define BIGMAC_TXCFG_FCSOFF   0x00000080 /* Do not emit FCS              */
+#define BIGMAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff              */
+#define BIGMAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex           */
+#define BIGMAC_TXCFG_DGIVEUP  0x00000400 /* Don't give up on transmits   */
+
+/* BigMac receive config register. */
+#define BIGMAC_RXCFG_ENABLE   0x00000001 /* Enable the receiver             */
+#define BIGMAC_RXCFG_PSTRIP   0x00000020 /* Pad byte strip enable           */
+#define BIGMAC_RXCFG_PMISC    0x00000040 /* Enable promiscuous mode          */
+#define BIGMAC_RXCFG_DERR     0x00000080 /* Disable error checking          */
+#define BIGMAC_RXCFG_DCRCS    0x00000100 /* Disable CRC stripping           */
+#define BIGMAC_RXCFG_REJME    0x00000200 /* Reject packets addressed to me  */
+#define BIGMAC_RXCFG_PGRP     0x00000400 /* Enable promisc group mode       */
+#define BIGMAC_RXCFG_HENABLE  0x00000800 /* Enable the hash filter          */
+#define BIGMAC_RXCFG_AENABLE  0x00001000 /* Enable the address filter       */
+
+/* These are the "Management Interface" (ie. MIF) registers of the transceiver. */
+#define TCVR_BBCLOCK   0x00UL  /* Bit bang clock register          */
+#define TCVR_BBDATA    0x04UL  /* Bit bang data register           */
+#define TCVR_BBOENAB   0x08UL  /* Bit bang output enable           */
+#define TCVR_FRAME     0x0cUL  /* Frame control/data register      */
+#define TCVR_CFG       0x10UL  /* MIF config register              */
+#define TCVR_IMASK     0x14UL  /* MIF interrupt mask               */
+#define TCVR_STATUS    0x18UL  /* MIF status                       */
+#define TCVR_SMACHINE  0x1cUL  /* MIF state machine                */
+#define TCVR_REG_SIZE  0x20UL
+
+/* Frame commands. */
+#define FRAME_WRITE           0x50020000
+#define FRAME_READ            0x60020000
+
+/* Transceiver config register */
+#define TCV_CFG_PSELECT       0x00000001 /* Select PHY                      */
+#define TCV_CFG_PENABLE       0x00000002 /* Enable MIF polling              */
+#define TCV_CFG_BENABLE       0x00000004 /* Enable the "bit banger" oh baby */
+#define TCV_CFG_PREGADDR      0x000000f8 /* Address of poll register        */
+#define TCV_CFG_MDIO0         0x00000100 /* MDIO zero, data/attached        */
+#define TCV_CFG_MDIO1         0x00000200 /* MDIO one,  data/attached        */
+#define TCV_CFG_PDADDR        0x00007c00 /* Device PHY address polling      */
+
+/* Here are some PHY addresses. */
+#define TCV_PADDR_ETX         0          /* Internal transceiver            */
+#define TCV_PADDR_ITX         1          /* External transceiver            */
+
+/* Transceiver status register */
+#define TCV_STAT_BASIC        0xffff0000 /* The "basic" part                */
+#define TCV_STAT_NORMAL       0x0000ffff /* The "non-basic" part            */
+
+/* Inside the Happy Meal transceiver is the physical layer, which uses an
+ * implementation from National Semiconductor, part number DP83840VCE.
+ * You can retrieve the data sheets and programming docs for this beast
+ * from http://www.national.com/
+ *
+ * The DP83840 is capable of both 10 and 100Mbps ethernet, in both
+ * half and full duplex mode.  It also supports auto negotiation.
+ *
+ * But.... THIS THING IS A PAIN IN THE ASS TO PROGRAM!
+ * Debugging eeprom burnt code is more fun than programming this chip!
+ */
+
+/* Generic MII registers defined in linux/mii.h, these below
+ * are DP83840 specific.
+ */
+#define DP83840_CSCONFIG        0x17        /* CS configuration            */
+
+/* The Carrier Sense config register. */
+#define CSCONFIG_RESV1          0x0001  /* Unused...                   */
+#define CSCONFIG_LED4           0x0002  /* Pin for full-dplx LED4      */
+#define CSCONFIG_LED1           0x0004  /* Pin for conn-status LED1    */
+#define CSCONFIG_RESV2          0x0008  /* Unused...                   */
+#define CSCONFIG_TCVDISAB       0x0010  /* Turns off the transceiver   */
+#define CSCONFIG_DFBYPASS       0x0020  /* Bypass disconnect function  */
+#define CSCONFIG_GLFORCE        0x0040  /* Good link force for 100mbps */
+#define CSCONFIG_CLKTRISTATE    0x0080  /* Tristate 25m clock          */
+#define CSCONFIG_RESV3          0x0700  /* Unused...                   */
+#define CSCONFIG_ENCODE         0x0800  /* 1=MLT-3, 0=binary           */
+#define CSCONFIG_RENABLE        0x1000  /* Repeater mode enable        */
+#define CSCONFIG_TCDISABLE      0x2000  /* Disable timeout counter     */
+#define CSCONFIG_RESV4          0x4000  /* Unused...                   */
+#define CSCONFIG_NDISABLE       0x8000  /* Disable NRZI                */
+
+/* Happy Meal descriptor rings and such.
+ * All descriptor rings must be aligned on a 2K boundary.
+ * All receive buffers must be 64 byte aligned.
+ * Always write the address first before setting the ownership
+ * bits to avoid races with the hardware scanning the ring.
+ */
+/* Descriptor word as the chip sees it; the __bitwise__ annotation lets
+ * sparse flag direct arithmetic on it.  NOTE(review): access appears to
+ * go through the read_desc32/write_*xd hooks in struct happy_meal --
+ * confirm the byte-order handling there.
+ */
+typedef u32 __bitwise__ hme32;
+
+/* One receive descriptor: flags/status word plus DMA buffer address. */
+struct happy_meal_rxd {
+       hme32 rx_flags;         /* RXFLAG_* bits below */
+       hme32 rx_addr;          /* DMA address of the receive buffer */
+};
+
+#define RXFLAG_OWN         0x80000000 /* 1 = hardware, 0 = software */
+#define RXFLAG_OVERFLOW    0x40000000 /* 1 = buffer overflow        */
+#define RXFLAG_SIZE        0x3fff0000 /* Size of the buffer         */
+#define RXFLAG_CSUM        0x0000ffff /* HW computed checksum       */
+
+/* One transmit descriptor: flags/size word plus DMA buffer address. */
+struct happy_meal_txd {
+       hme32 tx_flags;         /* TXFLAG_* bits below */
+       hme32 tx_addr;          /* DMA address of the transmit buffer */
+};
+
+#define TXFLAG_OWN         0x80000000 /* 1 = hardware, 0 = software */
+#define TXFLAG_SOP         0x40000000 /* 1 = start of packet        */
+#define TXFLAG_EOP         0x20000000 /* 1 = end of packet          */
+#define TXFLAG_CSENABLE    0x10000000 /* 1 = enable hw-checksums    */
+#define TXFLAG_CSLOCATION  0x0ff00000 /* Where to stick the csum    */
+#define TXFLAG_CSBUFBEGIN  0x000fc000 /* Where to begin checksum    */
+#define TXFLAG_SIZE        0x00003fff /* Size of the packet         */
+
+#define TX_RING_SIZE       32         /* 16..256 inclusive, multiple of 16 (checked below) */
+#define RX_RING_SIZE       32         /* see ERX_CFG_SIZE* for possible values */
+
+#if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0)
+#error TX_RING_SIZE holds illegal value
+#endif
+
+#define TX_RING_MAXSIZE    256
+#define RX_RING_MAXSIZE    256
+
+/* We use a 14 byte offset for checksum computation. */
+#if (RX_RING_SIZE == 32)
+#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE32|((14/2)<<16))
+#else
+#if (RX_RING_SIZE == 64)
+#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE64|((14/2)<<16))
+#else
+#if (RX_RING_SIZE == 128)
+#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE128|((14/2)<<16))
+#else
+#if (RX_RING_SIZE == 256)
+#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE256|((14/2)<<16))
+#else
+#error RX_RING_SIZE holds illegal value
+#endif
+#endif
+#endif
+#endif
+
+#define NEXT_RX(num)       (((num) + 1) & (RX_RING_SIZE - 1))
+#define NEXT_TX(num)       (((num) + 1) & (TX_RING_SIZE - 1))
+#define PREV_RX(num)       (((num) - 1) & (RX_RING_SIZE - 1))
+#define PREV_TX(num)       (((num) - 1) & (TX_RING_SIZE - 1))
+
+#define TX_BUFFS_AVAIL(hp)                                    \
+        (((hp)->tx_old <= (hp)->tx_new) ?                     \
+         (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new :  \
+                           (hp)->tx_old - (hp)->tx_new - 1)
+
+#define RX_OFFSET          2
+#define RX_BUF_ALLOC_SIZE  (1546 + RX_OFFSET + 64)
+
+#define RX_COPY_THRESHOLD  256
+
+/* All RX and TX descriptors live in one contiguous DMA-mapped block.
+ * The rings are dimensioned at *_MAXSIZE (256) even though only the
+ * first RX_RING_SIZE/TX_RING_SIZE entries are active.
+ */
+struct hmeal_init_block {
+       struct happy_meal_rxd happy_meal_rxd[RX_RING_MAXSIZE];
+       struct happy_meal_txd happy_meal_txd[TX_RING_MAXSIZE];
+};
+
+/* Byte offset of ring entry `elem` of member `mem` within the init
+ * block (offsetof-style null-pointer arithmetic).
+ */
+#define hblock_offset(mem, elem) \
+((__u32)((unsigned long)(&(((struct hmeal_init_block *)0)->mem[elem]))))
+
+/* Now software state stuff. */
+/* Which transceiver (PHY) the driver has selected for this device. */
+enum happy_transceiver {
+       external = 0,           /* external transceiver in use */
+       internal = 1,           /* on-board transceiver in use */
+       none     = 2,           /* no usable transceiver found */
+};
+
+/* Timer state engine. */
+enum happy_timer_state {
+       arbwait  = 0,  /* Waiting for auto negotiation to complete.          */
+       lupwait  = 1,  /* Auto-neg complete, awaiting link-up status.        */
+       ltrywait = 2,  /* Forcing try of all modes, from fastest to slowest. */
+       asleep   = 3,  /* Time inactive.                                     */
+};
+
+struct quattro;
+
+/* Happy happy, joy joy!  Per-interface driver state. */
+struct happy_meal {
+       void __iomem    *gregs;                 /* Happy meal global registers       */
+       struct hmeal_init_block  *happy_block;  /* RX and TX descriptors (CPU addr)  */
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+       /* NOTE(review): when both bus types are built in, descriptor
+        * accessors appear to be chosen per device at probe time --
+        * confirm against the SBUS/PCI probe paths. */
+       u32 (*read_desc32)(hme32 *);
+       void (*write_txd)(struct happy_meal_txd *, u32, u32);
+       void (*write_rxd)(struct happy_meal_rxd *, u32, u32);
+#endif
+
+       /* This is either a platform_device or a pci_dev. */
+       void                      *happy_dev;
+       struct device             *dma_dev;
+
+       spinlock_t                happy_lock;
+
+       struct sk_buff           *rx_skbs[RX_RING_SIZE];
+       struct sk_buff           *tx_skbs[TX_RING_SIZE];
+
+       /* Ring cursors, advanced modulo ring size via NEXT_RX()/NEXT_TX(). */
+       int rx_new, tx_new, rx_old, tx_old;
+
+       struct net_device_stats   net_stats;      /* Statistical counters              */
+
+#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
+       /* Register accessors, likewise selected per device. */
+       u32 (*read32)(void __iomem *);
+       void (*write32)(void __iomem *, u32);
+#endif
+
+       void __iomem    *etxregs;        /* External transmitter regs        */
+       void __iomem    *erxregs;        /* External receiver regs           */
+       void __iomem    *bigmacregs;     /* BIGMAC core regs                 */
+       void __iomem    *tcvregs;        /* MIF transceiver regs             */
+
+       dma_addr_t                hblock_dvma;    /* DVMA visible address happy block  */
+       unsigned int              happy_flags;    /* Driver state flags (HFLAG_*)      */
+       enum happy_transceiver    tcvr_type;      /* Kind of transceiver in use        */
+       unsigned int              happy_bursts;   /* Get your mind out of the gutter   */
+       unsigned int              paddr;          /* PHY address for transceiver       */
+       unsigned short            hm_revision;    /* Happy meal revision               */
+       unsigned short            sw_bmcr;        /* SW copy of BMCR                   */
+       unsigned short            sw_bmsr;        /* SW copy of BMSR                   */
+       unsigned short            sw_physid1;     /* SW copy of PHYSID1                */
+       unsigned short            sw_physid2;     /* SW copy of PHYSID2                */
+       unsigned short            sw_advertise;   /* SW copy of ADVERTISE              */
+       unsigned short            sw_lpa;         /* SW copy of LPA                    */
+       unsigned short            sw_expansion;   /* SW copy of EXPANSION              */
+       unsigned short            sw_csconfig;    /* SW copy of CSCONFIG               */
+       unsigned int              auto_speed;     /* Auto-nego link speed              */
+        unsigned int              forced_speed;   /* Force mode link speed             */
+       unsigned int              poll_data;      /* MIF poll data                     */
+       unsigned int              poll_flag;      /* MIF poll flag                     */
+       unsigned int              linkcheck;      /* Have we checked the link yet?     */
+       unsigned int              lnkup;          /* Is the link up as far as we know? */
+       unsigned int              lnkdown;        /* Trying to force the link down?    */
+       unsigned int              lnkcnt;         /* Counter for link-up attempts.     */
+       struct timer_list         happy_timer;    /* To watch the link when coming up. */
+       enum happy_timer_state    timer_state;    /* State of the auto-neg timer.      */
+       unsigned int              timer_ticks;    /* Number of clicks at each state.   */
+
+       struct net_device        *dev;          /* Backpointer                       */
+       struct quattro           *qfe_parent;   /* For Quattro cards                 */
+       int                       qfe_ent;      /* Which instance on quattro         */
+};
+
+/* Here are the happy flags. */
+#define HFLAG_POLL                0x00000001      /* We are doing MIF polling          */
+#define HFLAG_FENABLE             0x00000002      /* The MII frame is enabled          */
+#define HFLAG_LANCE               0x00000004      /* We are using lance-mode           */
+#define HFLAG_RXENABLE            0x00000008      /* Receiver is enabled               */
+#define HFLAG_AUTO                0x00000010      /* Using auto-negotiation, 0 = force */
+#define HFLAG_FULL                0x00000020      /* Full duplex enable                */
+#define HFLAG_MACFULL             0x00000040      /* Using full duplex in the MAC      */
+#define HFLAG_POLLENABLE          0x00000080      /* Actually try MIF polling          */
+#define HFLAG_RXCV                0x00000100      /* XXX RXCV ENABLE                   */
+#define HFLAG_INIT                0x00000200      /* Init called at least once         */
+#define HFLAG_LINKUP              0x00000400      /* 1 = Link is up                    */
+#define HFLAG_PCI                 0x00000800      /* PCI based Happy Meal              */
+#define HFLAG_QUATTRO            0x00001000      /* On QFE/Quattro card               */
+
+#define HFLAG_20_21  (HFLAG_POLLENABLE | HFLAG_FENABLE)
+#define HFLAG_NOT_A0 (HFLAG_POLLENABLE | HFLAG_FENABLE | HFLAG_LANCE | HFLAG_RXCV)
+
+/* Support for QFE/Quattro cards: one record groups the (up to four)
+ * net_devices that share a single multi-port card.
+ */
+struct quattro {
+       struct net_device       *happy_meals[4];
+
+       /* This is either a sbus_dev or a pci_dev. */
+       void                    *quattro_dev;
+
+       struct quattro          *next;  /* next card in the global list */
+
+       /* PROM ranges, if any. */
+#ifdef CONFIG_SBUS
+       struct linux_prom_ranges  ranges[8];
+#endif
+       int                       nranges;
+};
+
+/* We use this to acquire receive skb's that we can DMA directly into. */
+
+/* Bytes needed to round `addr` up to the next 64-byte boundary. */
+#define ALIGNED_RX_SKB_ADDR(addr) \
+        ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
+/* Allocate an skb with 64 bytes of slack and reserve up to the next
+ * 64-byte boundary, since receive buffers must be 64-byte aligned
+ * (see the descriptor-ring comment above).
+ */
+#define happy_meal_alloc_skb(__length, __gfp_flags) \
+({     struct sk_buff *__skb; \
+       __skb = alloc_skb((__length) + 64, (__gfp_flags)); \
+       if(__skb) { \
+               int __offset = (int) ALIGNED_RX_SKB_ADDR(__skb->data); \
+               if(__offset) \
+                       skb_reserve(__skb, __offset); \
+       } \
+       __skb; \
+})
+
+#endif /* !(_SUNHME_H) */
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
new file mode 100644 (file)
index 0000000..209c7f8
--- /dev/null
@@ -0,0 +1,1007 @@
+/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
+ *          Once again I am out to prove that every ethernet
+ *          controller out there can be most efficiently programmed
+ *          if you make it look like a LANCE.
+ *
+ * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/byteorder.h>
+#include <asm/idprom.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/auxio.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+
+#include "sunqe.h"
+
+#define DRV_NAME       "sunqe"
+#define DRV_VERSION    "4.1"
+#define DRV_RELDATE    "August 27, 2008"
+#define DRV_AUTHOR     "David S. Miller (davem@davemloft.net)"
+
+static char version[] =
+       DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
+MODULE_LICENSE("GPL");
+
+static struct sunqec *root_qec_dev;
+
+static void qe_set_multicast(struct net_device *dev);
+
+#define QEC_RESET_TRIES 200
+
+/* Hit the QEC global reset bit and poll until the hardware clears it.
+ * Returns 0 on success, -1 if the controller is still in reset after
+ * QEC_RESET_TRIES polls (hardware is wedged).
+ */
+static inline int qec_global_reset(void __iomem *gregs)
+{
+       int tries = QEC_RESET_TRIES;
+
+       sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
+       while (--tries) {
+               u32 tmp = sbus_readl(gregs + GLOB_CTRL);
+               if (tmp & GLOB_CTRL_RESET) {
+                       udelay(20);
+                       continue;
+               }
+               break;
+       }
+       if (tries)
+               return 0;
+       printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
+       return -1;
+}
+
+#define MACE_RESET_RETRIES 200
+#define QE_RESET_RETRIES   200
+
+/* Quiesce one QE channel: reset the AMD MACE first, then the QEC
+ * channel, polling each self-clearing reset bit.  Returns 0 on
+ * success, -1 if either reset never completes.
+ */
+static inline int qe_stop(struct sunqe *qep)
+{
+       void __iomem *cregs = qep->qcregs;
+       void __iomem *mregs = qep->mregs;
+       int tries;
+
+       /* Reset the MACE, then the QEC channel. */
+       sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
+       tries = MACE_RESET_RETRIES;
+       while (--tries) {
+               u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
+               if (tmp & MREGS_BCONFIG_RESET) {
+                       udelay(20);
+                       continue;
+               }
+               break;
+       }
+       if (!tries) {
+               printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
+               return -1;
+       }
+
+       sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
+       tries = QE_RESET_RETRIES;
+       while (--tries) {
+               u32 tmp = sbus_readl(cregs + CREG_CTRL);
+               if (tmp & CREG_CTRL_RESET) {
+                       udelay(20);
+                       continue;
+               }
+               break;
+       }
+       if (!tries) {
+               printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
+               return -1;
+       }
+       return 0;
+}
+
+/* (Re)initialize the descriptor rings in the init block: rewind all
+ * ring indexes, leave TX descriptors zeroed (not chip-owned) and hand
+ * every RX descriptor to the hardware with a full-sized buffer.
+ */
+static void qe_init_rings(struct sunqe *qep)
+{
+       struct qe_init_block *qb = qep->qe_block;
+       struct sunqe_buffers *qbufs = qep->buffers;
+       __u32 qbufs_dvma = qep->buffers_dvma;
+       int i;
+
+       qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
+       memset(qb, 0, sizeof(struct qe_init_block));
+       memset(qbufs, 0, sizeof(struct sunqe_buffers));
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
+               qb->qe_rxd[i].rx_flags =
+                       (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
+       }
+}
+
+/* Bring one QE channel fully up: reset it, program the QEC channel
+ * registers (ring bases, IRQ masks, FIFO carving), configure the AMD
+ * MACE (media, station address, clear the logical address filter),
+ * then rearm multicast state which also re-enables RX/TX.  Called
+ * from qe_open() and from error recovery with from_irq set.
+ * Returns 0 on success or -EAGAIN if the channel will not reset.
+ */
+static int qe_init(struct sunqe *qep, int from_irq)
+{
+       struct sunqec *qecp = qep->parent;
+       void __iomem *cregs = qep->qcregs;
+       void __iomem *mregs = qep->mregs;
+       void __iomem *gregs = qecp->gregs;
+       unsigned char *e = &qep->dev->dev_addr[0];
+       u32 tmp;
+       int i;
+
+       /* Shut it up. */
+       if (qe_stop(qep))
+               return -EAGAIN;
+
+       /* Setup initial rx/tx init block pointers. */
+       sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
+       sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
+
+       /* Enable/mask the various irq's.  TX irqs stay masked until the
+        * ring fills (lazy reclaim, see qe_start_xmit).
+        */
+       sbus_writel(0, cregs + CREG_RIMASK);
+       sbus_writel(1, cregs + CREG_TIMASK);
+
+       sbus_writel(0, cregs + CREG_QMASK);
+       sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);
+
+       /* Setup the FIFO pointers into QEC local memory.  Each channel
+        * owns an equal slice, see qec_init_once().
+        */
+       tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
+       sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
+       sbus_writel(tmp, cregs + CREG_RXWBUFPTR);
+
+       tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
+               sbus_readl(gregs + GLOB_RSIZE);
+       sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
+       sbus_writel(tmp, cregs + CREG_TXWBUFPTR);
+
+       /* Clear the channel collision counter. */
+       sbus_writel(0, cregs + CREG_CCNT);
+
+       /* For 10baseT, inter frame space nor throttle seems to be necessary. */
+       sbus_writel(0, cregs + CREG_PIPG);
+
+       /* Now dork with the AMD MACE. */
+       sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
+       sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
+       sbus_writeb(0, mregs + MREGS_RXFCNTL);
+
+       /* The QEC dma's the rx'd packets from local memory out to main memory,
+        * and therefore it interrupts when the packet reception is "complete".
+        * So don't listen for the MACE talking about it.
+        */
+       sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
+       sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
+       sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
+                    MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
+                   mregs + MREGS_FCONFIG);
+
+       /* Only usable interface on QuadEther is twisted pair. */
+       sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);
+
+       /* Tell MACE we are changing the ether address. */
+       sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
+                   mregs + MREGS_IACONFIG);
+       while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
+               barrier();
+       /* The MACE takes the six address bytes via repeated writes to
+        * the same register.
+        */
+       sbus_writeb(e[0], mregs + MREGS_ETHADDR);
+       sbus_writeb(e[1], mregs + MREGS_ETHADDR);
+       sbus_writeb(e[2], mregs + MREGS_ETHADDR);
+       sbus_writeb(e[3], mregs + MREGS_ETHADDR);
+       sbus_writeb(e[4], mregs + MREGS_ETHADDR);
+       sbus_writeb(e[5], mregs + MREGS_ETHADDR);
+
+       /* Clear out the address filter. */
+       sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
+                   mregs + MREGS_IACONFIG);
+       while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
+               barrier();
+       for (i = 0; i < 8; i++)
+               sbus_writeb(0, mregs + MREGS_FILTER);
+
+       /* Address changes are now complete. */
+       sbus_writeb(0, mregs + MREGS_IACONFIG);
+
+       qe_init_rings(qep);
+
+       /* Wait a little bit for the link to come up... */
+       mdelay(5);
+       if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
+               int tries = 50;
+
+               while (--tries) {
+                       u8 tmp;
+
+                       mdelay(5);
+                       barrier();
+                       tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
+                       if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
+                               break;
+               }
+               if (tries == 0)
+                       printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
+       }
+
+       /* Missed packet counter is cleared on a read. */
+       sbus_readb(mregs + MREGS_MPCNT);
+
+       /* Reload multicast information, this will enable the receiver
+        * and transmitter.
+        */
+       qe_set_multicast(qep->dev);
+
+       /* QEC should now start to show interrupts. */
+       return 0;
+}
+
+/* Grrr, certain error conditions completely lock up the AMD MACE,
+ * so when we get these we _must_ reset the chip.
+ */
+/* Decode the per-channel error status, bump the matching netdev
+ * statistics, and fully re-init the channel for those conditions
+ * that wedge the MACE.  Returns nonzero when the channel was reset
+ * (caller must then skip normal RX/TX servicing for this pass).
+ */
+static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
+{
+       struct net_device *dev = qep->dev;
+       int mace_hwbug_workaround = 0;
+
+       if (qe_status & CREG_STAT_EDEFER) {
+               printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
+               dev->stats.tx_errors++;
+       }
+
+       if (qe_status & CREG_STAT_CLOSS) {
+               printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
+               dev->stats.tx_errors++;
+               dev->stats.tx_carrier_errors++;
+       }
+
+       if (qe_status & CREG_STAT_ERETRIES) {
+               printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
+               dev->stats.tx_errors++;
+               mace_hwbug_workaround = 1;
+       }
+
+       if (qe_status & CREG_STAT_LCOLL) {
+               printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
+               dev->stats.tx_errors++;
+               dev->stats.collisions++;
+               mace_hwbug_workaround = 1;
+       }
+
+       if (qe_status & CREG_STAT_FUFLOW) {
+               printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
+               dev->stats.tx_errors++;
+               mace_hwbug_workaround = 1;
+       }
+
+       if (qe_status & CREG_STAT_JERROR) {
+               printk(KERN_ERR "%s: Jabber error.\n", dev->name);
+       }
+
+       if (qe_status & CREG_STAT_BERROR) {
+               printk(KERN_ERR "%s: Babble error.\n", dev->name);
+       }
+
+       /* The *OFLOW bits signal that an internal event counter
+        * wrapped, i.e. 256 more events occurred.
+        */
+       if (qe_status & CREG_STAT_CCOFLOW) {
+               dev->stats.tx_errors += 256;
+               dev->stats.collisions += 256;
+       }
+
+       if (qe_status & CREG_STAT_TXDERROR) {
+               printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
+               dev->stats.tx_errors++;
+               dev->stats.tx_aborted_errors++;
+               mace_hwbug_workaround = 1;
+       }
+
+       if (qe_status & CREG_STAT_TXLERR) {
+               printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
+               dev->stats.tx_errors++;
+               mace_hwbug_workaround = 1;
+       }
+
+       if (qe_status & CREG_STAT_TXPERR) {
+               printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
+               dev->stats.tx_errors++;
+               dev->stats.tx_aborted_errors++;
+               mace_hwbug_workaround = 1;
+       }
+
+       if (qe_status & CREG_STAT_TXSERR) {
+               printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
+               dev->stats.tx_errors++;
+               dev->stats.tx_aborted_errors++;
+               mace_hwbug_workaround = 1;
+       }
+
+       if (qe_status & CREG_STAT_RCCOFLOW) {
+               dev->stats.rx_errors += 256;
+               dev->stats.collisions += 256;
+       }
+
+       if (qe_status & CREG_STAT_RUOFLOW) {
+               dev->stats.rx_errors += 256;
+               dev->stats.rx_over_errors += 256;
+       }
+
+       if (qe_status & CREG_STAT_MCOFLOW) {
+               dev->stats.rx_errors += 256;
+               dev->stats.rx_missed_errors += 256;
+       }
+
+       if (qe_status & CREG_STAT_RXFOFLOW) {
+               printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
+               dev->stats.rx_errors++;
+               dev->stats.rx_over_errors++;
+       }
+
+       if (qe_status & CREG_STAT_RLCOLL) {
+               printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
+               dev->stats.rx_errors++;
+               dev->stats.collisions++;
+       }
+
+       if (qe_status & CREG_STAT_FCOFLOW) {
+               dev->stats.rx_errors += 256;
+               dev->stats.rx_frame_errors += 256;
+       }
+
+       if (qe_status & CREG_STAT_CECOFLOW) {
+               dev->stats.rx_errors += 256;
+               dev->stats.rx_crc_errors += 256;
+       }
+
+       if (qe_status & CREG_STAT_RXDROP) {
+               printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
+               dev->stats.rx_errors++;
+               dev->stats.rx_dropped++;
+               dev->stats.rx_missed_errors++;
+       }
+
+       if (qe_status & CREG_STAT_RXSMALL) {
+               printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
+               dev->stats.rx_errors++;
+               dev->stats.rx_length_errors++;
+       }
+
+       if (qe_status & CREG_STAT_RXLERR) {
+               printk(KERN_ERR "%s: Receive late error.\n", dev->name);
+               dev->stats.rx_errors++;
+               mace_hwbug_workaround = 1;
+       }
+
+       if (qe_status & CREG_STAT_RXPERR) {
+               printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
+               dev->stats.rx_errors++;
+               dev->stats.rx_missed_errors++;
+               mace_hwbug_workaround = 1;
+       }
+
+       if (qe_status & CREG_STAT_RXSERR) {
+               printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
+               dev->stats.rx_errors++;
+               dev->stats.rx_missed_errors++;
+               mace_hwbug_workaround = 1;
+       }
+
+       /* Conditions above that wedge the MACE require a full
+        * channel re-init to recover.
+        */
+       if (mace_hwbug_workaround)
+               qe_init(qep, 1);
+       return mace_hwbug_workaround;
+}
+
+/* Per-QE receive interrupt service routine.  Just like on the happy meal
+ * we receive directly into skb's with a small packet copy water mark.
+ */
+/* Drain received frames from the RX ring: each completed descriptor
+ * is copied into a freshly-allocated skb and passed to the stack,
+ * then its fixed DMA buffer is handed back to the chip.  Runs from
+ * the QEC interrupt.
+ */
+static void qe_rx(struct sunqe *qep)
+{
+       struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
+       struct net_device *dev = qep->dev;
+       struct qe_rxd *this;
+       struct sunqe_buffers *qbufs = qep->buffers;
+       __u32 qbufs_dvma = qep->buffers_dvma;
+       int elem = qep->rx_new, drops = 0;
+       u32 flags;
+
+       this = &rxbase[elem];
+       while (!((flags = this->rx_flags) & RXD_OWN)) {
+               struct sk_buff *skb;
+               unsigned char *this_qbuf =
+                       &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
+               __u32 this_qbuf_dvma = qbufs_dvma +
+                       qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
+               /* Refill slot RX_RING_SIZE ahead in the larger
+                * hardware ring (masked by RX_RING_MAXSIZE).
+                */
+               struct qe_rxd *end_rxd =
+                       &rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
+               int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */
+
+               /* Check for errors. */
+               if (len < ETH_ZLEN) {
+                       dev->stats.rx_errors++;
+                       dev->stats.rx_length_errors++;
+                       dev->stats.rx_dropped++;
+               } else {
+                       skb = dev_alloc_skb(len + 2);
+                       if (skb == NULL) {
+                               drops++;
+                               dev->stats.rx_dropped++;
+                       } else {
+                               /* Reserve 2 bytes so the IP header is
+                                * word-aligned after the 14-byte
+                                * ethernet header.
+                                */
+                               skb_reserve(skb, 2);
+                               skb_put(skb, len);
+                               skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
+                                                len);
+                               skb->protocol = eth_type_trans(skb, qep->dev);
+                               netif_rx(skb);
+                               dev->stats.rx_packets++;
+                               dev->stats.rx_bytes += len;
+                       }
+               }
+               end_rxd->rx_addr = this_qbuf_dvma;
+               end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
+
+               elem = NEXT_RX(elem);
+               this = &rxbase[elem];
+       }
+       qep->rx_new = elem;
+       if (drops)
+               printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
+}
+
+static void qe_tx_reclaim(struct sunqe *qep);
+
+/* Interrupts for all QE's get filtered out via the QEC master controller,
+ * so we just run through each qe and check to see who is signaling
+ * and thus needs to be serviced.
+ */
+/* QEC master interrupt handler.  The global status register packs a
+ * 4-bit status nibble per channel; walk all four, handling fatal
+ * errors, received frames, and TX-ring-space wakeups for each
+ * signalling channel.
+ */
+static irqreturn_t qec_interrupt(int irq, void *dev_id)
+{
+       struct sunqec *qecp = dev_id;
+       u32 qec_status;
+       int channel = 0;
+
+       /* Latch the status now. */
+       qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
+       while (channel < 4) {
+               if (qec_status & 0xf) {
+                       struct sunqe *qep = qecp->qes[channel];
+                       u32 qe_status;
+
+                       qe_status = sbus_readl(qep->qcregs + CREG_STAT);
+                       if (qe_status & CREG_STAT_ERRORS) {
+                               /* Channel was reset; skip RX/TX work. */
+                               if (qe_is_bolixed(qep, qe_status))
+                                       goto next;
+                       }
+                       if (qe_status & CREG_STAT_RXIRQ)
+                               qe_rx(qep);
+                       if (netif_queue_stopped(qep->dev) &&
+                           (qe_status & CREG_STAT_TXIRQ)) {
+                               spin_lock(&qep->lock);
+                               qe_tx_reclaim(qep);
+                               if (TX_BUFFS_AVAIL(qep) > 0) {
+                                       /* Wake net queue and return to
+                                        * lazy tx reclaim.
+                                        */
+                                       netif_wake_queue(qep->dev);
+                                       sbus_writel(1, qep->qcregs + CREG_TIMASK);
+                               }
+                               spin_unlock(&qep->lock);
+                       }
+       next:
+                       ;
+               }
+               qec_status >>= 4;
+               channel++;
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* ndo_open: record the desired MACE config (TX/RX/burst enabled) and
+ * program the hardware via qe_init().
+ */
+static int qe_open(struct net_device *dev)
+{
+       struct sunqe *qep = netdev_priv(dev);
+
+       qep->mconfig = (MREGS_MCONFIG_TXENAB |
+                       MREGS_MCONFIG_RXENAB |
+                       MREGS_MCONFIG_MBAENAB);
+       return qe_init(qep, 0);
+}
+
+/* ndo_stop: just quiesce the channel; register mappings and DMA
+ * memory stay allocated until device removal.
+ */
+static int qe_close(struct net_device *dev)
+{
+       struct sunqe *qep = netdev_priv(dev);
+
+       qe_stop(qep);
+       return 0;
+}
+
+/* Reclaim TX'd frames from the ring.  This must always run under
+ * the IRQ protected qep->lock.
+ */
+/* Walk the TX ring from tx_old towards tx_new, advancing past every
+ * descriptor the chip has given back (TXD_OWN clear).  Must be called
+ * with the IRQ-protected qep->lock held.
+ */
+static void qe_tx_reclaim(struct sunqe *qep)
+{
+       struct qe_txd *txd = &qep->qe_block->qe_txd[0];
+       int idx;
+
+       for (idx = qep->tx_old; idx != qep->tx_new; idx = NEXT_TX(idx)) {
+               if (txd[idx].tx_flags & TXD_OWN)
+                       break;
+       }
+       qep->tx_old = idx;
+}
+
+/* ndo_tx_timeout: first attempt a TX reclaim -- if that frees ring
+ * space the stall was transient; otherwise reset the channel.  The
+ * queue is restarted in either case.
+ */
+static void qe_tx_timeout(struct net_device *dev)
+{
+       struct sunqe *qep = netdev_priv(dev);
+       int tx_full;
+
+       spin_lock_irq(&qep->lock);
+
+       /* Try to reclaim, if that frees up some tx
+        * entries, we're fine.
+        */
+       qe_tx_reclaim(qep);
+       tx_full = TX_BUFFS_AVAIL(qep) <= 0;
+
+       spin_unlock_irq(&qep->lock);
+
+       if (! tx_full)
+               goto out;
+
+       printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+       qe_init(qep, 1);
+
+out:
+       netif_wake_queue(dev);
+}
+
+/* Get a packet queued to go onto the wire. */
+/* ndo_start_xmit: copy the frame into this slot's fixed DMA buffer,
+ * hand the descriptor to the chip and kick the channel.  The skb is
+ * freed immediately since its data was copied.  TX interrupts are
+ * only unmasked once the ring fills (lazy reclaim scheme).
+ */
+static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct sunqe *qep = netdev_priv(dev);
+       struct sunqe_buffers *qbufs = qep->buffers;
+       __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
+       unsigned char *txbuf;
+       int len, entry;
+
+       spin_lock_irq(&qep->lock);
+
+       qe_tx_reclaim(qep);
+
+       len = skb->len;
+       entry = qep->tx_new;
+
+       txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
+       txbuf_dvma = qbufs_dvma +
+               qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));
+
+       /* Avoid a race... */
+       qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
+
+       skb_copy_from_linear_data(skb, txbuf, len);
+
+       qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
+       qep->qe_block->qe_txd[entry].tx_flags =
+               (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
+       qep->tx_new = NEXT_TX(entry);
+
+       /* Get it going. */
+       sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
+
+       dev->stats.tx_packets++;
+       dev->stats.tx_bytes += len;
+
+       if (TX_BUFFS_AVAIL(qep) <= 0) {
+               /* Halt the net queue and enable tx interrupts.
+                * When the tx queue empties the tx irq handler
+                * will wake up the queue and return us back to
+                * the lazy tx reclaim scheme.
+                */
+               netif_stop_queue(dev);
+               sbus_writel(0, qep->qcregs + CREG_TIMASK);
+       }
+       spin_unlock_irq(&qep->lock);
+
+       dev_kfree_skb(skb);
+
+       return NETDEV_TX_OK;
+}
+
+/* ndo_set_multicast_list: program the MACE's 64-bit logical address
+ * filter (all-ones for ALLMULTI or >64 groups, a CRC-based hash
+ * otherwise), or enable promiscuous mode, then re-enable RX/TX.
+ */
+static void qe_set_multicast(struct net_device *dev)
+{
+       struct sunqe *qep = netdev_priv(dev);
+       struct netdev_hw_addr *ha;
+       u8 new_mconfig = qep->mconfig;
+       int i;
+       u32 crc;
+
+       /* Lock out others. */
+       netif_stop_queue(dev);
+
+       if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
+               sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
+                           qep->mregs + MREGS_IACONFIG);
+               while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
+                       barrier();
+               for (i = 0; i < 8; i++)
+                       sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
+               sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
+       } else if (dev->flags & IFF_PROMISC) {
+               new_mconfig |= MREGS_MCONFIG_PROMISC;
+       } else {
+               u16 hash_table[4];
+               u8 *hbytes = (unsigned char *) &hash_table[0];
+
+               memset(hash_table, 0, sizeof(hash_table));
+               netdev_for_each_mc_addr(ha, dev) {
+                       /* Top 6 bits of the little-endian CRC select
+                        * one of the 64 filter bits.
+                        */
+                       crc = ether_crc_le(6, ha->addr);
+                       crc >>= 26;
+                       hash_table[crc >> 4] |= 1 << (crc & 0xf);
+               }
+               /* Program the qe with the new filter value. */
+               sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
+                           qep->mregs + MREGS_IACONFIG);
+               while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
+                       barrier();
+               for (i = 0; i < 8; i++) {
+                       u8 tmp = *hbytes++;
+                       sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
+               }
+               sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
+       }
+
+       /* Any change of the logical address filter, the physical address,
+        * or enabling/disabling promiscuous mode causes the MACE to disable
+        * the receiver.  So we must re-enable them here or else the MACE
+        * refuses to listen to anything on the network.  Sheesh, took
+        * me a day or two to find this bug.
+        */
+       qep->mconfig = new_mconfig;
+       sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);
+
+       /* Let us get going again. */
+       netif_wake_queue(dev);
+}
+
+/* Ethtool support... */
+/* ethtool -i support: report driver name, version and SBUS bus info.
+ * Use the DRV_NAME/DRV_VERSION macros so the reported strings cannot
+ * drift from MODULE_VERSION (the old hard-coded "3.0" was stale
+ * versus DRV_VERSION "4.1").
+ */
+static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+       const struct linux_prom_registers *regs;
+       struct sunqe *qep = netdev_priv(dev);
+       struct platform_device *op;
+
+       strcpy(info->driver, DRV_NAME);
+       strcpy(info->version, DRV_VERSION);
+
+       op = qep->op;
+       regs = of_get_property(op->dev.of_node, "reg", NULL);
+       if (regs)
+               sprintf(info->bus_info, "SBUS:%d", regs->which_io);
+}
+
+/* ethtool get_link: sample the MACE PHY config register under the
+ * channel lock and report the link status bit.
+ */
+static u32 qe_get_link(struct net_device *dev)
+{
+       struct sunqe *qep = netdev_priv(dev);
+       void __iomem *mregs = qep->mregs;
+       u8 phyconfig;
+
+       spin_lock_irq(&qep->lock);
+       phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
+       spin_unlock_irq(&qep->lock);
+
+       return phyconfig & MREGS_PHYCONFIG_LSTAT;
+}
+
+static const struct ethtool_ops qe_ethtool_ops = {
+       .get_drvinfo            = qe_get_drvinfo,
+       .get_link               = qe_get_link,
+};
+
+/* This is only called once at boot time for each card probed. */
+/* One-time global QEC setup per card: select the widest supported
+ * DVMA burst size, program the (BigMAC-only) packet size register to
+ * its safe default, and carve the QEC local memory evenly into
+ * per-channel RX/TX FIFOs.
+ */
+static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
+{
+       u8 bsizes = qecp->qec_bursts;
+
+       if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
+               sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
+       } else if (bsizes & DMA_BURST32) {
+               sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
+       } else {
+               sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
+       }
+
+       /* Packetsize only used in 100baseT BigMAC configurations,
+        * set it to zero just to be on the safe side.
+        */
+       sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);
+
+       /* Set the local memsize register, divided up to one piece per QE channel. */
+       sbus_writel((resource_size(&op->resource[1]) >> 2),
+                   qecp->gregs + GLOB_MSIZE);
+
+       /* Divide up the local QEC memory amongst the 4 QE receiver and
+        * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
+        */
+       sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
+                   qecp->gregs + GLOB_TSIZE);
+       sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
+                   qecp->gregs + GLOB_RSIZE);
+}
+
+/* Derive the QEC's DVMA burst-size mask from the PROM properties of
+ * this node and its parent; the QEC does the actual DMA for all four
+ * channels.  Falls back to "everything below 32-byte bursts" when the
+ * PROM info is absent or does not advertise both 16 and 32 byte
+ * bursts.
+ */
+static u8 __devinit qec_get_burst(struct device_node *dp)
+{
+       u8 child_bursts, parent_bursts;
+
+       child_bursts = of_getintprop_default(dp, "burst-sizes", 0xff) & 0xff;
+       parent_bursts = of_getintprop_default(dp->parent, "burst-sizes", 0xff);
+
+       if (parent_bursts != 0xff)
+               child_bursts &= parent_bursts;
+
+       if (child_bursts == 0xff ||
+           (child_bursts & DMA_BURST16) == 0 ||
+           (child_bursts & DMA_BURST32) == 0)
+               child_bursts = DMA_BURST32 - 1;
+
+       return child_bursts;
+}
+
+/* Find (or create on first use) the sunqec state for the QEC parent
+ * of this QE channel device.  On first call per card this maps the
+ * global registers, verifies the chip is in MACE mode, resets it,
+ * registers the shared IRQ, and links the card into the driver-global
+ * root_qec_dev list.  Returns NULL on any failure.
+ */
+static struct sunqec * __devinit get_qec(struct platform_device *child)
+{
+       struct platform_device *op = to_platform_device(child->dev.parent);
+       struct sunqec *qecp;
+
+       qecp = dev_get_drvdata(&op->dev);
+       if (!qecp) {
+               qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
+               if (qecp) {
+                       u32 ctrl;
+
+                       qecp->op = op;
+                       qecp->gregs = of_ioremap(&op->resource[0], 0,
+                                                GLOB_REG_SIZE,
+                                                "QEC Global Registers");
+                       if (!qecp->gregs)
+                               goto fail;
+
+                       /* Make sure the QEC is in MACE mode. */
+                       ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
+                       ctrl &= 0xf0000000;
+                       if (ctrl != GLOB_CTRL_MMODE) {
+                               printk(KERN_ERR "qec: Not in MACE mode!\n");
+                               goto fail;
+                       }
+
+                       if (qec_global_reset(qecp->gregs))
+                               goto fail;
+
+                       qecp->qec_bursts = qec_get_burst(op->dev.of_node);
+
+                       qec_init_once(qecp, op);
+
+                       if (request_irq(op->archdata.irqs[0], qec_interrupt,
+                                       IRQF_SHARED, "qec", (void *) qecp)) {
+                               printk(KERN_ERR "qec: Can't register irq.\n");
+                               goto fail;
+                       }
+
+                       dev_set_drvdata(&op->dev, qecp);
+
+                       qecp->next_module = root_qec_dev;
+                       root_qec_dev = qecp;
+               }
+       }
+
+       return qecp;
+
+fail:
+       /* Only reached after a successful kzalloc, so qecp is valid. */
+       if (qecp->gregs)
+               of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
+       kfree(qecp);
+       return NULL;
+}
+
+static const struct net_device_ops qec_ops = {
+       .ndo_open               = qe_open,
+       .ndo_stop               = qe_close,
+       .ndo_start_xmit         = qe_start_xmit,
+       .ndo_set_multicast_list = qe_set_multicast,
+       .ndo_tx_timeout         = qe_tx_timeout,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
+/* Probe one QE channel: allocate the netdev, attach it to its parent
+ * QEC (creating the shared QEC state on first use), map channel and
+ * MACE registers, allocate the coherent DMA init block and packet
+ * buffers, then register the net device.
+ */
+static int __devinit qec_ether_init(struct platform_device *op)
+{
+       static unsigned version_printed;
+       struct net_device *dev;
+       struct sunqec *qecp;
+       struct sunqe *qe;
+       int i, res;
+
+       if (version_printed++ == 0)
+               printk(KERN_INFO "%s", version);
+
+       dev = alloc_etherdev(sizeof(struct sunqe));
+       if (!dev)
+               return -ENOMEM;
+
+       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+
+       qe = netdev_priv(dev);
+
+       res = -ENODEV;
+
+       i = of_getintprop_default(op->dev.of_node, "channel#", -1);
+       if (i == -1)
+               goto fail;
+       qe->channel = i;
+       spin_lock_init(&qe->lock);
+
+       qecp = get_qec(op);
+       if (!qecp)
+               goto fail;
+
+       qecp->qes[qe->channel] = qe;
+       qe->dev = dev;
+       qe->parent = qecp;
+       qe->op = op;
+
+       res = -ENOMEM;
+       qe->qcregs = of_ioremap(&op->resource[0], 0,
+                               CREG_REG_SIZE, "QEC Channel Registers");
+       if (!qe->qcregs) {
+               printk(KERN_ERR "qe: Cannot map channel registers.\n");
+               goto fail;
+       }
+
+       qe->mregs = of_ioremap(&op->resource[1], 0,
+                              MREGS_REG_SIZE, "QE MACE Registers");
+       if (!qe->mregs) {
+               printk(KERN_ERR "qe: Cannot map MACE registers.\n");
+               goto fail;
+       }
+
+       qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
+                                         &qe->qblock_dvma, GFP_ATOMIC);
+       qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
+                                        &qe->buffers_dvma, GFP_ATOMIC);
+       if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
+           qe->buffers == NULL || qe->buffers_dvma == 0)
+               goto fail;
+
+       /* Stop this QE. */
+       qe_stop(qe);
+
+       SET_NETDEV_DEV(dev, &op->dev);
+
+       dev->watchdog_timeo = 5*HZ;
+       dev->irq = op->archdata.irqs[0];
+       dev->dma = 0;
+       dev->ethtool_ops = &qe_ethtool_ops;
+       dev->netdev_ops = &qec_ops;
+
+       res = register_netdev(dev);
+       if (res)
+               goto fail;
+
+       /* Note: this is the QE child device's drvdata; the parent QEC
+        * device's drvdata holds the struct sunqec (see get_qec()).
+        */
+       dev_set_drvdata(&op->dev, qe);
+
+       printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel);
+       for (i = 0; i < 6; i++)
+               printk ("%2.2x%c",
+                       dev->dev_addr[i],
+                       i == 5 ? ' ': ':');
+       printk("\n");
+
+
+       return 0;
+
+fail:
+       /* NOTE(review): qecp->qes[qe->channel] is left pointing at this
+        * soon-to-be-freed qe on failure -- presumably harmless because
+        * the stopped channel never raises status bits; verify.
+        */
+       if (qe->qcregs)
+               of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
+       if (qe->mregs)
+               of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
+       if (qe->qe_block)
+               dma_free_coherent(&op->dev, PAGE_SIZE,
+                                 qe->qe_block, qe->qblock_dvma);
+       if (qe->buffers)
+               dma_free_coherent(&op->dev,
+                                 sizeof(struct sunqe_buffers),
+                                 qe->buffers,
+                                 qe->buffers_dvma);
+
+       free_netdev(dev);
+
+       return res;
+}
+
+/* Platform bus probe hook -- invoked once per QE channel node. */
+static int __devinit qec_sbus_probe(struct platform_device *op)
+{
+       return qec_ether_init(op);
+}
+
+/* Undo qec_ether_init() for one channel.  The shared per-card QEC
+ * state is torn down at module exit (qec_exit()), not here.
+ */
+static int __devexit qec_sbus_remove(struct platform_device *op)
+{
+       struct sunqe *qp = dev_get_drvdata(&op->dev);
+       struct net_device *net_dev = qp->dev;
+
+       unregister_netdev(net_dev);
+
+       of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
+       of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
+       dma_free_coherent(&op->dev, PAGE_SIZE,
+                         qp->qe_block, qp->qblock_dvma);
+       dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
+                         qp->buffers, qp->buffers_dvma);
+
+       free_netdev(net_dev);
+
+       dev_set_drvdata(&op->dev, NULL);
+
+       return 0;
+}
+
+static const struct of_device_id qec_sbus_match[] = {
+       {
+               .name = "qe",
+       },
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, qec_sbus_match);
+
+static struct platform_driver qec_sbus_driver = {
+       .driver = {
+               .name = "qec",
+               .owner = THIS_MODULE,
+               .of_match_table = qec_sbus_match,
+       },
+       .probe          = qec_sbus_probe,
+       .remove         = __devexit_p(qec_sbus_remove),
+};
+
+/* Module entry point: register the platform driver. */
+static int __init qec_init(void)
+{
+       return platform_driver_register(&qec_sbus_driver);
+}
+
+/* Module exit: after all channels are unregistered, tear down each
+ * QEC card's shared state (IRQ, global register mapping, sunqec).
+ */
+static void __exit qec_exit(void)
+{
+       platform_driver_unregister(&qec_sbus_driver);
+
+       while (root_qec_dev) {
+               struct sunqec *next = root_qec_dev->next_module;
+               struct platform_device *op = root_qec_dev->op;
+
+               free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
+               of_iounmap(&op->resource[0], root_qec_dev->gregs,
+                          GLOB_REG_SIZE);
+               kfree(root_qec_dev);
+
+               root_qec_dev = next;
+       }
+}
+
+module_init(qec_init);
+module_exit(qec_exit);
diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h
new file mode 100644 (file)
index 0000000..581781b
--- /dev/null
@@ -0,0 +1,350 @@
+/* $Id: sunqe.h,v 1.13 2000/02/09 11:15:42 davem Exp $
+ * sunqe.h: Definitions for the Sun QuadEthernet driver.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#ifndef _SUNQE_H
+#define _SUNQE_H
+
+/* QEC global registers. */
+#define GLOB_CTRL      0x00UL          /* Control                      */
+#define GLOB_STAT      0x04UL          /* Status                       */
+#define GLOB_PSIZE     0x08UL          /* Packet Size                  */
+#define GLOB_MSIZE     0x0cUL          /* Local-memory Size            */
+#define GLOB_RSIZE     0x10UL          /* Receive partition size       */
+#define GLOB_TSIZE     0x14UL          /* Transmit partition size      */
+#define GLOB_REG_SIZE  0x18UL
+
+#define GLOB_CTRL_MMODE       0x40000000 /* MACE qec mode            */
+#define GLOB_CTRL_BMODE       0x10000000 /* BigMAC qec mode          */
+#define GLOB_CTRL_EPAR        0x00000020 /* Enable parity            */
+#define GLOB_CTRL_ACNTRL      0x00000018 /* SBUS arbitration control */
+#define GLOB_CTRL_B64         0x00000004 /* 64 byte dvma bursts      */
+#define GLOB_CTRL_B32         0x00000002 /* 32 byte dvma bursts      */
+#define GLOB_CTRL_B16         0x00000000 /* 16 byte dvma bursts      */
+#define GLOB_CTRL_RESET       0x00000001 /* Reset the QEC            */
+
+#define GLOB_STAT_TX          0x00000008 /* BigMAC Transmit IRQ      */
+#define GLOB_STAT_RX          0x00000004 /* BigMAC Receive IRQ       */
+#define GLOB_STAT_BM          0x00000002 /* BigMAC Global IRQ        */
+#define GLOB_STAT_ER          0x00000001 /* BigMAC Error IRQ         */
+
+#define GLOB_PSIZE_2048       0x00       /* 2k packet size           */
+#define GLOB_PSIZE_4096       0x01       /* 4k packet size           */
+#define GLOB_PSIZE_6144       0x10       /* 6k packet size           */
+#define GLOB_PSIZE_8192       0x11       /* 8k packet size           */
+
+/* In MACE mode, there are four qe channels.  Each channel has its own
+ * status bits in the QEC status register.  This macro picks out the
+ * ones you want.
+ */
+#define GLOB_STAT_PER_QE(status, channel) (((status) >> ((channel) * 4)) & 0xf)
+
+/* The following registers are for per-qe channel information/status. */
+#define CREG_CTRL      0x00UL  /* Control                   */
+#define CREG_STAT      0x04UL  /* Status                    */
+#define CREG_RXDS      0x08UL  /* RX descriptor ring ptr    */
+#define CREG_TXDS      0x0cUL  /* TX descriptor ring ptr    */
+#define CREG_RIMASK    0x10UL  /* RX Interrupt Mask         */
+#define CREG_TIMASK    0x14UL  /* TX Interrupt Mask         */
+#define CREG_QMASK     0x18UL  /* QEC Error Interrupt Mask  */
+#define CREG_MMASK     0x1cUL  /* MACE Error Interrupt Mask */
+#define CREG_RXWBUFPTR 0x20UL  /* Local memory rx write ptr */
+#define CREG_RXRBUFPTR 0x24UL  /* Local memory rx read ptr  */
+#define CREG_TXWBUFPTR 0x28UL  /* Local memory tx write ptr */
+#define CREG_TXRBUFPTR 0x2cUL  /* Local memory tx read ptr  */
+#define CREG_CCNT      0x30UL  /* Collision Counter         */
+#define CREG_PIPG      0x34UL  /* Inter-Frame Gap           */
+#define CREG_REG_SIZE  0x38UL
+
+#define CREG_CTRL_RXOFF       0x00000004  /* Disable this qe's receiver*/
+#define CREG_CTRL_RESET       0x00000002  /* Reset this qe channel     */
+#define CREG_CTRL_TWAKEUP     0x00000001  /* Transmitter Wakeup, 'go'. */
+
+#define CREG_STAT_EDEFER      0x10000000  /* Excessive Defers          */
+#define CREG_STAT_CLOSS       0x08000000  /* Carrier Loss              */
+#define CREG_STAT_ERETRIES    0x04000000  /* More than 16 retries      */
+#define CREG_STAT_LCOLL       0x02000000  /* Late TX Collision         */
+#define CREG_STAT_FUFLOW      0x01000000  /* FIFO Underflow            */
+#define CREG_STAT_JERROR      0x00800000  /* Jabber Error              */
+#define CREG_STAT_BERROR      0x00400000  /* Babble Error              */
+#define CREG_STAT_TXIRQ       0x00200000  /* Transmit Interrupt        */
+#define CREG_STAT_CCOFLOW     0x00100000  /* TX Coll-counter Overflow  */
+#define CREG_STAT_TXDERROR    0x00080000  /* TX Descriptor is bogus    */
+#define CREG_STAT_TXLERR      0x00040000  /* Late Transmit Error       */
+#define CREG_STAT_TXPERR      0x00020000  /* Transmit Parity Error     */
+#define CREG_STAT_TXSERR      0x00010000  /* Transmit SBUS error ack   */
+#define CREG_STAT_RCCOFLOW    0x00001000  /* RX Coll-counter Overflow  */
+#define CREG_STAT_RUOFLOW     0x00000800  /* Runt Counter Overflow     */
+#define CREG_STAT_MCOFLOW     0x00000400  /* Missed Counter Overflow   */
+#define CREG_STAT_RXFOFLOW    0x00000200  /* RX FIFO Overflow          */
+#define CREG_STAT_RLCOLL      0x00000100  /* RX Late Collision         */
+#define CREG_STAT_FCOFLOW     0x00000080  /* Frame Counter Overflow    */
+#define CREG_STAT_CECOFLOW    0x00000040  /* CRC Error-counter Overflow*/
+#define CREG_STAT_RXIRQ       0x00000020  /* Receive Interrupt         */
+#define CREG_STAT_RXDROP      0x00000010  /* Dropped a RX'd packet     */
+#define CREG_STAT_RXSMALL     0x00000008  /* Receive buffer too small  */
+#define CREG_STAT_RXLERR      0x00000004  /* Receive Late Error        */
+#define CREG_STAT_RXPERR      0x00000002  /* Receive Parity Error      */
+#define CREG_STAT_RXSERR      0x00000001  /* Receive SBUS Error ACK    */
+
+#define CREG_STAT_ERRORS      (CREG_STAT_EDEFER|CREG_STAT_CLOSS|CREG_STAT_ERETRIES|     \
+                              CREG_STAT_LCOLL|CREG_STAT_FUFLOW|CREG_STAT_JERROR|       \
+                              CREG_STAT_BERROR|CREG_STAT_CCOFLOW|CREG_STAT_TXDERROR|   \
+                              CREG_STAT_TXLERR|CREG_STAT_TXPERR|CREG_STAT_TXSERR|      \
+                              CREG_STAT_RCCOFLOW|CREG_STAT_RUOFLOW|CREG_STAT_MCOFLOW| \
+                              CREG_STAT_RXFOFLOW|CREG_STAT_RLCOLL|CREG_STAT_FCOFLOW|   \
+                              CREG_STAT_CECOFLOW|CREG_STAT_RXDROP|CREG_STAT_RXSMALL|   \
+                              CREG_STAT_RXLERR|CREG_STAT_RXPERR|CREG_STAT_RXSERR)
+
+#define CREG_QMASK_COFLOW     0x00100000  /* CollCntr overflow         */
+#define CREG_QMASK_TXDERROR   0x00080000  /* TXD error                 */
+#define CREG_QMASK_TXLERR     0x00040000  /* TX late error             */
+#define CREG_QMASK_TXPERR     0x00020000  /* TX parity error           */
+#define CREG_QMASK_TXSERR     0x00010000  /* TX sbus error ack         */
+#define CREG_QMASK_RXDROP     0x00000010  /* RX drop                   */
+#define CREG_QMASK_RXBERROR   0x00000008  /* RX buffer error           */
+#define CREG_QMASK_RXLEERR    0x00000004  /* RX late error             */
+#define CREG_QMASK_RXPERR     0x00000002  /* RX parity error           */
+#define CREG_QMASK_RXSERR     0x00000001  /* RX sbus error ack         */
+
+#define CREG_MMASK_EDEFER     0x10000000  /* Excess defer              */
+#define CREG_MMASK_CLOSS      0x08000000  /* Carrier loss              */
+#define CREG_MMASK_ERETRY     0x04000000  /* Excess retry              */
+#define CREG_MMASK_LCOLL      0x02000000  /* Late collision error      */
+#define CREG_MMASK_UFLOW      0x01000000  /* Underflow                 */
+#define CREG_MMASK_JABBER     0x00800000  /* Jabber error              */
+#define CREG_MMASK_BABBLE     0x00400000  /* Babble error              */
+#define CREG_MMASK_OFLOW      0x00000800  /* Overflow                  */
+#define CREG_MMASK_RXCOLL     0x00000400  /* RX Coll-Cntr overflow     */
+#define CREG_MMASK_RPKT       0x00000200  /* Runt pkt overflow         */
+#define CREG_MMASK_MPKT       0x00000100  /* Missed pkt overflow       */
+
+#define CREG_PIPG_TENAB       0x00000020  /* Enable Throttle           */
+#define CREG_PIPG_MMODE       0x00000010  /* Manual Mode               */
+#define CREG_PIPG_WMASK       0x0000000f  /* SBUS Wait Mask            */
+
+/* Per-channel AMD 79C940 MACE registers. */
+#define MREGS_RXFIFO   0x00UL  /* Receive FIFO                   */
+#define MREGS_TXFIFO   0x01UL  /* Transmit FIFO                  */
+#define MREGS_TXFCNTL  0x02UL  /* Transmit Frame Control         */
+#define MREGS_TXFSTAT  0x03UL  /* Transmit Frame Status          */
+#define MREGS_TXRCNT   0x04UL  /* Transmit Retry Count           */
+#define MREGS_RXFCNTL  0x05UL  /* Receive Frame Control          */
+#define MREGS_RXFSTAT  0x06UL  /* Receive Frame Status           */
+#define MREGS_FFCNT    0x07UL  /* FIFO Frame Count               */
+#define MREGS_IREG     0x08UL  /* Interrupt Register             */
+#define MREGS_IMASK    0x09UL  /* Interrupt Mask                 */
+#define MREGS_POLL     0x0aUL  /* POLL Register                  */
+#define MREGS_BCONFIG  0x0bUL  /* BIU Config                     */
+#define MREGS_FCONFIG  0x0cUL  /* FIFO Config                    */
+#define MREGS_MCONFIG  0x0dUL  /* MAC Config                     */
+#define MREGS_PLSCONFIG        0x0eUL  /* PLS Config                     */
+#define MREGS_PHYCONFIG        0x0fUL  /* PHY Config                     */
+#define MREGS_CHIPID1  0x10UL  /* Chip-ID, low bits              */
+#define MREGS_CHIPID2  0x11UL  /* Chip-ID, high bits             */
+#define MREGS_IACONFIG 0x12UL  /* Internal Address Config        */
+       /* 0x13UL, reserved */
+#define MREGS_FILTER   0x14UL  /* Logical Address Filter         */
+#define MREGS_ETHADDR  0x15UL  /* Our Ethernet Address           */
+       /* 0x16UL, reserved */
+       /* 0x17UL, reserved */
+#define MREGS_MPCNT    0x18UL  /* Missed Packet Count            */
+       /* 0x19UL, reserved */
+#define MREGS_RPCNT    0x1aUL  /* Runt Packet Count              */
+#define MREGS_RCCNT    0x1bUL  /* RX Collision Count             */
+       /* 0x1cUL, reserved */
+#define MREGS_UTEST    0x1dUL  /* User Test                      */
+#define MREGS_RTEST1   0x1eUL  /* Reserved Test 1                */
+#define MREGS_RTEST2   0x1fUL  /* Reserved Test 2                */
+#define MREGS_REG_SIZE 0x20UL
+
+#define MREGS_TXFCNTL_DRETRY        0x80 /* Retry disable                  */
+#define MREGS_TXFCNTL_DFCS          0x08 /* Disable TX FCS                 */
+#define MREGS_TXFCNTL_AUTOPAD       0x01 /* TX auto pad                    */
+
+#define MREGS_TXFSTAT_VALID         0x80 /* TX valid                       */
+#define MREGS_TXFSTAT_UNDERFLOW     0x40 /* TX underflow                   */
+#define MREGS_TXFSTAT_LCOLL         0x20 /* TX late collision              */
+#define MREGS_TXFSTAT_MRETRY        0x10 /* TX > 1 retries                 */
+#define MREGS_TXFSTAT_ORETRY        0x08 /* TX 1 retry                     */
+#define MREGS_TXFSTAT_PDEFER        0x04 /* TX pkt deferred                */
+#define MREGS_TXFSTAT_CLOSS         0x02 /* TX carrier lost                */
+#define MREGS_TXFSTAT_RERROR        0x01 /* TX retry error                 */
+
+#define MREGS_TXRCNT_EDEFER         0x80 /* TX Excess defers               */
+#define MREGS_TXRCNT_CMASK          0x0f /* TX retry count                 */
+
+#define MREGS_RXFCNTL_LOWLAT        0x08 /* RX low latency                 */
+#define MREGS_RXFCNTL_AREJECT       0x04 /* RX addr match rej              */
+#define MREGS_RXFCNTL_AUTOSTRIP     0x01 /* RX auto strip                  */
+
+#define MREGS_RXFSTAT_OVERFLOW      0x80 /* RX overflow                    */
+#define MREGS_RXFSTAT_LCOLL         0x40 /* RX late collision              */
+#define MREGS_RXFSTAT_FERROR        0x20 /* RX framing error               */
+#define MREGS_RXFSTAT_FCSERROR      0x10 /* RX FCS error                   */
+#define MREGS_RXFSTAT_RBCNT         0x0f /* RX msg byte count              */
+
+#define MREGS_FFCNT_RX              0xf0 /* RX FIFO frame cnt              */
+#define MREGS_FFCNT_TX              0x0f /* TX FIFO frame cnt              */
+
+#define MREGS_IREG_JABBER           0x80 /* IRQ Jabber error               */
+#define MREGS_IREG_BABBLE           0x40 /* IRQ Babble error               */
+#define MREGS_IREG_COLL             0x20 /* IRQ Collision error            */
+#define MREGS_IREG_RCCO             0x10 /* IRQ Collision cnt overflow     */
+#define MREGS_IREG_RPKTCO           0x08 /* IRQ Runt packet count overflow */
+#define MREGS_IREG_MPKTCO           0x04 /* IRQ missed packet cnt overflow */
+#define MREGS_IREG_RXIRQ            0x02 /* IRQ RX'd a packet              */
+#define MREGS_IREG_TXIRQ            0x01 /* IRQ TX'd a packet              */
+
+#define MREGS_IMASK_BABBLE          0x40 /* IMASK Babble errors            */
+#define MREGS_IMASK_COLL            0x20 /* IMASK Collision errors         */
+#define MREGS_IMASK_MPKTCO          0x04 /* IMASK Missed pkt cnt overflow  */
+#define MREGS_IMASK_RXIRQ           0x02 /* IMASK RX interrupts            */
+#define MREGS_IMASK_TXIRQ           0x01 /* IMASK TX interrupts            */
+
+#define MREGS_POLL_TXVALID          0x80 /* TX is valid                    */
+#define MREGS_POLL_TDTR             0x40 /* TX data transfer request       */
+#define MREGS_POLL_RDTR             0x20 /* RX data transfer request       */
+
+#define MREGS_BCONFIG_BSWAP         0x40 /* Byte Swap                      */
+#define MREGS_BCONFIG_4TS           0x00 /* 4byte transmit start point     */
+#define MREGS_BCONFIG_16TS          0x10 /* 16byte transmit start point    */
+#define MREGS_BCONFIG_64TS          0x20 /* 64byte transmit start point    */
+#define MREGS_BCONFIG_112TS         0x30 /* 112byte transmit start point   */
+#define MREGS_BCONFIG_RESET         0x01 /* SW-Reset the MACE              */
+
+#define MREGS_FCONFIG_TXF8          0x00 /* TX fifo 8 write cycles         */
+#define MREGS_FCONFIG_TXF32         0x80 /* TX fifo 32 write cycles        */
+#define MREGS_FCONFIG_TXF16         0x40 /* TX fifo 16 write cycles        */
+#define MREGS_FCONFIG_RXF64         0x20 /* RX fifo 64 write cycles        */
+#define MREGS_FCONFIG_RXF32         0x10 /* RX fifo 32 write cycles        */
+#define MREGS_FCONFIG_RXF16         0x00 /* RX fifo 16 write cycles        */
+#define MREGS_FCONFIG_TFWU          0x08 /* TX fifo watermark update       */
+#define MREGS_FCONFIG_RFWU          0x04 /* RX fifo watermark update       */
+#define MREGS_FCONFIG_TBENAB        0x02 /* TX burst enable                */
+#define MREGS_FCONFIG_RBENAB        0x01 /* RX burst enable                */
+
+#define MREGS_MCONFIG_PROMISC       0x80 /* Promiscuous mode enable        */
+#define MREGS_MCONFIG_TPDDISAB      0x40 /* TX 2part deferral enable       */
+#define MREGS_MCONFIG_MBAENAB       0x20 /* Modified backoff enable        */
+#define MREGS_MCONFIG_RPADISAB      0x08 /* RX physical addr disable       */
+#define MREGS_MCONFIG_RBDISAB       0x04 /* RX broadcast disable           */
+#define MREGS_MCONFIG_TXENAB        0x02 /* Enable transmitter             */
+#define MREGS_MCONFIG_RXENAB        0x01 /* Enable receiver                */
+
+#define MREGS_PLSCONFIG_TXMS        0x08 /* TX mode select                 */
+#define MREGS_PLSCONFIG_GPSI        0x06 /* Use GPSI connector             */
+#define MREGS_PLSCONFIG_DAI         0x04 /* Use DAI connector              */
+#define MREGS_PLSCONFIG_TP          0x02 /* Use TwistedPair connector      */
+#define MREGS_PLSCONFIG_AUI         0x00 /* Use AUI connector              */
+#define MREGS_PLSCONFIG_IOENAB      0x01 /* PLS I/O enable                 */
+
+#define MREGS_PHYCONFIG_LSTAT       0x80 /* Link status                    */
+#define MREGS_PHYCONFIG_LTESTDIS    0x40 /* Disable link test logic        */
+#define MREGS_PHYCONFIG_RXPOLARITY  0x20 /* RX polarity                    */
+#define MREGS_PHYCONFIG_APCDISAB    0x10 /* AutoPolarityCorrect disab      */
+#define MREGS_PHYCONFIG_LTENAB      0x08 /* Select low threshold           */
+#define MREGS_PHYCONFIG_AUTO        0x04 /* Connector port auto-sel        */
+#define MREGS_PHYCONFIG_RWU         0x02 /* Remote WakeUp                  */
+#define MREGS_PHYCONFIG_AW          0x01 /* Auto Wakeup                    */
+
+#define MREGS_IACONFIG_ACHNGE       0x80 /* Do address change              */
+#define MREGS_IACONFIG_PARESET      0x04 /* Physical address reset         */
+#define MREGS_IACONFIG_LARESET      0x02 /* Logical address reset          */
+
+#define MREGS_UTEST_RTRENAB         0x80 /* Enable resv test register      */
+#define MREGS_UTEST_RTRDISAB        0x40 /* Disab resv test register       */
+#define MREGS_UTEST_RPACCEPT        0x20 /* Accept runt packets            */
+#define MREGS_UTEST_FCOLL           0x10 /* Force collision status         */
+#define MREGS_UTEST_FCSENAB         0x08 /* Enable FCS on RX               */
+#define MREGS_UTEST_INTLOOPM        0x06 /* Intern lpback w/MENDEC         */
+#define MREGS_UTEST_INTLOOP         0x04 /* Intern lpback                  */
+#define MREGS_UTEST_EXTLOOP         0x02 /* Extern lpback                  */
+#define MREGS_UTEST_NOLOOP          0x00 /* No loopback                    */
+
+/* Hardware receive descriptor: status/length flags plus the DVMA
+ * address of the receive buffer.
+ */
+struct qe_rxd {
+       u32 rx_flags;
+       u32 rx_addr;
+};
+
+#define RXD_OWN      0x80000000 /* Ownership.      */
+#define RXD_UPDATE   0x10000000 /* Being Updated?  */
+#define RXD_LENGTH   0x000007ff /* Packet Length.  */
+
+/* Hardware transmit descriptor, same layout as qe_rxd. */
+struct qe_txd {
+       u32 tx_flags;
+       u32 tx_addr;
+};
+
+#define TXD_OWN      0x80000000 /* Ownership.      */
+#define TXD_SOP      0x40000000 /* Start Of Packet */
+#define TXD_EOP      0x20000000 /* End Of Packet   */
+#define TXD_UPDATE   0x10000000 /* Being Updated?  */
+#define TXD_LENGTH   0x000007ff /* Packet Length.  */
+
+#define TX_RING_MAXSIZE   256
+#define RX_RING_MAXSIZE   256
+
+#define TX_RING_SIZE      16
+#define RX_RING_SIZE      16
+
+/* The descriptor arrays in struct qe_init_block are allocated at
+ * *_RING_MAXSIZE entries, so ring indices wrap at MAXSIZE even though
+ * only *_RING_SIZE entries are actively used.
+ */
+#define NEXT_RX(num)       (((num) + 1) & (RX_RING_MAXSIZE - 1))
+#define NEXT_TX(num)       (((num) + 1) & (TX_RING_MAXSIZE - 1))
+#define PREV_RX(num)       (((num) - 1) & (RX_RING_MAXSIZE - 1))
+#define PREV_TX(num)       (((num) - 1) & (TX_RING_MAXSIZE - 1))
+
+/* Number of free TX slots; one slot is always kept unused so a full
+ * ring can be distinguished from an empty one (hence the "- 1").
+ */
+#define TX_BUFFS_AVAIL(qp)                                    \
+        (((qp)->tx_old <= (qp)->tx_new) ?                     \
+         (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new :  \
+                           (qp)->tx_old - (qp)->tx_new - 1)
+
+/* One DMA-coherent block holding both descriptor rings. */
+struct qe_init_block {
+       struct qe_rxd qe_rxd[RX_RING_MAXSIZE];
+       struct qe_txd qe_txd[TX_RING_MAXSIZE];
+};
+
+/* Byte offset of a descriptor within qe_init_block (offsetof idiom). */
+#define qib_offset(mem, elem) \
+((__u32)((unsigned long)(&(((struct qe_init_block *)0)->mem[elem]))))
+
+struct sunqe;
+
+/* Software state for one QEC controller, parent of up to four QEs. */
+struct sunqec {
+       void __iomem            *gregs;         /* QEC Global Registers         */
+       struct sunqe            *qes[4];        /* Each child MACE              */
+       unsigned int            qec_bursts;     /* Support burst sizes          */
+       struct platform_device  *op;            /* QEC's OF device              */
+       struct sunqec           *next_module;   /* List of all QECs in system   */
+};
+
+#define PKT_BUF_SZ     1664
+#define RXD_PKT_SZ     1664
+
+/* Fixed packet buffer area, allocated DMA-coherent alongside the
+ * descriptor rings; one buffer per active ring entry.
+ */
+struct sunqe_buffers {
+       u8      tx_buf[TX_RING_SIZE][PKT_BUF_SZ];
+       u8      __pad[2];
+       u8      rx_buf[RX_RING_SIZE][PKT_BUF_SZ];
+};
+
+/* Byte offset of a packet buffer within sunqe_buffers (offsetof idiom). */
+#define qebuf_offset(mem, elem) \
+((__u32)((unsigned long)(&(((struct sunqe_buffers *)0)->mem[elem][0]))))
+
+/* Software state for one QE ethernet channel. */
+struct sunqe {
+       void __iomem                    *qcregs;                /* QEC per-channel Registers   */
+       void __iomem                    *mregs;         /* Per-channel MACE Registers  */
+       struct qe_init_block            *qe_block;      /* RX and TX descriptors       */
+       __u32                           qblock_dvma;    /* RX and TX descriptors       */
+       spinlock_t                      lock;           /* Protects txfull state       */
+       int                             rx_new, rx_old; /* RX ring extents             */
+       int                             tx_new, tx_old; /* TX ring extents             */
+       struct sunqe_buffers            *buffers;       /* CPU visible address.        */
+       __u32                           buffers_dvma;   /* DVMA visible address.       */
+       struct sunqec                   *parent;
+       u8                              mconfig;        /* Base MACE mconfig value     */
+       struct platform_device          *op;            /* QE's OF device struct       */
+       struct net_device               *dev;           /* QE's netdevice struct       */
+       int                             channel;        /* Who am I?                   */
+};
+
+#endif /* !(_SUNQE_H) */
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
new file mode 100644 (file)
index 0000000..bf3c762
--- /dev/null
@@ -0,0 +1,1284 @@
+/* sunvnet.c: Sun LDOM Virtual Network Driver.
+ *
+ * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/mutex.h>
+
+#include <asm/vio.h>
+#include <asm/ldc.h>
+
+#include "sunvnet.h"
+
+#define DRV_MODULE_NAME                "sunvnet"
+#define DRV_MODULE_VERSION     "1.0"
+#define DRV_MODULE_RELDATE     "June 25, 2007"
+
+static char version[] __devinitdata =
+       DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Sun LDOM virtual network driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+/* Ordered from largest major to lowest */
+static struct vio_version vnet_versions[] = {
+       { .major = 1, .minor = 0 },
+};
+
+/* Number of free entries remaining in the TX descriptor ring. */
+static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
+{
+       return vio_dring_avail(dr, VNET_TX_RING_SIZE);
+}
+
+/* Fallback handler for unrecognized VIO messages: log the message tag,
+ * drop the LDC channel, and return -ECONNRESET so the caller restarts
+ * the connection.
+ */
+static int vnet_handle_unknown(struct vnet_port *port, void *arg)
+{
+       struct vio_msg_tag *pkt = arg;
+
+       pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
+              pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
+       pr_err("Resetting connection\n");
+
+       ldc_disconnect(port->vio.lp);
+
+       return -ECONNRESET;
+}
+
+/* Send our ATTR INFO packet during the VIO handshake: descriptor-ring
+ * transfer mode, ethernet MAC addressing, and a fixed ETH_FRAME_LEN mtu.
+ */
+static int vnet_send_attr(struct vio_driver_state *vio)
+{
+       struct vnet_port *port = to_vnet_port(vio);
+       struct net_device *dev = port->vp->dev;
+       struct vio_net_attr_info pkt;
+       int i;
+
+       memset(&pkt, 0, sizeof(pkt));
+       pkt.tag.type = VIO_TYPE_CTRL;
+       pkt.tag.stype = VIO_SUBTYPE_INFO;
+       pkt.tag.stype_env = VIO_ATTR_INFO;
+       pkt.tag.sid = vio_send_sid(vio);
+       pkt.xfer_mode = VIO_DRING_MODE;
+       pkt.addr_type = VNET_ADDR_ETHERMAC;
+       pkt.ack_freq = 0;
+       /* Pack the MAC address big-endian into the low 48 bits of addr. */
+       for (i = 0; i < 6; i++)
+               pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
+       pkt.mtu = ETH_FRAME_LEN;
+
+       viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
+              "ackfreq[%u] mtu[%llu]\n",
+              pkt.xfer_mode, pkt.addr_type,
+              (unsigned long long) pkt.addr,
+              pkt.ack_freq,
+              (unsigned long long) pkt.mtu);
+
+       return vio_ldc_send(vio, &pkt, sizeof(pkt));
+}
+
+/* Validate the peer's ATTR INFO.  We only speak dring mode with
+ * ethernet MAC addressing at exactly ETH_FRAME_LEN mtu; anything else
+ * is NACKed and the connection is reset (-ECONNRESET).  On a match the
+ * packet is echoed back as an ACK.
+ */
+static int handle_attr_info(struct vio_driver_state *vio,
+                           struct vio_net_attr_info *pkt)
+{
+       viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
+              "ackfreq[%u] mtu[%llu]\n",
+              pkt->xfer_mode, pkt->addr_type,
+              (unsigned long long) pkt->addr,
+              pkt->ack_freq,
+              (unsigned long long) pkt->mtu);
+
+       pkt->tag.sid = vio_send_sid(vio);
+
+       if (pkt->xfer_mode != VIO_DRING_MODE ||
+           pkt->addr_type != VNET_ADDR_ETHERMAC ||
+           pkt->mtu != ETH_FRAME_LEN) {
+               viodbg(HS, "SEND NET ATTR NACK\n");
+
+               pkt->tag.stype = VIO_SUBTYPE_NACK;
+
+               /* Best effort; we are resetting regardless. */
+               (void) vio_ldc_send(vio, pkt, sizeof(*pkt));
+
+               return -ECONNRESET;
+       } else {
+               viodbg(HS, "SEND NET ATTR ACK\n");
+
+               pkt->tag.stype = VIO_SUBTYPE_ACK;
+
+               return vio_ldc_send(vio, pkt, sizeof(*pkt));
+       }
+
+}
+
+/* Peer accepted our attributes; nothing more to do. */
+static int handle_attr_ack(struct vio_driver_state *vio,
+                          struct vio_net_attr_info *pkt)
+{
+       viodbg(HS, "GOT NET ATTR ACK\n");
+
+       return 0;
+}
+
+/* Peer rejected our attributes; reset the connection. */
+static int handle_attr_nack(struct vio_driver_state *vio,
+                           struct vio_net_attr_info *pkt)
+{
+       viodbg(HS, "GOT NET ATTR NACK\n");
+
+       return -ECONNRESET;
+}
+
+/* Dispatch an ATTR message by subtype; unknown subtypes reset the
+ * connection.
+ */
+static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
+{
+       struct vio_net_attr_info *pkt = arg;
+
+       switch (pkt->tag.stype) {
+       case VIO_SUBTYPE_INFO:
+               return handle_attr_info(vio, pkt);
+
+       case VIO_SUBTYPE_ACK:
+               return handle_attr_ack(vio, pkt);
+
+       case VIO_SUBTYPE_NACK:
+               return handle_attr_nack(vio, pkt);
+
+       default:
+               return -ECONNRESET;
+       }
+}
+
+/* Handshake finished: reset both rings' send/receive sequence numbers
+ * to their initial value of 1.
+ */
+static void vnet_handshake_complete(struct vio_driver_state *vio)
+{
+       struct vio_dring_state *dr;
+
+       dr = &vio->drings[VIO_DRIVER_RX_RING];
+       dr->snd_nxt = dr->rcv_nxt = 1;
+
+       dr = &vio->drings[VIO_DRIVER_TX_RING];
+       dr->snd_nxt = dr->rcv_nxt = 1;
+}
+
+/* The hypervisor interface that implements copying to/from imported
+ * memory from another domain requires that copies are done to 8-byte
+ * aligned buffers, and that the lengths of such copies are also 8-byte
+ * multiples.
+ *
+ * So we align skb->data to an 8-byte multiple and pad-out the data
+ * area so we can round the copy length up to the next multiple of
+ * 8 for the copy.
+ *
+ * The transmitter puts the actual start of the packet 6 bytes into
+ * the buffer it sends over, so that the IP headers after the ethernet
+ * header are aligned properly.  These 6 bytes are not in the descriptor
+ * length, they are simply implied.  This offset is represented using
+ * the VNET_PACKET_SKIP macro.
+ */
+/* Allocate an skb whose data pointer is 8-byte aligned, as required by
+ * the hypervisor copy interface (see comment above).  Extra room is
+ * reserved for the VNET_PACKET_SKIP offset, up to 7 bytes of alignment
+ * slack, and rounding the copy length up to a multiple of 8.
+ */
+static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
+                                          unsigned int len)
+{
+       struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
+       unsigned long addr, off;
+
+       if (unlikely(!skb))
+               return NULL;
+
+       addr = (unsigned long) skb->data;
+       off = ((addr + 7UL) & ~7UL) - addr;
+       if (off)
+               skb_reserve(skb, off);
+
+       return skb;
+}
+
+/* Receive a single packet described by the peer's LDC cookies: validate
+ * the length, allocate an aligned skb, copy the data in from the remote
+ * domain, and hand it to the stack.  All failure paths bump rx_dropped
+ * plus a more specific error counter.
+ */
+static int vnet_rx_one(struct vnet_port *port, unsigned int len,
+                      struct ldc_trans_cookie *cookies, int ncookies)
+{
+       struct net_device *dev = port->vp->dev;
+       unsigned int copy_len;
+       struct sk_buff *skb;
+       int err;
+
+       err = -EMSGSIZE;
+       if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
+               dev->stats.rx_length_errors++;
+               goto out_dropped;
+       }
+
+       skb = alloc_and_align_skb(dev, len);
+       err = -ENOMEM;
+       if (unlikely(!skb)) {
+               dev->stats.rx_missed_errors++;
+               goto out_dropped;
+       }
+
+       /* LDC copies must be a multiple of 8 bytes; include the implied
+        * VNET_PACKET_SKIP header offset and round up.
+        */
+       copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
+       skb_put(skb, copy_len);
+       err = ldc_copy(port->vio.lp, LDC_COPY_IN,
+                      skb->data, copy_len, 0,
+                      cookies, ncookies);
+       if (unlikely(err < 0)) {
+               dev->stats.rx_frame_errors++;
+               goto out_free_skb;
+       }
+
+       /* Strip the skip bytes and the alignment padding. */
+       skb_pull(skb, VNET_PACKET_SKIP);
+       skb_trim(skb, len);
+       skb->protocol = eth_type_trans(skb, dev);
+
+       dev->stats.rx_packets++;
+       dev->stats.rx_bytes += len;
+
+       netif_rx(skb);
+
+       return 0;
+
+out_free_skb:
+       kfree_skb(skb);
+
+out_dropped:
+       dev->stats.rx_dropped++;
+       return err;
+}
+
+/* Send a DRING_DATA ACK covering descriptors [start, end] in the given
+ * state.  -EAGAIN from the LDC layer is retried with an exponential
+ * backoff capped at 128us; on success the ring's send sequence number
+ * is advanced.
+ */
+static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
+                        u32 start, u32 end, u8 vio_dring_state)
+{
+       struct vio_dring_data hdr = {
+               .tag = {
+                       .type           = VIO_TYPE_DATA,
+                       .stype          = VIO_SUBTYPE_ACK,
+                       .stype_env      = VIO_DRING_DATA,
+                       .sid            = vio_send_sid(&port->vio),
+               },
+               .dring_ident            = dr->ident,
+               .start_idx              = start,
+               .end_idx                = end,
+               .state                  = vio_dring_state,
+       };
+       int err, delay;
+
+       hdr.seq = dr->snd_nxt;
+       delay = 1;
+       do {
+               err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
+               if (err > 0) {
+                       dr->snd_nxt++;
+                       break;
+               }
+               udelay(delay);
+               if ((delay <<= 1) > 128)
+                       delay = 128;
+       } while (err == -EAGAIN);
+
+       return err;
+}
+
+/* Advance a ring index by one, wrapping at dr->num_entries. */
+static u32 next_idx(u32 idx, struct vio_dring_state *dr)
+{
+       if (++idx == dr->num_entries)
+               idx = 0;
+       return idx;
+}
+
+/* Step a ring index back by one, wrapping at zero. */
+static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
+{
+       if (idx == 0)
+               idx = dr->num_entries - 1;
+       else
+               idx--;
+
+       return idx;
+}
+
+/* Fetch descriptor `index` from the peer's ring into the shared
+ * desc_buf scratch area.  Returns the buffer or ERR_PTR on LDC error.
+ */
+static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
+                                       struct vio_dring_state *dr,
+                                       u32 index)
+{
+       struct vio_net_desc *desc = port->vio.desc_buf;
+       int err;
+
+       err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
+                                 (index * dr->entry_size),
+                                 dr->cookies, dr->ncookies);
+       if (err < 0)
+               return ERR_PTR(err);
+
+       return desc;
+}
+
+/* Write a (locally modified) descriptor back to slot `index` of the
+ * peer's ring.  Returns 0 or a negative LDC error.
+ */
+static int put_rx_desc(struct vnet_port *port,
+                      struct vio_dring_state *dr,
+                      struct vio_net_desc *desc,
+                      u32 index)
+{
+       int err;
+
+       err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
+                                 (index * dr->entry_size),
+                                 dr->cookies, dr->ncookies);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+/* Process one RX descriptor.  Returns 0 when the packet was consumed
+ * (setting *needs_ack from the descriptor's ack flag), 1 when the
+ * descriptor is not yet READY (caller should stop walking), or a
+ * negative error.
+ */
+static int vnet_walk_rx_one(struct vnet_port *port,
+                           struct vio_dring_state *dr,
+                           u32 index, int *needs_ack)
+{
+       struct vio_net_desc *desc = get_rx_desc(port, dr, index);
+       struct vio_driver_state *vio = &port->vio;
+       int err;
+
+       if (IS_ERR(desc))
+               return PTR_ERR(desc);
+
+       viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
+              desc->hdr.state, desc->hdr.ack,
+              desc->size, desc->ncookies,
+              desc->cookies[0].cookie_addr,
+              desc->cookies[0].cookie_size);
+
+       if (desc->hdr.state != VIO_DESC_READY)
+               return 1;
+       err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
+       if (err == -ECONNRESET)
+               return err;
+       /* Mark the slot consumed and hand it back to the peer. */
+       desc->hdr.state = VIO_DESC_DONE;
+       err = put_rx_desc(port, dr, desc, index);
+       if (err < 0)
+               return err;
+       *needs_ack = desc->hdr.ack;
+       return 0;
+}
+
+/* Walk RX descriptors from `start` up to (and including) `end`.  An
+ * end of (u32)-1 means "walk until a descriptor is not READY".  While
+ * walking, send VIO_DRING_ACTIVE acks whenever a descriptor requested
+ * one; finish with a VIO_DRING_STOPPED ack for the last range handled.
+ */
+static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
+                       u32 start, u32 end)
+{
+       struct vio_driver_state *vio = &port->vio;
+       int ack_start = -1, ack_end = -1;
+
+       end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
+
+       viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
+
+       while (start != end) {
+               int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
+               if (err == -ECONNRESET)
+                       return err;
+               if (err != 0)
+                       break;
+               if (ack_start == -1)
+                       ack_start = start;
+               ack_end = start;
+               start = next_idx(start, dr);
+               if (ack && start != end) {
+                       err = vnet_send_ack(port, dr, ack_start, ack_end,
+                                           VIO_DRING_ACTIVE);
+                       if (err == -ECONNRESET)
+                               return err;
+                       ack_start = -1;
+               }
+       }
+       /* Nothing was processed; ack the slot just before `start`. */
+       if (unlikely(ack_start == -1))
+               ack_start = ack_end = prev_idx(start, dr);
+       return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
+}
+
+/* Handle an inbound VIO_DRING_DATA message: validate its sequence
+ * number against the ring's rcv_nxt, then walk and receive the
+ * advertised descriptor range.  Out-of-sequence or wrong-stype
+ * packets are logged/ignored (return 0), not treated as fatal.
+ */
+static int vnet_rx(struct vnet_port *port, void *msgbuf)
+{
+       struct vio_dring_data *pkt = msgbuf;
+       struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
+       struct vio_driver_state *vio = &port->vio;  /* used by viodbg() */
+
+       viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
+              pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
+
+       if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
+               return 0;
+       if (unlikely(pkt->seq != dr->rcv_nxt)) {
+               pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
+                      pkt->seq, dr->rcv_nxt);
+               return 0;
+       }
+
+       dr->rcv_nxt++;
+
+       /* XXX Validate pkt->start_idx and pkt->end_idx XXX */
+
+       return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
+}
+
+/* Return 1 if ring index @end lies in the currently pending region
+ * [cons, prod) of @dr, 0 otherwise.
+ */
+static int idx_is_pending(struct vio_dring_state *dr, u32 end)
+{
+       u32 idx;
+
+       for (idx = dr->cons; idx != dr->prod; idx = next_idx(idx, dr)) {
+               if (idx == end)
+                       return 1;
+       }
+       return 0;
+}
+
+/* Handle a TX-ring ACK from the peer: advance the consumer index past
+ * the acked descriptor.  Returns 1 when the queue was stopped and
+ * enough ring space is now free, telling the caller (vnet_event) to
+ * wake the TX queue; 0 otherwise.
+ */
+static int vnet_ack(struct vnet_port *port, void *msgbuf)
+{
+       struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+       struct vio_dring_data *pkt = msgbuf;
+       struct net_device *dev;
+       struct vnet *vp;
+       u32 end;
+
+       if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
+               return 0;
+
+       /* Ignore acks for descriptors we do not consider outstanding. */
+       end = pkt->end_idx;
+       if (unlikely(!idx_is_pending(dr, end)))
+               return 0;
+
+       dr->cons = next_idx(end, dr);
+
+       vp = port->vp;
+       dev = vp->dev;
+       if (unlikely(netif_queue_stopped(dev) &&
+                    vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
+               return 1;
+
+       return 0;
+}
+
+/* Handle a TX NACK from the peer.  Currently a no-op placeholder. */
+static int vnet_nack(struct vnet_port *port, void *msgbuf)
+{
+       /* XXX just reset or similar XXX */
+       return 0;
+}
+
+/* Handle the reply to a multicast-list update we sent from
+ * __send_mc_list().  Only an ACK is expected; anything else is logged.
+ */
+static int handle_mcast(struct vnet_port *port, void *msgbuf)
+{
+       struct vio_net_mcast_info *pkt = msgbuf;
+
+       if (pkt->tag.stype != VIO_SUBTYPE_ACK)
+               pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
+                      port->vp->dev->name,
+                      pkt->tag.type,
+                      pkt->tag.stype,
+                      pkt->tag.stype_env,
+                      pkt->tag.sid);
+
+       return 0;
+}
+
+/* Restart the (single, shared) TX queue, but only if every port's TX
+ * ring has room again -- vnet_start_xmit() may pick any port, so one
+ * full ring is enough to keep the queue stopped.
+ */
+static void maybe_tx_wakeup(struct vnet *vp)
+{
+       struct net_device *dev = vp->dev;
+
+       netif_tx_lock(dev);
+       if (likely(netif_queue_stopped(dev))) {
+               struct vnet_port *port;
+               int wake = 1;
+
+               list_for_each_entry(port, &vp->port_list, list) {
+                       struct vio_dring_state *dr;
+
+                       dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+                       if (vnet_tx_dring_avail(dr) <
+                           VNET_TX_WAKEUP_THRESH(dr)) {
+                               wake = 0;
+                               break;
+                       }
+               }
+               if (wake)
+                       netif_wake_queue(dev);
+       }
+       netif_tx_unlock(dev);
+}
+
+/* LDC event callback for a vnet port.  Handles link state changes
+ * (RESET/UP) and drains all pending messages on DATA_READY,
+ * dispatching each to the rx/ack/nack/mcast/control handlers.
+ */
+static void vnet_event(void *arg, int event)
+{
+       struct vnet_port *port = arg;
+       struct vio_driver_state *vio = &port->vio;
+       unsigned long flags;
+       int tx_wakeup, err;
+
+       spin_lock_irqsave(&vio->lock, flags);
+
+       if (unlikely(event == LDC_EVENT_RESET ||
+                    event == LDC_EVENT_UP)) {
+               vio_link_state_change(vio, event);
+               spin_unlock_irqrestore(&vio->lock, flags);
+
+               if (event == LDC_EVENT_RESET)
+                       vio_port_up(vio);
+               return;
+       }
+
+       if (unlikely(event != LDC_EVENT_DATA_READY)) {
+               pr_warning("Unexpected LDC event %d\n", event);
+               spin_unlock_irqrestore(&vio->lock, flags);
+               return;
+       }
+
+       tx_wakeup = err = 0;
+       while (1) {
+               union {
+                       struct vio_msg_tag tag;
+                       u64 raw[8];
+               } msgbuf;
+
+               err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
+               if (unlikely(err < 0)) {
+                       if (err == -ECONNRESET)
+                               vio_conn_reset(vio);
+                       break;
+               }
+               if (err == 0)
+                       break;
+               viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
+                      msgbuf.tag.type,
+                      msgbuf.tag.stype,
+                      msgbuf.tag.stype_env,
+                      msgbuf.tag.sid);
+               err = vio_validate_sid(vio, &msgbuf.tag);
+               if (err < 0)
+                       break;
+
+               if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
+                       if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
+                               err = vnet_rx(port, &msgbuf);
+                       } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
+                               err = vnet_ack(port, &msgbuf);
+                               if (err > 0)
+                                       tx_wakeup |= err;
+                       } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
+                               err = vnet_nack(port, &msgbuf);
+                       }
+               } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
+                       if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
+                               err = handle_mcast(port, &msgbuf);
+                       else
+                               err = vio_control_pkt_engine(vio, &msgbuf);
+                       if (err)
+                               break;
+               } else {
+                       err = vnet_handle_unknown(port, &msgbuf);
+               }
+               if (err == -ECONNRESET)
+                       break;
+       }
+       /* NOTE(review): deliberately asymmetric with the irqsave above --
+        * the lock is dropped here but interrupts stay disabled until
+        * local_irq_restore() below, so maybe_tx_wakeup() (which takes
+        * netif_tx_lock) runs without vio->lock held.  Confirm this
+        * ordering vs. the xmit path before changing it.
+        */
+       spin_unlock(&vio->lock);
+       if (unlikely(tx_wakeup && err != -ECONNRESET))
+               maybe_tx_wakeup(port->vp);
+       local_irq_restore(flags);
+}
+
+/* Notify the peer that new TX descriptors are ready, starting at the
+ * current producer index (end_idx of -1 means open ended).  Retries on
+ * -EAGAIN with exponential backoff capped at 128us.  Returns the
+ * vio_ldc_send() result; dr->snd_nxt is bumped only on success.
+ * Caller holds port->vio.lock.
+ */
+static int __vnet_tx_trigger(struct vnet_port *port)
+{
+       struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+       struct vio_dring_data hdr = {
+               .tag = {
+                       .type           = VIO_TYPE_DATA,
+                       .stype          = VIO_SUBTYPE_INFO,
+                       .stype_env      = VIO_DRING_DATA,
+                       .sid            = vio_send_sid(&port->vio),
+               },
+               .dring_ident            = dr->ident,
+               .start_idx              = dr->prod,
+               .end_idx                = (u32) -1,
+       };
+       int err, delay;
+
+       hdr.seq = dr->snd_nxt;
+       delay = 1;
+       do {
+               err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
+               if (err > 0) {
+                       dr->snd_nxt++;
+                       break;
+               }
+               udelay(delay);
+               if ((delay <<= 1) > 128)
+                       delay = 128;
+       } while (err == -EAGAIN);
+
+       return err;
+}
+
+/* Pick the port to transmit @skb on: hash the destination MAC (the
+ * first bytes of skb->data) into the port hash and look for an exact
+ * match on the remote address; otherwise fall back to the first port
+ * in the list (the switch port is kept at the head by probe).
+ * Caller holds vp->lock.
+ */
+struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
+{
+       unsigned int hash = vnet_hashfn(skb->data);
+       struct hlist_head *hp = &vp->port_hash[hash];
+       struct hlist_node *n;
+       struct vnet_port *port;
+
+       hlist_for_each_entry(port, n, hp, hash) {
+               if (!compare_ether_addr(port->raddr, skb->data))
+                       return port;
+       }
+       port = NULL;
+       if (!list_empty(&vp->port_list))
+               port = list_entry(vp->port_list.next, struct vnet_port, list);
+
+       return port;
+}
+
+/* Locked wrapper around __tx_port_find().  May return NULL when no
+ * ports exist.
+ */
+struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
+{
+       struct vnet_port *ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&vp->lock, flags);
+       ret = __tx_port_find(vp, skb);
+       spin_unlock_irqrestore(&vp->lock, flags);
+
+       return ret;
+}
+
+/* ndo_start_xmit: copy the skb into the next pre-mapped TX buffer of
+ * the chosen port's dring, publish the descriptor, and trigger the
+ * peer.  The skb fits the ETH_FRAME_LEN + 8 buffer because the MTU is
+ * pinned to ETH_DATA_LEN by vnet_change_mtu() (VNET_PACKET_SKIP = 6
+ * leading bytes keep the IP header aligned).
+ */
+static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct vnet *vp = netdev_priv(dev);
+       struct vnet_port *port = tx_port_find(vp, skb);
+       struct vio_dring_state *dr;
+       struct vio_net_desc *d;
+       unsigned long flags;
+       unsigned int len;
+       void *tx_buf;
+       int i, err;
+
+       if (unlikely(!port))
+               goto out_dropped;
+
+       spin_lock_irqsave(&port->vio.lock, flags);
+
+       /* Need at least 2 free entries: one for this frame, one so the
+        * ring never becomes completely full.
+        */
+       dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+       if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+               if (!netif_queue_stopped(dev)) {
+                       netif_stop_queue(dev);
+
+                       /* This is a hard error, log it. */
+                       netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+                       dev->stats.tx_errors++;
+               }
+               spin_unlock_irqrestore(&port->vio.lock, flags);
+               return NETDEV_TX_BUSY;
+       }
+
+       d = vio_dring_cur(dr);
+
+       tx_buf = port->tx_bufs[dr->prod].buf;
+       skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);
+
+       /* Zero-pad runts up to the minimum ethernet frame size. */
+       len = skb->len;
+       if (len < ETH_ZLEN) {
+               len = ETH_ZLEN;
+               memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
+       }
+
+       d->hdr.ack = VIO_ACK_ENABLE;
+       d->size = len;
+       d->ncookies = port->tx_bufs[dr->prod].ncookies;
+       for (i = 0; i < d->ncookies; i++)
+               d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];
+
+       /* This has to be a non-SMP write barrier because we are writing
+        * to memory which is shared with the peer LDOM.
+        */
+       wmb();
+
+       d->hdr.state = VIO_DESC_READY;
+
+       err = __vnet_tx_trigger(port);
+       if (unlikely(err < 0)) {
+               netdev_info(dev, "TX trigger error %d\n", err);
+               d->hdr.state = VIO_DESC_FREE;
+               dev->stats.tx_carrier_errors++;
+               goto out_dropped_unlock;
+       }
+
+       dev->stats.tx_packets++;
+       dev->stats.tx_bytes += skb->len;
+
+       /* Advance the producer; stop the queue when nearly full, but
+        * re-wake immediately if an ack already freed enough space
+        * (avoids a stop/ack race).
+        */
+       dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
+       if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+               netif_stop_queue(dev);
+               if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
+                       netif_wake_queue(dev);
+       }
+
+       spin_unlock_irqrestore(&port->vio.lock, flags);
+
+       dev_kfree_skb(skb);
+
+       return NETDEV_TX_OK;
+
+out_dropped_unlock:
+       spin_unlock_irqrestore(&port->vio.lock, flags);
+
+out_dropped:
+       dev_kfree_skb(skb);
+       dev->stats.tx_dropped++;
+       return NETDEV_TX_OK;
+}
+
+/* ndo_tx_timeout: fires after VNET_TX_TIMEOUT of TX inactivity.
+ * No recovery implemented yet.
+ */
+static void vnet_tx_timeout(struct net_device *dev)
+{
+       /* XXX Implement me XXX */
+}
+
+/* ndo_open: the virtual link is always "up", so just enable TX. */
+static int vnet_open(struct net_device *dev)
+{
+       netif_carrier_on(dev);
+       netif_start_queue(dev);
+
+       return 0;
+}
+
+/* ndo_stop: mirror of vnet_open(). */
+static int vnet_close(struct net_device *dev)
+{
+       netif_stop_queue(dev);
+       netif_carrier_off(dev);
+
+       return 0;
+}
+
+/* Linear search of the singly-linked multicast list for @addr.
+ * Returns the matching entry or NULL.  Caller holds vp->lock.
+ */
+static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
+{
+       struct vnet_mcast_entry *m;
+
+       for (m = vp->mcast_list; m; m = m->next) {
+               if (!memcmp(m->addr, addr, ETH_ALEN))
+                       return m;
+       }
+       return NULL;
+}
+
+/* Fold the device's current multicast list into vp->mcast_list.
+ * Existing entries are marked ->hit; new addresses are allocated and
+ * pushed on the list head (also marked ->hit) so that __send_mc_list()
+ * can later announce unsent entries and prune entries whose ->hit was
+ * never set.  Called under vp->lock, hence GFP_ATOMIC.
+ */
+static void __update_mc_list(struct vnet *vp, struct net_device *dev)
+{
+       struct netdev_hw_addr *ha;
+
+       netdev_for_each_mc_addr(ha, dev) {
+               struct vnet_mcast_entry *m;
+
+               m = __vnet_mc_find(vp, ha->addr);
+               if (m) {
+                       m->hit = 1;
+                       continue;
+               }
+
+               /* New address; the "continue" above guarantees m is
+                * NULL here, so no recheck is needed.  Allocation
+                * failure just drops this address for now.
+                */
+               m = kzalloc(sizeof(*m), GFP_ATOMIC);
+               if (!m)
+                       continue;
+               memcpy(m->addr, ha->addr, ETH_ALEN);
+               m->hit = 1;
+
+               m->next = vp->mcast_list;
+               vp->mcast_list = m;
+       }
+}
+
+/* Push the multicast state to the switch port in two passes, batching
+ * up to VNET_NUM_MCAST addresses per VNET_MCAST_INFO message:
+ *   1. set=1: announce every entry not yet sent (marks ->sent);
+ *   2. set=0: remove and free every entry whose ->hit is clear,
+ *      clearing ->hit on the survivors for the next update cycle.
+ * Send errors are intentionally ignored (best effort).
+ * Caller holds vp->lock.
+ */
+static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
+{
+       struct vio_net_mcast_info info;
+       struct vnet_mcast_entry *m, **pp;
+       int n_addrs;
+
+       memset(&info, 0, sizeof(info));
+
+       info.tag.type = VIO_TYPE_CTRL;
+       info.tag.stype = VIO_SUBTYPE_INFO;
+       info.tag.stype_env = VNET_MCAST_INFO;
+       info.tag.sid = vio_send_sid(&port->vio);
+       info.set = 1;
+
+       n_addrs = 0;
+       for (m = vp->mcast_list; m; m = m->next) {
+               if (m->sent)
+                       continue;
+               m->sent = 1;
+               memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
+                      m->addr, ETH_ALEN);
+               if (++n_addrs == VNET_NUM_MCAST) {
+                       info.count = n_addrs;
+
+                       (void) vio_ldc_send(&port->vio, &info,
+                                           sizeof(info));
+                       n_addrs = 0;
+               }
+       }
+       if (n_addrs) {
+               info.count = n_addrs;
+               (void) vio_ldc_send(&port->vio, &info, sizeof(info));
+       }
+
+       info.set = 0;
+
+       n_addrs = 0;
+       pp = &vp->mcast_list;
+       while ((m = *pp) != NULL) {
+               if (m->hit) {
+                       m->hit = 0;
+                       pp = &m->next;
+                       continue;
+               }
+
+               memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
+                      m->addr, ETH_ALEN);
+               if (++n_addrs == VNET_NUM_MCAST) {
+                       info.count = n_addrs;
+                       (void) vio_ldc_send(&port->vio, &info,
+                                           sizeof(info));
+                       n_addrs = 0;
+               }
+
+               /* Unlink and free the stale entry. */
+               *pp = m->next;
+               kfree(m);
+       }
+       if (n_addrs) {
+               info.count = n_addrs;
+               (void) vio_ldc_send(&port->vio, &info, sizeof(info));
+       }
+}
+
+/* ndo_set_multicast_list: propagate the multicast list through the
+ * first port, but only when it is the switch port (other ports are
+ * point-to-point guest channels).
+ */
+static void vnet_set_rx_mode(struct net_device *dev)
+{
+       struct vnet *vp = netdev_priv(dev);
+       struct vnet_port *port;
+       unsigned long flags;
+
+       spin_lock_irqsave(&vp->lock, flags);
+       if (!list_empty(&vp->port_list)) {
+               port = list_entry(vp->port_list.next, struct vnet_port, list);
+
+               if (port->switch_port) {
+                       __update_mc_list(vp, dev);
+                       __send_mc_list(vp, port);
+               }
+       }
+       spin_unlock_irqrestore(&vp->lock, flags);
+}
+
+/* ndo_change_mtu: the VIO link only supports the standard ethernet
+ * MTU, so accept exactly ETH_DATA_LEN and reject everything else.
+ */
+static int vnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+       if (new_mtu == ETH_DATA_LEN) {
+               dev->mtu = new_mtu;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+/* ndo_set_mac_address: the MAC comes from the machine description and
+ * cannot be changed.
+ */
+static int vnet_set_mac_addr(struct net_device *dev, void *p)
+{
+       return -EINVAL;
+}
+
+/* ethtool get_drvinfo: report driver name and version.  Both strings
+ * are short compile-time constants, so plain strcpy into the fixed
+ * ethtool_drvinfo fields is safe here.
+ */
+static void vnet_get_drvinfo(struct net_device *dev,
+                            struct ethtool_drvinfo *info)
+{
+       strcpy(info->driver, DRV_MODULE_NAME);
+       strcpy(info->version, DRV_MODULE_VERSION);
+}
+
+/* ethtool get_msglevel: return the driver's message-enable mask. */
+static u32 vnet_get_msglevel(struct net_device *dev)
+{
+       struct vnet *vp = netdev_priv(dev);
+       return vp->msg_enable;
+}
+
+/* ethtool set_msglevel: set the driver's message-enable mask. */
+static void vnet_set_msglevel(struct net_device *dev, u32 value)
+{
+       struct vnet *vp = netdev_priv(dev);
+       vp->msg_enable = value;
+}
+
+/* ethtool operations exposed by the vnet device. */
+static const struct ethtool_ops vnet_ethtool_ops = {
+       .get_drvinfo            = vnet_get_drvinfo,
+       .get_msglevel           = vnet_get_msglevel,
+       .set_msglevel           = vnet_set_msglevel,
+       .get_link               = ethtool_op_get_link,
+};
+
+/* Tear down a port's TX resources: free the exported descriptor ring
+ * (if allocated) and unmap + free every per-slot TX buffer.  Safe to
+ * call on a partially initialized port (used on the alloc error path).
+ */
+static void vnet_port_free_tx_bufs(struct vnet_port *port)
+{
+       struct vio_dring_state *dr;
+       int i;
+
+       dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+       if (dr->base) {
+               ldc_free_exp_dring(port->vio.lp, dr->base,
+                                  (dr->entry_size * dr->num_entries),
+                                  dr->cookies, dr->ncookies);
+               dr->base = NULL;
+               dr->entry_size = 0;
+               dr->num_entries = 0;
+               dr->pending = 0;
+               dr->ncookies = 0;
+       }
+
+       for (i = 0; i < VNET_TX_RING_SIZE; i++) {
+               void *buf = port->tx_bufs[i].buf;
+
+               if (!buf)
+                       continue;
+
+               ldc_unmap(port->vio.lp,
+                         port->tx_bufs[i].cookies,
+                         port->tx_bufs[i].ncookies);
+
+               kfree(buf);
+               port->tx_bufs[i].buf = NULL;
+       }
+}
+
+/* Allocate and LDC-map one max-frame TX buffer per ring slot, then
+ * allocate and export the TX descriptor ring itself.  On any failure
+ * everything allocated so far is released via vnet_port_free_tx_bufs()
+ * and a negative errno is returned.
+ */
+static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
+{
+       struct vio_dring_state *dr;
+       unsigned long len;
+       int i, err, ncookies;
+       void *dring;
+
+       for (i = 0; i < VNET_TX_RING_SIZE; i++) {
+               void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
+               /* Map length rounded up to an 8-byte multiple. */
+               int map_len = (ETH_FRAME_LEN + 7) & ~7;
+
+               err = -ENOMEM;
+               if (!buf) {
+                       pr_err("TX buffer allocation failure\n");
+                       goto err_out;
+               }
+               err = -EFAULT;
+               /* LDC mapping requires 8-byte alignment; kzalloc should
+                * already guarantee this, so this is a sanity check.
+                */
+               if ((unsigned long)buf & (8UL - 1)) {
+                       pr_err("TX buffer misaligned\n");
+                       kfree(buf);
+                       goto err_out;
+               }
+
+               err = ldc_map_single(port->vio.lp, buf, map_len,
+                                    port->tx_bufs[i].cookies, 2,
+                                    (LDC_MAP_SHADOW |
+                                     LDC_MAP_DIRECT |
+                                     LDC_MAP_RW));
+               if (err < 0) {
+                       kfree(buf);
+                       goto err_out;
+               }
+               port->tx_bufs[i].buf = buf;
+               /* On success ldc_map_single() returns the cookie count. */
+               port->tx_bufs[i].ncookies = err;
+       }
+
+       dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+       /* Each ring entry is a descriptor plus room for two cookies. */
+       len = (VNET_TX_RING_SIZE *
+              (sizeof(struct vio_net_desc) +
+               (sizeof(struct ldc_trans_cookie) * 2)));
+
+       ncookies = VIO_MAX_RING_COOKIES;
+       dring = ldc_alloc_exp_dring(port->vio.lp, len,
+                                   dr->cookies, &ncookies,
+                                   (LDC_MAP_SHADOW |
+                                    LDC_MAP_DIRECT |
+                                    LDC_MAP_RW));
+       if (IS_ERR(dring)) {
+               err = PTR_ERR(dring);
+               goto err_out;
+       }
+
+       dr->base = dring;
+       dr->entry_size = (sizeof(struct vio_net_desc) +
+                         (sizeof(struct ldc_trans_cookie) * 2));
+       dr->num_entries = VNET_TX_RING_SIZE;
+       dr->prod = dr->cons = 0;
+       dr->pending = VNET_TX_RING_SIZE;
+       dr->ncookies = ncookies;
+
+       return 0;
+
+err_out:
+       vnet_port_free_tx_bufs(port);
+
+       return err;
+}
+
+static LIST_HEAD(vnet_list);
+static DEFINE_MUTEX(vnet_list_mutex);
+
+/* net_device operations for the vnet virtual NIC. */
+static const struct net_device_ops vnet_ops = {
+       .ndo_open               = vnet_open,
+       .ndo_stop               = vnet_close,
+       .ndo_set_multicast_list = vnet_set_rx_mode,
+       .ndo_set_mac_address    = vnet_set_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_tx_timeout         = vnet_tx_timeout,
+       .ndo_change_mtu         = vnet_change_mtu,
+       .ndo_start_xmit         = vnet_start_xmit,
+};
+
+/* Create and register one net_device for the given local MAC (stored
+ * big-endian in the low 48 bits of *local_mac) and add it to the
+ * global vnet_list.  Returns the new vnet or an ERR_PTR.
+ * Caller holds vnet_list_mutex.
+ */
+static struct vnet * __devinit vnet_new(const u64 *local_mac)
+{
+       struct net_device *dev;
+       struct vnet *vp;
+       int err, i;
+
+       dev = alloc_etherdev(sizeof(*vp));
+       if (!dev) {
+               pr_err("Etherdev alloc failed, aborting\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* Unpack the 6 MAC octets, most significant byte first. */
+       for (i = 0; i < ETH_ALEN; i++)
+               dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+
+       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
+       vp = netdev_priv(dev);
+
+       spin_lock_init(&vp->lock);
+       vp->dev = dev;
+
+       INIT_LIST_HEAD(&vp->port_list);
+       for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
+               INIT_HLIST_HEAD(&vp->port_hash[i]);
+       INIT_LIST_HEAD(&vp->list);
+       vp->local_mac = *local_mac;
+
+       dev->netdev_ops = &vnet_ops;
+       dev->ethtool_ops = &vnet_ethtool_ops;
+       dev->watchdog_timeo = VNET_TX_TIMEOUT;
+
+       err = register_netdev(dev);
+       if (err) {
+               pr_err("Cannot register net device, aborting\n");
+               goto err_out_free_dev;
+       }
+
+       netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr);
+
+       list_add(&vp->list, &vnet_list);
+
+       return vp;
+
+err_out_free_dev:
+       free_netdev(dev);
+
+       return ERR_PTR(err);
+}
+
+/* Look up the vnet with the given local MAC on the global list,
+ * creating it via vnet_new() if none exists yet.  Serialized by
+ * vnet_list_mutex; may return an ERR_PTR from vnet_new().
+ */
+static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
+{
+       struct vnet *iter, *vp;
+
+       mutex_lock(&vnet_list_mutex);
+       vp = NULL;
+       list_for_each_entry(iter, &vnet_list, list) {
+               if (iter->local_mac == *local_mac) {
+                       vp = iter;
+                       break;
+               }
+       }
+       if (!vp)
+               vp = vnet_new(local_mac);
+       mutex_unlock(&vnet_list_mutex);
+
+       return vp;
+}
+
+static const char *local_mac_prop = "local-mac-address";
+
+/* Walk the machine-description back-arcs of @port_node to find its
+ * parent "network" node, read that node's local-mac-address property,
+ * and return the matching (or newly created) vnet.  Returns
+ * ERR_PTR(-ENODEV) when no parent network node carries a MAC.
+ */
+static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
+                                               u64 port_node)
+{
+       const u64 *local_mac = NULL;
+       u64 a;
+
+       mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
+               u64 target = mdesc_arc_target(hp, a);
+               const char *name;
+
+               name = mdesc_get_property(hp, target, "name", NULL);
+               if (!name || strcmp(name, "network"))
+                       continue;
+
+               local_mac = mdesc_get_property(hp, target,
+                                              local_mac_prop, NULL);
+               if (local_mac)
+                       break;
+       }
+       if (!local_mac)
+               return ERR_PTR(-ENODEV);
+
+       return vnet_find_or_create(local_mac);
+}
+
+static struct ldc_channel_config vnet_ldc_cfg = {
+       .event          = vnet_event,
+       .mtu            = 64,
+       .mode           = LDC_MODE_UNRELIABLE,
+};
+
+static struct vio_driver_ops vnet_vio_ops = {
+       .send_attr              = vnet_send_attr,
+       .handle_attr            = vnet_handle_attr,
+       .handshake_complete     = vnet_handshake_complete,
+};
+
+/* Print the driver version banner once, on first probe. */
+static void __devinit print_version(void)
+{
+       printk_once(KERN_INFO "%s", version);
+}
+
+const char *remote_macaddr_prop = "remote-mac-address";
+
+/* Probe one "vnet-port" VIO device: find (or create) the parent vnet
+ * from the machine description, read the remote MAC, allocate the
+ * port with its LDC channel and TX buffers, link it into the vnet's
+ * port list/hash, and bring the VIO channel up.
+ */
+static int __devinit vnet_port_probe(struct vio_dev *vdev,
+                                    const struct vio_device_id *id)
+{
+       struct mdesc_handle *hp;
+       struct vnet_port *port;
+       unsigned long flags;
+       struct vnet *vp;
+       const u64 *rmac;
+       int len, i, err, switch_port;
+
+       print_version();
+
+       /* NOTE(review): mdesc_grab() result is not checked for NULL
+        * before use below -- confirm it cannot fail in this context.
+        */
+       hp = mdesc_grab();
+
+       vp = vnet_find_parent(hp, vdev->mp);
+       if (IS_ERR(vp)) {
+               pr_err("Cannot find port parent vnet\n");
+               err = PTR_ERR(vp);
+               goto err_out_put_mdesc;
+       }
+
+       rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
+       err = -ENODEV;
+       if (!rmac) {
+               pr_err("Port lacks %s property\n", remote_macaddr_prop);
+               goto err_out_put_mdesc;
+       }
+
+       port = kzalloc(sizeof(*port), GFP_KERNEL);
+       err = -ENOMEM;
+       if (!port) {
+               pr_err("Cannot allocate vnet_port\n");
+               goto err_out_put_mdesc;
+       }
+
+       /* Unpack the peer MAC, most significant byte first. */
+       for (i = 0; i < ETH_ALEN; i++)
+               port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;
+
+       port->vp = vp;
+
+       err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
+                             vnet_versions, ARRAY_SIZE(vnet_versions),
+                             &vnet_vio_ops, vp->dev->name);
+       if (err)
+               goto err_out_free_port;
+
+       err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
+       if (err)
+               goto err_out_free_port;
+
+       err = vnet_port_alloc_tx_bufs(port);
+       if (err)
+               goto err_out_free_ldc;
+
+       INIT_HLIST_NODE(&port->hash);
+       INIT_LIST_HEAD(&port->list);
+
+       switch_port = 0;
+       if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
+               switch_port = 1;
+       port->switch_port = switch_port;
+
+       /* Keep the switch port at the head of the list -- the TX and
+        * rx-mode paths use the first entry as the default port.
+        */
+       spin_lock_irqsave(&vp->lock, flags);
+       if (switch_port)
+               list_add(&port->list, &vp->port_list);
+       else
+               list_add_tail(&port->list, &vp->port_list);
+       hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
+       spin_unlock_irqrestore(&vp->lock, flags);
+
+       dev_set_drvdata(&vdev->dev, port);
+
+       pr_info("%s: PORT ( remote-mac %pM%s )\n",
+               vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
+
+       vio_port_up(&port->vio);
+
+       mdesc_release(hp);
+
+       return 0;
+
+err_out_free_ldc:
+       vio_ldc_free(&port->vio);
+
+err_out_free_port:
+       kfree(port);
+
+err_out_put_mdesc:
+       mdesc_release(hp);
+       return err;
+}
+
+/* Remove one vnet port: stop its VIO timer, unlink it from the parent
+ * vnet's list and hash under vp->lock, then release its TX buffers,
+ * LDC channel and memory.  The net_device itself stays registered.
+ */
+static int vnet_port_remove(struct vio_dev *vdev)
+{
+       struct vnet_port *port = dev_get_drvdata(&vdev->dev);
+
+       if (port) {
+               struct vnet *vp = port->vp;
+               unsigned long flags;
+
+               del_timer_sync(&port->vio.timer);
+
+               spin_lock_irqsave(&vp->lock, flags);
+               list_del(&port->list);
+               hlist_del(&port->hash);
+               spin_unlock_irqrestore(&vp->lock, flags);
+
+               vnet_port_free_tx_bufs(port);
+               vio_ldc_free(&port->vio);
+
+               dev_set_drvdata(&vdev->dev, NULL);
+
+               kfree(port);
+       }
+       return 0;
+}
+
+static const struct vio_device_id vnet_port_match[] = {
+       {
+               .type = "vnet-port",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(vio, vnet_port_match);
+
+static struct vio_driver vnet_port_driver = {
+       .id_table       = vnet_port_match,
+       .probe          = vnet_port_probe,
+       .remove         = vnet_port_remove,
+       .driver         = {
+               .name   = "vnet_port",
+               .owner  = THIS_MODULE,
+       }
+};
+
+/* Module init: register the vnet-port VIO driver. */
+static int __init vnet_init(void)
+{
+       return vio_register_driver(&vnet_port_driver);
+}
+
+/* Module exit: unregister the vnet-port VIO driver. */
+static void __exit vnet_exit(void)
+{
+       vio_unregister_driver(&vnet_port_driver);
+}
+
+module_init(vnet_init);
+module_exit(vnet_exit);
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
new file mode 100644 (file)
index 0000000..d347a5b
--- /dev/null
@@ -0,0 +1,83 @@
+#ifndef _SUNVNET_H
+#define _SUNVNET_H
+
+#define DESC_NCOOKIES(entry_size)      \
+       ((entry_size) - sizeof(struct vio_net_desc))
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+#define VNET_TX_TIMEOUT                        (5 * HZ)
+
+#define VNET_TX_RING_SIZE              512
+#define VNET_TX_WAKEUP_THRESH(dr)      ((dr)->pending / 4)
+
+/* VNET packets are sent in buffers with the first 6 bytes skipped
+ * so that after the ethernet header the IPv4/IPv6 headers are aligned
+ * properly.
+ */
+#define VNET_PACKET_SKIP               6
+
+/* Per-slot TX state: the pre-allocated frame buffer and the LDC
+ * cookies (at most 2) it was mapped with.
+ */
+struct vnet_tx_entry {
+       void                    *buf;
+       unsigned int            ncookies;
+       struct ldc_trans_cookie cookies[2];
+};
+
+struct vnet;
+/* One LDC channel to a peer (guest or the virtual switch), owned by a
+ * parent vnet device.  Linked into the parent's port_list and, keyed
+ * by raddr, into its port_hash.
+ */
+struct vnet_port {
+       struct vio_driver_state vio;
+
+       struct hlist_node       hash;
+       u8                      raddr[ETH_ALEN];  /* peer MAC address */
+       u8                      switch_port;      /* 1 if virtual switch */
+       u8                      __pad;
+
+       struct vnet             *vp;              /* parent device */
+
+       struct vnet_tx_entry    tx_bufs[VNET_TX_RING_SIZE];
+
+       struct list_head        list;
+};
+
+/* Recover the enclosing vnet_port from its embedded VIO state. */
+static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
+{
+       return container_of(vio, struct vnet_port, vio);
+}
+
+#define VNET_PORT_HASH_SIZE    16
+#define VNET_PORT_HASH_MASK    (VNET_PORT_HASH_SIZE - 1)
+
+/* Hash a MAC address into the port hash table using its last two
+ * octets (the ones that vary most between peers).
+ */
+static inline unsigned int vnet_hashfn(u8 *mac)
+{
+       return (mac[4] ^ mac[5]) & VNET_PORT_HASH_MASK;
+}
+
+/* Node of the driver's singly-linked multicast shadow list.
+ * sent: address has been announced to the switch port;
+ * hit:  address was present in the last dev mc list scan.
+ */
+struct vnet_mcast_entry {
+       u8                      addr[ETH_ALEN];
+       u8                      sent;
+       u8                      hit;
+       struct vnet_mcast_entry *next;
+};
+
+/* One virtual network device, identified by local_mac, aggregating
+ * any number of vnet_port channels.  Linked into the global vnet_list.
+ */
+struct vnet {
+       /* Protects port_list and port_hash.  */
+       spinlock_t              lock;
+
+       struct net_device       *dev;
+
+       u32                     msg_enable;       /* ethtool msglevel mask */
+
+       struct list_head        port_list;
+
+       struct hlist_head       port_hash[VNET_PORT_HASH_SIZE];
+
+       struct vnet_mcast_entry *mcast_list;
+
+       struct list_head        list;             /* on global vnet_list */
+       u64                     local_mac;
+};
+
+#endif /* _SUNVNET_H */
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
deleted file mode 100644 (file)
index ed47585..0000000
+++ /dev/null
@@ -1,10263 +0,0 @@
-/* niu.c: Neptune ethernet driver.
- *
- * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <linux/mii.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#include <linux/in.h>
-#include <linux/ipv6.h>
-#include <linux/log2.h>
-#include <linux/jiffies.h>
-#include <linux/crc32.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <linux/io.h>
-#include <linux/of_device.h>
-
-#include "niu.h"
-
-#define DRV_MODULE_NAME                "niu"
-#define DRV_MODULE_VERSION     "1.1"
-#define DRV_MODULE_RELDATE     "Apr 22, 2010"
-
-static char version[] __devinitdata =
-       DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
-MODULE_DESCRIPTION("NIU ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
-
-#ifndef readq
-static u64 readq(void __iomem *reg)
-{
-       return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
-}
-
-static void writeq(u64 val, void __iomem *reg)
-{
-       writel(val & 0xffffffff, reg);
-       writel(val >> 32, reg + 0x4UL);
-}
-#endif
-
-static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
-       {PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
-       {}
-};
-
-MODULE_DEVICE_TABLE(pci, niu_pci_tbl);
-
-#define NIU_TX_TIMEOUT                 (5 * HZ)
-
-#define nr64(reg)              readq(np->regs + (reg))
-#define nw64(reg, val)         writeq((val), np->regs + (reg))
-
-#define nr64_mac(reg)          readq(np->mac_regs + (reg))
-#define nw64_mac(reg, val)     writeq((val), np->mac_regs + (reg))
-
-#define nr64_ipp(reg)          readq(np->regs + np->ipp_off + (reg))
-#define nw64_ipp(reg, val)     writeq((val), np->regs + np->ipp_off + (reg))
-
-#define nr64_pcs(reg)          readq(np->regs + np->pcs_off + (reg))
-#define nw64_pcs(reg, val)     writeq((val), np->regs + np->pcs_off + (reg))
-
-#define nr64_xpcs(reg)         readq(np->regs + np->xpcs_off + (reg))
-#define nw64_xpcs(reg, val)    writeq((val), np->regs + np->xpcs_off + (reg))
-
-#define NIU_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
-
-static int niu_debug;
-static int debug = -1;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "NIU debug level");
-
-#define niu_lock_parent(np, flags) \
-       spin_lock_irqsave(&np->parent->lock, flags)
-#define niu_unlock_parent(np, flags) \
-       spin_unlock_irqrestore(&np->parent->lock, flags)
-
-static int serdes_init_10g_serdes(struct niu *np);
-
-static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
-                                    u64 bits, int limit, int delay)
-{
-       while (--limit >= 0) {
-               u64 val = nr64_mac(reg);
-
-               if (!(val & bits))
-                       break;
-               udelay(delay);
-       }
-       if (limit < 0)
-               return -ENODEV;
-       return 0;
-}
-
-static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
-                                       u64 bits, int limit, int delay,
-                                       const char *reg_name)
-{
-       int err;
-
-       nw64_mac(reg, bits);
-       err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
-       if (err)
-               netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
-                          (unsigned long long)bits, reg_name,
-                          (unsigned long long)nr64_mac(reg));
-       return err;
-}
-
-#define niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
-({     BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
-       __niu_set_and_wait_clear_mac(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
-})
-
-static int __niu_wait_bits_clear_ipp(struct niu *np, unsigned long reg,
-                                    u64 bits, int limit, int delay)
-{
-       while (--limit >= 0) {
-               u64 val = nr64_ipp(reg);
-
-               if (!(val & bits))
-                       break;
-               udelay(delay);
-       }
-       if (limit < 0)
-               return -ENODEV;
-       return 0;
-}
-
-static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
-                                       u64 bits, int limit, int delay,
-                                       const char *reg_name)
-{
-       int err;
-       u64 val;
-
-       val = nr64_ipp(reg);
-       val |= bits;
-       nw64_ipp(reg, val);
-
-       err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
-       if (err)
-               netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
-                          (unsigned long long)bits, reg_name,
-                          (unsigned long long)nr64_ipp(reg));
-       return err;
-}
-
-#define niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
-({     BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
-       __niu_set_and_wait_clear_ipp(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
-})
-
-static int __niu_wait_bits_clear(struct niu *np, unsigned long reg,
-                                u64 bits, int limit, int delay)
-{
-       while (--limit >= 0) {
-               u64 val = nr64(reg);
-
-               if (!(val & bits))
-                       break;
-               udelay(delay);
-       }
-       if (limit < 0)
-               return -ENODEV;
-       return 0;
-}
-
-#define niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY) \
-({     BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
-       __niu_wait_bits_clear(NP, REG, BITS, LIMIT, DELAY); \
-})
-
-static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
-                                   u64 bits, int limit, int delay,
-                                   const char *reg_name)
-{
-       int err;
-
-       nw64(reg, bits);
-       err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
-       if (err)
-               netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
-                          (unsigned long long)bits, reg_name,
-                          (unsigned long long)nr64(reg));
-       return err;
-}
-
-#define niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME) \
-({     BUILD_BUG_ON(LIMIT <= 0 || DELAY < 0); \
-       __niu_set_and_wait_clear(NP, REG, BITS, LIMIT, DELAY, REG_NAME); \
-})
-
-static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
-{
-       u64 val = (u64) lp->timer;
-
-       if (on)
-               val |= LDG_IMGMT_ARM;
-
-       nw64(LDG_IMGMT(lp->ldg_num), val);
-}
-
-static int niu_ldn_irq_enable(struct niu *np, int ldn, int on)
-{
-       unsigned long mask_reg, bits;
-       u64 val;
-
-       if (ldn < 0 || ldn > LDN_MAX)
-               return -EINVAL;
-
-       if (ldn < 64) {
-               mask_reg = LD_IM0(ldn);
-               bits = LD_IM0_MASK;
-       } else {
-               mask_reg = LD_IM1(ldn - 64);
-               bits = LD_IM1_MASK;
-       }
-
-       val = nr64(mask_reg);
-       if (on)
-               val &= ~bits;
-       else
-               val |= bits;
-       nw64(mask_reg, val);
-
-       return 0;
-}
-
-static int niu_enable_ldn_in_ldg(struct niu *np, struct niu_ldg *lp, int on)
-{
-       struct niu_parent *parent = np->parent;
-       int i;
-
-       for (i = 0; i <= LDN_MAX; i++) {
-               int err;
-
-               if (parent->ldg_map[i] != lp->ldg_num)
-                       continue;
-
-               err = niu_ldn_irq_enable(np, i, on);
-               if (err)
-                       return err;
-       }
-       return 0;
-}
-
-static int niu_enable_interrupts(struct niu *np, int on)
-{
-       int i;
-
-       for (i = 0; i < np->num_ldg; i++) {
-               struct niu_ldg *lp = &np->ldg[i];
-               int err;
-
-               err = niu_enable_ldn_in_ldg(np, lp, on);
-               if (err)
-                       return err;
-       }
-       for (i = 0; i < np->num_ldg; i++)
-               niu_ldg_rearm(np, &np->ldg[i], on);
-
-       return 0;
-}
-
-static u32 phy_encode(u32 type, int port)
-{
-       return type << (port * 2);
-}
-
-static u32 phy_decode(u32 val, int port)
-{
-       return (val >> (port * 2)) & PORT_TYPE_MASK;
-}
-
-static int mdio_wait(struct niu *np)
-{
-       int limit = 1000;
-       u64 val;
-
-       while (--limit > 0) {
-               val = nr64(MIF_FRAME_OUTPUT);
-               if ((val >> MIF_FRAME_OUTPUT_TA_SHIFT) & 0x1)
-                       return val & MIF_FRAME_OUTPUT_DATA;
-
-               udelay(10);
-       }
-
-       return -ENODEV;
-}
-
-static int mdio_read(struct niu *np, int port, int dev, int reg)
-{
-       int err;
-
-       nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
-       err = mdio_wait(np);
-       if (err < 0)
-               return err;
-
-       nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
-       return mdio_wait(np);
-}
-
-static int mdio_write(struct niu *np, int port, int dev, int reg, int data)
-{
-       int err;
-
-       nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
-       err = mdio_wait(np);
-       if (err < 0)
-               return err;
-
-       nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data));
-       err = mdio_wait(np);
-       if (err < 0)
-               return err;
-
-       return 0;
-}
-
-static int mii_read(struct niu *np, int port, int reg)
-{
-       nw64(MIF_FRAME_OUTPUT, MII_READ_OP(port, reg));
-       return mdio_wait(np);
-}
-
-static int mii_write(struct niu *np, int port, int reg, int data)
-{
-       int err;
-
-       nw64(MIF_FRAME_OUTPUT, MII_WRITE_OP(port, reg, data));
-       err = mdio_wait(np);
-       if (err < 0)
-               return err;
-
-       return 0;
-}
-
-static int esr2_set_tx_cfg(struct niu *np, unsigned long channel, u32 val)
-{
-       int err;
-
-       err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
-                        ESR2_TI_PLL_TX_CFG_L(channel),
-                        val & 0xffff);
-       if (!err)
-               err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
-                                ESR2_TI_PLL_TX_CFG_H(channel),
-                                val >> 16);
-       return err;
-}
-
-static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
-{
-       int err;
-
-       err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
-                        ESR2_TI_PLL_RX_CFG_L(channel),
-                        val & 0xffff);
-       if (!err)
-               err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
-                                ESR2_TI_PLL_RX_CFG_H(channel),
-                                val >> 16);
-       return err;
-}
-
-/* Mode is always 10G fiber.  */
-static int serdes_init_niu_10g_fiber(struct niu *np)
-{
-       struct niu_link_config *lp = &np->link_config;
-       u32 tx_cfg, rx_cfg;
-       unsigned long i;
-
-       tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
-       rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
-                 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
-                 PLL_RX_CFG_EQ_LP_ADAPTIVE);
-
-       if (lp->loopback_mode == LOOPBACK_PHY) {
-               u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
-
-               mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
-                          ESR2_TI_PLL_TEST_CFG_L, test_cfg);
-
-               tx_cfg |= PLL_TX_CFG_ENTEST;
-               rx_cfg |= PLL_RX_CFG_ENTEST;
-       }
-
-       /* Initialize all 4 lanes of the SERDES.  */
-       for (i = 0; i < 4; i++) {
-               int err = esr2_set_tx_cfg(np, i, tx_cfg);
-               if (err)
-                       return err;
-       }
-
-       for (i = 0; i < 4; i++) {
-               int err = esr2_set_rx_cfg(np, i, rx_cfg);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-static int serdes_init_niu_1g_serdes(struct niu *np)
-{
-       struct niu_link_config *lp = &np->link_config;
-       u16 pll_cfg, pll_sts;
-       int max_retry = 100;
-       u64 uninitialized_var(sig), mask, val;
-       u32 tx_cfg, rx_cfg;
-       unsigned long i;
-       int err;
-
-       tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
-                 PLL_TX_CFG_RATE_HALF);
-       rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
-                 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
-                 PLL_RX_CFG_RATE_HALF);
-
-       if (np->port == 0)
-               rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;
-
-       if (lp->loopback_mode == LOOPBACK_PHY) {
-               u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
-
-               mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
-                          ESR2_TI_PLL_TEST_CFG_L, test_cfg);
-
-               tx_cfg |= PLL_TX_CFG_ENTEST;
-               rx_cfg |= PLL_RX_CFG_ENTEST;
-       }
-
-       /* Initialize PLL for 1G */
-       pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);
-
-       err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
-                        ESR2_TI_PLL_CFG_L, pll_cfg);
-       if (err) {
-               netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
-                          np->port, __func__);
-               return err;
-       }
-
-       pll_sts = PLL_CFG_ENPLL;
-
-       err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
-                        ESR2_TI_PLL_STS_L, pll_sts);
-       if (err) {
-               netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
-                          np->port, __func__);
-               return err;
-       }
-
-       udelay(200);
-
-       /* Initialize all 4 lanes of the SERDES.  */
-       for (i = 0; i < 4; i++) {
-               err = esr2_set_tx_cfg(np, i, tx_cfg);
-               if (err)
-                       return err;
-       }
-
-       for (i = 0; i < 4; i++) {
-               err = esr2_set_rx_cfg(np, i, rx_cfg);
-               if (err)
-                       return err;
-       }
-
-       switch (np->port) {
-       case 0:
-               val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
-               mask = val;
-               break;
-
-       case 1:
-               val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
-               mask = val;
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       while (max_retry--) {
-               sig = nr64(ESR_INT_SIGNALS);
-               if ((sig & mask) == val)
-                       break;
-
-               mdelay(500);
-       }
-
-       if ((sig & mask) != val) {
-               netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
-                          np->port, (int)(sig & mask), (int)val);
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static int serdes_init_niu_10g_serdes(struct niu *np)
-{
-       struct niu_link_config *lp = &np->link_config;
-       u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
-       int max_retry = 100;
-       u64 uninitialized_var(sig), mask, val;
-       unsigned long i;
-       int err;
-
-       tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
-       rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
-                 PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
-                 PLL_RX_CFG_EQ_LP_ADAPTIVE);
-
-       if (lp->loopback_mode == LOOPBACK_PHY) {
-               u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
-
-               mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
-                          ESR2_TI_PLL_TEST_CFG_L, test_cfg);
-
-               tx_cfg |= PLL_TX_CFG_ENTEST;
-               rx_cfg |= PLL_RX_CFG_ENTEST;
-       }
-
-       /* Initialize PLL for 10G */
-       pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);
-
-       err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
-                        ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
-       if (err) {
-               netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
-                          np->port, __func__);
-               return err;
-       }
-
-       pll_sts = PLL_CFG_ENPLL;
-
-       err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
-                        ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
-       if (err) {
-               netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
-                          np->port, __func__);
-               return err;
-       }
-
-       udelay(200);
-
-       /* Initialize all 4 lanes of the SERDES.  */
-       for (i = 0; i < 4; i++) {
-               err = esr2_set_tx_cfg(np, i, tx_cfg);
-               if (err)
-                       return err;
-       }
-
-       for (i = 0; i < 4; i++) {
-               err = esr2_set_rx_cfg(np, i, rx_cfg);
-               if (err)
-                       return err;
-       }
-
-       /* check if serdes is ready */
-
-       switch (np->port) {
-       case 0:
-               mask = ESR_INT_SIGNALS_P0_BITS;
-               val = (ESR_INT_SRDY0_P0 |
-                      ESR_INT_DET0_P0 |
-                      ESR_INT_XSRDY_P0 |
-                      ESR_INT_XDP_P0_CH3 |
-                      ESR_INT_XDP_P0_CH2 |
-                      ESR_INT_XDP_P0_CH1 |
-                      ESR_INT_XDP_P0_CH0);
-               break;
-
-       case 1:
-               mask = ESR_INT_SIGNALS_P1_BITS;
-               val = (ESR_INT_SRDY0_P1 |
-                      ESR_INT_DET0_P1 |
-                      ESR_INT_XSRDY_P1 |
-                      ESR_INT_XDP_P1_CH3 |
-                      ESR_INT_XDP_P1_CH2 |
-                      ESR_INT_XDP_P1_CH1 |
-                      ESR_INT_XDP_P1_CH0);
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       while (max_retry--) {
-               sig = nr64(ESR_INT_SIGNALS);
-               if ((sig & mask) == val)
-                       break;
-
-               mdelay(500);
-       }
-
-       if ((sig & mask) != val) {
-               pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
-                       np->port, (int)(sig & mask), (int)val);
-
-               /* 10G failed, try initializing at 1G */
-               err = serdes_init_niu_1g_serdes(np);
-               if (!err) {
-                       np->flags &= ~NIU_FLAGS_10G;
-                       np->mac_xcvr = MAC_XCVR_PCS;
-               }  else {
-                       netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
-                                  np->port);
-                       return -ENODEV;
-               }
-       }
-       return 0;
-}
-
-static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
-{
-       int err;
-
-       err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
-       if (err >= 0) {
-               *val = (err & 0xffff);
-               err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
-                               ESR_RXTX_CTRL_H(chan));
-               if (err >= 0)
-                       *val |= ((err & 0xffff) << 16);
-               err = 0;
-       }
-       return err;
-}
-
-static int esr_read_glue0(struct niu *np, unsigned long chan, u32 *val)
-{
-       int err;
-
-       err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
-                       ESR_GLUE_CTRL0_L(chan));
-       if (err >= 0) {
-               *val = (err & 0xffff);
-               err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
-                               ESR_GLUE_CTRL0_H(chan));
-               if (err >= 0) {
-                       *val |= ((err & 0xffff) << 16);
-                       err = 0;
-               }
-       }
-       return err;
-}
-
-static int esr_read_reset(struct niu *np, u32 *val)
-{
-       int err;
-
-       err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
-                       ESR_RXTX_RESET_CTRL_L);
-       if (err >= 0) {
-               *val = (err & 0xffff);
-               err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
-                               ESR_RXTX_RESET_CTRL_H);
-               if (err >= 0) {
-                       *val |= ((err & 0xffff) << 16);
-                       err = 0;
-               }
-       }
-       return err;
-}
-
-static int esr_write_rxtx_ctrl(struct niu *np, unsigned long chan, u32 val)
-{
-       int err;
-
-       err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
-                        ESR_RXTX_CTRL_L(chan), val & 0xffff);
-       if (!err)
-               err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
-                                ESR_RXTX_CTRL_H(chan), (val >> 16));
-       return err;
-}
-
-static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
-{
-       int err;
-
-       err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
-                       ESR_GLUE_CTRL0_L(chan), val & 0xffff);
-       if (!err)
-               err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
-                                ESR_GLUE_CTRL0_H(chan), (val >> 16));
-       return err;
-}
-
-static int esr_reset(struct niu *np)
-{
-       u32 uninitialized_var(reset);
-       int err;
-
-       err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
-                        ESR_RXTX_RESET_CTRL_L, 0x0000);
-       if (err)
-               return err;
-       err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
-                        ESR_RXTX_RESET_CTRL_H, 0xffff);
-       if (err)
-               return err;
-       udelay(200);
-
-       err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
-                        ESR_RXTX_RESET_CTRL_L, 0xffff);
-       if (err)
-               return err;
-       udelay(200);
-
-       err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
-                        ESR_RXTX_RESET_CTRL_H, 0x0000);
-       if (err)
-               return err;
-       udelay(200);
-
-       err = esr_read_reset(np, &reset);
-       if (err)
-               return err;
-       if (reset != 0) {
-               netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
-                          np->port, reset);
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static int serdes_init_10g(struct niu *np)
-{
-       struct niu_link_config *lp = &np->link_config;
-       unsigned long ctrl_reg, test_cfg_reg, i;
-       u64 ctrl_val, test_cfg_val, sig, mask, val;
-       int err;
-
-       switch (np->port) {
-       case 0:
-               ctrl_reg = ENET_SERDES_0_CTRL_CFG;
-               test_cfg_reg = ENET_SERDES_0_TEST_CFG;
-               break;
-       case 1:
-               ctrl_reg = ENET_SERDES_1_CTRL_CFG;
-               test_cfg_reg = ENET_SERDES_1_TEST_CFG;
-               break;
-
-       default:
-               return -EINVAL;
-       }
-       ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
-                   ENET_SERDES_CTRL_SDET_1 |
-                   ENET_SERDES_CTRL_SDET_2 |
-                   ENET_SERDES_CTRL_SDET_3 |
-                   (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
-                   (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
-                   (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
-                   (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
-                   (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
-                   (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
-                   (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
-                   (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
-       test_cfg_val = 0;
-
-       if (lp->loopback_mode == LOOPBACK_PHY) {
-               test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
-                                 ENET_SERDES_TEST_MD_0_SHIFT) |
-                                (ENET_TEST_MD_PAD_LOOPBACK <<
-                                 ENET_SERDES_TEST_MD_1_SHIFT) |
-                                (ENET_TEST_MD_PAD_LOOPBACK <<
-                                 ENET_SERDES_TEST_MD_2_SHIFT) |
-                                (ENET_TEST_MD_PAD_LOOPBACK <<
-                                 ENET_SERDES_TEST_MD_3_SHIFT));
-       }
-
-       nw64(ctrl_reg, ctrl_val);
-       nw64(test_cfg_reg, test_cfg_val);
-
-       /* Initialize all 4 lanes of the SERDES.  */
-       for (i = 0; i < 4; i++) {
-               u32 rxtx_ctrl, glue0;
-
-               err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
-               if (err)
-                       return err;
-               err = esr_read_glue0(np, i, &glue0);
-               if (err)
-                       return err;
-
-               rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
-               rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
-                             (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
-
-               glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
-                          ESR_GLUE_CTRL0_THCNT |
-                          ESR_GLUE_CTRL0_BLTIME);
-               glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
-                         (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
-                         (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
-                         (BLTIME_300_CYCLES <<
-                          ESR_GLUE_CTRL0_BLTIME_SHIFT));
-
-               err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
-               if (err)
-                       return err;
-               err = esr_write_glue0(np, i, glue0);
-               if (err)
-                       return err;
-       }
-
-       err = esr_reset(np);
-       if (err)
-               return err;
-
-       sig = nr64(ESR_INT_SIGNALS);
-       switch (np->port) {
-       case 0:
-               mask = ESR_INT_SIGNALS_P0_BITS;
-               val = (ESR_INT_SRDY0_P0 |
-                      ESR_INT_DET0_P0 |
-                      ESR_INT_XSRDY_P0 |
-                      ESR_INT_XDP_P0_CH3 |
-                      ESR_INT_XDP_P0_CH2 |
-                      ESR_INT_XDP_P0_CH1 |
-                      ESR_INT_XDP_P0_CH0);
-               break;
-
-       case 1:
-               mask = ESR_INT_SIGNALS_P1_BITS;
-               val = (ESR_INT_SRDY0_P1 |
-                      ESR_INT_DET0_P1 |
-                      ESR_INT_XSRDY_P1 |
-                      ESR_INT_XDP_P1_CH3 |
-                      ESR_INT_XDP_P1_CH2 |
-                      ESR_INT_XDP_P1_CH1 |
-                      ESR_INT_XDP_P1_CH0);
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       if ((sig & mask) != val) {
-               if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
-                       np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
-                       return 0;
-               }
-               netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
-                          np->port, (int)(sig & mask), (int)val);
-               return -ENODEV;
-       }
-       if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
-               np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
-       return 0;
-}
-
-static int serdes_init_1g(struct niu *np)
-{
-       u64 val;
-
-       val = nr64(ENET_SERDES_1_PLL_CFG);
-       val &= ~ENET_SERDES_PLL_FBDIV2;
-       switch (np->port) {
-       case 0:
-               val |= ENET_SERDES_PLL_HRATE0;
-               break;
-       case 1:
-               val |= ENET_SERDES_PLL_HRATE1;
-               break;
-       case 2:
-               val |= ENET_SERDES_PLL_HRATE2;
-               break;
-       case 3:
-               val |= ENET_SERDES_PLL_HRATE3;
-               break;
-       default:
-               return -EINVAL;
-       }
-       nw64(ENET_SERDES_1_PLL_CFG, val);
-
-       return 0;
-}
-
-static int serdes_init_1g_serdes(struct niu *np)
-{
-       struct niu_link_config *lp = &np->link_config;
-       unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
-       u64 ctrl_val, test_cfg_val, sig, mask, val;
-       int err;
-       u64 reset_val, val_rd;
-
-       val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
-               ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
-               ENET_SERDES_PLL_FBDIV0;
-       switch (np->port) {
-       case 0:
-               reset_val =  ENET_SERDES_RESET_0;
-               ctrl_reg = ENET_SERDES_0_CTRL_CFG;
-               test_cfg_reg = ENET_SERDES_0_TEST_CFG;
-               pll_cfg = ENET_SERDES_0_PLL_CFG;
-               break;
-       case 1:
-               reset_val =  ENET_SERDES_RESET_1;
-               ctrl_reg = ENET_SERDES_1_CTRL_CFG;
-               test_cfg_reg = ENET_SERDES_1_TEST_CFG;
-               pll_cfg = ENET_SERDES_1_PLL_CFG;
-               break;
-
-       default:
-               return -EINVAL;
-       }
-       ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
-                   ENET_SERDES_CTRL_SDET_1 |
-                   ENET_SERDES_CTRL_SDET_2 |
-                   ENET_SERDES_CTRL_SDET_3 |
-                   (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
-                   (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
-                   (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
-                   (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
-                   (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
-                   (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
-                   (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
-                   (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
-       test_cfg_val = 0;
-
-       if (lp->loopback_mode == LOOPBACK_PHY) {
-               test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
-                                 ENET_SERDES_TEST_MD_0_SHIFT) |
-                                (ENET_TEST_MD_PAD_LOOPBACK <<
-                                 ENET_SERDES_TEST_MD_1_SHIFT) |
-                                (ENET_TEST_MD_PAD_LOOPBACK <<
-                                 ENET_SERDES_TEST_MD_2_SHIFT) |
-                                (ENET_TEST_MD_PAD_LOOPBACK <<
-                                 ENET_SERDES_TEST_MD_3_SHIFT));
-       }
-
-       nw64(ENET_SERDES_RESET, reset_val);
-       mdelay(20);
-       val_rd = nr64(ENET_SERDES_RESET);
-       val_rd &= ~reset_val;
-       nw64(pll_cfg, val);
-       nw64(ctrl_reg, ctrl_val);
-       nw64(test_cfg_reg, test_cfg_val);
-       nw64(ENET_SERDES_RESET, val_rd);
-       mdelay(2000);
-
-       /* Initialize all 4 lanes of the SERDES.  */
-       for (i = 0; i < 4; i++) {
-               u32 rxtx_ctrl, glue0;
-
-               err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
-               if (err)
-                       return err;
-               err = esr_read_glue0(np, i, &glue0);
-               if (err)
-                       return err;
-
-               rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
-               rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
-                             (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
-
-               glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
-                          ESR_GLUE_CTRL0_THCNT |
-                          ESR_GLUE_CTRL0_BLTIME);
-               glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
-                         (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
-                         (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
-                         (BLTIME_300_CYCLES <<
-                          ESR_GLUE_CTRL0_BLTIME_SHIFT));
-
-               err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
-               if (err)
-                       return err;
-               err = esr_write_glue0(np, i, glue0);
-               if (err)
-                       return err;
-       }
-
-
-       sig = nr64(ESR_INT_SIGNALS);
-       switch (np->port) {
-       case 0:
-               val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
-               mask = val;
-               break;
-
-       case 1:
-               val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
-               mask = val;
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       if ((sig & mask) != val) {
-               netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
-                          np->port, (int)(sig & mask), (int)val);
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static int link_status_1g_serdes(struct niu *np, int *link_up_p)
-{
-       struct niu_link_config *lp = &np->link_config;
-       int link_up;
-       u64 val;
-       u16 current_speed;
-       unsigned long flags;
-       u8 current_duplex;
-
-       link_up = 0;
-       current_speed = SPEED_INVALID;
-       current_duplex = DUPLEX_INVALID;
-
-       spin_lock_irqsave(&np->lock, flags);
-
-       val = nr64_pcs(PCS_MII_STAT);
-
-       if (val & PCS_MII_STAT_LINK_STATUS) {
-               link_up = 1;
-               current_speed = SPEED_1000;
-               current_duplex = DUPLEX_FULL;
-       }
-
-       lp->active_speed = current_speed;
-       lp->active_duplex = current_duplex;
-       spin_unlock_irqrestore(&np->lock, flags);
-
-       *link_up_p = link_up;
-       return 0;
-}
-
-static int link_status_10g_serdes(struct niu *np, int *link_up_p)
-{
-       unsigned long flags;
-       struct niu_link_config *lp = &np->link_config;
-       int link_up = 0;
-       int link_ok = 1;
-       u64 val, val2;
-       u16 current_speed;
-       u8 current_duplex;
-
-       if (!(np->flags & NIU_FLAGS_10G))
-               return link_status_1g_serdes(np, link_up_p);
-
-       current_speed = SPEED_INVALID;
-       current_duplex = DUPLEX_INVALID;
-       spin_lock_irqsave(&np->lock, flags);
-
-       val = nr64_xpcs(XPCS_STATUS(0));
-       val2 = nr64_mac(XMAC_INTER2);
-       if (val2 & 0x01000000)
-               link_ok = 0;
-
-       if ((val & 0x1000ULL) && link_ok) {
-               link_up = 1;
-               current_speed = SPEED_10000;
-               current_duplex = DUPLEX_FULL;
-       }
-       lp->active_speed = current_speed;
-       lp->active_duplex = current_duplex;
-       spin_unlock_irqrestore(&np->lock, flags);
-       *link_up_p = link_up;
-       return 0;
-}
-
-static int link_status_mii(struct niu *np, int *link_up_p)
-{
-       struct niu_link_config *lp = &np->link_config;
-       int err;
-       int bmsr, advert, ctrl1000, stat1000, lpa, bmcr, estatus;
-       int supported, advertising, active_speed, active_duplex;
-
-       err = mii_read(np, np->phy_addr, MII_BMCR);
-       if (unlikely(err < 0))
-               return err;
-       bmcr = err;
-
-       err = mii_read(np, np->phy_addr, MII_BMSR);
-       if (unlikely(err < 0))
-               return err;
-       bmsr = err;
-
-       err = mii_read(np, np->phy_addr, MII_ADVERTISE);
-       if (unlikely(err < 0))
-               return err;
-       advert = err;
-
-       err = mii_read(np, np->phy_addr, MII_LPA);
-       if (unlikely(err < 0))
-               return err;
-       lpa = err;
-
-       if (likely(bmsr & BMSR_ESTATEN)) {
-               err = mii_read(np, np->phy_addr, MII_ESTATUS);
-               if (unlikely(err < 0))
-                       return err;
-               estatus = err;
-
-               err = mii_read(np, np->phy_addr, MII_CTRL1000);
-               if (unlikely(err < 0))
-                       return err;
-               ctrl1000 = err;
-
-               err = mii_read(np, np->phy_addr, MII_STAT1000);
-               if (unlikely(err < 0))
-                       return err;
-               stat1000 = err;
-       } else
-               estatus = ctrl1000 = stat1000 = 0;
-
-       supported = 0;
-       if (bmsr & BMSR_ANEGCAPABLE)
-               supported |= SUPPORTED_Autoneg;
-       if (bmsr & BMSR_10HALF)
-               supported |= SUPPORTED_10baseT_Half;
-       if (bmsr & BMSR_10FULL)
-               supported |= SUPPORTED_10baseT_Full;
-       if (bmsr & BMSR_100HALF)
-               supported |= SUPPORTED_100baseT_Half;
-       if (bmsr & BMSR_100FULL)
-               supported |= SUPPORTED_100baseT_Full;
-       if (estatus & ESTATUS_1000_THALF)
-               supported |= SUPPORTED_1000baseT_Half;
-       if (estatus & ESTATUS_1000_TFULL)
-               supported |= SUPPORTED_1000baseT_Full;
-       lp->supported = supported;
-
-       advertising = 0;
-       if (advert & ADVERTISE_10HALF)
-               advertising |= ADVERTISED_10baseT_Half;
-       if (advert & ADVERTISE_10FULL)
-               advertising |= ADVERTISED_10baseT_Full;
-       if (advert & ADVERTISE_100HALF)
-               advertising |= ADVERTISED_100baseT_Half;
-       if (advert & ADVERTISE_100FULL)
-               advertising |= ADVERTISED_100baseT_Full;
-       if (ctrl1000 & ADVERTISE_1000HALF)
-               advertising |= ADVERTISED_1000baseT_Half;
-       if (ctrl1000 & ADVERTISE_1000FULL)
-               advertising |= ADVERTISED_1000baseT_Full;
-
-       if (bmcr & BMCR_ANENABLE) {
-               int neg, neg1000;
-
-               lp->active_autoneg = 1;
-               advertising |= ADVERTISED_Autoneg;
-
-               neg = advert & lpa;
-               neg1000 = (ctrl1000 << 2) & stat1000;
-
-               if (neg1000 & (LPA_1000FULL | LPA_1000HALF))
-                       active_speed = SPEED_1000;
-               else if (neg & LPA_100)
-                       active_speed = SPEED_100;
-               else if (neg & (LPA_10HALF | LPA_10FULL))
-                       active_speed = SPEED_10;
-               else
-                       active_speed = SPEED_INVALID;
-
-               if ((neg1000 & LPA_1000FULL) || (neg & LPA_DUPLEX))
-                       active_duplex = DUPLEX_FULL;
-               else if (active_speed != SPEED_INVALID)
-                       active_duplex = DUPLEX_HALF;
-               else
-                       active_duplex = DUPLEX_INVALID;
-       } else {
-               lp->active_autoneg = 0;
-
-               if ((bmcr & BMCR_SPEED1000) && !(bmcr & BMCR_SPEED100))
-                       active_speed = SPEED_1000;
-               else if (bmcr & BMCR_SPEED100)
-                       active_speed = SPEED_100;
-               else
-                       active_speed = SPEED_10;
-
-               if (bmcr & BMCR_FULLDPLX)
-                       active_duplex = DUPLEX_FULL;
-               else
-                       active_duplex = DUPLEX_HALF;
-       }
-
-       lp->active_advertising = advertising;
-       lp->active_speed = active_speed;
-       lp->active_duplex = active_duplex;
-       *link_up_p = !!(bmsr & BMSR_LSTATUS);
-
-       return 0;
-}
-
-static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
-{
-       struct niu_link_config *lp = &np->link_config;
-       u16 current_speed, bmsr;
-       unsigned long flags;
-       u8 current_duplex;
-       int err, link_up;
-
-       link_up = 0;
-       current_speed = SPEED_INVALID;
-       current_duplex = DUPLEX_INVALID;
-
-       spin_lock_irqsave(&np->lock, flags);
-
-       err = -EINVAL;
-
-       err = mii_read(np, np->phy_addr, MII_BMSR);
-       if (err < 0)
-               goto out;
-
-       bmsr = err;
-       if (bmsr & BMSR_LSTATUS) {
-               u16 adv, lpa;
-
-               err = mii_read(np, np->phy_addr, MII_ADVERTISE);
-               if (err < 0)
-                       goto out;
-               adv = err;
-
-               err = mii_read(np, np->phy_addr, MII_LPA);
-               if (err < 0)
-                       goto out;
-               lpa = err;
-
-               err = mii_read(np, np->phy_addr, MII_ESTATUS);
-               if (err < 0)
-                       goto out;
-               link_up = 1;
-               current_speed = SPEED_1000;
-               current_duplex = DUPLEX_FULL;
-
-       }
-       lp->active_speed = current_speed;
-       lp->active_duplex = current_duplex;
-       err = 0;
-
-out:
-       spin_unlock_irqrestore(&np->lock, flags);
-
-       *link_up_p = link_up;
-       return err;
-}
-
-static int link_status_1g(struct niu *np, int *link_up_p)
-{
-       struct niu_link_config *lp = &np->link_config;
-       unsigned long flags;
-       int err;
-
-       spin_lock_irqsave(&np->lock, flags);
-
-       err = link_status_mii(np, link_up_p);
-       lp->supported |= SUPPORTED_TP;
-       lp->active_advertising |= ADVERTISED_TP;
-
-       spin_unlock_irqrestore(&np->lock, flags);
-       return err;
-}
-
-static int bcm8704_reset(struct niu *np)
-{
-       int err, limit;
-
-       err = mdio_read(np, np->phy_addr,
-                       BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
-       if (err < 0 || err == 0xffff)
-               return err;
-       err |= BMCR_RESET;
-       err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
-                        MII_BMCR, err);
-       if (err)
-               return err;
-
-       limit = 1000;
-       while (--limit >= 0) {
-               err = mdio_read(np, np->phy_addr,
-                               BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
-               if (err < 0)
-                       return err;
-               if (!(err & BMCR_RESET))
-                       break;
-       }
-       if (limit < 0) {
-               netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
-                          np->port, (err & 0xffff));
-               return -ENODEV;
-       }
-       return 0;
-}
-
-/* When written, certain PHY registers need to be read back twice
- * in order for the bits to settle properly.
- */
-static int bcm8704_user_dev3_readback(struct niu *np, int reg)
-{
-       int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
-       if (err < 0)
-               return err;
-       err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg);
-       if (err < 0)
-               return err;
-       return 0;
-}
-
-static int bcm8706_init_user_dev3(struct niu *np)
-{
-       int err;
-
-
-       err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
-                       BCM8704_USER_OPT_DIGITAL_CTRL);
-       if (err < 0)
-               return err;
-       err &= ~USER_ODIG_CTRL_GPIOS;
-       err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
-       err |=  USER_ODIG_CTRL_RESV2;
-       err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
-                        BCM8704_USER_OPT_DIGITAL_CTRL, err);
-       if (err)
-               return err;
-
-       mdelay(1000);
-
-       return 0;
-}
-
-static int bcm8704_init_user_dev3(struct niu *np)
-{
-       int err;
-
-       err = mdio_write(np, np->phy_addr,
-                        BCM8704_USER_DEV3_ADDR, BCM8704_USER_CONTROL,
-                        (USER_CONTROL_OPTXRST_LVL |
-                         USER_CONTROL_OPBIASFLT_LVL |
-                         USER_CONTROL_OBTMPFLT_LVL |
-                         USER_CONTROL_OPPRFLT_LVL |
-                         USER_CONTROL_OPTXFLT_LVL |
-                         USER_CONTROL_OPRXLOS_LVL |
-                         USER_CONTROL_OPRXFLT_LVL |
-                         USER_CONTROL_OPTXON_LVL |
-                         (0x3f << USER_CONTROL_RES1_SHIFT)));
-       if (err)
-               return err;
-
-       err = mdio_write(np, np->phy_addr,
-                        BCM8704_USER_DEV3_ADDR, BCM8704_USER_PMD_TX_CONTROL,
-                        (USER_PMD_TX_CTL_XFP_CLKEN |
-                         (1 << USER_PMD_TX_CTL_TX_DAC_TXD_SH) |
-                         (2 << USER_PMD_TX_CTL_TX_DAC_TXCK_SH) |
-                         USER_PMD_TX_CTL_TSCK_LPWREN));
-       if (err)
-               return err;
-
-       err = bcm8704_user_dev3_readback(np, BCM8704_USER_CONTROL);
-       if (err)
-               return err;
-       err = bcm8704_user_dev3_readback(np, BCM8704_USER_PMD_TX_CONTROL);
-       if (err)
-               return err;
-
-       err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
-                       BCM8704_USER_OPT_DIGITAL_CTRL);
-       if (err < 0)
-               return err;
-       err &= ~USER_ODIG_CTRL_GPIOS;
-       err |= (0x3 << USER_ODIG_CTRL_GPIOS_SHIFT);
-       err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
-                        BCM8704_USER_OPT_DIGITAL_CTRL, err);
-       if (err)
-               return err;
-
-       mdelay(1000);
-
-       return 0;
-}
-
-static int mrvl88x2011_act_led(struct niu *np, int val)
-{
-       int     err;
-
-       err  = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
-               MRVL88X2011_LED_8_TO_11_CTL);
-       if (err < 0)
-               return err;
-
-       err &= ~MRVL88X2011_LED(MRVL88X2011_LED_ACT,MRVL88X2011_LED_CTL_MASK);
-       err |=  MRVL88X2011_LED(MRVL88X2011_LED_ACT,val);
-
-       return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
-                         MRVL88X2011_LED_8_TO_11_CTL, err);
-}
-
-static int mrvl88x2011_led_blink_rate(struct niu *np, int rate)
-{
-       int     err;
-
-       err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
-                       MRVL88X2011_LED_BLINK_CTL);
-       if (err >= 0) {
-               err &= ~MRVL88X2011_LED_BLKRATE_MASK;
-               err |= (rate << 4);
-
-               err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR,
-                                MRVL88X2011_LED_BLINK_CTL, err);
-       }
-
-       return err;
-}
-
-static int xcvr_init_10g_mrvl88x2011(struct niu *np)
-{
-       int     err;
-
-       /* Set LED functions */
-       err = mrvl88x2011_led_blink_rate(np, MRVL88X2011_LED_BLKRATE_134MS);
-       if (err)
-               return err;
-
-       /* led activity */
-       err = mrvl88x2011_act_led(np, MRVL88X2011_LED_CTL_OFF);
-       if (err)
-               return err;
-
-       err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
-                       MRVL88X2011_GENERAL_CTL);
-       if (err < 0)
-               return err;
-
-       err |= MRVL88X2011_ENA_XFPREFCLK;
-
-       err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
-                        MRVL88X2011_GENERAL_CTL, err);
-       if (err < 0)
-               return err;
-
-       err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
-                       MRVL88X2011_PMA_PMD_CTL_1);
-       if (err < 0)
-               return err;
-
-       if (np->link_config.loopback_mode == LOOPBACK_MAC)
-               err |= MRVL88X2011_LOOPBACK;
-       else
-               err &= ~MRVL88X2011_LOOPBACK;
-
-       err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
-                        MRVL88X2011_PMA_PMD_CTL_1, err);
-       if (err < 0)
-               return err;
-
-       /* Enable PMD  */
-       return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
-                         MRVL88X2011_10G_PMD_TX_DIS, MRVL88X2011_ENA_PMDTX);
-}
-
-
-static int xcvr_diag_bcm870x(struct niu *np)
-{
-       u16 analog_stat0, tx_alarm_status;
-       int err = 0;
-
-#if 1
-       err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
-                       MII_STAT1000);
-       if (err < 0)
-               return err;
-       pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
-
-       err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
-       if (err < 0)
-               return err;
-       pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
-
-       err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
-                       MII_NWAYTEST);
-       if (err < 0)
-               return err;
-       pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
-#endif
-
-       /* XXX dig this out it might not be so useful XXX */
-       err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
-                       BCM8704_USER_ANALOG_STATUS0);
-       if (err < 0)
-               return err;
-       err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
-                       BCM8704_USER_ANALOG_STATUS0);
-       if (err < 0)
-               return err;
-       analog_stat0 = err;
-
-       err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
-                       BCM8704_USER_TX_ALARM_STATUS);
-       if (err < 0)
-               return err;
-       err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR,
-                       BCM8704_USER_TX_ALARM_STATUS);
-       if (err < 0)
-               return err;
-       tx_alarm_status = err;
-
-       if (analog_stat0 != 0x03fc) {
-               if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
-                       pr_info("Port %u cable not connected or bad cable\n",
-                               np->port);
-               } else if (analog_stat0 == 0x639c) {
-                       pr_info("Port %u optical module is bad or missing\n",
-                               np->port);
-               }
-       }
-
-       return 0;
-}
-
-static int xcvr_10g_set_lb_bcm870x(struct niu *np)
-{
-       struct niu_link_config *lp = &np->link_config;
-       int err;
-
-       err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
-                       MII_BMCR);
-       if (err < 0)
-               return err;
-
-       err &= ~BMCR_LOOPBACK;
-
-       if (lp->loopback_mode == LOOPBACK_MAC)
-               err |= BMCR_LOOPBACK;
-
-       err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
-                        MII_BMCR, err);
-       if (err)
-               return err;
-
-       return 0;
-}
-
-static int xcvr_init_10g_bcm8706(struct niu *np)
-{
-       int err = 0;
-       u64 val;
-
-       if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) &&
-           (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0)
-                       return err;
-
-       val = nr64_mac(XMAC_CONFIG);
-       val &= ~XMAC_CONFIG_LED_POLARITY;
-       val |= XMAC_CONFIG_FORCE_LED_ON;
-       nw64_mac(XMAC_CONFIG, val);
-
-       val = nr64(MIF_CONFIG);
-       val |= MIF_CONFIG_INDIRECT_MODE;
-       nw64(MIF_CONFIG, val);
-
-       err = bcm8704_reset(np);
-       if (err)
-               return err;
-
-       err = xcvr_10g_set_lb_bcm870x(np);
-       if (err)
-               return err;
-
-       err = bcm8706_init_user_dev3(np);
-       if (err)
-               return err;
-
-       err = xcvr_diag_bcm870x(np);
-       if (err)
-               return err;
-
-       return 0;
-}
-
-static int xcvr_init_10g_bcm8704(struct niu *np)
-{
-       int err;
-
-       err = bcm8704_reset(np);
-       if (err)
-               return err;
-
-       err = bcm8704_init_user_dev3(np);
-       if (err)
-               return err;
-
-       err = xcvr_10g_set_lb_bcm870x(np);
-       if (err)
-               return err;
-
-       err =  xcvr_diag_bcm870x(np);
-       if (err)
-               return err;
-
-       return 0;
-}
-
-static int xcvr_init_10g(struct niu *np)
-{
-       int phy_id, err;
-       u64 val;
-
-       val = nr64_mac(XMAC_CONFIG);
-       val &= ~XMAC_CONFIG_LED_POLARITY;
-       val |= XMAC_CONFIG_FORCE_LED_ON;
-       nw64_mac(XMAC_CONFIG, val);
-
-       /* XXX shared resource, lock parent XXX */
-       val = nr64(MIF_CONFIG);
-       val |= MIF_CONFIG_INDIRECT_MODE;
-       nw64(MIF_CONFIG, val);
-
-       phy_id = phy_decode(np->parent->port_phy, np->port);
-       phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
-
-       /* handle different phy types */
-       switch (phy_id & NIU_PHY_ID_MASK) {
-       case NIU_PHY_ID_MRVL88X2011:
-               err = xcvr_init_10g_mrvl88x2011(np);
-               break;
-
-       default: /* bcom 8704 */
-               err = xcvr_init_10g_bcm8704(np);
-               break;
-       }
-
-       return err;
-}
-
-static int mii_reset(struct niu *np)
-{
-       int limit, err;
-
-       err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET);
-       if (err)
-               return err;
-
-       limit = 1000;
-       while (--limit >= 0) {
-               udelay(500);
-               err = mii_read(np, np->phy_addr, MII_BMCR);
-               if (err < 0)
-                       return err;
-               if (!(err & BMCR_RESET))
-                       break;
-       }
-       if (limit < 0) {
-               netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
-                          np->port, err);
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static int xcvr_init_1g_rgmii(struct niu *np)
-{
-       int err;
-       u64 val;
-       u16 bmcr, bmsr, estat;
-
-       val = nr64(MIF_CONFIG);
-       val &= ~MIF_CONFIG_INDIRECT_MODE;
-       nw64(MIF_CONFIG, val);
-
-       err = mii_reset(np);
-       if (err)
-               return err;
-
-       err = mii_read(np, np->phy_addr, MII_BMSR);
-       if (err < 0)
-               return err;
-       bmsr = err;
-
-       estat = 0;
-       if (bmsr & BMSR_ESTATEN) {
-               err = mii_read(np, np->phy_addr, MII_ESTATUS);
-               if (err < 0)
-                       return err;
-               estat = err;
-       }
-
-       bmcr = 0;
-       err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
-       if (err)
-               return err;
-
-       if (bmsr & BMSR_ESTATEN) {
-               u16 ctrl1000 = 0;
-
-               if (estat & ESTATUS_1000_TFULL)
-                       ctrl1000 |= ADVERTISE_1000FULL;
-               err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
-               if (err)
-                       return err;
-       }
-
-       bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
-
-       err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
-       if (err)
-               return err;
-
-       err = mii_read(np, np->phy_addr, MII_BMCR);
-       if (err < 0)
-               return err;
-       bmcr = mii_read(np, np->phy_addr, MII_BMCR);
-
-       err = mii_read(np, np->phy_addr, MII_BMSR);
-       if (err < 0)
-               return err;
-
-       return 0;
-}
-
-static int mii_init_common(struct niu *np)
-{
-       struct niu_link_config *lp = &np->link_config;
-       u16 bmcr, bmsr, adv, estat;
-       int err;
-
-       err = mii_reset(np);
-       if (err)
-               return err;
-
-       err = mii_read(np, np->phy_addr, MII_BMSR);
-       if (err < 0)
-               return err;
-       bmsr = err;
-
-       estat = 0;
-       if (bmsr & BMSR_ESTATEN) {
-               err = mii_read(np, np->phy_addr, MII_ESTATUS);
-               if (err < 0)
-                       return err;
-               estat = err;
-       }
-
-       bmcr = 0;
-       err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
-       if (err)
-               return err;
-
-       if (lp->loopback_mode == LOOPBACK_MAC) {
-               bmcr |= BMCR_LOOPBACK;
-               if (lp->active_speed == SPEED_1000)
-                       bmcr |= BMCR_SPEED1000;
-               if (lp->active_duplex == DUPLEX_FULL)
-                       bmcr |= BMCR_FULLDPLX;
-       }
-
-       if (lp->loopback_mode == LOOPBACK_PHY) {
-               u16 aux;
-
-               aux = (BCM5464R_AUX_CTL_EXT_LB |
-                      BCM5464R_AUX_CTL_WRITE_1);
-               err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux);
-               if (err)
-                       return err;
-       }
-
-       if (lp->autoneg) {
-               u16 ctrl1000;
-
-               adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
-               if ((bmsr & BMSR_10HALF) &&
-                       (lp->advertising & ADVERTISED_10baseT_Half))
-                       adv |= ADVERTISE_10HALF;
-               if ((bmsr & BMSR_10FULL) &&
-                       (lp->advertising & ADVERTISED_10baseT_Full))
-                       adv |= ADVERTISE_10FULL;
-               if ((bmsr & BMSR_100HALF) &&
-                       (lp->advertising & ADVERTISED_100baseT_Half))
-                       adv |= ADVERTISE_100HALF;
-               if ((bmsr & BMSR_100FULL) &&
-                       (lp->advertising & ADVERTISED_100baseT_Full))
-                       adv |= ADVERTISE_100FULL;
-               err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv);
-               if (err)
-                       return err;
-
-               if (likely(bmsr & BMSR_ESTATEN)) {
-                       ctrl1000 = 0;
-                       if ((estat & ESTATUS_1000_THALF) &&
-                               (lp->advertising & ADVERTISED_1000baseT_Half))
-                               ctrl1000 |= ADVERTISE_1000HALF;
-                       if ((estat & ESTATUS_1000_TFULL) &&
-                               (lp->advertising & ADVERTISED_1000baseT_Full))
-                               ctrl1000 |= ADVERTISE_1000FULL;
-                       err = mii_write(np, np->phy_addr,
-                                       MII_CTRL1000, ctrl1000);
-                       if (err)
-                               return err;
-               }
-
-               bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-       } else {
-               /* !lp->autoneg */
-               int fulldpx;
-
-               if (lp->duplex == DUPLEX_FULL) {
-                       bmcr |= BMCR_FULLDPLX;
-                       fulldpx = 1;
-               } else if (lp->duplex == DUPLEX_HALF)
-                       fulldpx = 0;
-               else
-                       return -EINVAL;
-
-               if (lp->speed == SPEED_1000) {
-                       /* if X-full requested while not supported, or
-                          X-half requested while not supported... */
-                       if ((fulldpx && !(estat & ESTATUS_1000_TFULL)) ||
-                               (!fulldpx && !(estat & ESTATUS_1000_THALF)))
-                               return -EINVAL;
-                       bmcr |= BMCR_SPEED1000;
-               } else if (lp->speed == SPEED_100) {
-                       if ((fulldpx && !(bmsr & BMSR_100FULL)) ||
-                               (!fulldpx && !(bmsr & BMSR_100HALF)))
-                               return -EINVAL;
-                       bmcr |= BMCR_SPEED100;
-               } else if (lp->speed == SPEED_10) {
-                       if ((fulldpx && !(bmsr & BMSR_10FULL)) ||
-                               (!fulldpx && !(bmsr & BMSR_10HALF)))
-                               return -EINVAL;
-               } else
-                       return -EINVAL;
-       }
-
-       err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
-       if (err)
-               return err;
-
-#if 0
-       err = mii_read(np, np->phy_addr, MII_BMCR);
-       if (err < 0)
-               return err;
-       bmcr = err;
-
-       err = mii_read(np, np->phy_addr, MII_BMSR);
-       if (err < 0)
-               return err;
-       bmsr = err;
-
-       pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
-               np->port, bmcr, bmsr);
-#endif
-
-       return 0;
-}
-
-static int xcvr_init_1g(struct niu *np)
-{
-       u64 val;
-
-       /* XXX shared resource, lock parent XXX */
-       val = nr64(MIF_CONFIG);
-       val &= ~MIF_CONFIG_INDIRECT_MODE;
-       nw64(MIF_CONFIG, val);
-
-       return mii_init_common(np);
-}
-
-static int niu_xcvr_init(struct niu *np)
-{
-       const struct niu_phy_ops *ops = np->phy_ops;
-       int err;
-
-       err = 0;
-       if (ops->xcvr_init)
-               err = ops->xcvr_init(np);
-
-       return err;
-}
-
-static int niu_serdes_init(struct niu *np)
-{
-       const struct niu_phy_ops *ops = np->phy_ops;
-       int err;
-
-       err = 0;
-       if (ops->serdes_init)
-               err = ops->serdes_init(np);
-
-       return err;
-}
-
-static void niu_init_xif(struct niu *);
-static void niu_handle_led(struct niu *, int status);
-
-static int niu_link_status_common(struct niu *np, int link_up)
-{
-       struct niu_link_config *lp = &np->link_config;
-       struct net_device *dev = np->dev;
-       unsigned long flags;
-
-       if (!netif_carrier_ok(dev) && link_up) {
-               netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
-                          lp->active_speed == SPEED_10000 ? "10Gb/sec" :
-                          lp->active_speed == SPEED_1000 ? "1Gb/sec" :
-                          lp->active_speed == SPEED_100 ? "100Mbit/sec" :
-                          "10Mbit/sec",
-                          lp->active_duplex == DUPLEX_FULL ? "full" : "half");
-
-               spin_lock_irqsave(&np->lock, flags);
-               niu_init_xif(np);
-               niu_handle_led(np, 1);
-               spin_unlock_irqrestore(&np->lock, flags);
-
-               netif_carrier_on(dev);
-       } else if (netif_carrier_ok(dev) && !link_up) {
-               netif_warn(np, link, dev, "Link is down\n");
-               spin_lock_irqsave(&np->lock, flags);
-               niu_handle_led(np, 0);
-               spin_unlock_irqrestore(&np->lock, flags);
-               netif_carrier_off(dev);
-       }
-
-       return 0;
-}
-
-static int link_status_10g_mrvl(struct niu *np, int *link_up_p)
-{
-       int err, link_up, pma_status, pcs_status;
-
-       link_up = 0;
-
-       err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
-                       MRVL88X2011_10G_PMD_STATUS_2);
-       if (err < 0)
-               goto out;
-
-       /* Check PMA/PMD Register: 1.0001.2 == 1 */
-       err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR,
-                       MRVL88X2011_PMA_PMD_STATUS_1);
-       if (err < 0)
-               goto out;
-
-       pma_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
-
-        /* Check PMC Register : 3.0001.2 == 1: read twice */
-       err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
-                       MRVL88X2011_PMA_PMD_STATUS_1);
-       if (err < 0)
-               goto out;
-
-       err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR,
-                       MRVL88X2011_PMA_PMD_STATUS_1);
-       if (err < 0)
-               goto out;
-
-       pcs_status = ((err & MRVL88X2011_LNK_STATUS_OK) ? 1 : 0);
-
-        /* Check XGXS Register : 4.0018.[0-3,12] */
-       err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR,
-                       MRVL88X2011_10G_XGXS_LANE_STAT);
-       if (err < 0)
-               goto out;
-
-       if (err == (PHYXS_XGXS_LANE_STAT_ALINGED | PHYXS_XGXS_LANE_STAT_LANE3 |
-                   PHYXS_XGXS_LANE_STAT_LANE2 | PHYXS_XGXS_LANE_STAT_LANE1 |
-                   PHYXS_XGXS_LANE_STAT_LANE0 | PHYXS_XGXS_LANE_STAT_MAGIC |
-                   0x800))
-               link_up = (pma_status && pcs_status) ? 1 : 0;
-
-       np->link_config.active_speed = SPEED_10000;
-       np->link_config.active_duplex = DUPLEX_FULL;
-       err = 0;
-out:
-       mrvl88x2011_act_led(np, (link_up ?
-                                MRVL88X2011_LED_CTL_PCS_ACT :
-                                MRVL88X2011_LED_CTL_OFF));
-
-       *link_up_p = link_up;
-       return err;
-}
-
-static int link_status_10g_bcm8706(struct niu *np, int *link_up_p)
-{
-       int err, link_up;
-       link_up = 0;
-
-       err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
-                       BCM8704_PMD_RCV_SIGDET);
-       if (err < 0 || err == 0xffff)
-               goto out;
-       if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
-               err = 0;
-               goto out;
-       }
-
-       err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
-                       BCM8704_PCS_10G_R_STATUS);
-       if (err < 0)
-               goto out;
-
-       if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
-               err = 0;
-               goto out;
-       }
-
-       err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
-                       BCM8704_PHYXS_XGXS_LANE_STAT);
-       if (err < 0)
-               goto out;
-       if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
-                   PHYXS_XGXS_LANE_STAT_MAGIC |
-                   PHYXS_XGXS_LANE_STAT_PATTEST |
-                   PHYXS_XGXS_LANE_STAT_LANE3 |
-                   PHYXS_XGXS_LANE_STAT_LANE2 |
-                   PHYXS_XGXS_LANE_STAT_LANE1 |
-                   PHYXS_XGXS_LANE_STAT_LANE0)) {
-               err = 0;
-               np->link_config.active_speed = SPEED_INVALID;
-               np->link_config.active_duplex = DUPLEX_INVALID;
-               goto out;
-       }
-
-       link_up = 1;
-       np->link_config.active_speed = SPEED_10000;
-       np->link_config.active_duplex = DUPLEX_FULL;
-       err = 0;
-
-out:
-       *link_up_p = link_up;
-       return err;
-}
-
-static int link_status_10g_bcom(struct niu *np, int *link_up_p)
-{
-       int err, link_up;
-
-       link_up = 0;
-
-       err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR,
-                       BCM8704_PMD_RCV_SIGDET);
-       if (err < 0)
-               goto out;
-       if (!(err & PMD_RCV_SIGDET_GLOBAL)) {
-               err = 0;
-               goto out;
-       }
-
-       err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR,
-                       BCM8704_PCS_10G_R_STATUS);
-       if (err < 0)
-               goto out;
-       if (!(err & PCS_10G_R_STATUS_BLK_LOCK)) {
-               err = 0;
-               goto out;
-       }
-
-       err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
-                       BCM8704_PHYXS_XGXS_LANE_STAT);
-       if (err < 0)
-               goto out;
-
-       if (err != (PHYXS_XGXS_LANE_STAT_ALINGED |
-                   PHYXS_XGXS_LANE_STAT_MAGIC |
-                   PHYXS_XGXS_LANE_STAT_LANE3 |
-                   PHYXS_XGXS_LANE_STAT_LANE2 |
-                   PHYXS_XGXS_LANE_STAT_LANE1 |
-                   PHYXS_XGXS_LANE_STAT_LANE0)) {
-               err = 0;
-               goto out;
-       }
-
-       link_up = 1;
-       np->link_config.active_speed = SPEED_10000;
-       np->link_config.active_duplex = DUPLEX_FULL;
-       err = 0;
-
-out:
-       *link_up_p = link_up;
-       return err;
-}
-
-static int link_status_10g(struct niu *np, int *link_up_p)
-{
-       unsigned long flags;
-       int err = -EINVAL;
-
-       spin_lock_irqsave(&np->lock, flags);
-
-       if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
-               int phy_id;
-
-               phy_id = phy_decode(np->parent->port_phy, np->port);
-               phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port];
-
-               /* handle different phy types */
-               switch (phy_id & NIU_PHY_ID_MASK) {
-               case NIU_PHY_ID_MRVL88X2011:
-                       err = link_status_10g_mrvl(np, link_up_p);
-                       break;
-
-               default: /* bcom 8704 */
-                       err = link_status_10g_bcom(np, link_up_p);
-                       break;
-               }
-       }
-
-       spin_unlock_irqrestore(&np->lock, flags);
-
-       return err;
-}
-
-static int niu_10g_phy_present(struct niu *np)
-{
-       u64 sig, mask, val;
-
-       sig = nr64(ESR_INT_SIGNALS);
-       switch (np->port) {
-       case 0:
-               mask = ESR_INT_SIGNALS_P0_BITS;
-               val = (ESR_INT_SRDY0_P0 |
-                      ESR_INT_DET0_P0 |
-                      ESR_INT_XSRDY_P0 |
-                      ESR_INT_XDP_P0_CH3 |
-                      ESR_INT_XDP_P0_CH2 |
-                      ESR_INT_XDP_P0_CH1 |
-                      ESR_INT_XDP_P0_CH0);
-               break;
-
-       case 1:
-               mask = ESR_INT_SIGNALS_P1_BITS;
-               val = (ESR_INT_SRDY0_P1 |
-                      ESR_INT_DET0_P1 |
-                      ESR_INT_XSRDY_P1 |
-                      ESR_INT_XDP_P1_CH3 |
-                      ESR_INT_XDP_P1_CH2 |
-                      ESR_INT_XDP_P1_CH1 |
-                      ESR_INT_XDP_P1_CH0);
-               break;
-
-       default:
-               return 0;
-       }
-
-       if ((sig & mask) != val)
-               return 0;
-       return 1;
-}
-
-static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
-{
-       unsigned long flags;
-       int err = 0;
-       int phy_present;
-       int phy_present_prev;
-
-       spin_lock_irqsave(&np->lock, flags);
-
-       if (np->link_config.loopback_mode == LOOPBACK_DISABLED) {
-               phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ?
-                       1 : 0;
-               phy_present = niu_10g_phy_present(np);
-               if (phy_present != phy_present_prev) {
-                       /* state change */
-                       if (phy_present) {
-                               /* A NEM was just plugged in */
-                               np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT;
-                               if (np->phy_ops->xcvr_init)
-                                       err = np->phy_ops->xcvr_init(np);
-                               if (err) {
-                                       err = mdio_read(np, np->phy_addr,
-                                               BCM8704_PHYXS_DEV_ADDR, MII_BMCR);
-                                       if (err == 0xffff) {
-                                               /* No mdio, back-to-back XAUI */
-                                               goto out;
-                                       }
-                                       /* debounce */
-                                       np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
-                               }
-                       } else {
-                               np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
-                               *link_up_p = 0;
-                               netif_warn(np, link, np->dev,
-                                          "Hotplug PHY Removed\n");
-                       }
-               }
-out:
-               if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) {
-                       err = link_status_10g_bcm8706(np, link_up_p);
-                       if (err == 0xffff) {
-                               /* No mdio, back-to-back XAUI: it is C10NEM */
-                               *link_up_p = 1;
-                               np->link_config.active_speed = SPEED_10000;
-                               np->link_config.active_duplex = DUPLEX_FULL;
-                       }
-               }
-       }
-
-       spin_unlock_irqrestore(&np->lock, flags);
-
-       return 0;
-}
-
-static int niu_link_status(struct niu *np, int *link_up_p)
-{
-       const struct niu_phy_ops *ops = np->phy_ops;
-       int err;
-
-       err = 0;
-       if (ops->link_status)
-               err = ops->link_status(np, link_up_p);
-
-       return err;
-}
-
-static void niu_timer(unsigned long __opaque)
-{
-       struct niu *np = (struct niu *) __opaque;
-       unsigned long off;
-       int err, link_up;
-
-       err = niu_link_status(np, &link_up);
-       if (!err)
-               niu_link_status_common(np, link_up);
-
-       if (netif_carrier_ok(np->dev))
-               off = 5 * HZ;
-       else
-               off = 1 * HZ;
-       np->timer.expires = jiffies + off;
-
-       add_timer(&np->timer);
-}
-
-static const struct niu_phy_ops phy_ops_10g_serdes = {
-       .serdes_init            = serdes_init_10g_serdes,
-       .link_status            = link_status_10g_serdes,
-};
-
-static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
-       .serdes_init            = serdes_init_niu_10g_serdes,
-       .link_status            = link_status_10g_serdes,
-};
-
-static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
-       .serdes_init            = serdes_init_niu_1g_serdes,
-       .link_status            = link_status_1g_serdes,
-};
-
-static const struct niu_phy_ops phy_ops_1g_rgmii = {
-       .xcvr_init              = xcvr_init_1g_rgmii,
-       .link_status            = link_status_1g_rgmii,
-};
-
-static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
-       .serdes_init            = serdes_init_niu_10g_fiber,
-       .xcvr_init              = xcvr_init_10g,
-       .link_status            = link_status_10g,
-};
-
-static const struct niu_phy_ops phy_ops_10g_fiber = {
-       .serdes_init            = serdes_init_10g,
-       .xcvr_init              = xcvr_init_10g,
-       .link_status            = link_status_10g,
-};
-
-static const struct niu_phy_ops phy_ops_10g_fiber_hotplug = {
-       .serdes_init            = serdes_init_10g,
-       .xcvr_init              = xcvr_init_10g_bcm8706,
-       .link_status            = link_status_10g_hotplug,
-};
-
-static const struct niu_phy_ops phy_ops_niu_10g_hotplug = {
-       .serdes_init            = serdes_init_niu_10g_fiber,
-       .xcvr_init              = xcvr_init_10g_bcm8706,
-       .link_status            = link_status_10g_hotplug,
-};
-
-static const struct niu_phy_ops phy_ops_10g_copper = {
-       .serdes_init            = serdes_init_10g,
-       .link_status            = link_status_10g, /* XXX */
-};
-
-static const struct niu_phy_ops phy_ops_1g_fiber = {
-       .serdes_init            = serdes_init_1g,
-       .xcvr_init              = xcvr_init_1g,
-       .link_status            = link_status_1g,
-};
-
-static const struct niu_phy_ops phy_ops_1g_copper = {
-       .xcvr_init              = xcvr_init_1g,
-       .link_status            = link_status_1g,
-};
-
-struct niu_phy_template {
-       const struct niu_phy_ops        *ops;
-       u32                             phy_addr_base;
-};
-
-static const struct niu_phy_template phy_template_niu_10g_fiber = {
-       .ops            = &phy_ops_10g_fiber_niu,
-       .phy_addr_base  = 16,
-};
-
-static const struct niu_phy_template phy_template_niu_10g_serdes = {
-       .ops            = &phy_ops_10g_serdes_niu,
-       .phy_addr_base  = 0,
-};
-
-static const struct niu_phy_template phy_template_niu_1g_serdes = {
-       .ops            = &phy_ops_1g_serdes_niu,
-       .phy_addr_base  = 0,
-};
-
-static const struct niu_phy_template phy_template_10g_fiber = {
-       .ops            = &phy_ops_10g_fiber,
-       .phy_addr_base  = 8,
-};
-
-static const struct niu_phy_template phy_template_10g_fiber_hotplug = {
-       .ops            = &phy_ops_10g_fiber_hotplug,
-       .phy_addr_base  = 8,
-};
-
-static const struct niu_phy_template phy_template_niu_10g_hotplug = {
-       .ops            = &phy_ops_niu_10g_hotplug,
-       .phy_addr_base  = 8,
-};
-
-static const struct niu_phy_template phy_template_10g_copper = {
-       .ops            = &phy_ops_10g_copper,
-       .phy_addr_base  = 10,
-};
-
-static const struct niu_phy_template phy_template_1g_fiber = {
-       .ops            = &phy_ops_1g_fiber,
-       .phy_addr_base  = 0,
-};
-
-static const struct niu_phy_template phy_template_1g_copper = {
-       .ops            = &phy_ops_1g_copper,
-       .phy_addr_base  = 0,
-};
-
-static const struct niu_phy_template phy_template_1g_rgmii = {
-       .ops            = &phy_ops_1g_rgmii,
-       .phy_addr_base  = 0,
-};
-
-static const struct niu_phy_template phy_template_10g_serdes = {
-       .ops            = &phy_ops_10g_serdes,
-       .phy_addr_base  = 0,
-};
-
-static int niu_atca_port_num[4] = {
-       0, 0,  11, 10
-};
-
-static int serdes_init_10g_serdes(struct niu *np)
-{
-       struct niu_link_config *lp = &np->link_config;
-       unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
-       u64 ctrl_val, test_cfg_val, sig, mask, val;
-
-       switch (np->port) {
-       case 0:
-               ctrl_reg = ENET_SERDES_0_CTRL_CFG;
-               test_cfg_reg = ENET_SERDES_0_TEST_CFG;
-               pll_cfg = ENET_SERDES_0_PLL_CFG;
-               break;
-       case 1:
-               ctrl_reg = ENET_SERDES_1_CTRL_CFG;
-               test_cfg_reg = ENET_SERDES_1_TEST_CFG;
-               pll_cfg = ENET_SERDES_1_PLL_CFG;
-               break;
-
-       default:
-               return -EINVAL;
-       }
-       ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
-                   ENET_SERDES_CTRL_SDET_1 |
-                   ENET_SERDES_CTRL_SDET_2 |
-                   ENET_SERDES_CTRL_SDET_3 |
-                   (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
-                   (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
-                   (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
-                   (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
-                   (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
-                   (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
-                   (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
-                   (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
-       test_cfg_val = 0;
-
-       if (lp->loopback_mode == LOOPBACK_PHY) {
-               test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
-                                 ENET_SERDES_TEST_MD_0_SHIFT) |
-                                (ENET_TEST_MD_PAD_LOOPBACK <<
-                                 ENET_SERDES_TEST_MD_1_SHIFT) |
-                                (ENET_TEST_MD_PAD_LOOPBACK <<
-                                 ENET_SERDES_TEST_MD_2_SHIFT) |
-                                (ENET_TEST_MD_PAD_LOOPBACK <<
-                                 ENET_SERDES_TEST_MD_3_SHIFT));
-       }
-
-       esr_reset(np);
-       nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
-       nw64(ctrl_reg, ctrl_val);
-       nw64(test_cfg_reg, test_cfg_val);
-
-       /* Initialize all 4 lanes of the SERDES.  */
-       for (i = 0; i < 4; i++) {
-               u32 rxtx_ctrl, glue0;
-               int err;
-
-               err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
-               if (err)
-                       return err;
-               err = esr_read_glue0(np, i, &glue0);
-               if (err)
-                       return err;
-
-               rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
-               rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
-                             (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
-
-               glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
-                          ESR_GLUE_CTRL0_THCNT |
-                          ESR_GLUE_CTRL0_BLTIME);
-               glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
-                         (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
-                         (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
-                         (BLTIME_300_CYCLES <<
-                          ESR_GLUE_CTRL0_BLTIME_SHIFT));
-
-               err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
-               if (err)
-                       return err;
-               err = esr_write_glue0(np, i, glue0);
-               if (err)
-                       return err;
-       }
-
-
-       sig = nr64(ESR_INT_SIGNALS);
-       switch (np->port) {
-       case 0:
-               mask = ESR_INT_SIGNALS_P0_BITS;
-               val = (ESR_INT_SRDY0_P0 |
-                      ESR_INT_DET0_P0 |
-                      ESR_INT_XSRDY_P0 |
-                      ESR_INT_XDP_P0_CH3 |
-                      ESR_INT_XDP_P0_CH2 |
-                      ESR_INT_XDP_P0_CH1 |
-                      ESR_INT_XDP_P0_CH0);
-               break;
-
-       case 1:
-               mask = ESR_INT_SIGNALS_P1_BITS;
-               val = (ESR_INT_SRDY0_P1 |
-                      ESR_INT_DET0_P1 |
-                      ESR_INT_XSRDY_P1 |
-                      ESR_INT_XDP_P1_CH3 |
-                      ESR_INT_XDP_P1_CH2 |
-                      ESR_INT_XDP_P1_CH1 |
-                      ESR_INT_XDP_P1_CH0);
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       if ((sig & mask) != val) {
-               int err;
-               err = serdes_init_1g_serdes(np);
-               if (!err) {
-                       np->flags &= ~NIU_FLAGS_10G;
-                       np->mac_xcvr = MAC_XCVR_PCS;
-               }  else {
-                       netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
-                                  np->port);
-                       return -ENODEV;
-               }
-       }
-
-       return 0;
-}
-
-static int niu_determine_phy_disposition(struct niu *np)
-{
-       struct niu_parent *parent = np->parent;
-       u8 plat_type = parent->plat_type;
-       const struct niu_phy_template *tp;
-       u32 phy_addr_off = 0;
-
-       if (plat_type == PLAT_TYPE_NIU) {
-               switch (np->flags &
-                       (NIU_FLAGS_10G |
-                        NIU_FLAGS_FIBER |
-                        NIU_FLAGS_XCVR_SERDES)) {
-               case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
-                       /* 10G Serdes */
-                       tp = &phy_template_niu_10g_serdes;
-                       break;
-               case NIU_FLAGS_XCVR_SERDES:
-                       /* 1G Serdes */
-                       tp = &phy_template_niu_1g_serdes;
-                       break;
-               case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
-                       /* 10G Fiber */
-               default:
-                       if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
-                               tp = &phy_template_niu_10g_hotplug;
-                               if (np->port == 0)
-                                       phy_addr_off = 8;
-                               if (np->port == 1)
-                                       phy_addr_off = 12;
-                       } else {
-                               tp = &phy_template_niu_10g_fiber;
-                               phy_addr_off += np->port;
-                       }
-                       break;
-               }
-       } else {
-               switch (np->flags &
-                       (NIU_FLAGS_10G |
-                        NIU_FLAGS_FIBER |
-                        NIU_FLAGS_XCVR_SERDES)) {
-               case 0:
-                       /* 1G copper */
-                       tp = &phy_template_1g_copper;
-                       if (plat_type == PLAT_TYPE_VF_P0)
-                               phy_addr_off = 10;
-                       else if (plat_type == PLAT_TYPE_VF_P1)
-                               phy_addr_off = 26;
-
-                       phy_addr_off += (np->port ^ 0x3);
-                       break;
-
-               case NIU_FLAGS_10G:
-                       /* 10G copper */
-                       tp = &phy_template_10g_copper;
-                       break;
-
-               case NIU_FLAGS_FIBER:
-                       /* 1G fiber */
-                       tp = &phy_template_1g_fiber;
-                       break;
-
-               case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
-                       /* 10G fiber */
-                       tp = &phy_template_10g_fiber;
-                       if (plat_type == PLAT_TYPE_VF_P0 ||
-                           plat_type == PLAT_TYPE_VF_P1)
-                               phy_addr_off = 8;
-                       phy_addr_off += np->port;
-                       if (np->flags & NIU_FLAGS_HOTPLUG_PHY) {
-                               tp = &phy_template_10g_fiber_hotplug;
-                               if (np->port == 0)
-                                       phy_addr_off = 8;
-                               if (np->port == 1)
-                                       phy_addr_off = 12;
-                       }
-                       break;
-
-               case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
-               case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
-               case NIU_FLAGS_XCVR_SERDES:
-                       switch(np->port) {
-                       case 0:
-                       case 1:
-                               tp = &phy_template_10g_serdes;
-                               break;
-                       case 2:
-                       case 3:
-                               tp = &phy_template_1g_rgmii;
-                               break;
-                       default:
-                               return -EINVAL;
-                               break;
-                       }
-                       phy_addr_off = niu_atca_port_num[np->port];
-                       break;
-
-               default:
-                       return -EINVAL;
-               }
-       }
-
-       np->phy_ops = tp->ops;
-       np->phy_addr = tp->phy_addr_base + phy_addr_off;
-
-       return 0;
-}
-
-static int niu_init_link(struct niu *np)
-{
-       struct niu_parent *parent = np->parent;
-       int err, ignore;
-
-       if (parent->plat_type == PLAT_TYPE_NIU) {
-               err = niu_xcvr_init(np);
-               if (err)
-                       return err;
-               msleep(200);
-       }
-       err = niu_serdes_init(np);
-       if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY))
-               return err;
-       msleep(200);
-       err = niu_xcvr_init(np);
-       if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY))
-               niu_link_status(np, &ignore);
-       return 0;
-}
-
-static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
-{
-       u16 reg0 = addr[4] << 8 | addr[5];
-       u16 reg1 = addr[2] << 8 | addr[3];
-       u16 reg2 = addr[0] << 8 | addr[1];
-
-       if (np->flags & NIU_FLAGS_XMAC) {
-               nw64_mac(XMAC_ADDR0, reg0);
-               nw64_mac(XMAC_ADDR1, reg1);
-               nw64_mac(XMAC_ADDR2, reg2);
-       } else {
-               nw64_mac(BMAC_ADDR0, reg0);
-               nw64_mac(BMAC_ADDR1, reg1);
-               nw64_mac(BMAC_ADDR2, reg2);
-       }
-}
-
-static int niu_num_alt_addr(struct niu *np)
-{
-       if (np->flags & NIU_FLAGS_XMAC)
-               return XMAC_NUM_ALT_ADDR;
-       else
-               return BMAC_NUM_ALT_ADDR;
-}
-
-static int niu_set_alt_mac(struct niu *np, int index, unsigned char *addr)
-{
-       u16 reg0 = addr[4] << 8 | addr[5];
-       u16 reg1 = addr[2] << 8 | addr[3];
-       u16 reg2 = addr[0] << 8 | addr[1];
-
-       if (index >= niu_num_alt_addr(np))
-               return -EINVAL;
-
-       if (np->flags & NIU_FLAGS_XMAC) {
-               nw64_mac(XMAC_ALT_ADDR0(index), reg0);
-               nw64_mac(XMAC_ALT_ADDR1(index), reg1);
-               nw64_mac(XMAC_ALT_ADDR2(index), reg2);
-       } else {
-               nw64_mac(BMAC_ALT_ADDR0(index), reg0);
-               nw64_mac(BMAC_ALT_ADDR1(index), reg1);
-               nw64_mac(BMAC_ALT_ADDR2(index), reg2);
-       }
-
-       return 0;
-}
-
-static int niu_enable_alt_mac(struct niu *np, int index, int on)
-{
-       unsigned long reg;
-       u64 val, mask;
-
-       if (index >= niu_num_alt_addr(np))
-               return -EINVAL;
-
-       if (np->flags & NIU_FLAGS_XMAC) {
-               reg = XMAC_ADDR_CMPEN;
-               mask = 1 << index;
-       } else {
-               reg = BMAC_ADDR_CMPEN;
-               mask = 1 << (index + 1);
-       }
-
-       val = nr64_mac(reg);
-       if (on)
-               val |= mask;
-       else
-               val &= ~mask;
-       nw64_mac(reg, val);
-
-       return 0;
-}
-
-static void __set_rdc_table_num_hw(struct niu *np, unsigned long reg,
-                                  int num, int mac_pref)
-{
-       u64 val = nr64_mac(reg);
-       val &= ~(HOST_INFO_MACRDCTBLN | HOST_INFO_MPR);
-       val |= num;
-       if (mac_pref)
-               val |= HOST_INFO_MPR;
-       nw64_mac(reg, val);
-}
-
-static int __set_rdc_table_num(struct niu *np,
-                              int xmac_index, int bmac_index,
-                              int rdc_table_num, int mac_pref)
-{
-       unsigned long reg;
-
-       if (rdc_table_num & ~HOST_INFO_MACRDCTBLN)
-               return -EINVAL;
-       if (np->flags & NIU_FLAGS_XMAC)
-               reg = XMAC_HOST_INFO(xmac_index);
-       else
-               reg = BMAC_HOST_INFO(bmac_index);
-       __set_rdc_table_num_hw(np, reg, rdc_table_num, mac_pref);
-       return 0;
-}
-
-static int niu_set_primary_mac_rdc_table(struct niu *np, int table_num,
-                                        int mac_pref)
-{
-       return __set_rdc_table_num(np, 17, 0, table_num, mac_pref);
-}
-
-static int niu_set_multicast_mac_rdc_table(struct niu *np, int table_num,
-                                          int mac_pref)
-{
-       return __set_rdc_table_num(np, 16, 8, table_num, mac_pref);
-}
-
-static int niu_set_alt_mac_rdc_table(struct niu *np, int idx,
-                                    int table_num, int mac_pref)
-{
-       if (idx >= niu_num_alt_addr(np))
-               return -EINVAL;
-       return __set_rdc_table_num(np, idx, idx + 1, table_num, mac_pref);
-}
-
-static u64 vlan_entry_set_parity(u64 reg_val)
-{
-       u64 port01_mask;
-       u64 port23_mask;
-
-       port01_mask = 0x00ff;
-       port23_mask = 0xff00;
-
-       if (hweight64(reg_val & port01_mask) & 1)
-               reg_val |= ENET_VLAN_TBL_PARITY0;
-       else
-               reg_val &= ~ENET_VLAN_TBL_PARITY0;
-
-       if (hweight64(reg_val & port23_mask) & 1)
-               reg_val |= ENET_VLAN_TBL_PARITY1;
-       else
-               reg_val &= ~ENET_VLAN_TBL_PARITY1;
-
-       return reg_val;
-}
-
-static void vlan_tbl_write(struct niu *np, unsigned long index,
-                          int port, int vpr, int rdc_table)
-{
-       u64 reg_val = nr64(ENET_VLAN_TBL(index));
-
-       reg_val &= ~((ENET_VLAN_TBL_VPR |
-                     ENET_VLAN_TBL_VLANRDCTBLN) <<
-                    ENET_VLAN_TBL_SHIFT(port));
-       if (vpr)
-               reg_val |= (ENET_VLAN_TBL_VPR <<
-                           ENET_VLAN_TBL_SHIFT(port));
-       reg_val |= (rdc_table << ENET_VLAN_TBL_SHIFT(port));
-
-       reg_val = vlan_entry_set_parity(reg_val);
-
-       nw64(ENET_VLAN_TBL(index), reg_val);
-}
-
-static void vlan_tbl_clear(struct niu *np)
-{
-       int i;
-
-       for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++)
-               nw64(ENET_VLAN_TBL(i), 0);
-}
-
-static int tcam_wait_bit(struct niu *np, u64 bit)
-{
-       int limit = 1000;
-
-       while (--limit > 0) {
-               if (nr64(TCAM_CTL) & bit)
-                       break;
-               udelay(1);
-       }
-       if (limit <= 0)
-               return -ENODEV;
-
-       return 0;
-}
-
-static int tcam_flush(struct niu *np, int index)
-{
-       nw64(TCAM_KEY_0, 0x00);
-       nw64(TCAM_KEY_MASK_0, 0xff);
-       nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
-
-       return tcam_wait_bit(np, TCAM_CTL_STAT);
-}
-
-#if 0
-static int tcam_read(struct niu *np, int index,
-                    u64 *key, u64 *mask)
-{
-       int err;
-
-       nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_READ | index));
-       err = tcam_wait_bit(np, TCAM_CTL_STAT);
-       if (!err) {
-               key[0] = nr64(TCAM_KEY_0);
-               key[1] = nr64(TCAM_KEY_1);
-               key[2] = nr64(TCAM_KEY_2);
-               key[3] = nr64(TCAM_KEY_3);
-               mask[0] = nr64(TCAM_KEY_MASK_0);
-               mask[1] = nr64(TCAM_KEY_MASK_1);
-               mask[2] = nr64(TCAM_KEY_MASK_2);
-               mask[3] = nr64(TCAM_KEY_MASK_3);
-       }
-       return err;
-}
-#endif
-
-static int tcam_write(struct niu *np, int index,
-                     u64 *key, u64 *mask)
-{
-       nw64(TCAM_KEY_0, key[0]);
-       nw64(TCAM_KEY_1, key[1]);
-       nw64(TCAM_KEY_2, key[2]);
-       nw64(TCAM_KEY_3, key[3]);
-       nw64(TCAM_KEY_MASK_0, mask[0]);
-       nw64(TCAM_KEY_MASK_1, mask[1]);
-       nw64(TCAM_KEY_MASK_2, mask[2]);
-       nw64(TCAM_KEY_MASK_3, mask[3]);
-       nw64(TCAM_CTL, (TCAM_CTL_RWC_TCAM_WRITE | index));
-
-       return tcam_wait_bit(np, TCAM_CTL_STAT);
-}
-
-#if 0
-static int tcam_assoc_read(struct niu *np, int index, u64 *data)
-{
-       int err;
-
-       nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_READ | index));
-       err = tcam_wait_bit(np, TCAM_CTL_STAT);
-       if (!err)
-               *data = nr64(TCAM_KEY_1);
-
-       return err;
-}
-#endif
-
-static int tcam_assoc_write(struct niu *np, int index, u64 assoc_data)
-{
-       nw64(TCAM_KEY_1, assoc_data);
-       nw64(TCAM_CTL, (TCAM_CTL_RWC_RAM_WRITE | index));
-
-       return tcam_wait_bit(np, TCAM_CTL_STAT);
-}
-
-static void tcam_enable(struct niu *np, int on)
-{
-       u64 val = nr64(FFLP_CFG_1);
-
-       if (on)
-               val &= ~FFLP_CFG_1_TCAM_DIS;
-       else
-               val |= FFLP_CFG_1_TCAM_DIS;
-       nw64(FFLP_CFG_1, val);
-}
-
-static void tcam_set_lat_and_ratio(struct niu *np, u64 latency, u64 ratio)
-{
-       u64 val = nr64(FFLP_CFG_1);
-
-       val &= ~(FFLP_CFG_1_FFLPINITDONE |
-                FFLP_CFG_1_CAMLAT |
-                FFLP_CFG_1_CAMRATIO);
-       val |= (latency << FFLP_CFG_1_CAMLAT_SHIFT);
-       val |= (ratio << FFLP_CFG_1_CAMRATIO_SHIFT);
-       nw64(FFLP_CFG_1, val);
-
-       val = nr64(FFLP_CFG_1);
-       val |= FFLP_CFG_1_FFLPINITDONE;
-       nw64(FFLP_CFG_1, val);
-}
-
-static int tcam_user_eth_class_enable(struct niu *np, unsigned long class,
-                                     int on)
-{
-       unsigned long reg;
-       u64 val;
-
-       if (class < CLASS_CODE_ETHERTYPE1 ||
-           class > CLASS_CODE_ETHERTYPE2)
-               return -EINVAL;
-
-       reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
-       val = nr64(reg);
-       if (on)
-               val |= L2_CLS_VLD;
-       else
-               val &= ~L2_CLS_VLD;
-       nw64(reg, val);
-
-       return 0;
-}
-
-#if 0
-static int tcam_user_eth_class_set(struct niu *np, unsigned long class,
-                                  u64 ether_type)
-{
-       unsigned long reg;
-       u64 val;
-
-       if (class < CLASS_CODE_ETHERTYPE1 ||
-           class > CLASS_CODE_ETHERTYPE2 ||
-           (ether_type & ~(u64)0xffff) != 0)
-               return -EINVAL;
-
-       reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
-       val = nr64(reg);
-       val &= ~L2_CLS_ETYPE;
-       val |= (ether_type << L2_CLS_ETYPE_SHIFT);
-       nw64(reg, val);
-
-       return 0;
-}
-#endif
-
-static int tcam_user_ip_class_enable(struct niu *np, unsigned long class,
-                                    int on)
-{
-       unsigned long reg;
-       u64 val;
-
-       if (class < CLASS_CODE_USER_PROG1 ||
-           class > CLASS_CODE_USER_PROG4)
-               return -EINVAL;
-
-       reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
-       val = nr64(reg);
-       if (on)
-               val |= L3_CLS_VALID;
-       else
-               val &= ~L3_CLS_VALID;
-       nw64(reg, val);
-
-       return 0;
-}
-
-static int tcam_user_ip_class_set(struct niu *np, unsigned long class,
-                                 int ipv6, u64 protocol_id,
-                                 u64 tos_mask, u64 tos_val)
-{
-       unsigned long reg;
-       u64 val;
-
-       if (class < CLASS_CODE_USER_PROG1 ||
-           class > CLASS_CODE_USER_PROG4 ||
-           (protocol_id & ~(u64)0xff) != 0 ||
-           (tos_mask & ~(u64)0xff) != 0 ||
-           (tos_val & ~(u64)0xff) != 0)
-               return -EINVAL;
-
-       reg = L3_CLS(class - CLASS_CODE_USER_PROG1);
-       val = nr64(reg);
-       val &= ~(L3_CLS_IPVER | L3_CLS_PID |
-                L3_CLS_TOSMASK | L3_CLS_TOS);
-       if (ipv6)
-               val |= L3_CLS_IPVER;
-       val |= (protocol_id << L3_CLS_PID_SHIFT);
-       val |= (tos_mask << L3_CLS_TOSMASK_SHIFT);
-       val |= (tos_val << L3_CLS_TOS_SHIFT);
-       nw64(reg, val);
-
-       return 0;
-}
-
-static int tcam_early_init(struct niu *np)
-{
-       unsigned long i;
-       int err;
-
-       tcam_enable(np, 0);
-       tcam_set_lat_and_ratio(np,
-                              DEFAULT_TCAM_LATENCY,
-                              DEFAULT_TCAM_ACCESS_RATIO);
-       for (i = CLASS_CODE_ETHERTYPE1; i <= CLASS_CODE_ETHERTYPE2; i++) {
-               err = tcam_user_eth_class_enable(np, i, 0);
-               if (err)
-                       return err;
-       }
-       for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_USER_PROG4; i++) {
-               err = tcam_user_ip_class_enable(np, i, 0);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-static int tcam_flush_all(struct niu *np)
-{
-       unsigned long i;
-
-       for (i = 0; i < np->parent->tcam_num_entries; i++) {
-               int err = tcam_flush(np, i);
-               if (err)
-                       return err;
-       }
-       return 0;
-}
-
-static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
-{
-       return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
-}
-
-#if 0
-static int hash_read(struct niu *np, unsigned long partition,
-                    unsigned long index, unsigned long num_entries,
-                    u64 *data)
-{
-       u64 val = hash_addr_regval(index, num_entries);
-       unsigned long i;
-
-       if (partition >= FCRAM_NUM_PARTITIONS ||
-           index + num_entries > FCRAM_SIZE)
-               return -EINVAL;
-
-       nw64(HASH_TBL_ADDR(partition), val);
-       for (i = 0; i < num_entries; i++)
-               data[i] = nr64(HASH_TBL_DATA(partition));
-
-       return 0;
-}
-#endif
-
-static int hash_write(struct niu *np, unsigned long partition,
-                     unsigned long index, unsigned long num_entries,
-                     u64 *data)
-{
-       u64 val = hash_addr_regval(index, num_entries);
-       unsigned long i;
-
-       if (partition >= FCRAM_NUM_PARTITIONS ||
-           index + (num_entries * 8) > FCRAM_SIZE)
-               return -EINVAL;
-
-       nw64(HASH_TBL_ADDR(partition), val);
-       for (i = 0; i < num_entries; i++)
-               nw64(HASH_TBL_DATA(partition), data[i]);
-
-       return 0;
-}
-
-static void fflp_reset(struct niu *np)
-{
-       u64 val;
-
-       nw64(FFLP_CFG_1, FFLP_CFG_1_PIO_FIO_RST);
-       udelay(10);
-       nw64(FFLP_CFG_1, 0);
-
-       val = FFLP_CFG_1_FCRAMOUTDR_NORMAL | FFLP_CFG_1_FFLPINITDONE;
-       nw64(FFLP_CFG_1, val);
-}
-
-static void fflp_set_timings(struct niu *np)
-{
-       u64 val = nr64(FFLP_CFG_1);
-
-       val &= ~FFLP_CFG_1_FFLPINITDONE;
-       val |= (DEFAULT_FCRAMRATIO << FFLP_CFG_1_FCRAMRATIO_SHIFT);
-       nw64(FFLP_CFG_1, val);
-
-       val = nr64(FFLP_CFG_1);
-       val |= FFLP_CFG_1_FFLPINITDONE;
-       nw64(FFLP_CFG_1, val);
-
-       val = nr64(FCRAM_REF_TMR);
-       val &= ~(FCRAM_REF_TMR_MAX | FCRAM_REF_TMR_MIN);
-       val |= (DEFAULT_FCRAM_REFRESH_MAX << FCRAM_REF_TMR_MAX_SHIFT);
-       val |= (DEFAULT_FCRAM_REFRESH_MIN << FCRAM_REF_TMR_MIN_SHIFT);
-       nw64(FCRAM_REF_TMR, val);
-}
-
-static int fflp_set_partition(struct niu *np, u64 partition,
-                             u64 mask, u64 base, int enable)
-{
-       unsigned long reg;
-       u64 val;
-
-       if (partition >= FCRAM_NUM_PARTITIONS ||
-           (mask & ~(u64)0x1f) != 0 ||
-           (base & ~(u64)0x1f) != 0)
-               return -EINVAL;
-
-       reg = FLW_PRT_SEL(partition);
-
-       val = nr64(reg);
-       val &= ~(FLW_PRT_SEL_EXT | FLW_PRT_SEL_MASK | FLW_PRT_SEL_BASE);
-       val |= (mask << FLW_PRT_SEL_MASK_SHIFT);
-       val |= (base << FLW_PRT_SEL_BASE_SHIFT);
-       if (enable)
-               val |= FLW_PRT_SEL_EXT;
-       nw64(reg, val);
-
-       return 0;
-}
-
-static int fflp_disable_all_partitions(struct niu *np)
-{
-       unsigned long i;
-
-       for (i = 0; i < FCRAM_NUM_PARTITIONS; i++) {
-               int err = fflp_set_partition(np, 0, 0, 0, 0);
-               if (err)
-                       return err;
-       }
-       return 0;
-}
-
-static void fflp_llcsnap_enable(struct niu *np, int on)
-{
-       u64 val = nr64(FFLP_CFG_1);
-
-       if (on)
-               val |= FFLP_CFG_1_LLCSNAP;
-       else
-               val &= ~FFLP_CFG_1_LLCSNAP;
-       nw64(FFLP_CFG_1, val);
-}
-
-static void fflp_errors_enable(struct niu *np, int on)
-{
-       u64 val = nr64(FFLP_CFG_1);
-
-       if (on)
-               val &= ~FFLP_CFG_1_ERRORDIS;
-       else
-               val |= FFLP_CFG_1_ERRORDIS;
-       nw64(FFLP_CFG_1, val);
-}
-
-static int fflp_hash_clear(struct niu *np)
-{
-       struct fcram_hash_ipv4 ent;
-       unsigned long i;
-
-       /* IPV4 hash entry with valid bit clear, rest is don't care.  */
-       memset(&ent, 0, sizeof(ent));
-       ent.header = HASH_HEADER_EXT;
-
-       for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) {
-               int err = hash_write(np, 0, i, 1, (u64 *) &ent);
-               if (err)
-                       return err;
-       }
-       return 0;
-}
-
-static int fflp_early_init(struct niu *np)
-{
-       struct niu_parent *parent;
-       unsigned long flags;
-       int err;
-
-       niu_lock_parent(np, flags);
-
-       parent = np->parent;
-       err = 0;
-       if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
-               if (np->parent->plat_type != PLAT_TYPE_NIU) {
-                       fflp_reset(np);
-                       fflp_set_timings(np);
-                       err = fflp_disable_all_partitions(np);
-                       if (err) {
-                               netif_printk(np, probe, KERN_DEBUG, np->dev,
-                                            "fflp_disable_all_partitions failed, err=%d\n",
-                                            err);
-                               goto out;
-                       }
-               }
-
-               err = tcam_early_init(np);
-               if (err) {
-                       netif_printk(np, probe, KERN_DEBUG, np->dev,
-                                    "tcam_early_init failed, err=%d\n", err);
-                       goto out;
-               }
-               fflp_llcsnap_enable(np, 1);
-               fflp_errors_enable(np, 0);
-               nw64(H1POLY, 0);
-               nw64(H2POLY, 0);
-
-               err = tcam_flush_all(np);
-               if (err) {
-                       netif_printk(np, probe, KERN_DEBUG, np->dev,
-                                    "tcam_flush_all failed, err=%d\n", err);
-                       goto out;
-               }
-               if (np->parent->plat_type != PLAT_TYPE_NIU) {
-                       err = fflp_hash_clear(np);
-                       if (err) {
-                               netif_printk(np, probe, KERN_DEBUG, np->dev,
-                                            "fflp_hash_clear failed, err=%d\n",
-                                            err);
-                               goto out;
-                       }
-               }
-
-               vlan_tbl_clear(np);
-
-               parent->flags |= PARENT_FLGS_CLS_HWINIT;
-       }
-out:
-       niu_unlock_parent(np, flags);
-       return err;
-}
-
-static int niu_set_flow_key(struct niu *np, unsigned long class_code, u64 key)
-{
-       if (class_code < CLASS_CODE_USER_PROG1 ||
-           class_code > CLASS_CODE_SCTP_IPV6)
-               return -EINVAL;
-
-       nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key);
-       return 0;
-}
-
-static int niu_set_tcam_key(struct niu *np, unsigned long class_code, u64 key)
-{
-       if (class_code < CLASS_CODE_USER_PROG1 ||
-           class_code > CLASS_CODE_SCTP_IPV6)
-               return -EINVAL;
-
-       nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key);
-       return 0;
-}
-
-/* Entries for the ports are interleaved in the TCAM */
-static u16 tcam_get_index(struct niu *np, u16 idx)
-{
-       /* One entry reserved for IP fragment rule */
-       if (idx >= (np->clas.tcam_sz - 1))
-               idx = 0;
-       return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
-}
-
-static u16 tcam_get_size(struct niu *np)
-{
-       /* One entry reserved for IP fragment rule */
-       return np->clas.tcam_sz - 1;
-}
-
-static u16 tcam_get_valid_entry_cnt(struct niu *np)
-{
-       /* One entry reserved for IP fragment rule */
-       return np->clas.tcam_valid_entries - 1;
-}
-
-static void niu_rx_skb_append(struct sk_buff *skb, struct page *page,
-                             u32 offset, u32 size)
-{
-       int i = skb_shinfo(skb)->nr_frags;
-       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-       frag->page = page;
-       frag->page_offset = offset;
-       frag->size = size;
-
-       skb->len += size;
-       skb->data_len += size;
-       skb->truesize += size;
-
-       skb_shinfo(skb)->nr_frags = i + 1;
-}
-
-static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
-{
-       a >>= PAGE_SHIFT;
-       a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
-
-       return a & (MAX_RBR_RING_SIZE - 1);
-}
-
-static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
-                                   struct page ***link)
-{
-       unsigned int h = niu_hash_rxaddr(rp, addr);
-       struct page *p, **pp;
-
-       addr &= PAGE_MASK;
-       pp = &rp->rxhash[h];
-       for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
-               if (p->index == addr) {
-                       *link = pp;
-                       goto found;
-               }
-       }
-       BUG();
-
-found:
-       return p;
-}
-
-static void niu_hash_page(struct rx_ring_info *rp, struct page *page, u64 base)
-{
-       unsigned int h = niu_hash_rxaddr(rp, base);
-
-       page->index = base;
-       page->mapping = (struct address_space *) rp->rxhash[h];
-       rp->rxhash[h] = page;
-}
-
-static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
-                           gfp_t mask, int start_index)
-{
-       struct page *page;
-       u64 addr;
-       int i;
-
-       page = alloc_page(mask);
-       if (!page)
-               return -ENOMEM;
-
-       addr = np->ops->map_page(np->device, page, 0,
-                                PAGE_SIZE, DMA_FROM_DEVICE);
-
-       niu_hash_page(rp, page, addr);
-       if (rp->rbr_blocks_per_page > 1)
-               atomic_add(rp->rbr_blocks_per_page - 1,
-                          &compound_head(page)->_count);
-
-       for (i = 0; i < rp->rbr_blocks_per_page; i++) {
-               __le32 *rbr = &rp->rbr[start_index + i];
-
-               *rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
-               addr += rp->rbr_block_size;
-       }
-
-       return 0;
-}
-
-static void niu_rbr_refill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
-{
-       int index = rp->rbr_index;
-
-       rp->rbr_pending++;
-       if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) {
-               int err = niu_rbr_add_page(np, rp, mask, index);
-
-               if (unlikely(err)) {
-                       rp->rbr_pending--;
-                       return;
-               }
-
-               rp->rbr_index += rp->rbr_blocks_per_page;
-               BUG_ON(rp->rbr_index > rp->rbr_table_size);
-               if (rp->rbr_index == rp->rbr_table_size)
-                       rp->rbr_index = 0;
-
-               if (rp->rbr_pending >= rp->rbr_kick_thresh) {
-                       nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending);
-                       rp->rbr_pending = 0;
-               }
-       }
-}
-
-static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
-{
-       unsigned int index = rp->rcr_index;
-       int num_rcr = 0;
-
-       rp->rx_dropped++;
-       while (1) {
-               struct page *page, **link;
-               u64 addr, val;
-               u32 rcr_size;
-
-               num_rcr++;
-
-               val = le64_to_cpup(&rp->rcr[index]);
-               addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
-                       RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
-               page = niu_find_rxpage(rp, addr, &link);
-
-               rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
-                                        RCR_ENTRY_PKTBUFSZ_SHIFT];
-               if ((page->index + PAGE_SIZE) - rcr_size == addr) {
-                       *link = (struct page *) page->mapping;
-                       np->ops->unmap_page(np->device, page->index,
-                                           PAGE_SIZE, DMA_FROM_DEVICE);
-                       page->index = 0;
-                       page->mapping = NULL;
-                       __free_page(page);
-                       rp->rbr_refill_pending++;
-               }
-
-               index = NEXT_RCR(rp, index);
-               if (!(val & RCR_ENTRY_MULTI))
-                       break;
-
-       }
-       rp->rcr_index = index;
-
-       return num_rcr;
-}
-
-static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
-                             struct rx_ring_info *rp)
-{
-       unsigned int index = rp->rcr_index;
-       struct rx_pkt_hdr1 *rh;
-       struct sk_buff *skb;
-       int len, num_rcr;
-
-       skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
-       if (unlikely(!skb))
-               return niu_rx_pkt_ignore(np, rp);
-
-       num_rcr = 0;
-       while (1) {
-               struct page *page, **link;
-               u32 rcr_size, append_size;
-               u64 addr, val, off;
-
-               num_rcr++;
-
-               val = le64_to_cpup(&rp->rcr[index]);
-
-               len = (val & RCR_ENTRY_L2_LEN) >>
-                       RCR_ENTRY_L2_LEN_SHIFT;
-               len -= ETH_FCS_LEN;
-
-               addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
-                       RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
-               page = niu_find_rxpage(rp, addr, &link);
-
-               rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
-                                        RCR_ENTRY_PKTBUFSZ_SHIFT];
-
-               off = addr & ~PAGE_MASK;
-               append_size = rcr_size;
-               if (num_rcr == 1) {
-                       int ptype;
-
-                       ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
-                       if ((ptype == RCR_PKT_TYPE_TCP ||
-                            ptype == RCR_PKT_TYPE_UDP) &&
-                           !(val & (RCR_ENTRY_NOPORT |
-                                    RCR_ENTRY_ERROR)))
-                               skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       else
-                               skb_checksum_none_assert(skb);
-               } else if (!(val & RCR_ENTRY_MULTI))
-                       append_size = len - skb->len;
-
-               niu_rx_skb_append(skb, page, off, append_size);
-               if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
-                       *link = (struct page *) page->mapping;
-                       np->ops->unmap_page(np->device, page->index,
-                                           PAGE_SIZE, DMA_FROM_DEVICE);
-                       page->index = 0;
-                       page->mapping = NULL;
-                       rp->rbr_refill_pending++;
-               } else
-                       get_page(page);
-
-               index = NEXT_RCR(rp, index);
-               if (!(val & RCR_ENTRY_MULTI))
-                       break;
-
-       }
-       rp->rcr_index = index;
-
-       len += sizeof(*rh);
-       len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
-       __pskb_pull_tail(skb, len);
-
-       rh = (struct rx_pkt_hdr1 *) skb->data;
-       if (np->dev->features & NETIF_F_RXHASH)
-               skb->rxhash = ((u32)rh->hashval2_0 << 24 |
-                              (u32)rh->hashval2_1 << 16 |
-                              (u32)rh->hashval1_1 << 8 |
-                              (u32)rh->hashval1_2 << 0);
-       skb_pull(skb, sizeof(*rh));
-
-       rp->rx_packets++;
-       rp->rx_bytes += skb->len;
-
-       skb->protocol = eth_type_trans(skb, np->dev);
-       skb_record_rx_queue(skb, rp->rx_channel);
-       napi_gro_receive(napi, skb);
-
-       return num_rcr;
-}
-
-static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
-{
-       int blocks_per_page = rp->rbr_blocks_per_page;
-       int err, index = rp->rbr_index;
-
-       err = 0;
-       while (index < (rp->rbr_table_size - blocks_per_page)) {
-               err = niu_rbr_add_page(np, rp, mask, index);
-               if (err)
-                       break;
-
-               index += blocks_per_page;
-       }
-
-       rp->rbr_index = index;
-       return err;
-}
-
-static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
-{
-       int i;
-
-       for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
-               struct page *page;
-
-               page = rp->rxhash[i];
-               while (page) {
-                       struct page *next = (struct page *) page->mapping;
-                       u64 base = page->index;
-
-                       np->ops->unmap_page(np->device, base, PAGE_SIZE,
-                                           DMA_FROM_DEVICE);
-                       page->index = 0;
-                       page->mapping = NULL;
-
-                       __free_page(page);
-
-                       page = next;
-               }
-       }
-
-       for (i = 0; i < rp->rbr_table_size; i++)
-               rp->rbr[i] = cpu_to_le32(0);
-       rp->rbr_index = 0;
-}
-
-static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
-{
-       struct tx_buff_info *tb = &rp->tx_buffs[idx];
-       struct sk_buff *skb = tb->skb;
-       struct tx_pkt_hdr *tp;
-       u64 tx_flags;
-       int i, len;
-
-       tp = (struct tx_pkt_hdr *) skb->data;
-       tx_flags = le64_to_cpup(&tp->flags);
-
-       rp->tx_packets++;
-       rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
-                        ((tx_flags & TXHDR_PAD) / 2));
-
-       len = skb_headlen(skb);
-       np->ops->unmap_single(np->device, tb->mapping,
-                             len, DMA_TO_DEVICE);
-
-       if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
-               rp->mark_pending--;
-
-       tb->skb = NULL;
-       do {
-               idx = NEXT_TX(rp, idx);
-               len -= MAX_TX_DESC_LEN;
-       } while (len > 0);
-
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               tb = &rp->tx_buffs[idx];
-               BUG_ON(tb->skb != NULL);
-               np->ops->unmap_page(np->device, tb->mapping,
-                                   skb_shinfo(skb)->frags[i].size,
-                                   DMA_TO_DEVICE);
-               idx = NEXT_TX(rp, idx);
-       }
-
-       dev_kfree_skb(skb);
-
-       return idx;
-}
-
-#define NIU_TX_WAKEUP_THRESH(rp)               ((rp)->pending / 4)
-
-static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
-{
-       struct netdev_queue *txq;
-       u16 pkt_cnt, tmp;
-       int cons, index;
-       u64 cs;
-
-       index = (rp - np->tx_rings);
-       txq = netdev_get_tx_queue(np->dev, index);
-
-       cs = rp->tx_cs;
-       if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
-               goto out;
-
-       tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
-       pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
-               (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
-
-       rp->last_pkt_cnt = tmp;
-
-       cons = rp->cons;
-
-       netif_printk(np, tx_done, KERN_DEBUG, np->dev,
-                    "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
-
-       while (pkt_cnt--)
-               cons = release_tx_packet(np, rp, cons);
-
-       rp->cons = cons;
-       smp_mb();
-
-out:
-       if (unlikely(netif_tx_queue_stopped(txq) &&
-                    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
-               __netif_tx_lock(txq, smp_processor_id());
-               if (netif_tx_queue_stopped(txq) &&
-                   (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
-                       netif_tx_wake_queue(txq);
-               __netif_tx_unlock(txq);
-       }
-}
-
-static inline void niu_sync_rx_discard_stats(struct niu *np,
-                                            struct rx_ring_info *rp,
-                                            const int limit)
-{
-       /* This elaborate scheme is needed for reading the RX discard
-        * counters, as they are only 16-bit and can overflow quickly,
-        * and because the overflow indication bit is not usable as
-        * the counter value does not wrap, but remains at max value
-        * 0xFFFF.
-        *
-        * In theory and in practice counters can be lost in between
-        * reading nr64() and clearing the counter nw64().  For this
-        * reason, the number of counter clearings nw64() is
-        * limited/reduced though the limit parameter.
-        */
-       int rx_channel = rp->rx_channel;
-       u32 misc, wred;
-
-       /* RXMISC (Receive Miscellaneous Discard Count), covers the
-        * following discard events: IPP (Input Port Process),
-        * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive
-        * Block Ring) prefetch buffer is empty.
-        */
-       misc = nr64(RXMISC(rx_channel));
-       if (unlikely((misc & RXMISC_COUNT) > limit)) {
-               nw64(RXMISC(rx_channel), 0);
-               rp->rx_errors += misc & RXMISC_COUNT;
-
-               if (unlikely(misc & RXMISC_OFLOW))
-                       dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
-                               rx_channel);
-
-               netif_printk(np, rx_err, KERN_DEBUG, np->dev,
-                            "rx-%d: MISC drop=%u over=%u\n",
-                            rx_channel, misc, misc-limit);
-       }
-
-       /* WRED (Weighted Random Early Discard) by hardware */
-       wred = nr64(RED_DIS_CNT(rx_channel));
-       if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
-               nw64(RED_DIS_CNT(rx_channel), 0);
-               rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
-
-               if (unlikely(wred & RED_DIS_CNT_OFLOW))
-                       dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
-
-               netif_printk(np, rx_err, KERN_DEBUG, np->dev,
-                            "rx-%d: WRED drop=%u over=%u\n",
-                            rx_channel, wred, wred-limit);
-       }
-}
-
-static int niu_rx_work(struct napi_struct *napi, struct niu *np,
-                      struct rx_ring_info *rp, int budget)
-{
-       int qlen, rcr_done = 0, work_done = 0;
-       struct rxdma_mailbox *mbox = rp->mbox;
-       u64 stat;
-
-#if 1
-       stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
-       qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
-#else
-       stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
-       qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
-#endif
-       mbox->rx_dma_ctl_stat = 0;
-       mbox->rcrstat_a = 0;
-
-       netif_printk(np, rx_status, KERN_DEBUG, np->dev,
-                    "%s(chan[%d]), stat[%llx] qlen=%d\n",
-                    __func__, rp->rx_channel, (unsigned long long)stat, qlen);
-
-       rcr_done = work_done = 0;
-       qlen = min(qlen, budget);
-       while (work_done < qlen) {
-               rcr_done += niu_process_rx_pkt(napi, np, rp);
-               work_done++;
-       }
-
-       if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
-               unsigned int i;
-
-               for (i = 0; i < rp->rbr_refill_pending; i++)
-                       niu_rbr_refill(np, rp, GFP_ATOMIC);
-               rp->rbr_refill_pending = 0;
-       }
-
-       stat = (RX_DMA_CTL_STAT_MEX |
-               ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
-               ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));
-
-       nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
-
-       /* Only sync discards stats when qlen indicate potential for drops */
-       if (qlen > 10)
-               niu_sync_rx_discard_stats(np, rp, 0x7FFF);
-
-       return work_done;
-}
-
-static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
-{
-       u64 v0 = lp->v0;
-       u32 tx_vec = (v0 >> 32);
-       u32 rx_vec = (v0 & 0xffffffff);
-       int i, work_done = 0;
-
-       netif_printk(np, intr, KERN_DEBUG, np->dev,
-                    "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
-
-       for (i = 0; i < np->num_tx_rings; i++) {
-               struct tx_ring_info *rp = &np->tx_rings[i];
-               if (tx_vec & (1 << rp->tx_channel))
-                       niu_tx_work(np, rp);
-               nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
-       }
-
-       for (i = 0; i < np->num_rx_rings; i++) {
-               struct rx_ring_info *rp = &np->rx_rings[i];
-
-               if (rx_vec & (1 << rp->rx_channel)) {
-                       int this_work_done;
-
-                       this_work_done = niu_rx_work(&lp->napi, np, rp,
-                                                    budget);
-
-                       budget -= this_work_done;
-                       work_done += this_work_done;
-               }
-               nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
-       }
-
-       return work_done;
-}
-
-static int niu_poll(struct napi_struct *napi, int budget)
-{
-       struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
-       struct niu *np = lp->np;
-       int work_done;
-
-       work_done = niu_poll_core(np, lp, budget);
-
-       if (work_done < budget) {
-               napi_complete(napi);
-               niu_ldg_rearm(np, lp, 1);
-       }
-       return work_done;
-}
-
-static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
-                                 u64 stat)
-{
-       netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
-
-       if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
-               pr_cont("RBR_TMOUT ");
-       if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
-               pr_cont("RSP_CNT ");
-       if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
-               pr_cont("BYTE_EN_BUS ");
-       if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
-               pr_cont("RSP_DAT ");
-       if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
-               pr_cont("RCR_ACK ");
-       if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
-               pr_cont("RCR_SHA_PAR ");
-       if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
-               pr_cont("RBR_PRE_PAR ");
-       if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
-               pr_cont("CONFIG ");
-       if (stat & RX_DMA_CTL_STAT_RCRINCON)
-               pr_cont("RCRINCON ");
-       if (stat & RX_DMA_CTL_STAT_RCRFULL)
-               pr_cont("RCRFULL ");
-       if (stat & RX_DMA_CTL_STAT_RBRFULL)
-               pr_cont("RBRFULL ");
-       if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
-               pr_cont("RBRLOGPAGE ");
-       if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
-               pr_cont("CFIGLOGPAGE ");
-       if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
-               pr_cont("DC_FIDO ");
-
-       pr_cont(")\n");
-}
-
-static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
-{
-       u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
-       int err = 0;
-
-
-       if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
-                   RX_DMA_CTL_STAT_PORT_FATAL))
-               err = -EINVAL;
-
-       if (err) {
-               netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
-                          rp->rx_channel,
-                          (unsigned long long) stat);
-
-               niu_log_rxchan_errors(np, rp, stat);
-       }
-
-       nw64(RX_DMA_CTL_STAT(rp->rx_channel),
-            stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);
-
-       return err;
-}
-
-static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
-                                 u64 cs)
-{
-       netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
-
-       if (cs & TX_CS_MBOX_ERR)
-               pr_cont("MBOX ");
-       if (cs & TX_CS_PKT_SIZE_ERR)
-               pr_cont("PKT_SIZE ");
-       if (cs & TX_CS_TX_RING_OFLOW)
-               pr_cont("TX_RING_OFLOW ");
-       if (cs & TX_CS_PREF_BUF_PAR_ERR)
-               pr_cont("PREF_BUF_PAR ");
-       if (cs & TX_CS_NACK_PREF)
-               pr_cont("NACK_PREF ");
-       if (cs & TX_CS_NACK_PKT_RD)
-               pr_cont("NACK_PKT_RD ");
-       if (cs & TX_CS_CONF_PART_ERR)
-               pr_cont("CONF_PART ");
-       if (cs & TX_CS_PKT_PRT_ERR)
-               pr_cont("PKT_PTR ");
-
-       pr_cont(")\n");
-}
-
-static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
-{
-       u64 cs, logh, logl;
-
-       cs = nr64(TX_CS(rp->tx_channel));
-       logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
-       logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
-
-       netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
-                  rp->tx_channel,
-                  (unsigned long long)cs,
-                  (unsigned long long)logh,
-                  (unsigned long long)logl);
-
-       niu_log_txchan_errors(np, rp, cs);
-
-       return -ENODEV;
-}
-
-static int niu_mif_interrupt(struct niu *np)
-{
-       u64 mif_status = nr64(MIF_STATUS);
-       int phy_mdint = 0;
-
-       if (np->flags & NIU_FLAGS_XMAC) {
-               u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);
-
-               if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
-                       phy_mdint = 1;
-       }
-
-       netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
-                  (unsigned long long)mif_status, phy_mdint);
-
-       return -ENODEV;
-}
-
-static void niu_xmac_interrupt(struct niu *np)
-{
-       struct niu_xmac_stats *mp = &np->mac_stats.xmac;
-       u64 val;
-
-       val = nr64_mac(XTXMAC_STATUS);
-       if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
-               mp->tx_frames += TXMAC_FRM_CNT_COUNT;
-       if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
-               mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
-       if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
-               mp->tx_fifo_errors++;
-       if (val & XTXMAC_STATUS_TXMAC_OFLOW)
-               mp->tx_overflow_errors++;
-       if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
-               mp->tx_max_pkt_size_errors++;
-       if (val & XTXMAC_STATUS_TXMAC_UFLOW)
-               mp->tx_underflow_errors++;
-
-       val = nr64_mac(XRXMAC_STATUS);
-       if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
-               mp->rx_local_faults++;
-       if (val & XRXMAC_STATUS_RFLT_DET)
-               mp->rx_remote_faults++;
-       if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
-               mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
-       if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
-               mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
-       if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
-               mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
-       if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
-               mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
-       if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
-               mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
-       if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
-               mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
-       if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
-               mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
-       if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
-               mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
-       if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
-               mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
-       if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
-               mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
-       if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
-               mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
-       if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
-               mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
-       if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
-               mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
-       if (val & XRXMAC_STATUS_RXOCTET_CNT_EXP)
-               mp->rx_octets += RXMAC_BT_CNT_COUNT;
-       if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
-               mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
-       if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
-               mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
-       if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
-               mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
-       if (val & XRXMAC_STATUS_RXUFLOW)
-               mp->rx_underflows++;
-       if (val & XRXMAC_STATUS_RXOFLOW)
-               mp->rx_overflows++;
-
-       val = nr64_mac(XMAC_FC_STAT);
-       if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
-               mp->pause_off_state++;
-       if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
-               mp->pause_on_state++;
-       if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
-               mp->pause_received++;
-}
-
-/* Fold BMAC (1G MAC) hardware counter-expiry and error events into the
- * software statistics.  Called from niu_mac_interrupt() with np->lock
- * held.  Each *_CNT_EXP bit presumably means the hardware counter
- * wrapped, so the counter's full wrap value is added to the totals --
- * confirm against the Neptune MAC register documentation.
- */
-static void niu_bmac_interrupt(struct niu *np)
-{
-       struct niu_bmac_stats *mp = &np->mac_stats.bmac;
-       u64 val;
-
-       val = nr64_mac(BTXMAC_STATUS);
-       if (val & BTXMAC_STATUS_UNDERRUN)
-               mp->tx_underflow_errors++;
-       if (val & BTXMAC_STATUS_MAX_PKT_ERR)
-               mp->tx_max_pkt_size_errors++;
-       if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
-               mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
-       if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
-               mp->tx_frames += BTXMAC_FRM_CNT_COUNT;
-
-       val = nr64_mac(BRXMAC_STATUS);
-       if (val & BRXMAC_STATUS_OVERFLOW)
-               mp->rx_overflows++;
-       if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
-               mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
-       if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
-               mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
-       /* NOTE(review): the CRC error total below is accumulated with the
-        * ALIGN_ERR count macro, and the length error total with the
-        * CODE_VIOL macro -- these look like copy/paste slips; verify the
-        * intended per-counter wrap macros against the register layout.
-        */
-       if (val & BRXMAC_STATUS_CRC_ERR_EXP)
-               mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
-       if (val & BRXMAC_STATUS_LEN_ERR_EXP)
-               mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;
-
-       val = nr64_mac(BMAC_CTRL_STATUS);
-       if (val & BMAC_CTRL_STATUS_NOPAUSE)
-               mp->pause_off_state++;
-       if (val & BMAC_CTRL_STATUS_PAUSE)
-               mp->pause_on_state++;
-       if (val & BMAC_CTRL_STATUS_PAUSE_RECV)
-               mp->pause_received++;
-}
-
-/* MAC interrupt dispatch: route to the XMAC (10G) or BMAC (1G)
- * statistics handler depending on which MAC type this port carries.
- * Always reports success.
- */
-static int niu_mac_interrupt(struct niu *np)
-{
-       if (!(np->flags & NIU_FLAGS_XMAC))
-               niu_bmac_interrupt(np);
-       else
-               niu_xmac_interrupt(np);
-
-       return 0;
-}
-
-static void niu_log_device_error(struct niu *np, u64 stat)
-{
-       netdev_err(np->dev, "Core device errors ( ");
-
-       if (stat & SYS_ERR_MASK_META2)
-               pr_cont("META2 ");
-       if (stat & SYS_ERR_MASK_META1)
-               pr_cont("META1 ");
-       if (stat & SYS_ERR_MASK_PEU)
-               pr_cont("PEU ");
-       if (stat & SYS_ERR_MASK_TXC)
-               pr_cont("TXC ");
-       if (stat & SYS_ERR_MASK_RDMC)
-               pr_cont("RDMC ");
-       if (stat & SYS_ERR_MASK_TDMC)
-               pr_cont("TDMC ");
-       if (stat & SYS_ERR_MASK_ZCP)
-               pr_cont("ZCP ");
-       if (stat & SYS_ERR_MASK_FFLP)
-               pr_cont("FFLP ");
-       if (stat & SYS_ERR_MASK_IPP)
-               pr_cont("IPP ");
-       if (stat & SYS_ERR_MASK_MAC)
-               pr_cont("MAC ");
-       if (stat & SYS_ERR_MASK_SMX)
-               pr_cont("SMX ");
-
-       pr_cont(")\n");
-}
-
-static int niu_device_error(struct niu *np)
-{
-       u64 stat = nr64(SYS_ERR_STAT);
-
-       netdev_err(np->dev, "Core device error, stat[%llx]\n",
-                  (unsigned long long)stat);
-
-       niu_log_device_error(np, stat);
-
-       return -ENODEV;
-}
-
-/* Handle the error/slow-path interrupt sources latched in the logical
- * device state vectors v0/v1/v2.  Runs with np->lock held from
- * niu_interrupt().  Returns 0, or an error code if a fatal condition
- * was seen -- in which case all interrupts are disabled before
- * returning.
- */
-static int niu_slowpath_interrupt(struct niu *np, struct niu_ldg *lp,
-                             u64 v0, u64 v1, u64 v2)
-{
-
-       int i, err = 0;
-
-       /* Latch the raw vectors in the LDG state for later consumers. */
-       lp->v0 = v0;
-       lp->v1 = v1;
-       lp->v2 = v2;
-
-       /* Low 32 bits of v1: per-RX-channel error indications. */
-       if (v1 & 0x00000000ffffffffULL) {
-               u32 rx_vec = (v1 & 0xffffffff);
-
-               for (i = 0; i < np->num_rx_rings; i++) {
-                       struct rx_ring_info *rp = &np->rx_rings[i];
-
-                       if (rx_vec & (1 << rp->rx_channel)) {
-                               int r = niu_rx_error(np, rp);
-                               if (r) {
-                                       err = r;
-                               } else {
-                                       /* Non-fatal and no fastpath work
-                                        * pending: re-arm mailbox updates
-                                        * for this channel.
-                                        */
-                                       if (!v0)
-                                               nw64(RX_DMA_CTL_STAT(rp->rx_channel),
-                                                    RX_DMA_CTL_STAT_MEX);
-                               }
-                       }
-               }
-       }
-       /* Bits 32-62 of v1: per-TX-channel error indications. */
-       if (v1 & 0x7fffffff00000000ULL) {
-               u32 tx_vec = (v1 >> 32) & 0x7fffffff;
-
-               for (i = 0; i < np->num_tx_rings; i++) {
-                       struct tx_ring_info *rp = &np->tx_rings[i];
-
-                       if (tx_vec & (1 << rp->tx_channel)) {
-                               int r = niu_tx_error(np, rp);
-                               if (r)
-                                       err = r;
-                       }
-               }
-       }
-       /* Bit 63 of either vector: MIF (MDIO management) interrupt. */
-       if ((v0 | v1) & 0x8000000000000000ULL) {
-               int r = niu_mif_interrupt(np);
-               if (r)
-                       err = r;
-       }
-       /* v2 carries MAC (bits 0x01ef) and core device error (0x0210)
-        * sources.
-        */
-       if (v2) {
-               if (v2 & 0x01ef) {
-                       int r = niu_mac_interrupt(np);
-                       if (r)
-                               err = r;
-               }
-               if (v2 & 0x0210) {
-                       int r = niu_device_error(np);
-                       if (r)
-                               err = r;
-               }
-       }
-
-       if (err)
-               niu_enable_interrupts(np, 0);
-
-       return err;
-}
-
-/* Per-RX-channel fastpath interrupt hook: write the RCRTHRES/RCRTO bits
- * to RX_DMA_CTL_STAT (presumably write-1-to-clear acknowledgement --
- * confirm with the chip documentation).  The mailbox copy of the status
- * is read only for debug logging here.
- */
-static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
-                           int ldn)
-{
-       struct rxdma_mailbox *mbox = rp->mbox;
-       u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
-
-       stat_write = (RX_DMA_CTL_STAT_RCRTHRES |
-                     RX_DMA_CTL_STAT_RCRTO);
-       nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
-
-       netif_printk(np, intr, KERN_DEBUG, np->dev,
-                    "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
-}
-
-/* Per-TX-channel fastpath interrupt hook: snapshot the channel's TX_CS
- * register into rp->tx_cs for later processing (the consumer is not
- * visible in this chunk; presumably the NAPI poll path).
- */
-static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
-                           int ldn)
-{
-       rp->tx_cs = nr64(TX_CS(rp->tx_channel));
-
-       netif_printk(np, intr, KERN_DEBUG, np->dev,
-                    "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
-}
-
-/* Fastpath dispatch for one logical device group.  v0's low 32 bits are
- * the RX channel vector and the high 32 bits the TX channel vector.
- * For every ring mapped to this LDG, mask that ring's logical device
- * interrupt (LD_IM0_MASK) and, if its bit is set in the vector, run the
- * per-channel interrupt hook.
- */
-static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
-{
-       struct niu_parent *parent = np->parent;
-       u32 rx_vec, tx_vec;
-       int i;
-
-       tx_vec = (v0 >> 32);
-       rx_vec = (v0 & 0xffffffff);
-
-       for (i = 0; i < np->num_rx_rings; i++) {
-               struct rx_ring_info *rp = &np->rx_rings[i];
-               int ldn = LDN_RXDMA(rp->rx_channel);
-
-               /* Skip rings owned by a different LDG. */
-               if (parent->ldg_map[ldn] != ldg)
-                       continue;
-
-               nw64(LD_IM0(ldn), LD_IM0_MASK);
-               if (rx_vec & (1 << rp->rx_channel))
-                       niu_rxchan_intr(np, rp, ldn);
-       }
-
-       for (i = 0; i < np->num_tx_rings; i++) {
-               struct tx_ring_info *rp = &np->tx_rings[i];
-               int ldn = LDN_TXDMA(rp->tx_channel);
-
-               if (parent->ldg_map[ldn] != ldg)
-                       continue;
-
-               nw64(LD_IM0(ldn), LD_IM0_MASK);
-               if (tx_vec & (1 << rp->tx_channel))
-                       niu_txchan_intr(np, rp, ldn);
-       }
-}
-
-/* Hand fastpath work to NAPI: if the LDG's NAPI context can be armed,
- * latch the raw state vectors, mask the per-channel interrupts via
- * __niu_fastpath_interrupt(), and schedule the poll.  If NAPI is
- * already scheduled this is a no-op.
- */
-static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
-                             u64 v0, u64 v1, u64 v2)
-{
-       if (likely(napi_schedule_prep(&lp->napi))) {
-               lp->v0 = v0;
-               lp->v1 = v1;
-               lp->v2 = v2;
-               __niu_fastpath_interrupt(np, lp->ldg_num, v0);
-               __napi_schedule(&lp->napi);
-       }
-}
-
-/* Top-level hard IRQ handler, one instance per logical device group.
- * Reads the three LDG state vectors under np->lock; routes error/MIF
- * sources through the slow path, normal RX/TX work to NAPI, and
- * re-arms the LDG when nothing needs polling.  Returns IRQ_NONE when
- * all vectors are clear (shared-IRQ friendly).
- */
-static irqreturn_t niu_interrupt(int irq, void *dev_id)
-{
-       struct niu_ldg *lp = dev_id;
-       struct niu *np = lp->np;
-       int ldg = lp->ldg_num;
-       unsigned long flags;
-       u64 v0, v1, v2;
-
-       /* No trailing newline: the vector values are appended below with
-        * pr_cont().
-        */
-       if (netif_msg_intr(np))
-               printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
-                      __func__, lp, ldg);
-
-       spin_lock_irqsave(&np->lock, flags);
-
-       v0 = nr64(LDSV0(ldg));
-       v1 = nr64(LDSV1(ldg));
-       v2 = nr64(LDSV2(ldg));
-
-       if (netif_msg_intr(np))
-               pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
-                      (unsigned long long) v0,
-                      (unsigned long long) v1,
-                      (unsigned long long) v2);
-
-       if (unlikely(!v0 && !v1 && !v2)) {
-               spin_unlock_irqrestore(&np->lock, flags);
-               return IRQ_NONE;
-       }
-
-       /* Anything in v1/v2, or the MIF bit of v0, needs the slow path. */
-       if (unlikely((v0 & ((u64)1 << LDN_MIF)) || v1 || v2)) {
-               int err = niu_slowpath_interrupt(np, lp, v0, v1, v2);
-               if (err)
-                       goto out;
-       }
-       if (likely(v0 & ~((u64)1 << LDN_MIF)))
-               niu_schedule_napi(np, lp, v0, v1, v2);
-       else
-               niu_ldg_rearm(np, lp, 1);
-out:
-       spin_unlock_irqrestore(&np->lock, flags);
-
-       return IRQ_HANDLED;
-}
-
-/* Release all DMA-coherent memory and the page hash belonging to one RX
- * ring: mailbox, RCR (completion ring), RBR (buffer ring, after giving
- * back its pages via niu_rbr_free), then the rxhash array.  Safe to
- * call on a partially initialized ring -- every member is checked and
- * NULLed after freeing.
- */
-static void niu_free_rx_ring_info(struct niu *np, struct rx_ring_info *rp)
-{
-       if (rp->mbox) {
-               np->ops->free_coherent(np->device,
-                                      sizeof(struct rxdma_mailbox),
-                                      rp->mbox, rp->mbox_dma);
-               rp->mbox = NULL;
-       }
-       if (rp->rcr) {
-               np->ops->free_coherent(np->device,
-                                      MAX_RCR_RING_SIZE * sizeof(__le64),
-                                      rp->rcr, rp->rcr_dma);
-               rp->rcr = NULL;
-               rp->rcr_table_size = 0;
-               rp->rcr_index = 0;
-       }
-       if (rp->rbr) {
-               niu_rbr_free(np, rp);
-
-               np->ops->free_coherent(np->device,
-                                      MAX_RBR_RING_SIZE * sizeof(__le32),
-                                      rp->rbr, rp->rbr_dma);
-               rp->rbr = NULL;
-               rp->rbr_table_size = 0;
-               rp->rbr_index = 0;
-       }
-       kfree(rp->rxhash);
-       rp->rxhash = NULL;
-}
-
-/* Release one TX ring: the mailbox, then -- after dropping any SKBs
- * still attached to descriptors via release_tx_packet() -- the
- * descriptor ring itself.  Ring bookkeeping is reset so the structure
- * can be reused.  Safe on a partially initialized ring.
- */
-static void niu_free_tx_ring_info(struct niu *np, struct tx_ring_info *rp)
-{
-       if (rp->mbox) {
-               np->ops->free_coherent(np->device,
-                                      sizeof(struct txdma_mailbox),
-                                      rp->mbox, rp->mbox_dma);
-               rp->mbox = NULL;
-       }
-       if (rp->descr) {
-               int i;
-
-               for (i = 0; i < MAX_TX_RING_SIZE; i++) {
-                       if (rp->tx_buffs[i].skb)
-                               (void) release_tx_packet(np, rp, i);
-               }
-
-               np->ops->free_coherent(np->device,
-                                      MAX_TX_RING_SIZE * sizeof(__le64),
-                                      rp->descr, rp->descr_dma);
-               rp->descr = NULL;
-               rp->pending = 0;
-               rp->prod = 0;
-               rp->cons = 0;
-               rp->wrap_bit = 0;
-       }
-}
-
-/* Release all RX and TX ring state for this interface, including the
- * ring arrays themselves, and reset the ring counts to zero.  Safe to
- * call with either array already absent.
- */
-static void niu_free_channels(struct niu *np)
-{
-       int i;
-
-       if (np->rx_rings) {
-               for (i = 0; i < np->num_rx_rings; i++)
-                       niu_free_rx_ring_info(np, &np->rx_rings[i]);
-
-               kfree(np->rx_rings);
-               np->rx_rings = NULL;
-               np->num_rx_rings = 0;
-       }
-
-       if (np->tx_rings) {
-               for (i = 0; i < np->num_tx_rings; i++)
-                       niu_free_tx_ring_info(np, &np->tx_rings[i]);
-
-               kfree(np->tx_rings);
-               np->tx_rings = NULL;
-               np->num_tx_rings = 0;
-       }
-}
-
-/* Allocate one RX ring's resources: the page hash, the RXDMA mailbox,
- * the RCR (completion) ring and the RBR (buffer) ring.  Each coherent
- * allocation must be 64-byte aligned for the hardware; misalignment is
- * treated as a fatal -EINVAL.  On failure, partially allocated members
- * are left in rp for the caller to release (niu_alloc_channels() does
- * this via niu_free_channels()).
- */
-static int niu_alloc_rx_ring_info(struct niu *np,
-                                 struct rx_ring_info *rp)
-{
-       BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
-
-       rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
-                            GFP_KERNEL);
-       if (!rp->rxhash)
-               return -ENOMEM;
-
-       rp->mbox = np->ops->alloc_coherent(np->device,
-                                          sizeof(struct rxdma_mailbox),
-                                          &rp->mbox_dma, GFP_KERNEL);
-       if (!rp->mbox)
-               return -ENOMEM;
-       if ((unsigned long)rp->mbox & (64UL - 1)) {
-               netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
-                          rp->mbox);
-               return -EINVAL;
-       }
-
-       rp->rcr = np->ops->alloc_coherent(np->device,
-                                         MAX_RCR_RING_SIZE * sizeof(__le64),
-                                         &rp->rcr_dma, GFP_KERNEL);
-       if (!rp->rcr)
-               return -ENOMEM;
-       if ((unsigned long)rp->rcr & (64UL - 1)) {
-               netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
-                          rp->rcr);
-               return -EINVAL;
-       }
-       rp->rcr_table_size = MAX_RCR_RING_SIZE;
-       rp->rcr_index = 0;
-
-       rp->rbr = np->ops->alloc_coherent(np->device,
-                                         MAX_RBR_RING_SIZE * sizeof(__le32),
-                                         &rp->rbr_dma, GFP_KERNEL);
-       if (!rp->rbr)
-               return -ENOMEM;
-       if ((unsigned long)rp->rbr & (64UL - 1)) {
-               netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
-                          rp->rbr);
-               return -EINVAL;
-       }
-       rp->rbr_table_size = MAX_RBR_RING_SIZE;
-       rp->rbr_index = 0;
-       rp->rbr_pending = 0;
-
-       return 0;
-}
-
-/* Set the ring's TX DMA burst limit to MTU+32 bytes, capped at 4096.
- * These values are recommended by the HW designers for fair
- * utilization of DRR amongst the rings.
- */
-static void niu_set_max_burst(struct niu *np, struct tx_ring_info *rp)
-{
-       int burst = np->dev->mtu + 32;
-
-       rp->max_burst = (burst > 4096) ? 4096 : burst;
-}
-
-/* Allocate one TX ring's resources: the TXDMA mailbox and the
- * descriptor ring, both required to be 64-byte aligned for the
- * hardware.  Also primes the ring bookkeeping (pending/prod/cons/wrap),
- * the mark frequency and the DRR burst limit.  On failure, partially
- * allocated members are left in rp for the caller to release.
- */
-static int niu_alloc_tx_ring_info(struct niu *np,
-                                 struct tx_ring_info *rp)
-{
-       BUILD_BUG_ON(sizeof(struct txdma_mailbox) != 64);
-
-       rp->mbox = np->ops->alloc_coherent(np->device,
-                                          sizeof(struct txdma_mailbox),
-                                          &rp->mbox_dma, GFP_KERNEL);
-       if (!rp->mbox)
-               return -ENOMEM;
-       if ((unsigned long)rp->mbox & (64UL - 1)) {
-               netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
-                          rp->mbox);
-               return -EINVAL;
-       }
-
-       rp->descr = np->ops->alloc_coherent(np->device,
-                                           MAX_TX_RING_SIZE * sizeof(__le64),
-                                           &rp->descr_dma, GFP_KERNEL);
-       if (!rp->descr)
-               return -ENOMEM;
-       if ((unsigned long)rp->descr & (64UL - 1)) {
-               netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
-                          rp->descr);
-               return -EINVAL;
-       }
-
-       rp->pending = MAX_TX_RING_SIZE;
-       rp->prod = 0;
-       rp->cons = 0;
-       rp->wrap_bit = 0;
-
-       /* XXX make these configurable... XXX */
-       rp->mark_freq = rp->pending / 4;
-
-       niu_set_max_burst(np, rp);
-
-       return 0;
-}
-
-/* Choose the RBR block size (one page, capped at 32K) and the four RX
- * buffer sizes for this ring.  Sizes [0] and [1] are fixed at 256 and
- * 1024 bytes; [2] scales with MTU (jumbo frames get 4K/8K depending on
- * page size); [3] is always the full block.
- */
-static void niu_size_rbr(struct niu *np, struct rx_ring_info *rp)
-{
-       u16 bss;
-
-       bss = min(PAGE_SHIFT, 15);
-
-       rp->rbr_block_size = 1 << bss;
-       rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss);
-
-       rp->rbr_sizes[0] = 256;
-       rp->rbr_sizes[1] = 1024;
-       if (np->dev->mtu > ETH_DATA_LEN) {
-               switch (PAGE_SIZE) {
-               case 4 * 1024:
-                       rp->rbr_sizes[2] = 4096;
-                       break;
-
-               default:
-                       rp->rbr_sizes[2] = 8192;
-                       break;
-               }
-       } else {
-               rp->rbr_sizes[2] = 2048;
-       }
-       rp->rbr_sizes[3] = rp->rbr_block_size;
-}
-
-/* Allocate and initialize the per-port RX and TX ring arrays.  Channel
- * numbers for this port follow those of the lower-numbered ports, per
- * the parent's per-port channel partitioning.  On any failure, all
- * partially allocated state is released via niu_free_channels() and
- * the error is returned.
- */
-static int niu_alloc_channels(struct niu *np)
-{
-       struct niu_parent *parent = np->parent;
-       int first_rx_channel, first_tx_channel;
-       int num_rx_rings, num_tx_rings;
-       struct rx_ring_info *rx_rings;
-       struct tx_ring_info *tx_rings;
-       int i, port, err;
-
-       /* This port's first channels come after those of ports 0..port-1. */
-       port = np->port;
-       first_rx_channel = first_tx_channel = 0;
-       for (i = 0; i < port; i++) {
-               first_rx_channel += parent->rxchan_per_port[i];
-               first_tx_channel += parent->txchan_per_port[i];
-       }
-
-       num_rx_rings = parent->rxchan_per_port[port];
-       num_tx_rings = parent->txchan_per_port[port];
-
-       rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
-                          GFP_KERNEL);
-       err = -ENOMEM;
-       if (!rx_rings)
-               goto out_err;
-
-       /* Publish the count before the pointer for lockless readers. */
-       np->num_rx_rings = num_rx_rings;
-       smp_wmb();
-       np->rx_rings = rx_rings;
-
-       netif_set_real_num_rx_queues(np->dev, num_rx_rings);
-
-       for (i = 0; i < np->num_rx_rings; i++) {
-               struct rx_ring_info *rp = &np->rx_rings[i];
-
-               rp->np = np;
-               rp->rx_channel = first_rx_channel + i;
-
-               err = niu_alloc_rx_ring_info(np, rp);
-               if (err)
-                       goto out_err;
-
-               niu_size_rbr(np, rp);
-
-               /* XXX better defaults, configurable, etc... XXX */
-               rp->nonsyn_window = 64;
-               rp->nonsyn_threshold = rp->rcr_table_size - 64;
-               rp->syn_window = 64;
-               rp->syn_threshold = rp->rcr_table_size - 64;
-               rp->rcr_pkt_threshold = 16;
-               rp->rcr_timeout = 8;
-               rp->rbr_kick_thresh = RBR_REFILL_MIN;
-               if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page)
-                       rp->rbr_kick_thresh = rp->rbr_blocks_per_page;
-
-               err = niu_rbr_fill(np, rp, GFP_KERNEL);
-               if (err)
-                       /* Was "return err;" -- that leaked every ring
-                        * allocated so far; route through cleanup instead.
-                        */
-                       goto out_err;
-       }
-
-       tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
-                          GFP_KERNEL);
-       err = -ENOMEM;
-       if (!tx_rings)
-               goto out_err;
-
-       np->num_tx_rings = num_tx_rings;
-       smp_wmb();
-       np->tx_rings = tx_rings;
-
-       netif_set_real_num_tx_queues(np->dev, num_tx_rings);
-
-       for (i = 0; i < np->num_tx_rings; i++) {
-               struct tx_ring_info *rp = &np->tx_rings[i];
-
-               rp->np = np;
-               rp->tx_channel = first_tx_channel + i;
-
-               err = niu_alloc_tx_ring_info(np, rp);
-               if (err)
-                       goto out_err;
-       }
-
-       return 0;
-
-out_err:
-       niu_free_channels(np);
-       return err;
-}
-
-/* Poll TX_CS until the channel reports stop-and-go state, or give up
- * after 1000 reads and return -ENODEV.
- * NOTE(review): unlike niu_enable_rx_channel(), this loop has no
- * udelay() between reads, so the effective timeout is just 1000
- * back-to-back register reads -- confirm that is intentional.
- */
-static int niu_tx_cs_sng_poll(struct niu *np, int channel)
-{
-       int limit = 1000;
-
-       while (--limit > 0) {
-               u64 val = nr64(TX_CS(channel));
-               if (val & TX_CS_SNG_STATE)
-                       return 0;
-       }
-       return -ENODEV;
-}
-
-/* Request a graceful stop of a TX channel (STOP_N_GO) and wait for the
- * hardware to acknowledge via the stop-and-go state bit.  Returns 0 on
- * success or -ENODEV if the channel never settles.
- */
-static int niu_tx_channel_stop(struct niu *np, int channel)
-{
-       u64 val = nr64(TX_CS(channel));
-
-       val |= TX_CS_STOP_N_GO;
-       nw64(TX_CS(channel), val);
-
-       return niu_tx_cs_sng_poll(np, channel);
-}
-
-/* Poll TX_CS until the self-clearing reset bit drops, or give up after
- * 1000 reads and return -ENODEV.
- * NOTE(review): same as niu_tx_cs_sng_poll() -- no delay between reads;
- * confirm the tight-loop timeout is intentional.
- */
-static int niu_tx_cs_reset_poll(struct niu *np, int channel)
-{
-       int limit = 1000;
-
-       while (--limit > 0) {
-               u64 val = nr64(TX_CS(channel));
-               if (!(val & TX_CS_RST))
-                       return 0;
-       }
-       return -ENODEV;
-}
-
-/* Reset a TX channel: set TX_CS_RST, wait for the hardware to clear it,
- * and on success zero the ring kick register so the producer pointer
- * starts from a clean state.
- */
-static int niu_tx_channel_reset(struct niu *np, int channel)
-{
-       u64 val = nr64(TX_CS(channel));
-       int err;
-
-       val |= TX_CS_RST;
-       nw64(TX_CS(channel), val);
-
-       err = niu_tx_cs_reset_poll(np, channel);
-       if (!err)
-               nw64(TX_RING_KICK(channel), 0);
-
-       return err;
-}
-
-/* Program a TX channel's logical page registers: clear both page
- * mask/value/relocation sets and the handle, then mark both pages valid
- * and owned by this port's function number.
- */
-static int niu_tx_channel_lpage_init(struct niu *np, int channel)
-{
-       u64 val;
-
-       nw64(TX_LOG_MASK1(channel), 0);
-       nw64(TX_LOG_VAL1(channel), 0);
-       nw64(TX_LOG_MASK2(channel), 0);
-       nw64(TX_LOG_VAL2(channel), 0);
-       nw64(TX_LOG_PAGE_RELO1(channel), 0);
-       nw64(TX_LOG_PAGE_RELO2(channel), 0);
-       nw64(TX_LOG_PAGE_HDL(channel), 0);
-
-       val  = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT;
-       val |= (TX_LOG_PAGE_VLD_PAGE0 | TX_LOG_PAGE_VLD_PAGE1);
-       nw64(TX_LOG_PAGE_VLD(channel), val);
-
-       /* XXX TXDMA 32bit mode? XXX */
-
-       return 0;
-}
-
-/* Enable or disable this port's bit in the shared TXC_CONTROL register,
- * under the parent lock since all ports share the register.  When
- * disabling, the global TXC enable bit is also dropped once no port
- * bits remain set.
- */
-static void niu_txc_enable_port(struct niu *np, int on)
-{
-       unsigned long flags;
-       u64 val, mask;
-
-       niu_lock_parent(np, flags);
-       val = nr64(TXC_CONTROL);
-       mask = (u64)1 << np->port;
-       if (on) {
-               val |= TXC_CONTROL_ENABLE | mask;
-       } else {
-               val &= ~mask;
-               /* Last port out turns off the global enable too. */
-               if ((val & ~TXC_CONTROL_ENABLE) == 0)
-                       val &= ~TXC_CONTROL_ENABLE;
-       }
-       nw64(TXC_CONTROL, val);
-       niu_unlock_parent(np, flags);
-}
-
-/* Program this port's field of the shared TXC_INT_MASK register with
- * @imask.  The parent lock serializes the read-modify-write against the
- * other ports sharing the register.
- */
-static void niu_txc_set_imask(struct niu *np, u64 imask)
-{
-       unsigned long flags;
-       u64 val;
-
-       niu_lock_parent(np, flags);
-       val = nr64(TXC_INT_MASK);
-       val &= ~TXC_INT_MASK_VAL(np->port);
-       val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port));
-       /* Write the updated mask back.  The original code computed 'val'
-        * but never wrote it to the register, leaving this function a
-        * no-op.
-        */
-       nw64(TXC_INT_MASK, val);
-       niu_unlock_parent(np, flags);
-}
-
-/* Enable or disable TXC DMA for this port: build a bitmask of all of
- * the port's TX channels when enabling (all-zeros when disabling) and
- * write it to the per-port TXC_PORT_DMA register.
- */
-static void niu_txc_port_dma_enable(struct niu *np, int on)
-{
-       int i;
-       u64 enabled = 0;
-
-       if (on) {
-               for (i = 0; i < np->num_tx_rings; i++)
-                       enabled |= (1 << np->tx_rings[i].tx_channel);
-       }
-       nw64(TXC_PORT_DMA(np->port), enabled);
-}
-
-/* Bring one TX channel fully online: stop and reset it, program the
- * logical pages, burst limit, descriptor ring base/length and mailbox
- * address, then clear the channel status.  Both DMA addresses are
- * validated against the register field layouts before programming.
- * Returns 0 or a negative error.
- */
-static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
-{
-       int err, channel = rp->tx_channel;
-       u64 val, ring_len;
-
-       err = niu_tx_channel_stop(np, channel);
-       if (err)
-               return err;
-
-       err = niu_tx_channel_reset(np, channel);
-       if (err)
-               return err;
-
-       err = niu_tx_channel_lpage_init(np, channel);
-       if (err)
-               return err;
-
-       nw64(TXC_DMA_MAX(channel), rp->max_burst);
-       /* Unmask all TX channel event interrupts. */
-       nw64(TX_ENT_MSK(channel), 0);
-
-       if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
-                             TX_RNG_CFIG_STADDR)) {
-               netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
-                          channel, (unsigned long long)rp->descr_dma);
-               return -EINVAL;
-       }
-
-       /* The length field in TX_RNG_CFIG is measured in 64-byte
-        * blocks.  rp->pending is the number of TX descriptors in
-        * our ring, 8 bytes each, thus we divide by 8 bytes more
-        * to get the proper value the chip wants.
-        */
-       ring_len = (rp->pending / 8);
-
-       val = ((ring_len << TX_RNG_CFIG_LEN_SHIFT) |
-              rp->descr_dma);
-       nw64(TX_RNG_CFIG(channel), val);
-
-       if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
-           ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
-               netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
-                           channel, (unsigned long long)rp->mbox_dma);
-               return -EINVAL;
-       }
-       nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
-       nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR);
-
-       nw64(TX_CS(channel), 0);
-
-       rp->last_pkt_cnt = 0;
-
-       return 0;
-}
-
-/* Program this port's RDC (receive DMA channel) group tables: each
- * table slot is filled with the RX channel chosen by the parent's
- * configuration, and the port's default RDC is set.
- */
-static void niu_init_rdc_groups(struct niu *np)
-{
-       struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port];
-       int i, first_table_num = tp->first_table_num;
-
-       for (i = 0; i < tp->num_tables; i++) {
-               struct rdc_table *tbl = &tp->tables[i];
-               int this_table = first_table_num + i;
-               int slot;
-
-               for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++)
-                       nw64(RDC_TBL(this_table, slot),
-                            tbl->rxdma_channel[slot]);
-       }
-
-       nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]);
-}
-
-/* Set this port's deficit-round-robin weight according to its link
- * type: the 10G default for 10G ports, the 1G default otherwise.
- */
-static void niu_init_drr_weight(struct niu *np)
-{
-       int type = phy_decode(np->parent->port_phy, np->port);
-       u64 weight;
-
-       if (type == PORT_TYPE_10G)
-               weight = PT_DRR_WEIGHT_DEFAULT_10G;
-       else
-               weight = PT_DRR_WEIGHT_DEFAULT_1G;
-
-       nw64(PT_DRR_WT(np->port), weight);
-}
-
-/* Point the primary, multicast and every alternate MAC address at this
- * port's first RDC table (with MAC preference enabled).  Returns 0 or
- * the first error from the table-programming helpers.
- */
-static int niu_init_hostinfo(struct niu *np)
-{
-       struct niu_parent *parent = np->parent;
-       struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
-       int i, err, num_alt = niu_num_alt_addr(np);
-       int first_rdc_table = tp->first_table_num;
-
-       err = niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
-       if (err)
-               return err;
-
-       err = niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
-       if (err)
-               return err;
-
-       for (i = 0; i < num_alt; i++) {
-               err = niu_set_alt_mac_rdc_table(np, i, first_rdc_table, 1);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-/* Reset one RX channel by setting RXDMA_CFIG1_RST and waiting (up to
- * 1000 iterations, 10us apart) for the hardware to clear it.
- */
-static int niu_rx_channel_reset(struct niu *np, int channel)
-{
-       return niu_set_and_wait_clear(np, RXDMA_CFIG1(channel),
-                                     RXDMA_CFIG1_RST, 1000, 10,
-                                     "RXDMA_CFIG1");
-}
-
-/* Program an RX channel's logical page registers: clear both page
- * mask/value/relocation sets and the handle, then mark both pages valid
- * and owned by this port's function number.  Mirrors
- * niu_tx_channel_lpage_init() for the TX side.
- */
-static int niu_rx_channel_lpage_init(struct niu *np, int channel)
-{
-       u64 val;
-
-       nw64(RX_LOG_MASK1(channel), 0);
-       nw64(RX_LOG_VAL1(channel), 0);
-       nw64(RX_LOG_MASK2(channel), 0);
-       nw64(RX_LOG_VAL2(channel), 0);
-       nw64(RX_LOG_PAGE_RELO1(channel), 0);
-       nw64(RX_LOG_PAGE_RELO2(channel), 0);
-       nw64(RX_LOG_PAGE_HDL(channel), 0);
-
-       val  = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT;
-       val |= (RX_LOG_PAGE_VLD_PAGE0 | RX_LOG_PAGE_VLD_PAGE1);
-       nw64(RX_LOG_PAGE_VLD(channel), val);
-
-       return 0;
-}
-
-/* Program the channel's RED (random early discard) parameters from the
- * ring's syn/nonsyn window and threshold values chosen at allocation
- * time in niu_alloc_channels().
- */
-static void niu_rx_channel_wred_init(struct niu *np, struct rx_ring_info *rp)
-{
-       u64 val;
-
-       val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) |
-              ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) |
-              ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) |
-              ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT));
-       nw64(RDC_RED_PARA(rp->rx_channel), val);
-}
-
-/* Encode the ring's RBR block size and the three variable buffer sizes
- * (rbr_sizes[0..2], chosen by niu_size_rbr()) into the RBR_CFIG_B
- * register value.  Returns 0 with the encoded value in *ret, or
- * -EINVAL (with *ret left 0) if any size has no hardware encoding.
- */
-static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret)
-{
-       u64 val = 0;
-
-       *ret = 0;
-       switch (rp->rbr_block_size) {
-       case 4 * 1024:
-               val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT);
-               break;
-       case 8 * 1024:
-               val |= (RBR_BLKSIZE_8K << RBR_CFIG_B_BLKSIZE_SHIFT);
-               break;
-       case 16 * 1024:
-               val |= (RBR_BLKSIZE_16K << RBR_CFIG_B_BLKSIZE_SHIFT);
-               break;
-       case 32 * 1024:
-               val |= (RBR_BLKSIZE_32K << RBR_CFIG_B_BLKSIZE_SHIFT);
-               break;
-       default:
-               return -EINVAL;
-       }
-       val |= RBR_CFIG_B_VLD2;
-       switch (rp->rbr_sizes[2]) {
-       case 2 * 1024:
-               val |= (RBR_BUFSZ2_2K << RBR_CFIG_B_BUFSZ2_SHIFT);
-               break;
-       case 4 * 1024:
-               val |= (RBR_BUFSZ2_4K << RBR_CFIG_B_BUFSZ2_SHIFT);
-               break;
-       case 8 * 1024:
-               val |= (RBR_BUFSZ2_8K << RBR_CFIG_B_BUFSZ2_SHIFT);
-               break;
-       case 16 * 1024:
-               val |= (RBR_BUFSZ2_16K << RBR_CFIG_B_BUFSZ2_SHIFT);
-               break;
-
-       default:
-               return -EINVAL;
-       }
-       val |= RBR_CFIG_B_VLD1;
-       switch (rp->rbr_sizes[1]) {
-       case 1 * 1024:
-               val |= (RBR_BUFSZ1_1K << RBR_CFIG_B_BUFSZ1_SHIFT);
-               break;
-       case 2 * 1024:
-               val |= (RBR_BUFSZ1_2K << RBR_CFIG_B_BUFSZ1_SHIFT);
-               break;
-       case 4 * 1024:
-               val |= (RBR_BUFSZ1_4K << RBR_CFIG_B_BUFSZ1_SHIFT);
-               break;
-       case 8 * 1024:
-               val |= (RBR_BUFSZ1_8K << RBR_CFIG_B_BUFSZ1_SHIFT);
-               break;
-
-       default:
-               return -EINVAL;
-       }
-       val |= RBR_CFIG_B_VLD0;
-       switch (rp->rbr_sizes[0]) {
-       case 256:
-               val |= (RBR_BUFSZ0_256 << RBR_CFIG_B_BUFSZ0_SHIFT);
-               break;
-       case 512:
-               val |= (RBR_BUFSZ0_512 << RBR_CFIG_B_BUFSZ0_SHIFT);
-               break;
-       case 1 * 1024:
-               val |= (RBR_BUFSZ0_1K << RBR_CFIG_B_BUFSZ0_SHIFT);
-               break;
-       case 2 * 1024:
-               val |= (RBR_BUFSZ0_2K << RBR_CFIG_B_BUFSZ0_SHIFT);
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       *ret = val;
-       return 0;
-}
-
-static int niu_enable_rx_channel(struct niu *np, int channel, int on)
-{
-       u64 val = nr64(RXDMA_CFIG1(channel));
-       int limit;
-
-       if (on)
-               val |= RXDMA_CFIG1_EN;
-       else
-               val &= ~RXDMA_CFIG1_EN;
-       nw64(RXDMA_CFIG1(channel), val);
-
-       limit = 1000;
-       while (--limit > 0) {
-               if (nr64(RXDMA_CFIG1(channel)) & RXDMA_CFIG1_QST)
-                       break;
-               udelay(10);
-       }
-       if (limit <= 0)
-               return -ENODEV;
-       return 0;
-}
-
-/* Bring one RX channel fully online: reset it, program logical pages
- * and WRED, unmask its events, configure the mailbox, RBR and RCR ring
- * addresses/sizes and the completion thresholds, enable the channel,
- * then kick the RBR with the buffers already posted by niu_rbr_fill().
- * Returns 0 or a negative error.
- */
-static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
-{
-       int err, channel = rp->rx_channel;
-       u64 val;
-
-       err = niu_rx_channel_reset(np, channel);
-       if (err)
-               return err;
-
-       err = niu_rx_channel_lpage_init(np, channel);
-       if (err)
-               return err;
-
-       niu_rx_channel_wred_init(np, rp);
-
-       nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_RBR_EMPTY);
-       nw64(RX_DMA_CTL_STAT(channel),
-            (RX_DMA_CTL_STAT_MEX |
-             RX_DMA_CTL_STAT_RCRTHRES |
-             RX_DMA_CTL_STAT_RCRTO |
-             RX_DMA_CTL_STAT_RBR_EMPTY));
-       /* Mailbox address is split across CFIG1 (high) and CFIG2 (low). */
-       nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
-       nw64(RXDMA_CFIG2(channel),
-            ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
-             RXDMA_CFIG2_FULL_HDR));
-       nw64(RBR_CFIG_A(channel),
-            ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
-            (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
-       err = niu_compute_rbr_cfig_b(rp, &val);
-       if (err)
-               return err;
-       nw64(RBR_CFIG_B(channel), val);
-       nw64(RCRCFIG_A(channel),
-            ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) |
-            (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR)));
-       nw64(RCRCFIG_B(channel),
-            ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) |
-            RCRCFIG_B_ENTOUT |
-            ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT));
-
-       err = niu_enable_rx_channel(np, channel, 1);
-       if (err)
-               return err;
-
-       nw64(RBR_KICK(channel), rp->rbr_index);
-
-       val = nr64(RX_DMA_CTL_STAT(channel));
-       val |= RX_DMA_CTL_STAT_RBR_EMPTY;
-       nw64(RX_DMA_CTL_STAT(channel), val);
-
-       return 0;
-}
-
-/* Global RX bring-up for this port: program the shared RXDMA clock
- * divider and seed the RED random generator (from jiffies_64) under the
- * parent lock, set up RDC groups, DRR weights and host info, then
- * initialize each RX channel in turn.  Returns 0 or the first error.
- */
-static int niu_init_rx_channels(struct niu *np)
-{
-       unsigned long flags;
-       u64 seed = jiffies_64;
-       int err, i;
-
-       niu_lock_parent(np, flags);
-       nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider);
-       nw64(RED_RAN_INIT, RED_RAN_INIT_OPMODE | (seed & RED_RAN_INIT_VAL));
-       niu_unlock_parent(np, flags);
-
-       /* XXX RXDMA 32bit mode? XXX */
-
-       niu_init_rdc_groups(np);
-       niu_init_drr_weight(np);
-
-       err = niu_init_hostinfo(np);
-       if (err)
-               return err;
-
-       for (i = 0; i < np->num_rx_rings; i++) {
-               struct rx_ring_info *rp = &np->rx_rings[i];
-
-               err = niu_init_one_rx_channel(np, rp);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-/* Install a TCAM rule at the top of this port's TCAM partition that
- * matches packets carrying no L4 port info (e.g. IP fragments) and
- * steers them with a zero offset.  On success the parent's shadow entry
- * is marked valid and the classifier's valid-entry count is bumped.
- */
-static int niu_set_ip_frag_rule(struct niu *np)
-{
-       struct niu_parent *parent = np->parent;
-       struct niu_classifier *cp = &np->clas;
-       struct niu_tcam_entry *tp;
-       int index, err;
-
-       index = cp->tcam_top;
-       tp = &parent->tcam[index];
-
-       /* Note that the noport bit is the same in both ipv4 and
-        * ipv6 format TCAM entries.
-        */
-       memset(tp, 0, sizeof(*tp));
-       tp->key[1] = TCAM_V4KEY1_NOPORT;
-       tp->key_mask[1] = TCAM_V4KEY1_NOPORT;
-       tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
-                         ((u64)0 << TCAM_ASSOCDATA_OFFSET_SHIFT));
-       err = tcam_write(np, index, tp->key, tp->key_mask);
-       if (err)
-               return err;
-       err = tcam_assoc_write(np, index, tp->assoc_data);
-       if (err)
-               return err;
-       tp->valid = 1;
-       cp->tcam_valid_entries++;
-
-       return 0;
-}
-
-static int niu_init_classifier_hw(struct niu *np)
-{
-       struct niu_parent *parent = np->parent;
-       struct niu_classifier *cp = &np->clas;
-       int i, err;
-
-       nw64(H1POLY, cp->h1_init);
-       nw64(H2POLY, cp->h2_init);
-
-       err = niu_init_hostinfo(np);
-       if (err)
-               return err;
-
-       for (i = 0; i < ENET_VLAN_TBL_NUM_ENTRIES; i++) {
-               struct niu_vlan_rdc *vp = &cp->vlan_mappings[i];
-
-               vlan_tbl_write(np, i, np->port,
-                              vp->vlan_pref, vp->rdc_num);
-       }
-
-       for (i = 0; i < cp->num_alt_mac_mappings; i++) {
-               struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i];
-
-               err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num,
-                                               ap->rdc_num, ap->mac_pref);
-               if (err)
-                       return err;
-       }
-
-       for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
-               int index = i - CLASS_CODE_USER_PROG1;
-
-               err = niu_set_tcam_key(np, i, parent->tcam_key[index]);
-               if (err)
-                       return err;
-               err = niu_set_flow_key(np, i, parent->flow_key[index]);
-               if (err)
-                       return err;
-       }
-
-       err = niu_set_ip_frag_rule(np);
-       if (err)
-               return err;
-
-       tcam_enable(np, 1);
-
-       return 0;
-}
-
-static int niu_zcp_write(struct niu *np, int index, u64 *data)
-{
-       nw64(ZCP_RAM_DATA0, data[0]);
-       nw64(ZCP_RAM_DATA1, data[1]);
-       nw64(ZCP_RAM_DATA2, data[2]);
-       nw64(ZCP_RAM_DATA3, data[3]);
-       nw64(ZCP_RAM_DATA4, data[4]);
-       nw64(ZCP_RAM_BE, ZCP_RAM_BE_VAL);
-       nw64(ZCP_RAM_ACC,
-            (ZCP_RAM_ACC_WRITE |
-             (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
-             (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
-
-       return niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
-                                  1000, 100);
-}
-
-static int niu_zcp_read(struct niu *np, int index, u64 *data)
-{
-       int err;
-
-       err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
-                                 1000, 100);
-       if (err) {
-               netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
-                          (unsigned long long)nr64(ZCP_RAM_ACC));
-               return err;
-       }
-
-       nw64(ZCP_RAM_ACC,
-            (ZCP_RAM_ACC_READ |
-             (0 << ZCP_RAM_ACC_ZFCID_SHIFT) |
-             (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT)));
-
-       err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
-                                 1000, 100);
-       if (err) {
-               netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
-                          (unsigned long long)nr64(ZCP_RAM_ACC));
-               return err;
-       }
-
-       data[0] = nr64(ZCP_RAM_DATA0);
-       data[1] = nr64(ZCP_RAM_DATA1);
-       data[2] = nr64(ZCP_RAM_DATA2);
-       data[3] = nr64(ZCP_RAM_DATA3);
-       data[4] = nr64(ZCP_RAM_DATA4);
-
-       return 0;
-}
-
-static void niu_zcp_cfifo_reset(struct niu *np)
-{
-       u64 val = nr64(RESET_CFIFO);
-
-       val |= RESET_CFIFO_RST(np->port);
-       nw64(RESET_CFIFO, val);
-       udelay(10);
-
-       val &= ~RESET_CFIFO_RST(np->port);
-       nw64(RESET_CFIFO, val);
-}
-
-static int niu_init_zcp(struct niu *np)
-{
-       u64 data[5], rbuf[5];
-       int i, max, err;
-
-       if (np->parent->plat_type != PLAT_TYPE_NIU) {
-               if (np->port == 0 || np->port == 1)
-                       max = ATLAS_P0_P1_CFIFO_ENTRIES;
-               else
-                       max = ATLAS_P2_P3_CFIFO_ENTRIES;
-       } else
-               max = NIU_CFIFO_ENTRIES;
-
-       data[0] = 0;
-       data[1] = 0;
-       data[2] = 0;
-       data[3] = 0;
-       data[4] = 0;
-
-       for (i = 0; i < max; i++) {
-               err = niu_zcp_write(np, i, data);
-               if (err)
-                       return err;
-               err = niu_zcp_read(np, i, rbuf);
-               if (err)
-                       return err;
-       }
-
-       niu_zcp_cfifo_reset(np);
-       nw64(CFIFO_ECC(np->port), 0);
-       nw64(ZCP_INT_STAT, ZCP_INT_STAT_ALL);
-       (void) nr64(ZCP_INT_STAT);
-       nw64(ZCP_INT_MASK, ZCP_INT_MASK_ALL);
-
-       return 0;
-}
-
-static void niu_ipp_write(struct niu *np, int index, u64 *data)
-{
-       u64 val = nr64_ipp(IPP_CFIG);
-
-       nw64_ipp(IPP_CFIG, val | IPP_CFIG_DFIFO_PIO_W);
-       nw64_ipp(IPP_DFIFO_WR_PTR, index);
-       nw64_ipp(IPP_DFIFO_WR0, data[0]);
-       nw64_ipp(IPP_DFIFO_WR1, data[1]);
-       nw64_ipp(IPP_DFIFO_WR2, data[2]);
-       nw64_ipp(IPP_DFIFO_WR3, data[3]);
-       nw64_ipp(IPP_DFIFO_WR4, data[4]);
-       nw64_ipp(IPP_CFIG, val & ~IPP_CFIG_DFIFO_PIO_W);
-}
-
-static void niu_ipp_read(struct niu *np, int index, u64 *data)
-{
-       nw64_ipp(IPP_DFIFO_RD_PTR, index);
-       data[0] = nr64_ipp(IPP_DFIFO_RD0);
-       data[1] = nr64_ipp(IPP_DFIFO_RD1);
-       data[2] = nr64_ipp(IPP_DFIFO_RD2);
-       data[3] = nr64_ipp(IPP_DFIFO_RD3);
-       data[4] = nr64_ipp(IPP_DFIFO_RD4);
-}
-
-static int niu_ipp_reset(struct niu *np)
-{
-       return niu_set_and_wait_clear_ipp(np, IPP_CFIG, IPP_CFIG_SOFT_RST,
-                                         1000, 100, "IPP_CFIG");
-}
-
-static int niu_init_ipp(struct niu *np)
-{
-       u64 data[5], rbuf[5], val;
-       int i, max, err;
-
-       if (np->parent->plat_type != PLAT_TYPE_NIU) {
-               if (np->port == 0 || np->port == 1)
-                       max = ATLAS_P0_P1_DFIFO_ENTRIES;
-               else
-                       max = ATLAS_P2_P3_DFIFO_ENTRIES;
-       } else
-               max = NIU_DFIFO_ENTRIES;
-
-       data[0] = 0;
-       data[1] = 0;
-       data[2] = 0;
-       data[3] = 0;
-       data[4] = 0;
-
-       for (i = 0; i < max; i++) {
-               niu_ipp_write(np, i, data);
-               niu_ipp_read(np, i, rbuf);
-       }
-
-       (void) nr64_ipp(IPP_INT_STAT);
-       (void) nr64_ipp(IPP_INT_STAT);
-
-       err = niu_ipp_reset(np);
-       if (err)
-               return err;
-
-       (void) nr64_ipp(IPP_PKT_DIS);
-       (void) nr64_ipp(IPP_BAD_CS_CNT);
-       (void) nr64_ipp(IPP_ECC);
-
-       (void) nr64_ipp(IPP_INT_STAT);
-
-       nw64_ipp(IPP_MSK, ~IPP_MSK_ALL);
-
-       val = nr64_ipp(IPP_CFIG);
-       val &= ~IPP_CFIG_IP_MAX_PKT;
-       val |= (IPP_CFIG_IPP_ENABLE |
-               IPP_CFIG_DFIFO_ECC_EN |
-               IPP_CFIG_DROP_BAD_CRC |
-               IPP_CFIG_CKSUM_EN |
-               (0x1ffff << IPP_CFIG_IP_MAX_PKT_SHIFT));
-       nw64_ipp(IPP_CFIG, val);
-
-       return 0;
-}
-
-static void niu_handle_led(struct niu *np, int status)
-{
-       u64 val;
-       val = nr64_mac(XMAC_CONFIG);
-
-       if ((np->flags & NIU_FLAGS_10G) != 0 &&
-           (np->flags & NIU_FLAGS_FIBER) != 0) {
-               if (status) {
-                       val |= XMAC_CONFIG_LED_POLARITY;
-                       val &= ~XMAC_CONFIG_FORCE_LED_ON;
-               } else {
-                       val |= XMAC_CONFIG_FORCE_LED_ON;
-                       val &= ~XMAC_CONFIG_LED_POLARITY;
-               }
-       }
-
-       nw64_mac(XMAC_CONFIG, val);
-}
-
-static void niu_init_xif_xmac(struct niu *np)
-{
-       struct niu_link_config *lp = &np->link_config;
-       u64 val;
-
-       if (np->flags & NIU_FLAGS_XCVR_SERDES) {
-               val = nr64(MIF_CONFIG);
-               val |= MIF_CONFIG_ATCA_GE;
-               nw64(MIF_CONFIG, val);
-       }
-
-       val = nr64_mac(XMAC_CONFIG);
-       val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
-
-       val |= XMAC_CONFIG_TX_OUTPUT_EN;
-
-       if (lp->loopback_mode == LOOPBACK_MAC) {
-               val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
-               val |= XMAC_CONFIG_LOOPBACK;
-       } else {
-               val &= ~XMAC_CONFIG_LOOPBACK;
-       }
-
-       if (np->flags & NIU_FLAGS_10G) {
-               val &= ~XMAC_CONFIG_LFS_DISABLE;
-       } else {
-               val |= XMAC_CONFIG_LFS_DISABLE;
-               if (!(np->flags & NIU_FLAGS_FIBER) &&
-                   !(np->flags & NIU_FLAGS_XCVR_SERDES))
-                       val |= XMAC_CONFIG_1G_PCS_BYPASS;
-               else
-                       val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
-       }
-
-       val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
-
-       if (lp->active_speed == SPEED_100)
-               val |= XMAC_CONFIG_SEL_CLK_25MHZ;
-       else
-               val &= ~XMAC_CONFIG_SEL_CLK_25MHZ;
-
-       nw64_mac(XMAC_CONFIG, val);
-
-       val = nr64_mac(XMAC_CONFIG);
-       val &= ~XMAC_CONFIG_MODE_MASK;
-       if (np->flags & NIU_FLAGS_10G) {
-               val |= XMAC_CONFIG_MODE_XGMII;
-       } else {
-               if (lp->active_speed == SPEED_1000)
-                       val |= XMAC_CONFIG_MODE_GMII;
-               else
-                       val |= XMAC_CONFIG_MODE_MII;
-       }
-
-       nw64_mac(XMAC_CONFIG, val);
-}
-
-static void niu_init_xif_bmac(struct niu *np)
-{
-       struct niu_link_config *lp = &np->link_config;
-       u64 val;
-
-       val = BMAC_XIF_CONFIG_TX_OUTPUT_EN;
-
-       if (lp->loopback_mode == LOOPBACK_MAC)
-               val |= BMAC_XIF_CONFIG_MII_LOOPBACK;
-       else
-               val &= ~BMAC_XIF_CONFIG_MII_LOOPBACK;
-
-       if (lp->active_speed == SPEED_1000)
-               val |= BMAC_XIF_CONFIG_GMII_MODE;
-       else
-               val &= ~BMAC_XIF_CONFIG_GMII_MODE;
-
-       val &= ~(BMAC_XIF_CONFIG_LINK_LED |
-                BMAC_XIF_CONFIG_LED_POLARITY);
-
-       if (!(np->flags & NIU_FLAGS_10G) &&
-           !(np->flags & NIU_FLAGS_FIBER) &&
-           lp->active_speed == SPEED_100)
-               val |= BMAC_XIF_CONFIG_25MHZ_CLOCK;
-       else
-               val &= ~BMAC_XIF_CONFIG_25MHZ_CLOCK;
-
-       nw64_mac(BMAC_XIF_CONFIG, val);
-}
-
-static void niu_init_xif(struct niu *np)
-{
-       if (np->flags & NIU_FLAGS_XMAC)
-               niu_init_xif_xmac(np);
-       else
-               niu_init_xif_bmac(np);
-}
-
-static void niu_pcs_mii_reset(struct niu *np)
-{
-       int limit = 1000;
-       u64 val = nr64_pcs(PCS_MII_CTL);
-       val |= PCS_MII_CTL_RST;
-       nw64_pcs(PCS_MII_CTL, val);
-       while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
-               udelay(100);
-               val = nr64_pcs(PCS_MII_CTL);
-       }
-}
-
-static void niu_xpcs_reset(struct niu *np)
-{
-       int limit = 1000;
-       u64 val = nr64_xpcs(XPCS_CONTROL1);
-       val |= XPCS_CONTROL1_RESET;
-       nw64_xpcs(XPCS_CONTROL1, val);
-       while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
-               udelay(100);
-               val = nr64_xpcs(XPCS_CONTROL1);
-       }
-}
-
-static int niu_init_pcs(struct niu *np)
-{
-       struct niu_link_config *lp = &np->link_config;
-       u64 val;
-
-       switch (np->flags & (NIU_FLAGS_10G |
-                            NIU_FLAGS_FIBER |
-                            NIU_FLAGS_XCVR_SERDES)) {
-       case NIU_FLAGS_FIBER:
-               /* 1G fiber */
-               nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
-               nw64_pcs(PCS_DPATH_MODE, 0);
-               niu_pcs_mii_reset(np);
-               break;
-
-       case NIU_FLAGS_10G:
-       case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
-       case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
-               /* 10G SERDES */
-               if (!(np->flags & NIU_FLAGS_XMAC))
-                       return -EINVAL;
-
-               /* 10G copper or fiber */
-               val = nr64_mac(XMAC_CONFIG);
-               val &= ~XMAC_CONFIG_10G_XPCS_BYPASS;
-               nw64_mac(XMAC_CONFIG, val);
-
-               niu_xpcs_reset(np);
-
-               val = nr64_xpcs(XPCS_CONTROL1);
-               if (lp->loopback_mode == LOOPBACK_PHY)
-                       val |= XPCS_CONTROL1_LOOPBACK;
-               else
-                       val &= ~XPCS_CONTROL1_LOOPBACK;
-               nw64_xpcs(XPCS_CONTROL1, val);
-
-               nw64_xpcs(XPCS_DESKEW_ERR_CNT, 0);
-               (void) nr64_xpcs(XPCS_SYMERR_CNT01);
-               (void) nr64_xpcs(XPCS_SYMERR_CNT23);
-               break;
-
-
-       case NIU_FLAGS_XCVR_SERDES:
-               /* 1G SERDES */
-               niu_pcs_mii_reset(np);
-               nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
-               nw64_pcs(PCS_DPATH_MODE, 0);
-               break;
-
-       case 0:
-               /* 1G copper */
-       case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
-               /* 1G RGMII FIBER */
-               nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
-               niu_pcs_mii_reset(np);
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int niu_reset_tx_xmac(struct niu *np)
-{
-       return niu_set_and_wait_clear_mac(np, XTXMAC_SW_RST,
-                                         (XTXMAC_SW_RST_REG_RS |
-                                          XTXMAC_SW_RST_SOFT_RST),
-                                         1000, 100, "XTXMAC_SW_RST");
-}
-
-static int niu_reset_tx_bmac(struct niu *np)
-{
-       int limit;
-
-       nw64_mac(BTXMAC_SW_RST, BTXMAC_SW_RST_RESET);
-       limit = 1000;
-       while (--limit >= 0) {
-               if (!(nr64_mac(BTXMAC_SW_RST) & BTXMAC_SW_RST_RESET))
-                       break;
-               udelay(100);
-       }
-       if (limit < 0) {
-               dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
-                       np->port,
-                       (unsigned long long) nr64_mac(BTXMAC_SW_RST));
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static int niu_reset_tx_mac(struct niu *np)
-{
-       if (np->flags & NIU_FLAGS_XMAC)
-               return niu_reset_tx_xmac(np);
-       else
-               return niu_reset_tx_bmac(np);
-}
-
-static void niu_init_tx_xmac(struct niu *np, u64 min, u64 max)
-{
-       u64 val;
-
-       val = nr64_mac(XMAC_MIN);
-       val &= ~(XMAC_MIN_TX_MIN_PKT_SIZE |
-                XMAC_MIN_RX_MIN_PKT_SIZE);
-       val |= (min << XMAC_MIN_RX_MIN_PKT_SIZE_SHFT);
-       val |= (min << XMAC_MIN_TX_MIN_PKT_SIZE_SHFT);
-       nw64_mac(XMAC_MIN, val);
-
-       nw64_mac(XMAC_MAX, max);
-
-       nw64_mac(XTXMAC_STAT_MSK, ~(u64)0);
-
-       val = nr64_mac(XMAC_IPG);
-       if (np->flags & NIU_FLAGS_10G) {
-               val &= ~XMAC_IPG_IPG_XGMII;
-               val |= (IPG_12_15_XGMII << XMAC_IPG_IPG_XGMII_SHIFT);
-       } else {
-               val &= ~XMAC_IPG_IPG_MII_GMII;
-               val |= (IPG_12_MII_GMII << XMAC_IPG_IPG_MII_GMII_SHIFT);
-       }
-       nw64_mac(XMAC_IPG, val);
-
-       val = nr64_mac(XMAC_CONFIG);
-       val &= ~(XMAC_CONFIG_ALWAYS_NO_CRC |
-                XMAC_CONFIG_STRETCH_MODE |
-                XMAC_CONFIG_VAR_MIN_IPG_EN |
-                XMAC_CONFIG_TX_ENABLE);
-       nw64_mac(XMAC_CONFIG, val);
-
-       nw64_mac(TXMAC_FRM_CNT, 0);
-       nw64_mac(TXMAC_BYTE_CNT, 0);
-}
-
-static void niu_init_tx_bmac(struct niu *np, u64 min, u64 max)
-{
-       u64 val;
-
-       nw64_mac(BMAC_MIN_FRAME, min);
-       nw64_mac(BMAC_MAX_FRAME, max);
-
-       nw64_mac(BTXMAC_STATUS_MASK, ~(u64)0);
-       nw64_mac(BMAC_CTRL_TYPE, 0x8808);
-       nw64_mac(BMAC_PREAMBLE_SIZE, 7);
-
-       val = nr64_mac(BTXMAC_CONFIG);
-       val &= ~(BTXMAC_CONFIG_FCS_DISABLE |
-                BTXMAC_CONFIG_ENABLE);
-       nw64_mac(BTXMAC_CONFIG, val);
-}
-
-static void niu_init_tx_mac(struct niu *np)
-{
-       u64 min, max;
-
-       min = 64;
-       if (np->dev->mtu > ETH_DATA_LEN)
-               max = 9216;
-       else
-               max = 1522;
-
-       /* The XMAC_MIN register only accepts values for TX min which
-        * have the low 3 bits cleared.
-        */
-       BUG_ON(min & 0x7);
-
-       if (np->flags & NIU_FLAGS_XMAC)
-               niu_init_tx_xmac(np, min, max);
-       else
-               niu_init_tx_bmac(np, min, max);
-}
-
-static int niu_reset_rx_xmac(struct niu *np)
-{
-       int limit;
-
-       nw64_mac(XRXMAC_SW_RST,
-                XRXMAC_SW_RST_REG_RS | XRXMAC_SW_RST_SOFT_RST);
-       limit = 1000;
-       while (--limit >= 0) {
-               if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
-                                                XRXMAC_SW_RST_SOFT_RST)))
-                       break;
-               udelay(100);
-       }
-       if (limit < 0) {
-               dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
-                       np->port,
-                       (unsigned long long) nr64_mac(XRXMAC_SW_RST));
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static int niu_reset_rx_bmac(struct niu *np)
-{
-       int limit;
-
-       nw64_mac(BRXMAC_SW_RST, BRXMAC_SW_RST_RESET);
-       limit = 1000;
-       while (--limit >= 0) {
-               if (!(nr64_mac(BRXMAC_SW_RST) & BRXMAC_SW_RST_RESET))
-                       break;
-               udelay(100);
-       }
-       if (limit < 0) {
-               dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
-                       np->port,
-                       (unsigned long long) nr64_mac(BRXMAC_SW_RST));
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static int niu_reset_rx_mac(struct niu *np)
-{
-       if (np->flags & NIU_FLAGS_XMAC)
-               return niu_reset_rx_xmac(np);
-       else
-               return niu_reset_rx_bmac(np);
-}
-
-static void niu_init_rx_xmac(struct niu *np)
-{
-       struct niu_parent *parent = np->parent;
-       struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
-       int first_rdc_table = tp->first_table_num;
-       unsigned long i;
-       u64 val;
-
-       nw64_mac(XMAC_ADD_FILT0, 0);
-       nw64_mac(XMAC_ADD_FILT1, 0);
-       nw64_mac(XMAC_ADD_FILT2, 0);
-       nw64_mac(XMAC_ADD_FILT12_MASK, 0);
-       nw64_mac(XMAC_ADD_FILT00_MASK, 0);
-       for (i = 0; i < MAC_NUM_HASH; i++)
-               nw64_mac(XMAC_HASH_TBL(i), 0);
-       nw64_mac(XRXMAC_STAT_MSK, ~(u64)0);
-       niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
-       niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
-
-       val = nr64_mac(XMAC_CONFIG);
-       val &= ~(XMAC_CONFIG_RX_MAC_ENABLE |
-                XMAC_CONFIG_PROMISCUOUS |
-                XMAC_CONFIG_PROMISC_GROUP |
-                XMAC_CONFIG_ERR_CHK_DIS |
-                XMAC_CONFIG_RX_CRC_CHK_DIS |
-                XMAC_CONFIG_RESERVED_MULTICAST |
-                XMAC_CONFIG_RX_CODEV_CHK_DIS |
-                XMAC_CONFIG_ADDR_FILTER_EN |
-                XMAC_CONFIG_RCV_PAUSE_ENABLE |
-                XMAC_CONFIG_STRIP_CRC |
-                XMAC_CONFIG_PASS_FLOW_CTRL |
-                XMAC_CONFIG_MAC2IPP_PKT_CNT_EN);
-       val |= (XMAC_CONFIG_HASH_FILTER_EN);
-       nw64_mac(XMAC_CONFIG, val);
-
-       nw64_mac(RXMAC_BT_CNT, 0);
-       nw64_mac(RXMAC_BC_FRM_CNT, 0);
-       nw64_mac(RXMAC_MC_FRM_CNT, 0);
-       nw64_mac(RXMAC_FRAG_CNT, 0);
-       nw64_mac(RXMAC_HIST_CNT1, 0);
-       nw64_mac(RXMAC_HIST_CNT2, 0);
-       nw64_mac(RXMAC_HIST_CNT3, 0);
-       nw64_mac(RXMAC_HIST_CNT4, 0);
-       nw64_mac(RXMAC_HIST_CNT5, 0);
-       nw64_mac(RXMAC_HIST_CNT6, 0);
-       nw64_mac(RXMAC_HIST_CNT7, 0);
-       nw64_mac(RXMAC_MPSZER_CNT, 0);
-       nw64_mac(RXMAC_CRC_ER_CNT, 0);
-       nw64_mac(RXMAC_CD_VIO_CNT, 0);
-       nw64_mac(LINK_FAULT_CNT, 0);
-}
-
-static void niu_init_rx_bmac(struct niu *np)
-{
-       struct niu_parent *parent = np->parent;
-       struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port];
-       int first_rdc_table = tp->first_table_num;
-       unsigned long i;
-       u64 val;
-
-       nw64_mac(BMAC_ADD_FILT0, 0);
-       nw64_mac(BMAC_ADD_FILT1, 0);
-       nw64_mac(BMAC_ADD_FILT2, 0);
-       nw64_mac(BMAC_ADD_FILT12_MASK, 0);
-       nw64_mac(BMAC_ADD_FILT00_MASK, 0);
-       for (i = 0; i < MAC_NUM_HASH; i++)
-               nw64_mac(BMAC_HASH_TBL(i), 0);
-       niu_set_primary_mac_rdc_table(np, first_rdc_table, 1);
-       niu_set_multicast_mac_rdc_table(np, first_rdc_table, 1);
-       nw64_mac(BRXMAC_STATUS_MASK, ~(u64)0);
-
-       val = nr64_mac(BRXMAC_CONFIG);
-       val &= ~(BRXMAC_CONFIG_ENABLE |
-                BRXMAC_CONFIG_STRIP_PAD |
-                BRXMAC_CONFIG_STRIP_FCS |
-                BRXMAC_CONFIG_PROMISC |
-                BRXMAC_CONFIG_PROMISC_GRP |
-                BRXMAC_CONFIG_ADDR_FILT_EN |
-                BRXMAC_CONFIG_DISCARD_DIS);
-       val |= (BRXMAC_CONFIG_HASH_FILT_EN);
-       nw64_mac(BRXMAC_CONFIG, val);
-
-       val = nr64_mac(BMAC_ADDR_CMPEN);
-       val |= BMAC_ADDR_CMPEN_EN0;
-       nw64_mac(BMAC_ADDR_CMPEN, val);
-}
-
-static void niu_init_rx_mac(struct niu *np)
-{
-       niu_set_primary_mac(np, np->dev->dev_addr);
-
-       if (np->flags & NIU_FLAGS_XMAC)
-               niu_init_rx_xmac(np);
-       else
-               niu_init_rx_bmac(np);
-}
-
-static void niu_enable_tx_xmac(struct niu *np, int on)
-{
-       u64 val = nr64_mac(XMAC_CONFIG);
-
-       if (on)
-               val |= XMAC_CONFIG_TX_ENABLE;
-       else
-               val &= ~XMAC_CONFIG_TX_ENABLE;
-       nw64_mac(XMAC_CONFIG, val);
-}
-
-static void niu_enable_tx_bmac(struct niu *np, int on)
-{
-       u64 val = nr64_mac(BTXMAC_CONFIG);
-
-       if (on)
-               val |= BTXMAC_CONFIG_ENABLE;
-       else
-               val &= ~BTXMAC_CONFIG_ENABLE;
-       nw64_mac(BTXMAC_CONFIG, val);
-}
-
-static void niu_enable_tx_mac(struct niu *np, int on)
-{
-       if (np->flags & NIU_FLAGS_XMAC)
-               niu_enable_tx_xmac(np, on);
-       else
-               niu_enable_tx_bmac(np, on);
-}
-
-static void niu_enable_rx_xmac(struct niu *np, int on)
-{
-       u64 val = nr64_mac(XMAC_CONFIG);
-
-       val &= ~(XMAC_CONFIG_HASH_FILTER_EN |
-                XMAC_CONFIG_PROMISCUOUS);
-
-       if (np->flags & NIU_FLAGS_MCAST)
-               val |= XMAC_CONFIG_HASH_FILTER_EN;
-       if (np->flags & NIU_FLAGS_PROMISC)
-               val |= XMAC_CONFIG_PROMISCUOUS;
-
-       if (on)
-               val |= XMAC_CONFIG_RX_MAC_ENABLE;
-       else
-               val &= ~XMAC_CONFIG_RX_MAC_ENABLE;
-       nw64_mac(XMAC_CONFIG, val);
-}
-
-static void niu_enable_rx_bmac(struct niu *np, int on)
-{
-       u64 val = nr64_mac(BRXMAC_CONFIG);
-
-       val &= ~(BRXMAC_CONFIG_HASH_FILT_EN |
-                BRXMAC_CONFIG_PROMISC);
-
-       if (np->flags & NIU_FLAGS_MCAST)
-               val |= BRXMAC_CONFIG_HASH_FILT_EN;
-       if (np->flags & NIU_FLAGS_PROMISC)
-               val |= BRXMAC_CONFIG_PROMISC;
-
-       if (on)
-               val |= BRXMAC_CONFIG_ENABLE;
-       else
-               val &= ~BRXMAC_CONFIG_ENABLE;
-       nw64_mac(BRXMAC_CONFIG, val);
-}
-
-static void niu_enable_rx_mac(struct niu *np, int on)
-{
-       if (np->flags & NIU_FLAGS_XMAC)
-               niu_enable_rx_xmac(np, on);
-       else
-               niu_enable_rx_bmac(np, on);
-}
-
-static int niu_init_mac(struct niu *np)
-{
-       int err;
-
-       niu_init_xif(np);
-       err = niu_init_pcs(np);
-       if (err)
-               return err;
-
-       err = niu_reset_tx_mac(np);
-       if (err)
-               return err;
-       niu_init_tx_mac(np);
-       err = niu_reset_rx_mac(np);
-       if (err)
-               return err;
-       niu_init_rx_mac(np);
-
-       /* This looks hookey but the RX MAC reset we just did will
-        * undo some of the state we setup in niu_init_tx_mac() so we
-        * have to call it again.  In particular, the RX MAC reset will
-        * set the XMAC_MAX register back to it's default value.
-        */
-       niu_init_tx_mac(np);
-       niu_enable_tx_mac(np, 1);
-
-       niu_enable_rx_mac(np, 1);
-
-       return 0;
-}
-
-static void niu_stop_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
-{
-       (void) niu_tx_channel_stop(np, rp->tx_channel);
-}
-
-static void niu_stop_tx_channels(struct niu *np)
-{
-       int i;
-
-       for (i = 0; i < np->num_tx_rings; i++) {
-               struct tx_ring_info *rp = &np->tx_rings[i];
-
-               niu_stop_one_tx_channel(np, rp);
-       }
-}
-
-static void niu_reset_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
-{
-       (void) niu_tx_channel_reset(np, rp->tx_channel);
-}
-
-static void niu_reset_tx_channels(struct niu *np)
-{
-       int i;
-
-       for (i = 0; i < np->num_tx_rings; i++) {
-               struct tx_ring_info *rp = &np->tx_rings[i];
-
-               niu_reset_one_tx_channel(np, rp);
-       }
-}
-
-static void niu_stop_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
-{
-       (void) niu_enable_rx_channel(np, rp->rx_channel, 0);
-}
-
-static void niu_stop_rx_channels(struct niu *np)
-{
-       int i;
-
-       for (i = 0; i < np->num_rx_rings; i++) {
-               struct rx_ring_info *rp = &np->rx_rings[i];
-
-               niu_stop_one_rx_channel(np, rp);
-       }
-}
-
-static void niu_reset_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
-{
-       int channel = rp->rx_channel;
-
-       (void) niu_rx_channel_reset(np, channel);
-       nw64(RX_DMA_ENT_MSK(channel), RX_DMA_ENT_MSK_ALL);
-       nw64(RX_DMA_CTL_STAT(channel), 0);
-       (void) niu_enable_rx_channel(np, channel, 0);
-}
-
-static void niu_reset_rx_channels(struct niu *np)
-{
-       int i;
-
-       for (i = 0; i < np->num_rx_rings; i++) {
-               struct rx_ring_info *rp = &np->rx_rings[i];
-
-               niu_reset_one_rx_channel(np, rp);
-       }
-}
-
-static void niu_disable_ipp(struct niu *np)
-{
-       u64 rd, wr, val;
-       int limit;
-
-       rd = nr64_ipp(IPP_DFIFO_RD_PTR);
-       wr = nr64_ipp(IPP_DFIFO_WR_PTR);
-       limit = 100;
-       while (--limit >= 0 && (rd != wr)) {
-               rd = nr64_ipp(IPP_DFIFO_RD_PTR);
-               wr = nr64_ipp(IPP_DFIFO_WR_PTR);
-       }
-       if (limit < 0 &&
-           (rd != 0 && wr != 1)) {
-               netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
-                          (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
-                          (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
-       }
-
-       val = nr64_ipp(IPP_CFIG);
-       val &= ~(IPP_CFIG_IPP_ENABLE |
-                IPP_CFIG_DFIFO_ECC_EN |
-                IPP_CFIG_DROP_BAD_CRC |
-                IPP_CFIG_CKSUM_EN);
-       nw64_ipp(IPP_CFIG, val);
-
-       (void) niu_ipp_reset(np);
-}
-
-static int niu_init_hw(struct niu *np)
-{
-       int i, err;
-
-       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
-       niu_txc_enable_port(np, 1);
-       niu_txc_port_dma_enable(np, 1);
-       niu_txc_set_imask(np, 0);
-
-       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
-       for (i = 0; i < np->num_tx_rings; i++) {
-               struct tx_ring_info *rp = &np->tx_rings[i];
-
-               err = niu_init_one_tx_channel(np, rp);
-               if (err)
-                       return err;
-       }
-
-       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
-       err = niu_init_rx_channels(np);
-       if (err)
-               goto out_uninit_tx_channels;
-
-       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
-       err = niu_init_classifier_hw(np);
-       if (err)
-               goto out_uninit_rx_channels;
-
-       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
-       err = niu_init_zcp(np);
-       if (err)
-               goto out_uninit_rx_channels;
-
-       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
-       err = niu_init_ipp(np);
-       if (err)
-               goto out_uninit_rx_channels;
-
-       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
-       err = niu_init_mac(np);
-       if (err)
-               goto out_uninit_ipp;
-
-       return 0;
-
-out_uninit_ipp:
-       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
-       niu_disable_ipp(np);
-
-out_uninit_rx_channels:
-       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
-       niu_stop_rx_channels(np);
-       niu_reset_rx_channels(np);
-
-out_uninit_tx_channels:
-       netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
-       niu_stop_tx_channels(np);
-       niu_reset_tx_channels(np);
-
-       return err;
-}
-
-static void niu_stop_hw(struct niu *np)
-{
-       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
-       niu_enable_interrupts(np, 0);
-
-       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
-       niu_enable_rx_mac(np, 0);
-
-       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
-       niu_disable_ipp(np);
-
-       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
-       niu_stop_tx_channels(np);
-
-       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
-       niu_stop_rx_channels(np);
-
-       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
-       niu_reset_tx_channels(np);
-
-       netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
-       niu_reset_rx_channels(np);
-}
-
-static void niu_set_irq_name(struct niu *np)
-{
-       int port = np->port;
-       int i, j = 1;
-
-       sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
-
-       if (port == 0) {
-               sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
-               sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
-               j = 3;
-       }
-
-       for (i = 0; i < np->num_ldg - j; i++) {
-               if (i < np->num_rx_rings)
-                       sprintf(np->irq_name[i+j], "%s-rx-%d",
-                               np->dev->name, i);
-               else if (i < np->num_tx_rings + np->num_rx_rings)
-                       sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
-                               i - np->num_rx_rings);
-       }
-}
-
-static int niu_request_irq(struct niu *np)
-{
-       int i, j, err;
-
-       niu_set_irq_name(np);
-
-       err = 0;
-       for (i = 0; i < np->num_ldg; i++) {
-               struct niu_ldg *lp = &np->ldg[i];
-
-               err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
-                                 np->irq_name[i], lp);
-               if (err)
-                       goto out_free_irqs;
-
-       }
-
-       return 0;
-
-out_free_irqs:
-       for (j = 0; j < i; j++) {
-               struct niu_ldg *lp = &np->ldg[j];
-
-               free_irq(lp->irq, lp);
-       }
-       return err;
-}
-
-static void niu_free_irq(struct niu *np)
-{
-       int i;
-
-       for (i = 0; i < np->num_ldg; i++) {
-               struct niu_ldg *lp = &np->ldg[i];
-
-               free_irq(lp->irq, lp);
-       }
-}
-
-static void niu_enable_napi(struct niu *np)
-{
-       int i;
-
-       for (i = 0; i < np->num_ldg; i++)
-               napi_enable(&np->ldg[i].napi);
-}
-
-static void niu_disable_napi(struct niu *np)
-{
-       int i;
-
-       for (i = 0; i < np->num_ldg; i++)
-               napi_disable(&np->ldg[i].napi);
-}
-
/* ndo_open: allocate DMA channels, hook up interrupts, bring the
 * hardware up and arm the maintenance timer.  Resources acquired
 * before a failure are unwound in reverse order via the tail labels.
 */
static int niu_open(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int err;

	netif_carrier_off(dev);

	err = niu_alloc_channels(np);
	if (err)
		goto out_err;

	/* Mask interrupts (second arg 0) so request_irq() below cannot
	 * race with a live interrupt source before the hardware is up.
	 */
	err = niu_enable_interrupts(np, 0);
	if (err)
		goto out_free_channels;

	err = niu_request_irq(np);
	if (err)
		goto out_free_channels;

	niu_enable_napi(np);

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		/* Set up the 1 Hz maintenance timer; it is only armed
		 * (add_timer) once everything else has succeeded below.
		 */
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (err) {
		niu_disable_napi(np);
		goto out_free_irq;
	}

	netif_tx_start_all_queues(dev);

	/* In loopback mode there is no external link to wait for. */
	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
		netif_carrier_on(dev);

	add_timer(&np->timer);

	return 0;

out_free_irq:
	niu_free_irq(np);

out_free_channels:
	niu_free_channels(np);

out_err:
	return err;
}
-
/* Common teardown for close and MTU changes: stop the reset worker,
 * NAPI and the TX queues, kill the maintenance timer, then quiesce
 * the hardware under the device lock.
 */
static void niu_full_shutdown(struct niu *np, struct net_device *dev)
{
	cancel_work_sync(&np->reset_task);

	niu_disable_napi(np);
	netif_tx_stop_all_queues(dev);

	del_timer_sync(&np->timer);

	spin_lock_irq(&np->lock);

	niu_stop_hw(np);

	spin_unlock_irq(&np->lock);
}
-
/* ndo_stop: full shutdown, then release IRQs and DMA channels and
 * turn the activity LED off.
 */
static int niu_close(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);

	niu_full_shutdown(np, dev);

	niu_free_irq(np);

	niu_free_channels(np);

	niu_handle_led(np, 0);

	return 0;
}
-
/* Accumulate the XMAC hardware counter registers into the software
 * statistics.  NOTE(review): this assumes the hardware counters can
 * be folded in additively between syncs (e.g. clear-on-read) —
 * confirm against the Neptune programming manual.
 */
static void niu_sync_xmac_stats(struct niu *np)
{
	struct niu_xmac_stats *mp = &np->mac_stats.xmac;

	mp->tx_frames += nr64_mac(TXMAC_FRM_CNT);
	mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT);

	mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT);
	mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT);
	mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT);
	mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT);
	mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT);
	/* Histogram buckets 1-7 count frames by size range */
	mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1);
	mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2);
	mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3);
	mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4);
	mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5);
	mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6);
	mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7);
	mp->rx_octets += nr64_mac(RXMAC_BT_CNT);
	mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT);
	mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT);
	mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT);
}
-
-static void niu_sync_bmac_stats(struct niu *np)
-{
-       struct niu_bmac_stats *mp = &np->mac_stats.bmac;
-
-       mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT);
-       mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT);
-
-       mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT);
-       mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
-       mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT);
-       mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT);
-}
-
-static void niu_sync_mac_stats(struct niu *np)
-{
-       if (np->flags & NIU_FLAGS_XMAC)
-               niu_sync_xmac_stats(np);
-       else
-               niu_sync_bmac_stats(np);
-}
-
/* Sum RX packet/byte/drop/error counts over all RX rings into
 * @stats.  np->rx_rings is sampled once (ACCESS_ONCE) because this
 * may run concurrently with channel teardown; if the rings are gone
 * the counters are simply reported as zero.
 */
static void niu_get_rx_stats(struct niu *np,
			     struct rtnl_link_stats64 *stats)
{
	u64 pkts, dropped, errors, bytes;
	struct rx_ring_info *rx_rings;
	int i;

	pkts = dropped = errors = bytes = 0;

	rx_rings = ACCESS_ONCE(np->rx_rings);
	if (!rx_rings)
		goto no_rings;

	for (i = 0; i < np->num_rx_rings; i++) {
		struct rx_ring_info *rp = &rx_rings[i];

		/* Pull the hardware discard counters into rp first */
		niu_sync_rx_discard_stats(np, rp, 0);

		pkts += rp->rx_packets;
		bytes += rp->rx_bytes;
		dropped += rp->rx_dropped;
		errors += rp->rx_errors;
	}

no_rings:
	stats->rx_packets = pkts;
	stats->rx_bytes = bytes;
	stats->rx_dropped = dropped;
	stats->rx_errors = errors;
}
-
/* Sum TX packet/byte/error counts over all TX rings into @stats.
 * np->tx_rings is sampled once (ACCESS_ONCE) to tolerate a
 * concurrent channel teardown, mirroring niu_get_rx_stats().
 */
static void niu_get_tx_stats(struct niu *np,
			     struct rtnl_link_stats64 *stats)
{
	u64 pkts, errors, bytes;
	struct tx_ring_info *tx_rings;
	int i;

	pkts = errors = bytes = 0;

	tx_rings = ACCESS_ONCE(np->tx_rings);
	if (!tx_rings)
		goto no_rings;

	for (i = 0; i < np->num_tx_rings; i++) {
		struct tx_ring_info *rp = &tx_rings[i];

		pkts += rp->tx_packets;
		bytes += rp->tx_bytes;
		errors += rp->tx_errors;
	}

no_rings:
	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_errors = errors;
}
-
/* ndo_get_stats64: fill @stats from the per-ring counters.  When the
 * interface is down the (caller-zeroed) stats are returned as-is.
 */
static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *stats)
{
	struct niu *np = netdev_priv(dev);

	if (!netif_running(dev))
		return stats;

	niu_get_rx_stats(np, stats);
	niu_get_tx_stats(np, stats);

	return stats;
}
-
-static void niu_load_hash_xmac(struct niu *np, u16 *hash)
-{
-       int i;
-
-       for (i = 0; i < 16; i++)
-               nw64_mac(XMAC_HASH_TBL(i), hash[i]);
-}
-
-static void niu_load_hash_bmac(struct niu *np, u16 *hash)
-{
-       int i;
-
-       for (i = 0; i < 16; i++)
-               nw64_mac(BMAC_HASH_TBL(i), hash[i]);
-}
-
-static void niu_load_hash(struct niu *np, u16 *hash)
-{
-       if (np->flags & NIU_FLAGS_XMAC)
-               niu_load_hash_xmac(np, hash);
-       else
-               niu_load_hash_bmac(np, hash);
-}
-
/* ndo_set_rx_mode: program the promiscuous/multicast flags, the
 * alternate (secondary unicast) MAC address slots, and the 256-bit
 * multicast hash filter.  The RX MAC is disabled around the update
 * and everything runs under the device lock.
 */
static void niu_set_rx_mode(struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	int i, alt_cnt, err;
	struct netdev_hw_addr *ha;
	unsigned long flags;
	u16 hash[16] = { 0, };

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);

	np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
	if (dev->flags & IFF_PROMISC)
		np->flags |= NIU_FLAGS_PROMISC;
	if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
		np->flags |= NIU_FLAGS_MCAST;

	/* If there are more secondary unicast addresses than hardware
	 * slots, fall back to promiscuous mode instead.
	 */
	alt_cnt = netdev_uc_count(dev);
	if (alt_cnt > niu_num_alt_addr(np)) {
		alt_cnt = 0;
		np->flags |= NIU_FLAGS_PROMISC;
	}

	if (alt_cnt) {
		int index = 0;

		netdev_for_each_uc_addr(ha, dev) {
			err = niu_set_alt_mac(np, index, ha->addr);
			if (err)
				netdev_warn(dev, "Error %d adding alt mac %d\n",
					    err, index);
			err = niu_enable_alt_mac(np, index, 1);
			if (err)
				netdev_warn(dev, "Error %d enabling alt mac %d\n",
					    err, index);

			index++;
		}
	} else {
		/* No alt addresses in use: disable all alt slots.
		 * NOTE(review): slot 0 is skipped on BMAC — presumably
		 * reserved for the primary MAC there; confirm.
		 */
		int alt_start;
		if (np->flags & NIU_FLAGS_XMAC)
			alt_start = 0;
		else
			alt_start = 1;
		for (i = alt_start; i < niu_num_alt_addr(np); i++) {
			err = niu_enable_alt_mac(np, i, 0);
			if (err)
				netdev_warn(dev, "Error %d disabling alt mac %d\n",
					    err, i);
		}
	}
	if (dev->flags & IFF_ALLMULTI) {
		/* Accept-all: set every bit of the 256-bit hash */
		for (i = 0; i < 16; i++)
			hash[i] = 0xffff;
	} else if (!netdev_mc_empty(dev)) {
		netdev_for_each_mc_addr(ha, dev) {
			u32 crc = ether_crc_le(ETH_ALEN, ha->addr);

			/* Top 8 CRC bits index the 16x16 hash table */
			crc >>= 24;
			hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
		}
	}

	if (np->flags & NIU_FLAGS_MCAST)
		niu_load_hash(np, hash);

	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);
}
-
/* ndo_set_mac_address: validate and record the new MAC, and if the
 * interface is up, reprogram the primary MAC filter with the RX MAC
 * briefly disabled.
 */
static int niu_set_mac_addr(struct net_device *dev, void *p)
{
	struct niu *np = netdev_priv(dev);
	struct sockaddr *addr = p;
	unsigned long flags;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* Hardware will pick the address up at the next open */
	if (!netif_running(dev))
		return 0;

	spin_lock_irqsave(&np->lock, flags);
	niu_enable_rx_mac(np, 0);
	niu_set_primary_mac(np, dev->dev_addr);
	niu_enable_rx_mac(np, 1);
	spin_unlock_irqrestore(&np->lock, flags);

	return 0;
}
-
/* ndo_do_ioctl: no private ioctls are implemented for this device. */
static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}
-
/* Quiesce the driver around a reset: bump trans_start so the netdev
 * watchdog does not fire while TX is held disabled, then stop NAPI
 * and the TX queues.
 */
static void niu_netif_stop(struct niu *np)
{
	np->dev->trans_start = jiffies; /* prevent tx timeout */

	niu_disable_napi(np);

	netif_tx_disable(np->dev);
}
-
/* Resume the driver after a reset: wake all TX queues, restart NAPI
 * and unmask interrupts.
 */
static void niu_netif_start(struct niu *np)
{
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after niu_init_hw).
	 */
	netif_tx_wake_all_queues(np->dev);

	niu_enable_napi(np);

	niu_enable_interrupts(np, 1);
}
-
/* Rebuild the RX and TX rings in place after the hardware has been
 * stopped: re-enter every still-allocated RX page into the RBR
 * (receive block ring), top the RBR up with fresh pages, release any
 * in-flight TX packets, and reset all ring indices.
 */
static void niu_reset_buffers(struct niu *np)
{
	int i, j, k, err;

	if (np->rx_rings) {
		for (i = 0; i < np->num_rx_rings; i++) {
			struct rx_ring_info *rp = &np->rx_rings[i];

			/* Walk the rxhash chains and repopulate the
			 * RBR with the pages already owned by the ring.
			 */
			for (j = 0, k = 0; j < MAX_RBR_RING_SIZE; j++) {
				struct page *page;

				page = rp->rxhash[j];
				while (page) {
					/* page->mapping chains pages;
					 * page->index holds the DMA address
					 */
					struct page *next =
						(struct page *) page->mapping;
					u64 base = page->index;
					base = base >> RBR_DESCR_ADDR_SHIFT;
					rp->rbr[k++] = cpu_to_le32(base);
					page = next;
				}
			}
			/* Fill the remainder of the RBR with new pages;
			 * stop early (best effort) on allocation failure.
			 */
			for (; k < MAX_RBR_RING_SIZE; k++) {
				err = niu_rbr_add_page(np, rp, GFP_ATOMIC, k);
				if (unlikely(err))
					break;
			}

			rp->rbr_index = rp->rbr_table_size - 1;
			rp->rcr_index = 0;
			rp->rbr_pending = 0;
			rp->rbr_refill_pending = 0;
		}
	}
	if (np->tx_rings) {
		for (i = 0; i < np->num_tx_rings; i++) {
			struct tx_ring_info *rp = &np->tx_rings[i];

			/* Free every packet still queued on the ring */
			for (j = 0; j < MAX_TX_RING_SIZE; j++) {
				if (rp->tx_buffs[j].skb)
					(void) release_tx_packet(np, rp, j);
			}

			rp->pending = MAX_TX_RING_SIZE;
			rp->prod = 0;
			rp->cons = 0;
			rp->wrap_bit = 0;
		}
	}
}
-
/* Workqueue handler for full device resets (scheduled e.g. from the
 * TX watchdog): stop the hardware, rebuild the rings, then
 * re-initialize and restart everything.
 */
static void niu_reset_task(struct work_struct *work)
{
	struct niu *np = container_of(work, struct niu, reset_task);
	unsigned long flags;
	int err;

	/* Bail out if the device was closed before the work ran */
	spin_lock_irqsave(&np->lock, flags);
	if (!netif_running(np->dev)) {
		spin_unlock_irqrestore(&np->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&np->lock, flags);

	del_timer_sync(&np->timer);

	niu_netif_stop(np);

	spin_lock_irqsave(&np->lock, flags);

	niu_stop_hw(np);

	spin_unlock_irqrestore(&np->lock, flags);

	/* Ring rebuild allocates pages, so do it outside the lock */
	niu_reset_buffers(np);

	spin_lock_irqsave(&np->lock, flags);

	err = niu_init_hw(np);
	if (!err) {
		np->timer.expires = jiffies + HZ;
		add_timer(&np->timer);
		niu_netif_start(np);
	}

	spin_unlock_irqrestore(&np->lock, flags);
}
-
-static void niu_tx_timeout(struct net_device *dev)
-{
-       struct niu *np = netdev_priv(dev);
-
-       dev_err(np->device, "%s: Transmit timed out, resetting\n",
-               dev->name);
-
-       schedule_work(&np->reset_task);
-}
-
-static void niu_set_txd(struct tx_ring_info *rp, int index,
-                       u64 mapping, u64 len, u64 mark,
-                       u64 n_frags)
-{
-       __le64 *desc = &rp->descr[index];
-
-       *desc = cpu_to_le64(mark |
-                           (n_frags << TX_DESC_NUM_PTR_SHIFT) |
-                           (len << TX_DESC_TR_LEN_SHIFT) |
-                           (mapping & TX_DESC_SAD));
-}
-
/* Build the flags word of the Neptune TX packet header: pad length,
 * total length, L3 offset, IP header length, LLC/VLAN/IP-version
 * bits and the L4 checksum-offload fields.  Offsets are relative to
 * the start of L2 after the in-band tx_pkt_hdr and are expressed in
 * 16-bit units (hence the "/ 2" conversions).
 */
static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
				u64 pad_bytes, u64 len)
{
	u16 eth_proto, eth_proto_inner;
	u64 csum_bits, l3off, ihl, ret;
	u8 ip_proto;
	int ipv6;

	eth_proto = be16_to_cpu(ehdr->h_proto);
	eth_proto_inner = eth_proto;
	if (eth_proto == ETH_P_8021Q) {
		/* VLAN tagged: classify on the encapsulated protocol */
		struct vlan_ethhdr *vp = (struct vlan_ethhdr *) ehdr;
		__be16 val = vp->h_vlan_encapsulated_proto;

		eth_proto_inner = be16_to_cpu(val);
	}

	ipv6 = ihl = 0;
	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		ip_proto = ip_hdr(skb)->protocol;
		ihl = ip_hdr(skb)->ihl;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		ihl = (40 >> 2);	/* fixed IPv6 header, in words */
		ipv6 = 1;
		break;
	default:
		ip_proto = ihl = 0;
		break;
	}

	csum_bits = TXHDR_CSUM_NONE;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u64 start, stuff;

		csum_bits = (ip_proto == IPPROTO_TCP ?
			     TXHDR_CSUM_TCP :
			     (ip_proto == IPPROTO_UDP ?
			      TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));

		/* Checksum start/stuff offsets, rebased to exclude the
		 * pad and the in-band packet header we prepended.
		 */
		start = skb_checksum_start_offset(skb) -
			(pad_bytes + sizeof(struct tx_pkt_hdr));
		stuff = start + skb->csum_offset;

		csum_bits |= (start / 2) << TXHDR_L4START_SHIFT;
		csum_bits |= (stuff / 2) << TXHDR_L4STUFF_SHIFT;
	}

	l3off = skb_network_offset(skb) -
		(pad_bytes + sizeof(struct tx_pkt_hdr));

	/* eth_proto_inner < 1536 means an 802.3 length field (LLC) */
	ret = (((pad_bytes / 2) << TXHDR_PAD_SHIFT) |
	       (len << TXHDR_LEN_SHIFT) |
	       ((l3off / 2) << TXHDR_L3START_SHIFT) |
	       (ihl << TXHDR_IHL_SHIFT) |
	       ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
	       ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
	       (ipv6 ? TXHDR_IP_VER : 0) |
	       csum_bits);

	return ret;
}
-
/* ndo_start_xmit: queue one skb on the TX ring selected by the skb's
 * queue mapping.  The hardware requires an in-band tx_pkt_hdr before
 * the frame, so headroom is (re)allocated as needed; large linear
 * areas are split across multiple MAX_TX_DESC_LEN descriptors and
 * each page fragment gets its own descriptor.  Finally the kick
 * register advances the hardware producer pointer.
 */
static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct niu *np = netdev_priv(dev);
	unsigned long align, headroom;
	struct netdev_queue *txq;
	struct tx_ring_info *rp;
	struct tx_pkt_hdr *tp;
	unsigned int len, nfg;
	struct ethhdr *ehdr;
	int prod, i, tlen;
	u64 mapping, mrk;

	i = skb_get_queue_mapping(skb);
	rp = &np->tx_rings[i];
	txq = netdev_get_tx_queue(dev, i);

	/* Should not happen: the queue is stopped before it fills */
	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_tx_stop_queue(txq);
		dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
		rp->tx_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Pad runt frames up to the Ethernet minimum */
	if (skb->len < ETH_ZLEN) {
		unsigned int pad_bytes = ETH_ZLEN - skb->len;

		if (skb_pad(skb, pad_bytes))
			goto out;
		skb_put(skb, pad_bytes);
	}

	/* Ensure room for the in-band header plus up to 15 bytes of
	 * alignment; reallocate the headroom if the skb is too tight.
	 */
	len = sizeof(struct tx_pkt_hdr) + 15;
	if (skb_headroom(skb) < len) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, len);
		if (!skb_new) {
			rp->tx_errors++;
			goto out_drop;
		}
		kfree_skb(skb);
		skb = skb_new;
	} else
		skb_orphan(skb);

	/* Keep the frame's original 16-byte alignment after pushing
	 * the packet header in front of it.
	 */
	align = ((unsigned long) skb->data & (16 - 1));
	headroom = align + sizeof(struct tx_pkt_hdr);

	ehdr = (struct ethhdr *) skb->data;
	tp = (struct tx_pkt_hdr *) skb_push(skb, headroom);

	len = skb->len - sizeof(struct tx_pkt_hdr);
	tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len));
	tp->resv = 0;

	len = skb_headlen(skb);
	mapping = np->ops->map_single(np->device, skb->data,
				      len, DMA_TO_DEVICE);

	prod = rp->prod;

	rp->tx_buffs[prod].skb = skb;
	rp->tx_buffs[prod].mapping = mapping;

	/* Periodically request a completion mark interrupt */
	mrk = TX_DESC_SOP;
	if (++rp->mark_counter == rp->mark_freq) {
		rp->mark_counter = 0;
		mrk |= TX_DESC_MARK;
		rp->mark_pending++;
	}

	/* Total descriptor count: frags plus however many chunks the
	 * linear area splits into.
	 */
	tlen = len;
	nfg = skb_shinfo(skb)->nr_frags;
	while (tlen > 0) {
		tlen -= MAX_TX_DESC_LEN;
		nfg++;
	}

	/* Emit descriptors for the linear area; only the first one
	 * carries the SOP/MARK bits and fragment count.
	 */
	while (len > 0) {
		unsigned int this_len = len;

		if (this_len > MAX_TX_DESC_LEN)
			this_len = MAX_TX_DESC_LEN;

		niu_set_txd(rp, prod, mapping, this_len, mrk, nfg);
		mrk = nfg = 0;

		prod = NEXT_TX(rp, prod);
		mapping += this_len;
		len -= this_len;
	}

	/* One descriptor per page fragment */
	for (i = 0; i <  skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = frag->size;
		mapping = np->ops->map_page(np->device, frag->page,
					    frag->page_offset, len,
					    DMA_TO_DEVICE);

		rp->tx_buffs[prod].skb = NULL;
		rp->tx_buffs[prod].mapping = mapping;

		niu_set_txd(rp, prod, mapping, len, 0, 0);

		prod = NEXT_TX(rp, prod);
	}

	/* The kick register's wrap bit toggles on each ring wrap */
	if (prod < rp->prod)
		rp->wrap_bit ^= TX_RING_KICK_WRAP;
	rp->prod = prod;

	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));

	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);
		/* Re-check after stopping to close the race with the
		 * completion path freeing slots concurrently.
		 */
		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
			netif_tx_wake_queue(txq);
	}

out:
	return NETDEV_TX_OK;

out_drop:
	rp->tx_errors++;
	kfree_skb(skb);
	goto out;
}
-
/* ndo_change_mtu: validate and record the new MTU.  Crossing the
 * jumbo-frame boundary while the interface is up requires a full
 * shutdown and channel/hardware re-initialization; otherwise the
 * change is a simple field update.
 */
static int niu_change_mtu(struct net_device *dev, int new_mtu)
{
	struct niu *np = netdev_priv(dev);
	int err, orig_jumbo, new_jumbo;

	if (new_mtu < 68 || new_mtu > NIU_MAX_MTU)
		return -EINVAL;

	orig_jumbo = (dev->mtu > ETH_DATA_LEN);
	new_jumbo = (new_mtu > ETH_DATA_LEN);

	dev->mtu = new_mtu;

	if (!netif_running(dev) ||
	    (orig_jumbo == new_jumbo))
		return 0;

	niu_full_shutdown(np, dev);

	niu_free_channels(np);

	/* Re-enable NAPI (disabled by the shutdown) before the rings
	 * come back, mirroring the bring-up order used elsewhere.
	 */
	niu_enable_napi(np);

	err = niu_alloc_channels(np);
	if (err)
		return err;

	spin_lock_irq(&np->lock);

	err = niu_init_hw(np);
	if (!err) {
		init_timer(&np->timer);
		np->timer.expires = jiffies + HZ;
		np->timer.data = (unsigned long) np;
		np->timer.function = niu_timer;

		err = niu_enable_interrupts(np, 1);
		if (err)
			niu_stop_hw(np);
	}

	spin_unlock_irq(&np->lock);

	if (!err) {
		netif_tx_start_all_queues(dev);
		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
			netif_carrier_on(dev);

		add_timer(&np->timer);
	}

	return err;
}
-
-static void niu_get_drvinfo(struct net_device *dev,
-                           struct ethtool_drvinfo *info)
-{
-       struct niu *np = netdev_priv(dev);
-       struct niu_vpd *vpd = &np->vpd;
-
-       strcpy(info->driver, DRV_MODULE_NAME);
-       strcpy(info->version, DRV_MODULE_VERSION);
-       sprintf(info->fw_version, "%d.%d",
-               vpd->fcode_major, vpd->fcode_minor);
-       if (np->parent->plat_type != PLAT_TYPE_NIU)
-               strcpy(info->bus_info, pci_name(np->pdev));
-}
-
/* ethtool get_settings: report the currently active link parameters
 * from the software link_config state.
 */
static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct niu *np = netdev_priv(dev);
	struct niu_link_config *lp;

	lp = &np->link_config;

	memset(cmd, 0, sizeof(*cmd));
	cmd->phy_address = np->phy_addr;
	cmd->supported = lp->supported;
	cmd->advertising = lp->active_advertising;
	cmd->autoneg = lp->active_autoneg;
	ethtool_cmd_speed_set(cmd, lp->active_speed);
	cmd->duplex = lp->active_duplex;
	cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
	cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
		XCVR_EXTERNAL : XCVR_INTERNAL;

	return 0;
}
-
-static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-       struct niu *np = netdev_priv(dev);
-       struct niu_link_config *lp = &np->link_config;
-
-       lp->advertising = cmd->advertising;
-       lp->speed = ethtool_cmd_speed(cmd);
-       lp->duplex = cmd->duplex;
-       lp->autoneg = cmd->autoneg;
-       return niu_init_link(np);
-}
-
-static u32 niu_get_msglevel(struct net_device *dev)
-{
-       struct niu *np = netdev_priv(dev);
-       return np->msg_enable;
-}
-
-static void niu_set_msglevel(struct net_device *dev, u32 value)
-{
-       struct niu *np = netdev_priv(dev);
-       np->msg_enable = value;
-}
-
-static int niu_nway_reset(struct net_device *dev)
-{
-       struct niu *np = netdev_priv(dev);
-
-       if (np->link_config.autoneg)
-               return niu_init_link(np);
-
-       return 0;
-}
-
-static int niu_get_eeprom_len(struct net_device *dev)
-{
-       struct niu *np = netdev_priv(dev);
-
-       return np->eeprom_len;
-}
-
/* ethtool get_eeprom: read @eeprom->len bytes starting at
 * @eeprom->offset.  The EEPROM is accessed 32 bits at a time through
 * the ESPC_NCR registers, so an unaligned leading piece, whole
 * words, and a trailing piece are handled separately.
 */
static int niu_get_eeprom(struct net_device *dev,
			  struct ethtool_eeprom *eeprom, u8 *data)
{
	struct niu *np = netdev_priv(dev);
	u32 offset, len, val;

	offset = eeprom->offset;
	len = eeprom->len;

	/* Reject wrap-around and out-of-range requests; clamp the
	 * length (and report the clamp back via eeprom->len).
	 */
	if (offset + len < offset)
		return -EINVAL;
	if (offset >= np->eeprom_len)
		return -EINVAL;
	if (offset + len > np->eeprom_len)
		len = eeprom->len = np->eeprom_len - offset;

	if (offset & 3) {
		/* Unaligned head: copy the tail bytes of one word */
		u32 b_offset, b_count;

		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len)
			b_count = len;

		val = nr64(ESPC_NCR((offset - b_offset) / 4));
		memcpy(data, ((char *)&val) + b_offset, b_count);
		data += b_count;
		len -= b_count;
		offset += b_count;
	}
	/* Aligned middle: whole 32-bit words */
	while (len >= 4) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, 4);
		data += 4;
		len -= 4;
		offset += 4;
	}
	/* Tail: leading bytes of the final word */
	if (len) {
		val = nr64(ESPC_NCR(offset / 4));
		memcpy(data, &val, len);
	}
	return 0;
}
-
-static void niu_ethflow_to_l3proto(int flow_type, u8 *pid)
-{
-       switch (flow_type) {
-       case TCP_V4_FLOW:
-       case TCP_V6_FLOW:
-               *pid = IPPROTO_TCP;
-               break;
-       case UDP_V4_FLOW:
-       case UDP_V6_FLOW:
-               *pid = IPPROTO_UDP;
-               break;
-       case SCTP_V4_FLOW:
-       case SCTP_V6_FLOW:
-               *pid = IPPROTO_SCTP;
-               break;
-       case AH_V4_FLOW:
-       case AH_V6_FLOW:
-               *pid = IPPROTO_AH;
-               break;
-       case ESP_V4_FLOW:
-       case ESP_V6_FLOW:
-               *pid = IPPROTO_ESP;
-               break;
-       default:
-               *pid = 0;
-               break;
-       }
-}
-
-static int niu_class_to_ethflow(u64 class, int *flow_type)
-{
-       switch (class) {
-       case CLASS_CODE_TCP_IPV4:
-               *flow_type = TCP_V4_FLOW;
-               break;
-       case CLASS_CODE_UDP_IPV4:
-               *flow_type = UDP_V4_FLOW;
-               break;
-       case CLASS_CODE_AH_ESP_IPV4:
-               *flow_type = AH_V4_FLOW;
-               break;
-       case CLASS_CODE_SCTP_IPV4:
-               *flow_type = SCTP_V4_FLOW;
-               break;
-       case CLASS_CODE_TCP_IPV6:
-               *flow_type = TCP_V6_FLOW;
-               break;
-       case CLASS_CODE_UDP_IPV6:
-               *flow_type = UDP_V6_FLOW;
-               break;
-       case CLASS_CODE_AH_ESP_IPV6:
-               *flow_type = AH_V6_FLOW;
-               break;
-       case CLASS_CODE_SCTP_IPV6:
-               *flow_type = SCTP_V6_FLOW;
-               break;
-       case CLASS_CODE_USER_PROG1:
-       case CLASS_CODE_USER_PROG2:
-       case CLASS_CODE_USER_PROG3:
-       case CLASS_CODE_USER_PROG4:
-               *flow_type = IP_USER_FLOW;
-               break;
-       default:
-               return 0;
-       }
-
-       return 1;
-}
-
-static int niu_ethflow_to_class(int flow_type, u64 *class)
-{
-       switch (flow_type) {
-       case TCP_V4_FLOW:
-               *class = CLASS_CODE_TCP_IPV4;
-               break;
-       case UDP_V4_FLOW:
-               *class = CLASS_CODE_UDP_IPV4;
-               break;
-       case AH_ESP_V4_FLOW:
-       case AH_V4_FLOW:
-       case ESP_V4_FLOW:
-               *class = CLASS_CODE_AH_ESP_IPV4;
-               break;
-       case SCTP_V4_FLOW:
-               *class = CLASS_CODE_SCTP_IPV4;
-               break;
-       case TCP_V6_FLOW:
-               *class = CLASS_CODE_TCP_IPV6;
-               break;
-       case UDP_V6_FLOW:
-               *class = CLASS_CODE_UDP_IPV6;
-               break;
-       case AH_ESP_V6_FLOW:
-       case AH_V6_FLOW:
-       case ESP_V6_FLOW:
-               *class = CLASS_CODE_AH_ESP_IPV6;
-               break;
-       case SCTP_V6_FLOW:
-               *class = CLASS_CODE_SCTP_IPV6;
-               break;
-       default:
-               return 0;
-       }
-
-       return 1;
-}
-
-static u64 niu_flowkey_to_ethflow(u64 flow_key)
-{
-       u64 ethflow = 0;
-
-       if (flow_key & FLOW_KEY_L2DA)
-               ethflow |= RXH_L2DA;
-       if (flow_key & FLOW_KEY_VLAN)
-               ethflow |= RXH_VLAN;
-       if (flow_key & FLOW_KEY_IPSA)
-               ethflow |= RXH_IP_SRC;
-       if (flow_key & FLOW_KEY_IPDA)
-               ethflow |= RXH_IP_DST;
-       if (flow_key & FLOW_KEY_PROTO)
-               ethflow |= RXH_L3_PROTO;
-       if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT))
-               ethflow |= RXH_L4_B_0_1;
-       if (flow_key & (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT))
-               ethflow |= RXH_L4_B_2_3;
-
-       return ethflow;
-
-}
-
-static int niu_ethflow_to_flowkey(u64 ethflow, u64 *flow_key)
-{
-       u64 key = 0;
-
-       if (ethflow & RXH_L2DA)
-               key |= FLOW_KEY_L2DA;
-       if (ethflow & RXH_VLAN)
-               key |= FLOW_KEY_VLAN;
-       if (ethflow & RXH_IP_SRC)
-               key |= FLOW_KEY_IPSA;
-       if (ethflow & RXH_IP_DST)
-               key |= FLOW_KEY_IPDA;
-       if (ethflow & RXH_L3_PROTO)
-               key |= FLOW_KEY_PROTO;
-       if (ethflow & RXH_L4_B_0_1)
-               key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_0_SHIFT);
-       if (ethflow & RXH_L4_B_2_3)
-               key |= (FLOW_KEY_L4_BYTE12 << FLOW_KEY_L4_1_SHIFT);
-
-       *flow_key = key;
-
-       return 1;
-
-}
-
/* ethtool get_rxnfc helper: report the RX hash fields configured for
 * the class corresponding to @nfc->flow_type, or RXH_DISCARD when
 * the class's TCAM key has the discard bit set.
 */
static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
{
	u64 class;

	nfc->data = 0;

	if (!niu_ethflow_to_class(nfc->flow_type, &class))
		return -EINVAL;

	/* Per-class arrays are indexed from CLASS_CODE_USER_PROG1 */
	if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
	    TCAM_KEY_DISC)
		nfc->data = RXH_DISCARD;
	else
		nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class -
						      CLASS_CODE_USER_PROG1]);
	return 0;
}
-
/* Decode an IPv4 TCAM entry into an ethtool rx_flow_spec: extract
 * source/destination addresses, TOS, and the per-flow-type layer-4
 * field (ports, SPI, or raw 4 bytes) from the packed key words, for
 * both the match values (h_u) and their masks (m_u).
 */
static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
					struct ethtool_rx_flow_spec *fsp)
{
	u32 tmp;
	u16 prt;

	tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
	fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);

	tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
	fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);

	tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
	fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);

	tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
	fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);

	fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
		TCAM_V4KEY2_TOS_SHIFT;
	fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >>
		TCAM_V4KEY2_TOS_SHIFT;

	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		/* PORT_SPI field: source port in the high 16 bits,
		 * destination port in the low 16 bits.
		 */
		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
		fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);

		prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
		fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);

		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
		fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);

		prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
		fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		/* The same field holds the 32-bit IPsec SPI */
		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);

		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
		break;
	case IP_USER_FLOW:
		/* User-programmed class: expose the raw 4 bytes plus
		 * the IP protocol from the key.
		 */
		tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);

		tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
			TCAM_V4KEY2_PORT_SPI_SHIFT;
		fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);

		fsp->h_u.usr_ip4_spec.proto =
			(tp->key[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;
		fsp->m_u.usr_ip4_spec.proto =
			(tp->key_mask[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;

		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		break;
	default:
		break;
	}
}
-
/* ETHTOOL_GRXCLSRULE handler: translate the TCAM entry at
 * nfc->fs.location back into an ethtool_rx_flow_spec.
 * Returns -EINVAL if the entry is invalid or its class code cannot
 * be mapped back to an ethtool flow type.
 */
static int niu_get_ethtool_tcam_entry(struct niu *np,
				      struct ethtool_rxnfc *nfc)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	u16 idx;
	u64 class;
	int ret = 0;

	/* Map the user-visible location onto this port's TCAM region. */
	idx = tcam_get_index(np, (u16)nfc->fs.location);

	/* NOTE(review): parent->tcam is read here without niu_lock_parent,
	 * unlike niu_get_ethtool_tcam_all() — confirm callers serialize.
	 */
	tp = &parent->tcam[idx];
	if (!tp->valid) {
		netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
			    parent->index, (u16)nfc->fs.location, idx);
		return -EINVAL;
	}

	/* fill the flow spec entry */
	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
		TCAM_V4KEY0_CLASS_CODE_SHIFT;
	ret = niu_class_to_ethflow(class, &fsp->flow_type);

	if (ret < 0) {
		netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
			    parent->index);
		ret = -EINVAL;
		goto out;
	}

	/* AH and ESP share a class code; disambiguate via the stored
	 * IP protocol field of the key.
	 */
	if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) {
		u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >>
			TCAM_V4KEY2_PROTO_SHIFT;
		if (proto == IPPROTO_ESP) {
			if (fsp->flow_type == AH_V4_FLOW)
				fsp->flow_type = ESP_V4_FLOW;
			else
				fsp->flow_type = ESP_V6_FLOW;
		}
	}

	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		niu_get_ip4fs_from_tcam_key(tp, fsp);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		/* Not yet implemented */
		ret = -EINVAL;
		break;
	case IP_USER_FLOW:
		niu_get_ip4fs_from_tcam_key(tp, fsp);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret < 0)
		goto out;

	/* The entry either discards, or steers to the RX ring encoded
	 * in the association data.
	 */
	if (tp->assoc_data & TCAM_ASSOCDATA_DISC)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >>
			TCAM_ASSOCDATA_OFFSET_SHIFT;

	/* put the tcam size here */
	nfc->data = tcam_get_size(np);
out:
	return ret;
}
-
-static int niu_get_ethtool_tcam_all(struct niu *np,
-                                   struct ethtool_rxnfc *nfc,
-                                   u32 *rule_locs)
-{
-       struct niu_parent *parent = np->parent;
-       struct niu_tcam_entry *tp;
-       int i, idx, cnt;
-       unsigned long flags;
-       int ret = 0;
-
-       /* put the tcam size here */
-       nfc->data = tcam_get_size(np);
-
-       niu_lock_parent(np, flags);
-       for (cnt = 0, i = 0; i < nfc->data; i++) {
-               idx = tcam_get_index(np, i);
-               tp = &parent->tcam[idx];
-               if (!tp->valid)
-                       continue;
-               if (cnt == nfc->rule_cnt) {
-                       ret = -EMSGSIZE;
-                       break;
-               }
-               rule_locs[cnt] = i;
-               cnt++;
-       }
-       niu_unlock_parent(np, flags);
-
-       return ret;
-}
-
-static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
-                      void *rule_locs)
-{
-       struct niu *np = netdev_priv(dev);
-       int ret = 0;
-
-       switch (cmd->cmd) {
-       case ETHTOOL_GRXFH:
-               ret = niu_get_hash_opts(np, cmd);
-               break;
-       case ETHTOOL_GRXRINGS:
-               cmd->data = np->num_rx_rings;
-               break;
-       case ETHTOOL_GRXCLSRLCNT:
-               cmd->rule_cnt = tcam_get_valid_entry_cnt(np);
-               break;
-       case ETHTOOL_GRXCLSRULE:
-               ret = niu_get_ethtool_tcam_entry(np, cmd);
-               break;
-       case ETHTOOL_GRXCLSRLALL:
-               ret = niu_get_ethtool_tcam_all(np, cmd, (u32 *)rule_locs);
-               break;
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-static int niu_set_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
-{
-       u64 class;
-       u64 flow_key = 0;
-       unsigned long flags;
-
-       if (!niu_ethflow_to_class(nfc->flow_type, &class))
-               return -EINVAL;
-
-       if (class < CLASS_CODE_USER_PROG1 ||
-           class > CLASS_CODE_SCTP_IPV6)
-               return -EINVAL;
-
-       if (nfc->data & RXH_DISCARD) {
-               niu_lock_parent(np, flags);
-               flow_key = np->parent->tcam_key[class -
-                                              CLASS_CODE_USER_PROG1];
-               flow_key |= TCAM_KEY_DISC;
-               nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
-               np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key;
-               niu_unlock_parent(np, flags);
-               return 0;
-       } else {
-               /* Discard was set before, but is not set now */
-               if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] &
-                   TCAM_KEY_DISC) {
-                       niu_lock_parent(np, flags);
-                       flow_key = np->parent->tcam_key[class -
-                                              CLASS_CODE_USER_PROG1];
-                       flow_key &= ~TCAM_KEY_DISC;
-                       nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1),
-                            flow_key);
-                       np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] =
-                               flow_key;
-                       niu_unlock_parent(np, flags);
-               }
-       }
-
-       if (!niu_ethflow_to_flowkey(nfc->data, &flow_key))
-               return -EINVAL;
-
-       niu_lock_parent(np, flags);
-       nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key);
-       np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key;
-       niu_unlock_parent(np, flags);
-
-       return 0;
-}
-
/* Pack an IPv4 ethtool flow spec into the 4-word TCAM key/mask layout:
 *   key[0]: class code, key[1]: L2 RDC table number,
 *   key[2]: TOS | protocol | ports-or-SPI, key[3]: saddr | daddr.
 * Callers zero *tp first (see niu_add_ethtool_tcam_entry), so the |=
 * updates below start from a clean key.
 */
static void niu_get_tcamkey_from_ip4fs(struct ethtool_rx_flow_spec *fsp,
				       struct niu_tcam_entry *tp,
				       int l2_rdc_tab, u64 class)
{
	u8 pid = 0;
	u32 sip, dip, sipm, dipm, spi, spim;
	u16 sport, dport, spm, dpm;

	sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src);
	sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src);
	dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst);
	dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst);

	tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT;
	tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE;
	tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT;
	tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM;

	/* Source address in the high half, destination in the low half. */
	tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT;
	tp->key[3] |= dip;

	tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT;
	tp->key_mask[3] |= dipm;

	tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos <<
		       TCAM_V4KEY2_TOS_SHIFT);
	tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos <<
			    TCAM_V4KEY2_TOS_SHIFT);
	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		/* Ports occupy the PORT_SPI field: src port high 16 bits,
		 * dst port low 16 bits.
		 */
		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
		spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
		dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);

		tp->key[2] |= (((u64)sport << 16) | dport);
		tp->key_mask[2] |= (((u64)spm << 16) | dpm);
		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		/* AH/ESP carry the 32-bit SPI where TCP/UDP carry ports. */
		spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi);
		spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi);

		tp->key[2] |= spi;
		tp->key_mask[2] |= spim;
		niu_ethflow_to_l3proto(fsp->flow_type, &pid);
		break;
	case IP_USER_FLOW:
		/* User flows match raw L4 bytes plus an explicit protocol. */
		spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes);
		spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes);

		tp->key[2] |= spi;
		tp->key_mask[2] |= spim;
		pid = fsp->h_u.usr_ip4_spec.proto;
		break;
	default:
		break;
	}

	/* Only match on protocol when one was actually specified. */
	tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT);
	if (pid) {
		tp->key_mask[2] |= TCAM_V4KEY2_PROTO;
	}
}
-
/* ETHTOOL_SRXCLSRLINS handler: build and program a TCAM entry from the
 * user's flow spec.  For IP_USER_FLOW, a user-programmable IP class
 * (PROG1..PROG4) is allocated/refcounted per protocol first.
 *
 * Locking: the parent lock is taken for the user-class allocation,
 * dropped, then re-taken for the TCAM write; the out label unlocks, so
 * every goto out below must hold the lock (they do).
 */
static int niu_add_ethtool_tcam_entry(struct niu *np,
				      struct ethtool_rxnfc *nfc)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	struct ethtool_rx_flow_spec *fsp = &nfc->fs;
	struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port];
	int l2_rdc_table = rdc_table->first_table_num;
	u16 idx;
	u64 class;
	unsigned long flags;
	int err, ret;

	ret = 0;

	idx = nfc->fs.location;
	if (idx >= tcam_get_size(np))
		return -EINVAL;

	if (fsp->flow_type == IP_USER_FLOW) {
		int i;
		int add_usr_cls = 0;
		struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
		struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;

		/* Only IPv4 user flows are supported. */
		if (uspec->ip_ver != ETH_RX_NFC_IP4)
			return -EINVAL;

		niu_lock_parent(np, flags);

		/* Reuse an existing user class for this protocol, or
		 * program the first free one.
		 */
		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
			if (parent->l3_cls[i]) {
				if (uspec->proto == parent->l3_cls_pid[i]) {
					class = parent->l3_cls[i];
					parent->l3_cls_refcnt[i]++;
					add_usr_cls = 1;
					break;
				}
			} else {
				/* Program new user IP class */
				switch (i) {
				case 0:
					class = CLASS_CODE_USER_PROG1;
					break;
				case 1:
					class = CLASS_CODE_USER_PROG2;
					break;
				case 2:
					class = CLASS_CODE_USER_PROG3;
					break;
				case 3:
					class = CLASS_CODE_USER_PROG4;
					break;
				default:
					/* unreachable: NIU_L3_PROG_CLS slots
					 * are covered by the cases above */
					break;
				}
				ret = tcam_user_ip_class_set(np, class, 0,
							     uspec->proto,
							     uspec->tos,
							     umask->tos);
				if (ret)
					goto out;

				ret = tcam_user_ip_class_enable(np, class, 1);
				if (ret)
					goto out;
				parent->l3_cls[i] = class;
				parent->l3_cls_pid[i] = uspec->proto;
				parent->l3_cls_refcnt[i]++;
				add_usr_cls = 1;
				break;
			}
		}
		if (!add_usr_cls) {
			netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
				    parent->index, __func__, uspec->proto);
			ret = -EINVAL;
			goto out;
		}
		niu_unlock_parent(np, flags);
	} else {
		if (!niu_ethflow_to_class(fsp->flow_type, &class)) {
			return -EINVAL;
		}
	}

	niu_lock_parent(np, flags);

	idx = tcam_get_index(np, idx);
	tp = &parent->tcam[idx];

	memset(tp, 0, sizeof(*tp));

	/* fill in the tcam key and mask */
	switch (fsp->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
		/* Not yet implemented */
		netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
			    parent->index, __func__, fsp->flow_type);
		ret = -EINVAL;
		goto out;
	case IP_USER_FLOW:
		niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
		break;
	default:
		netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
			    parent->index, __func__, fsp->flow_type);
		ret = -EINVAL;
		goto out;
	}

	/* fill in the assoc data */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		tp->assoc_data = TCAM_ASSOCDATA_DISC;
	} else {
		if (fsp->ring_cookie >= np->num_rx_rings) {
			netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
				    parent->index, __func__,
				    (long long)fsp->ring_cookie);
			ret = -EINVAL;
			goto out;
		}
		tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET |
				  (fsp->ring_cookie <<
				   TCAM_ASSOCDATA_OFFSET_SHIFT));
	}

	/* Push key/mask and association data to the hardware. */
	err = tcam_write(np, idx, tp->key, tp->key_mask);
	if (err) {
		ret = -EINVAL;
		goto out;
	}
	err = tcam_assoc_write(np, idx, tp->assoc_data);
	if (err) {
		ret = -EINVAL;
		goto out;
	}

	/* validate the entry */
	tp->valid = 1;
	np->clas.tcam_valid_entries++;
out:
	niu_unlock_parent(np, flags);

	return ret;
}
-
/* ETHTOOL_SRXCLSRLDEL handler: flush the TCAM entry at location loc.
 * If the entry used a user-programmable IP class, drop its refcount
 * and disable the class once unused.
 */
static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
{
	struct niu_parent *parent = np->parent;
	struct niu_tcam_entry *tp;
	u16 idx;
	unsigned long flags;
	u64 class;
	int ret = 0;

	if (loc >= tcam_get_size(np))
		return -EINVAL;

	niu_lock_parent(np, flags);

	idx = tcam_get_index(np, loc);
	tp = &parent->tcam[idx];

	/* if the entry is of a user defined class, then update*/
	class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
		TCAM_V4KEY0_CLASS_CODE_SHIFT;

	if (class >= CLASS_CODE_USER_PROG1 && class <= CLASS_CODE_USER_PROG4) {
		int i;
		for (i = 0; i < NIU_L3_PROG_CLS; i++) {
			if (parent->l3_cls[i] == class) {
				parent->l3_cls_refcnt[i]--;
				if (!parent->l3_cls_refcnt[i]) {
					/* disable class */
					ret = tcam_user_ip_class_enable(np,
									class,
									0);
					if (ret)
						goto out;
					parent->l3_cls[i] = 0;
					parent->l3_cls_pid[i] = 0;
				}
				break;
			}
		}
		/* Loop ran off the end: the class was never registered. */
		if (i == NIU_L3_PROG_CLS) {
			netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
				    parent->index, __func__,
				    (unsigned long long)class);
			ret = -EINVAL;
			goto out;
		}
	}

	ret = tcam_flush(np, idx);
	if (ret)
		goto out;

	/* invalidate the entry */
	tp->valid = 0;
	np->clas.tcam_valid_entries--;
out:
	niu_unlock_parent(np, flags);

	return ret;
}
-
-static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
-       struct niu *np = netdev_priv(dev);
-       int ret = 0;
-
-       switch (cmd->cmd) {
-       case ETHTOOL_SRXFH:
-               ret = niu_set_hash_opts(np, cmd);
-               break;
-       case ETHTOOL_SRXCLSRLINS:
-               ret = niu_add_ethtool_tcam_entry(np, cmd);
-               break;
-       case ETHTOOL_SRXCLSRLDEL:
-               ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location);
-               break;
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
/* ethtool stat names for ports driven by the 10G XMAC.  The order must
 * match struct niu_xmac_stats, which niu_get_ethtool_stats() memcpy()s
 * directly into the stats buffer.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_xmac_stat_keys[] = {
	{ "tx_frames" },
	{ "tx_bytes" },
	{ "tx_fifo_errors" },
	{ "tx_overflow_errors" },
	{ "tx_max_pkt_size_errors" },
	{ "tx_underflow_errors" },
	{ "rx_local_faults" },
	{ "rx_remote_faults" },
	{ "rx_link_faults" },
	{ "rx_align_errors" },
	{ "rx_frags" },
	{ "rx_mcasts" },
	{ "rx_bcasts" },
	{ "rx_hist_cnt1" },
	{ "rx_hist_cnt2" },
	{ "rx_hist_cnt3" },
	{ "rx_hist_cnt4" },
	{ "rx_hist_cnt5" },
	{ "rx_hist_cnt6" },
	{ "rx_hist_cnt7" },
	{ "rx_octets" },
	{ "rx_code_violations" },
	{ "rx_len_errors" },
	{ "rx_crc_errors" },
	{ "rx_underflows" },
	{ "rx_overflows" },
	{ "pause_off_state" },
	{ "pause_on_state" },
	{ "pause_received" },
};

#define NUM_XMAC_STAT_KEYS	ARRAY_SIZE(niu_xmac_stat_keys)
-
/* ethtool stat names for ports driven by the 1G BMAC.  Order must match
 * struct niu_bmac_stats (memcpy()d in niu_get_ethtool_stats()).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_bmac_stat_keys[] = {
	{ "tx_underflow_errors" },
	{ "tx_max_pkt_size_errors" },
	{ "tx_bytes" },
	{ "tx_frames" },
	{ "rx_overflows" },
	{ "rx_frames" },
	{ "rx_align_errors" },
	{ "rx_crc_errors" },
	{ "rx_len_errors" },
	{ "pause_off_state" },
	{ "pause_on_state" },
	{ "pause_received" },
};

#define NUM_BMAC_STAT_KEYS	ARRAY_SIZE(niu_bmac_stat_keys)
-
/* Per-RX-channel ethtool stat names; order must match the five values
 * written per ring in niu_get_ethtool_stats().
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_rxchan_stat_keys[] = {
	{ "rx_channel" },
	{ "rx_packets" },
	{ "rx_bytes" },
	{ "rx_dropped" },
	{ "rx_errors" },
};

#define NUM_RXCHAN_STAT_KEYS	ARRAY_SIZE(niu_rxchan_stat_keys)
-
/* Per-TX-channel ethtool stat names; order must match the four values
 * written per ring in niu_get_ethtool_stats().
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} niu_txchan_stat_keys[] = {
	{ "tx_channel" },
	{ "tx_packets" },
	{ "tx_bytes" },
	{ "tx_errors" },
};

#define NUM_TXCHAN_STAT_KEYS	ARRAY_SIZE(niu_txchan_stat_keys)
-
-static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
-       struct niu *np = netdev_priv(dev);
-       int i;
-
-       if (stringset != ETH_SS_STATS)
-               return;
-
-       if (np->flags & NIU_FLAGS_XMAC) {
-               memcpy(data, niu_xmac_stat_keys,
-                      sizeof(niu_xmac_stat_keys));
-               data += sizeof(niu_xmac_stat_keys);
-       } else {
-               memcpy(data, niu_bmac_stat_keys,
-                      sizeof(niu_bmac_stat_keys));
-               data += sizeof(niu_bmac_stat_keys);
-       }
-       for (i = 0; i < np->num_rx_rings; i++) {
-               memcpy(data, niu_rxchan_stat_keys,
-                      sizeof(niu_rxchan_stat_keys));
-               data += sizeof(niu_rxchan_stat_keys);
-       }
-       for (i = 0; i < np->num_tx_rings; i++) {
-               memcpy(data, niu_txchan_stat_keys,
-                      sizeof(niu_txchan_stat_keys));
-               data += sizeof(niu_txchan_stat_keys);
-       }
-}
-
-static int niu_get_sset_count(struct net_device *dev, int stringset)
-{
-       struct niu *np = netdev_priv(dev);
-
-       if (stringset != ETH_SS_STATS)
-               return -EINVAL;
-
-       return (np->flags & NIU_FLAGS_XMAC ?
-                NUM_XMAC_STAT_KEYS :
-                NUM_BMAC_STAT_KEYS) +
-               (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
-               (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
-}
-
-static void niu_get_ethtool_stats(struct net_device *dev,
-                                 struct ethtool_stats *stats, u64 *data)
-{
-       struct niu *np = netdev_priv(dev);
-       int i;
-
-       niu_sync_mac_stats(np);
-       if (np->flags & NIU_FLAGS_XMAC) {
-               memcpy(data, &np->mac_stats.xmac,
-                      sizeof(struct niu_xmac_stats));
-               data += (sizeof(struct niu_xmac_stats) / sizeof(u64));
-       } else {
-               memcpy(data, &np->mac_stats.bmac,
-                      sizeof(struct niu_bmac_stats));
-               data += (sizeof(struct niu_bmac_stats) / sizeof(u64));
-       }
-       for (i = 0; i < np->num_rx_rings; i++) {
-               struct rx_ring_info *rp = &np->rx_rings[i];
-
-               niu_sync_rx_discard_stats(np, rp, 0);
-
-               data[0] = rp->rx_channel;
-               data[1] = rp->rx_packets;
-               data[2] = rp->rx_bytes;
-               data[3] = rp->rx_dropped;
-               data[4] = rp->rx_errors;
-               data += 5;
-       }
-       for (i = 0; i < np->num_tx_rings; i++) {
-               struct tx_ring_info *rp = &np->tx_rings[i];
-
-               data[0] = rp->tx_channel;
-               data[1] = rp->tx_packets;
-               data[2] = rp->tx_bytes;
-               data[3] = rp->tx_errors;
-               data += 4;
-       }
-}
-
-static u64 niu_led_state_save(struct niu *np)
-{
-       if (np->flags & NIU_FLAGS_XMAC)
-               return nr64_mac(XMAC_CONFIG);
-       else
-               return nr64_mac(BMAC_XIF_CONFIG);
-}
-
-static void niu_led_state_restore(struct niu *np, u64 val)
-{
-       if (np->flags & NIU_FLAGS_XMAC)
-               nw64_mac(XMAC_CONFIG, val);
-       else
-               nw64_mac(BMAC_XIF_CONFIG, val);
-}
-
-static void niu_force_led(struct niu *np, int on)
-{
-       u64 val, reg, bit;
-
-       if (np->flags & NIU_FLAGS_XMAC) {
-               reg = XMAC_CONFIG;
-               bit = XMAC_CONFIG_FORCE_LED_ON;
-       } else {
-               reg = BMAC_XIF_CONFIG;
-               bit = BMAC_XIF_CONFIG_LINK_LED;
-       }
-
-       val = nr64_mac(reg);
-       if (on)
-               val |= bit;
-       else
-               val &= ~bit;
-       nw64_mac(reg, val);
-}
-
-static int niu_set_phys_id(struct net_device *dev,
-                          enum ethtool_phys_id_state state)
-
-{
-       struct niu *np = netdev_priv(dev);
-
-       if (!netif_running(dev))
-               return -EAGAIN;
-
-       switch (state) {
-       case ETHTOOL_ID_ACTIVE:
-               np->orig_led_state = niu_led_state_save(np);
-               return 1;       /* cycle on/off once per second */
-
-       case ETHTOOL_ID_ON:
-               niu_force_led(np, 1);
-               break;
-
-       case ETHTOOL_ID_OFF:
-               niu_force_led(np, 0);
-               break;
-
-       case ETHTOOL_ID_INACTIVE:
-               niu_led_state_restore(np, np->orig_led_state);
-       }
-
-       return 0;
-}
-
/* ethtool operations table wired into the net_device at probe time. */
static const struct ethtool_ops niu_ethtool_ops = {
	.get_drvinfo		= niu_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= niu_get_msglevel,
	.set_msglevel		= niu_set_msglevel,
	.nway_reset		= niu_nway_reset,
	.get_eeprom_len		= niu_get_eeprom_len,
	.get_eeprom		= niu_get_eeprom,
	.get_settings		= niu_get_settings,
	.set_settings		= niu_set_settings,
	.get_strings		= niu_get_strings,
	.get_sset_count		= niu_get_sset_count,
	.get_ethtool_stats	= niu_get_ethtool_stats,
	.set_phys_id		= niu_set_phys_id,
	.get_rxnfc		= niu_get_nfc,
	.set_rxnfc		= niu_set_nfc,
};
-
-static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
-                             int ldg, int ldn)
-{
-       if (ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX)
-               return -EINVAL;
-       if (ldn < 0 || ldn > LDN_MAX)
-               return -EINVAL;
-
-       parent->ldg_map[ldn] = ldg;
-
-       if (np->parent->plat_type == PLAT_TYPE_NIU) {
-               /* On N2 NIU, the ldn-->ldg assignments are setup and fixed by
-                * the firmware, and we're not supposed to change them.
-                * Validate the mapping, because if it's wrong we probably
-                * won't get any interrupts and that's painful to debug.
-                */
-               if (nr64(LDG_NUM(ldn)) != ldg) {
-                       dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
-                               np->port, ldn, ldg,
-                               (unsigned long long) nr64(LDG_NUM(ldn)));
-                       return -EINVAL;
-               }
-       } else
-               nw64(LDG_NUM(ldn), ldg);
-
-       return 0;
-}
-
-static int niu_set_ldg_timer_res(struct niu *np, int res)
-{
-       if (res < 0 || res > LDG_TIMER_RES_VAL)
-               return -EINVAL;
-
-
-       nw64(LDG_TIMER_RES, res);
-
-       return 0;
-}
-
-static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
-{
-       if ((ldg < NIU_LDG_MIN || ldg > NIU_LDG_MAX) ||
-           (func < 0 || func > 3) ||
-           (vector < 0 || vector > 0x1f))
-               return -EINVAL;
-
-       nw64(SID(ldg), (func << SID_FUNC_SHIFT) | vector);
-
-       return 0;
-}
-
-static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
-{
-       u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
-                                (addr << ESPC_PIO_STAT_ADDR_SHIFT));
-       int limit;
-
-       if (addr > (ESPC_PIO_STAT_ADDR >> ESPC_PIO_STAT_ADDR_SHIFT))
-               return -EINVAL;
-
-       frame = frame_base;
-       nw64(ESPC_PIO_STAT, frame);
-       limit = 64;
-       do {
-               udelay(5);
-               frame = nr64(ESPC_PIO_STAT);
-               if (frame & ESPC_PIO_STAT_READ_END)
-                       break;
-       } while (limit--);
-       if (!(frame & ESPC_PIO_STAT_READ_END)) {
-               dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
-                       (unsigned long long) frame);
-               return -ENODEV;
-       }
-
-       frame = frame_base;
-       nw64(ESPC_PIO_STAT, frame);
-       limit = 64;
-       do {
-               udelay(5);
-               frame = nr64(ESPC_PIO_STAT);
-               if (frame & ESPC_PIO_STAT_READ_END)
-                       break;
-       } while (limit--);
-       if (!(frame & ESPC_PIO_STAT_READ_END)) {
-               dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
-                       (unsigned long long) frame);
-               return -ENODEV;
-       }
-
-       frame = nr64(ESPC_PIO_STAT);
-       return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
-}
-
-static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
-{
-       int err = niu_pci_eeprom_read(np, off);
-       u16 val;
-
-       if (err < 0)
-               return err;
-       val = (err << 8);
-       err = niu_pci_eeprom_read(np, off + 1);
-       if (err < 0)
-               return err;
-       val |= (err & 0xff);
-
-       return val;
-}
-
-static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
-{
-       int err = niu_pci_eeprom_read(np, off);
-       u16 val;
-
-       if (err < 0)
-               return err;
-
-       val = (err & 0xff);
-       err = niu_pci_eeprom_read(np, off + 1);
-       if (err < 0)
-               return err;
-
-       val |= (err & 0xff) << 8;
-
-       return val;
-}
-
-static int __devinit niu_pci_vpd_get_propname(struct niu *np,
-                                             u32 off,
-                                             char *namebuf,
-                                             int namebuf_len)
-{
-       int i;
-
-       for (i = 0; i < namebuf_len; i++) {
-               int err = niu_pci_eeprom_read(np, off + i);
-               if (err < 0)
-                       return err;
-               *namebuf++ = err;
-               if (!err)
-                       break;
-       }
-       if (i >= namebuf_len)
-               return -EINVAL;
-
-       return i + 1;
-}
-
-static void __devinit niu_vpd_parse_version(struct niu *np)
-{
-       struct niu_vpd *vpd = &np->vpd;
-       int len = strlen(vpd->version) + 1;
-       const char *s = vpd->version;
-       int i;
-
-       for (i = 0; i < len - 5; i++) {
-               if (!strncmp(s + i, "FCode ", 6))
-                       break;
-       }
-       if (i >= len - 5)
-               return;
-
-       s += i + 5;
-       sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
-
-       netif_printk(np, probe, KERN_DEBUG, np->dev,
-                    "VPD_SCAN: FCODE major(%d) minor(%d)\n",
-                    vpd->fcode_major, vpd->fcode_minor);
-       if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
-           (vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
-            vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
-               np->flags |= NIU_FLAGS_VPD_VALID;
-}
-
-/* ESPC_PIO_EN_ENABLE must be set */
-static int __devinit niu_pci_vpd_scan_props(struct niu *np,
-                                           u32 start, u32 end)
-{
-       unsigned int found_mask = 0;
-#define FOUND_MASK_MODEL       0x00000001
-#define FOUND_MASK_BMODEL      0x00000002
-#define FOUND_MASK_VERS                0x00000004
-#define FOUND_MASK_MAC         0x00000008
-#define FOUND_MASK_NMAC                0x00000010
-#define FOUND_MASK_PHY         0x00000020
-#define FOUND_MASK_ALL         0x0000003f
-
-       netif_printk(np, probe, KERN_DEBUG, np->dev,
-                    "VPD_SCAN: start[%x] end[%x]\n", start, end);
-       while (start < end) {
-               int len, err, prop_len;
-               char namebuf[64];
-               u8 *prop_buf;
-               int max_len;
-
-               if (found_mask == FOUND_MASK_ALL) {
-                       niu_vpd_parse_version(np);
-                       return 1;
-               }
-
-               err = niu_pci_eeprom_read(np, start + 2);
-               if (err < 0)
-                       return err;
-               len = err;
-               start += 3;
-
-               prop_len = niu_pci_eeprom_read(np, start + 4);
-               err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
-               if (err < 0)
-                       return err;
-
-               prop_buf = NULL;
-               max_len = 0;
-               if (!strcmp(namebuf, "model")) {
-                       prop_buf = np->vpd.model;
-                       max_len = NIU_VPD_MODEL_MAX;
-                       found_mask |= FOUND_MASK_MODEL;
-               } else if (!strcmp(namebuf, "board-model")) {
-                       prop_buf = np->vpd.board_model;
-                       max_len = NIU_VPD_BD_MODEL_MAX;
-                       found_mask |= FOUND_MASK_BMODEL;
-               } else if (!strcmp(namebuf, "version")) {
-                       prop_buf = np->vpd.version;
-                       max_len = NIU_VPD_VERSION_MAX;
-                       found_mask |= FOUND_MASK_VERS;
-               } else if (!strcmp(namebuf, "local-mac-address")) {
-                       prop_buf = np->vpd.local_mac;
-                       max_len = ETH_ALEN;
-                       found_mask |= FOUND_MASK_MAC;
-               } else if (!strcmp(namebuf, "num-mac-addresses")) {
-                       prop_buf = &np->vpd.mac_num;
-                       max_len = 1;
-                       found_mask |= FOUND_MASK_NMAC;
-               } else if (!strcmp(namebuf, "phy-type")) {
-                       prop_buf = np->vpd.phy_type;
-                       max_len = NIU_VPD_PHY_TYPE_MAX;
-                       found_mask |= FOUND_MASK_PHY;
-               }
-
-               if (max_len && prop_len > max_len) {
-                       dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
-                       return -EINVAL;
-               }
-
-               if (prop_buf) {
-                       u32 off = start + 5 + err;
-                       int i;
-
-                       netif_printk(np, probe, KERN_DEBUG, np->dev,
-                                    "VPD_SCAN: Reading in property [%s] len[%d]\n",
-                                    namebuf, prop_len);
-                       for (i = 0; i < prop_len; i++)
-                               *prop_buf++ = niu_pci_eeprom_read(np, off + i);
-               }
-
-               start += len;
-       }
-
-       return 0;
-}
-
/* Walk the VPD image starting at @start in the EEPROM and scan each
 * VPD resource block (tag 0x90) for the properties the driver needs.
 * Stops silently on any read error, on an unexpected tag, or once
 * niu_pci_vpd_scan_props() reports completion (1) or failure (< 0).
 *
 * ESPC_PIO_EN_ENABLE must be set by the caller.
 */
static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
{
	u32 offset;
	int err;

	/* Byte-swapped 16-bit length at start+1; the data that follows
	 * the 3-byte header is where the resource blocks begin.
	 */
	err = niu_pci_eeprom_read16_swp(np, start + 1);
	if (err < 0)
		return;

	offset = err + 3;

	while (start + offset < ESPC_EEPROM_SIZE) {
		u32 here = start + offset;
		u32 end;

		/* Each VPD block must begin with the 0x90 resource tag
		 * (a negative read error also fails this test).
		 */
		err = niu_pci_eeprom_read(np, here);
		if (err != 0x90)
			return;

		/* Block payload length, byte-swapped. */
		err = niu_pci_eeprom_read16_swp(np, here + 1);
		if (err < 0)
			return;

		here = start + offset + 3;
		end = start + offset + err;

		offset += err;

		err = niu_pci_vpd_scan_props(np, here, end);
		if (err < 0 || err == 1)
			return;
	}
}
-
-/* ESPC_PIO_EN_ENABLE must be set */
-static u32 __devinit niu_pci_vpd_offset(struct niu *np)
-{
-       u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
-       int err;
-
-       while (start < end) {
-               ret = start;
-
-               /* ROM header signature?  */
-               err = niu_pci_eeprom_read16(np, start +  0);
-               if (err != 0x55aa)
-                       return 0;
-
-               /* Apply offset to PCI data structure.  */
-               err = niu_pci_eeprom_read16(np, start + 23);
-               if (err < 0)
-                       return 0;
-               start += err;
-
-               /* Check for "PCIR" signature.  */
-               err = niu_pci_eeprom_read16(np, start +  0);
-               if (err != 0x5043)
-                       return 0;
-               err = niu_pci_eeprom_read16(np, start +  2);
-               if (err != 0x4952)
-                       return 0;
-
-               /* Check for OBP image type.  */
-               err = niu_pci_eeprom_read(np, start + 20);
-               if (err < 0)
-                       return 0;
-               if (err != 0x01) {
-                       err = niu_pci_eeprom_read(np, ret + 2);
-                       if (err < 0)
-                               return 0;
-
-                       start = ret + (err * 512);
-                       continue;
-               }
-
-               err = niu_pci_eeprom_read16_swp(np, start + 8);
-               if (err < 0)
-                       return err;
-               ret += err;
-
-               err = niu_pci_eeprom_read(np, ret + 0);
-               if (err != 0x82)
-                       return 0;
-
-               return ret;
-       }
-
-       return 0;
-}
-
-static int __devinit niu_phy_type_prop_decode(struct niu *np,
-                                             const char *phy_prop)
-{
-       if (!strcmp(phy_prop, "mif")) {
-               /* 1G copper, MII */
-               np->flags &= ~(NIU_FLAGS_FIBER |
-                              NIU_FLAGS_10G);
-               np->mac_xcvr = MAC_XCVR_MII;
-       } else if (!strcmp(phy_prop, "xgf")) {
-               /* 10G fiber, XPCS */
-               np->flags |= (NIU_FLAGS_10G |
-                             NIU_FLAGS_FIBER);
-               np->mac_xcvr = MAC_XCVR_XPCS;
-       } else if (!strcmp(phy_prop, "pcs")) {
-               /* 1G fiber, PCS */
-               np->flags &= ~NIU_FLAGS_10G;
-               np->flags |= NIU_FLAGS_FIBER;
-               np->mac_xcvr = MAC_XCVR_PCS;
-       } else if (!strcmp(phy_prop, "xgc")) {
-               /* 10G copper, XPCS */
-               np->flags |= NIU_FLAGS_10G;
-               np->flags &= ~NIU_FLAGS_FIBER;
-               np->mac_xcvr = MAC_XCVR_XPCS;
-       } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
-               /* 10G Serdes or 1G Serdes, default to 10G */
-               np->flags |= NIU_FLAGS_10G;
-               np->flags &= ~NIU_FLAGS_FIBER;
-               np->flags |= NIU_FLAGS_XCVR_SERDES;
-               np->mac_xcvr = MAC_XCVR_XPCS;
-       } else {
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int niu_pci_vpd_get_nports(struct niu *np)
-{
-       int ports = 0;
-
-       if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) ||
-           (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) ||
-           (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) ||
-           (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) ||
-           (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) {
-               ports = 4;
-       } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) ||
-                  (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) ||
-                  (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) ||
-                  (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) {
-               ports = 2;
-       }
-
-       return ports;
-}
-
/* Sanity-check the VPD contents gathered by the EEPROM scan and apply
 * them: derive flags/mac_xcvr from the model or phy-type string and
 * program the device MAC address from the VPD MAC plus the port index.
 * On any inconsistency, NIU_FLAGS_VPD_VALID is cleared so the caller
 * falls back to the SPROM.
 */
static void __devinit niu_pci_vpd_validate(struct niu *np)
{
	struct net_device *dev = np->dev;
	struct niu_vpd *vpd = &np->vpd;
	u8 val8;

	if (!is_valid_ether_addr(&vpd->local_mac[0])) {
		dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");

		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	/* Alonso/Kimi boards use on-board serdes: ports 0/1 are 10G
	 * copper (XPCS), ports 2/3 are 1G fiber (PCS).
	 */
	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->flags |= NIU_FLAGS_XCVR_SERDES;
		np->mac_xcvr = MAC_XCVR_PCS;
		if (np->port > 1) {
			np->flags |= NIU_FLAGS_FIBER;
			np->flags &= ~NIU_FLAGS_10G;
		}
		if (np->flags & NIU_FLAGS_10G)
			np->mac_xcvr = MAC_XCVR_XPCS;
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		/* Foxxy: 10G fiber with hot-pluggable PHY. */
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY);
	} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		dev_err(np->device, "Illegal phy string [%s]\n",
			np->vpd.phy_type);
		dev_err(np->device, "Falling back to SPROM\n");
		np->flags &= ~NIU_FLAGS_VPD_VALID;
		return;
	}

	memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);

	/* Each port gets base MAC + port number; propagate a carry from
	 * the last address byte into the previous one.
	 */
	val8 = dev->perm_addr[5];
	dev->perm_addr[5] += np->port;
	if (dev->perm_addr[5] < val8)
		dev->perm_addr[4]++;

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
}
-
/* Probe the SPROM for device configuration when the VPD was missing or
 * invalid.  Verifies the image checksum, decodes the per-port PHY type
 * into flags/mac_xcvr, programs the MAC address (base + port number),
 * and extracts the model and board-model strings.  Returns 0 on
 * success or -EINVAL on any validation failure.
 */
static int __devinit niu_pci_probe_sprom(struct niu *np)
{
	struct net_device *dev = np->dev;
	int len, i;
	u64 val, sum;
	u8 val8;

	/* Image size register gives the length in bytes; the NCR array
	 * is read as 32-bit words below.
	 */
	val = (nr64(ESPC_VER_IMGSZ) & ESPC_VER_IMGSZ_IMGSZ);
	val >>= ESPC_VER_IMGSZ_IMGSZ_SHIFT;
	len = val / 4;

	np->eeprom_len = len;

	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Image size %llu\n", (unsigned long long)val);

	/* Byte-wise sum of the whole image must equal 0xab (mod 256). */
	sum = 0;
	for (i = 0; i < len; i++) {
		val = nr64(ESPC_NCR(i));
		sum += (val >>  0) & 0xff;
		sum += (val >>  8) & 0xff;
		sum += (val >> 16) & 0xff;
		sum += (val >> 24) & 0xff;
	}
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: Checksum %x\n", (int)(sum & 0xff));
	if ((sum & 0xff) != 0xab) {
		dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
		return -EINVAL;
	}

	/* Extract this port's PHY type field. */
	val = nr64(ESPC_PHY_TYPE);
	switch (np->port) {
	case 0:
		val8 = (val & ESPC_PHY_TYPE_PORT0) >>
			ESPC_PHY_TYPE_PORT0_SHIFT;
		break;
	case 1:
		val8 = (val & ESPC_PHY_TYPE_PORT1) >>
			ESPC_PHY_TYPE_PORT1_SHIFT;
		break;
	case 2:
		val8 = (val & ESPC_PHY_TYPE_PORT2) >>
			ESPC_PHY_TYPE_PORT2_SHIFT;
		break;
	case 3:
		val8 = (val & ESPC_PHY_TYPE_PORT3) >>
			ESPC_PHY_TYPE_PORT3_SHIFT;
		break;
	default:
		dev_err(np->device, "Bogus port number %u\n",
			np->port);
		return -EINVAL;
	}
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: PHY type %x\n", val8);

	/* Decode PHY type into link flags and MAC transceiver mode. */
	switch (val8) {
	case ESPC_PHY_TYPE_1G_COPPER:
		/* 1G copper, MII */
		np->flags &= ~(NIU_FLAGS_FIBER |
			       NIU_FLAGS_10G);
		np->mac_xcvr = MAC_XCVR_MII;
		break;

	case ESPC_PHY_TYPE_1G_FIBER:
		/* 1G fiber, PCS */
		np->flags &= ~NIU_FLAGS_10G;
		np->flags |= NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_PCS;
		break;

	case ESPC_PHY_TYPE_10G_COPPER:
		/* 10G copper, XPCS */
		np->flags |= NIU_FLAGS_10G;
		np->flags &= ~NIU_FLAGS_FIBER;
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	case ESPC_PHY_TYPE_10G_FIBER:
		/* 10G fiber, XPCS */
		np->flags |= (NIU_FLAGS_10G |
			      NIU_FLAGS_FIBER);
		np->mac_xcvr = MAC_XCVR_XPCS;
		break;

	default:
		dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
		return -EINVAL;
	}

	/* MAC address is split across two registers, low bytes first. */
	val = nr64(ESPC_MAC_ADDR0);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
	dev->perm_addr[0] = (val >>  0) & 0xff;
	dev->perm_addr[1] = (val >>  8) & 0xff;
	dev->perm_addr[2] = (val >> 16) & 0xff;
	dev->perm_addr[3] = (val >> 24) & 0xff;

	val = nr64(ESPC_MAC_ADDR1);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
	dev->perm_addr[4] = (val >>  0) & 0xff;
	dev->perm_addr[5] = (val >>  8) & 0xff;

	if (!is_valid_ether_addr(&dev->perm_addr[0])) {
		dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
			dev->perm_addr);
		return -EINVAL;
	}

	/* Each port gets base MAC + port number; propagate a carry from
	 * the last address byte into the previous one.
	 */
	val8 = dev->perm_addr[5];
	dev->perm_addr[5] += np->port;
	if (dev->perm_addr[5] < val8)
		dev->perm_addr[4]++;

	memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);

	/* Model string: stored big-endian within each 32-bit NCR word,
	 * hence the reversed byte order when unpacking.
	 */
	val = nr64(ESPC_MOD_STR_LEN);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
	if (val >= 8 * 4)
		return -EINVAL;

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(5 + (i / 4)));

		np->vpd.model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.model[val] = '\0';

	/* Board-model string, same packing as the model string. */
	val = nr64(ESPC_BD_MOD_STR_LEN);
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
	if (val >= 4 * 4)
		return -EINVAL;

	for (i = 0; i < val; i += 4) {
		u64 tmp = nr64(ESPC_NCR(14 + (i / 4)));

		np->vpd.board_model[i + 3] = (tmp >>  0) & 0xff;
		np->vpd.board_model[i + 2] = (tmp >>  8) & 0xff;
		np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff;
		np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff;
	}
	np->vpd.board_model[val] = '\0';

	np->vpd.mac_num =
		nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
	netif_printk(np, probe, KERN_DEBUG, np->dev,
		     "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);

	return 0;
}
-
-static int __devinit niu_get_and_validate_port(struct niu *np)
-{
-       struct niu_parent *parent = np->parent;
-
-       if (np->port <= 1)
-               np->flags |= NIU_FLAGS_XMAC;
-
-       if (!parent->num_ports) {
-               if (parent->plat_type == PLAT_TYPE_NIU) {
-                       parent->num_ports = 2;
-               } else {
-                       parent->num_ports = niu_pci_vpd_get_nports(np);
-                       if (!parent->num_ports) {
-                               /* Fall back to SPROM as last resort.
-                                * This will fail on most cards.
-                                */
-                               parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) &
-                                       ESPC_NUM_PORTS_MACS_VAL;
-
-                               /* All of the current probing methods fail on
-                                * Maramba on-board parts.
-                                */
-                               if (!parent->num_ports)
-                                       parent->num_ports = 4;
-                       }
-               }
-       }
-
-       if (np->port >= parent->num_ports)
-               return -ENODEV;
-
-       return 0;
-}
-
-static int __devinit phy_record(struct niu_parent *parent,
-                               struct phy_probe_info *p,
-                               int dev_id_1, int dev_id_2, u8 phy_port,
-                               int type)
-{
-       u32 id = (dev_id_1 << 16) | dev_id_2;
-       u8 idx;
-
-       if (dev_id_1 < 0 || dev_id_2 < 0)
-               return 0;
-       if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
-               if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
-                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
-                   ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
-                       return 0;
-       } else {
-               if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
-                       return 0;
-       }
-
-       pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
-               parent->index, id,
-               type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
-               type == PHY_TYPE_PCS ? "PCS" : "MII",
-               phy_port);
-
-       if (p->cur[type] >= NIU_MAX_PORTS) {
-               pr_err("Too many PHY ports\n");
-               return -EINVAL;
-       }
-       idx = p->cur[type];
-       p->phy_id[type][idx] = id;
-       p->phy_port[type][idx] = phy_port;
-       p->cur[type] = idx + 1;
-       return 0;
-}
-
-static int __devinit port_has_10g(struct phy_probe_info *p, int port)
-{
-       int i;
-
-       for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) {
-               if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port)
-                       return 1;
-       }
-       for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) {
-               if (p->phy_port[PHY_TYPE_PCS][i] == port)
-                       return 1;
-       }
-
-       return 0;
-}
-
-static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
-{
-       int port, cnt;
-
-       cnt = 0;
-       *lowest = 32;
-       for (port = 8; port < 32; port++) {
-               if (port_has_10g(p, port)) {
-                       if (!cnt)
-                               *lowest = port;
-                       cnt++;
-               }
-       }
-
-       return cnt;
-}
-
-static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
-{
-       *lowest = 32;
-       if (p->cur[PHY_TYPE_MII])
-               *lowest = p->phy_port[PHY_TYPE_MII][0];
-
-       return p->cur[PHY_TYPE_MII];
-}
-
-static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
-{
-       int num_ports = parent->num_ports;
-       int i;
-
-       for (i = 0; i < num_ports; i++) {
-               parent->rxchan_per_port[i] = (16 / num_ports);
-               parent->txchan_per_port[i] = (16 / num_ports);
-
-               pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
-                       parent->index, i,
-                       parent->rxchan_per_port[i],
-                       parent->txchan_per_port[i]);
-       }
-}
-
/* Partition the chip's NIU_NUM_RXCHAN RX and NIU_NUM_TXCHAN TX DMA
 * channels across the ports, giving 10G ports a larger share when the
 * configuration mixes 10G and 1G ports.  Results are written into
 * parent->rxchan_per_port[] and parent->txchan_per_port[].
 */
static void __devinit niu_divide_channels(struct niu_parent *parent,
					  int num_10g, int num_1g)
{
	int num_ports = parent->num_ports;
	int rx_chans_per_10g, rx_chans_per_1g;
	int tx_chans_per_10g, tx_chans_per_1g;
	int i, tot_rx, tot_tx;

	if (!num_10g || !num_1g) {
		/* Homogeneous configuration: split evenly. */
		rx_chans_per_10g = rx_chans_per_1g =
			(NIU_NUM_RXCHAN / num_ports);
		tx_chans_per_10g = tx_chans_per_1g =
			(NIU_NUM_TXCHAN / num_ports);
	} else {
		/* Mixed configuration: each 1G port gets a fixed small
		 * share; the 10G ports divide whatever is left over.
		 */
		rx_chans_per_1g = NIU_NUM_RXCHAN / 8;
		rx_chans_per_10g = (NIU_NUM_RXCHAN -
				    (rx_chans_per_1g * num_1g)) /
			num_10g;

		tx_chans_per_1g = NIU_NUM_TXCHAN / 6;
		tx_chans_per_10g = (NIU_NUM_TXCHAN -
				    (tx_chans_per_1g * num_1g)) /
			num_10g;
	}

	/* Assign per-port shares according to each port's decoded type
	 * and track the totals for the sanity checks below.
	 */
	tot_rx = tot_tx = 0;
	for (i = 0; i < num_ports; i++) {
		int type = phy_decode(parent->port_phy, i);

		if (type == PORT_TYPE_10G) {
			parent->rxchan_per_port[i] = rx_chans_per_10g;
			parent->txchan_per_port[i] = tx_chans_per_10g;
		} else {
			parent->rxchan_per_port[i] = rx_chans_per_1g;
			parent->txchan_per_port[i] = tx_chans_per_1g;
		}
		pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
		tot_rx += parent->rxchan_per_port[i];
		tot_tx += parent->txchan_per_port[i];
	}

	/* Over-committing channels is an error: degrade to one channel
	 * per port rather than program an invalid configuration.
	 */
	if (tot_rx > NIU_NUM_RXCHAN) {
		pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
		       parent->index, tot_rx);
		for (i = 0; i < num_ports; i++)
			parent->rxchan_per_port[i] = 1;
	}
	if (tot_tx > NIU_NUM_TXCHAN) {
		pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
		       parent->index, tot_tx);
		for (i = 0; i < num_ports; i++)
			parent->txchan_per_port[i] = 1;
	}
	/* Under-committing merely wastes channels; warn but keep going. */
	if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
		pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
			   parent->index, tot_rx, tot_tx);
	}
}
-
/* Distribute the NIU_NUM_RDC_TABLES receive-DMA-channel (RDC) tables
 * across the ports and fill every table slot with this port's RX
 * channels in round-robin order.  Also sets each port's default RDC
 * channel to the first channel of its range.
 */
static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
					    int num_10g, int num_1g)
{
	int i, num_ports = parent->num_ports;
	int rdc_group, rdc_groups_per_port;
	int rdc_channel_base;

	rdc_group = 0;
	rdc_groups_per_port = NIU_NUM_RDC_TABLES / num_ports;

	rdc_channel_base = 0;

	for (i = 0; i < num_ports; i++) {
		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
		int grp, num_channels = parent->rxchan_per_port[i];
		int this_channel_offset;

		tp->first_table_num = rdc_group;
		tp->num_tables = rdc_groups_per_port;
		this_channel_offset = 0;
		for (grp = 0; grp < tp->num_tables; grp++) {
			struct rdc_table *rt = &tp->tables[grp];
			int slot;

			pr_info("niu%d: Port %d RDC tbl(%d) [ ",
				parent->index, i, tp->first_table_num + grp);
			/* Fill all slots, cycling through this port's
			 * channels; the offset carries over from one
			 * table to the next.
			 */
			for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
				rt->rxdma_channel[slot] =
					rdc_channel_base + this_channel_offset;

				pr_cont("%d ", rt->rxdma_channel[slot]);

				if (++this_channel_offset == num_channels)
					this_channel_offset = 0;
			}
			pr_cont("]\n");
		}

		/* Default RDC for the port: first channel of its range. */
		parent->rdc_default[i] = rdc_channel_base;

		rdc_channel_base += num_channels;
		rdc_group += rdc_groups_per_port;
	}
}
-
/* Probe MDIO ports 8-31 for PHY devices and record what is found in
 * @info.  For each port, the PMA/PMD and PCS device ID registers are
 * read via MDIO and the MII ID registers via MII; phy_record() filters
 * out IDs the driver does not support.  Runs under the parent lock so
 * the MDIO bus is not shared with concurrent accessors.  Returns 0 on
 * success or the first error from phy_record().
 */
static int __devinit fill_phy_probe_info(struct niu *np,
					 struct niu_parent *parent,
					 struct phy_probe_info *info)
{
	unsigned long flags;
	int port, err;

	memset(info, 0, sizeof(*info));

	/* Port 0 to 7 are reserved for onboard Serdes, probe the rest.  */
	niu_lock_parent(np, flags);
	err = 0;
	for (port = 8; port < 32; port++) {
		int dev_id_1, dev_id_2;

		/* 10G PMA/PMD function. */
		dev_id_1 = mdio_read(np, port,
				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID1);
		dev_id_2 = mdio_read(np, port,
				     NIU_PMA_PMD_DEV_ADDR, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PMA_PMD);
		if (err)
			break;
		/* 10G PCS function. */
		dev_id_1 = mdio_read(np, port,
				     NIU_PCS_DEV_ADDR, MII_PHYSID1);
		dev_id_2 = mdio_read(np, port,
				     NIU_PCS_DEV_ADDR, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_PCS);
		if (err)
			break;
		/* 1G copper MII PHY. */
		dev_id_1 = mii_read(np, port, MII_PHYSID1);
		dev_id_2 = mii_read(np, port, MII_PHYSID2);
		err = phy_record(parent, info, dev_id_1, dev_id_2, port,
				 PHY_TYPE_MII);
		if (err)
			break;
	}
	niu_unlock_parent(np, flags);

	return err;
}
-
/* Build the parent's port-to-PHY-type map (parent->port_phy).  Known
 * board models and Monza/serdes configurations are mapped directly;
 * otherwise the MDIO bus is probed and the (num_10g, num_1g) count
 * pair selects a layout.  Afterwards, the DMA channels and RDC tables
 * are divided across the ports.  Returns 0 on success or a negative
 * errno for probe failures and unsupported configurations.
 */
static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
{
	struct phy_probe_info *info = &parent->phy_probe_info;
	int lowest_10g, lowest_1g;
	int num_10g, num_1g;
	u32 val;
	int err;

	num_10g = num_1g = 0;

	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		/* Alonso/Kimi (ATCA CP3220): four 1G ports. */
		num_10g = 0;
		num_1g = 2;
		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
		parent->num_ports = 4;
		val = (phy_encode(PORT_TYPE_1G, 0) |
		       phy_encode(PORT_TYPE_1G, 1) |
		       phy_encode(PORT_TYPE_1G, 2) |
		       phy_encode(PORT_TYPE_1G, 3));
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		/* Foxxy: two 10G ports. */
		num_10g = 2;
		num_1g = 0;
		parent->num_ports = 2;
		val = (phy_encode(PORT_TYPE_10G, 0) |
		       phy_encode(PORT_TYPE_10G, 1));
	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
		   (parent->plat_type == PLAT_TYPE_NIU)) {
		/* this is the Monza case */
		if (np->flags & NIU_FLAGS_10G) {
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1));
		} else {
			val = (phy_encode(PORT_TYPE_1G, 0) |
			       phy_encode(PORT_TYPE_1G, 1));
		}
	} else {
		/* Unknown board: probe the MDIO bus and choose a layout
		 * from the resulting (10G count, 1G count) pair.
		 */
		err = fill_phy_probe_info(np, parent, info);
		if (err)
			return err;

		num_10g = count_10g_ports(info, &lowest_10g);
		num_1g = count_1g_ports(info, &lowest_1g);

		switch ((num_10g << 4) | num_1g) {
		case 0x24:
			/* 2x10G + 4x1G: identify the platform from the
			 * lowest 1G port, then share the 0x22 layout.
			 */
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			/* fallthru */
		case 0x22:
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1) |
			       phy_encode(PORT_TYPE_1G, 2) |
			       phy_encode(PORT_TYPE_1G, 3));
			break;

		case 0x20:
			/* 2x10G only. */
			val = (phy_encode(PORT_TYPE_10G, 0) |
			       phy_encode(PORT_TYPE_10G, 1));
			break;

		case 0x10:
			/* 1x10G: assign it to this device's port. */
			val = phy_encode(PORT_TYPE_10G, np->port);
			break;

		case 0x14:
			/* 1x10G + 4x1G: identify platform, then share
			 * the 0x13 layout below.
			 */
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			/* fallthru */
		case 0x13:
			/* The 10G port is either port 0 or port 1,
			 * depending on where it was found.
			 */
			if ((lowest_10g & 0x7) == 0)
				val = (phy_encode(PORT_TYPE_10G, 0) |
				       phy_encode(PORT_TYPE_1G, 1) |
				       phy_encode(PORT_TYPE_1G, 2) |
				       phy_encode(PORT_TYPE_1G, 3));
			else
				val = (phy_encode(PORT_TYPE_1G, 0) |
				       phy_encode(PORT_TYPE_10G, 1) |
				       phy_encode(PORT_TYPE_1G, 2) |
				       phy_encode(PORT_TYPE_1G, 3));
			break;

		case 0x04:
			/* 4x1G only. */
			if (lowest_1g == 10)
				parent->plat_type = PLAT_TYPE_VF_P0;
			else if (lowest_1g == 26)
				parent->plat_type = PLAT_TYPE_VF_P1;
			else
				goto unknown_vg_1g_port;

			val = (phy_encode(PORT_TYPE_1G, 0) |
			       phy_encode(PORT_TYPE_1G, 1) |
			       phy_encode(PORT_TYPE_1G, 2) |
			       phy_encode(PORT_TYPE_1G, 3));
			break;

		default:
			pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
			       num_10g, num_1g);
			return -EINVAL;
		}
	}

	parent->port_phy = val;

	if (parent->plat_type == PLAT_TYPE_NIU)
		niu_n2_divide_channels(parent);
	else
		niu_divide_channels(parent, num_10g, num_1g);

	niu_divide_rdc_groups(parent, num_10g, num_1g);

	return 0;

unknown_vg_1g_port:
	pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
	return -EINVAL;
}
-
/* On the first port probed (port_phy still PORT_PHY_UNKNOWN), walk the
 * PHYs to build the parent's port map, then set the LDG timer
 * resolution and disable all LDN interrupts as a known-quiet baseline.
 * Returns 0 on success, -EINVAL if the port map is marked invalid, or
 * the error from walk_phys().
 */
static int __devinit niu_probe_ports(struct niu *np)
{
	struct niu_parent *parent = np->parent;
	int err, i;

	if (parent->port_phy == PORT_PHY_UNKNOWN) {
		err = walk_phys(np, parent);
		if (err)
			return err;

		/* Quiesce interrupts until the port is brought up. */
		niu_set_ldg_timer_res(np, 2);
		for (i = 0; i <= LDN_MAX; i++)
			niu_ldn_irq_enable(np, i, 0);
	}

	if (parent->port_phy == PORT_PHY_INVALID)
		return -EINVAL;

	return 0;
}
-
-static int __devinit niu_classifier_swstate_init(struct niu *np)
-{
-       struct niu_classifier *cp = &np->clas;
-
-       cp->tcam_top = (u16) np->port;
-       cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
-       cp->h1_init = 0xffffffff;
-       cp->h2_init = 0xffff;
-
-       return fflp_early_init(np);
-}
-
/* Set the initial software link configuration: advertise every speed
 * the hardware supports, enable autonegotiation, and disable loopback.
 */
static void __devinit niu_link_config_init(struct niu *np)
{
	struct niu_link_config *lp = &np->link_config;

	lp->advertising = (ADVERTISED_10baseT_Half |
			   ADVERTISED_10baseT_Full |
			   ADVERTISED_100baseT_Half |
			   ADVERTISED_100baseT_Full |
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full |
			   ADVERTISED_10000baseT_Full |
			   ADVERTISED_Autoneg);
	lp->speed = lp->active_speed = SPEED_INVALID;
	lp->duplex = DUPLEX_FULL;
	lp->active_duplex = DUPLEX_INVALID;
	lp->autoneg = 1;
	/* The disabled branch below forces MAC loopback at 10G full
	 * duplex -- debugging aid only, never enabled in production.
	 */
#if 0
	lp->loopback_mode = LOOPBACK_MAC;
	lp->active_speed = SPEED_10000;
	lp->active_duplex = DUPLEX_FULL;
#else
	lp->loopback_mode = LOOPBACK_DISABLED;
#endif
}
-
-static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
-{
-       switch (np->port) {
-       case 0:
-               np->mac_regs = np->regs + XMAC_PORT0_OFF;
-               np->ipp_off  = 0x00000;
-               np->pcs_off  = 0x04000;
-               np->xpcs_off = 0x02000;
-               break;
-
-       case 1:
-               np->mac_regs = np->regs + XMAC_PORT1_OFF;
-               np->ipp_off  = 0x08000;
-               np->pcs_off  = 0x0a000;
-               np->xpcs_off = 0x08000;
-               break;
-
-       case 2:
-               np->mac_regs = np->regs + BMAC_PORT2_OFF;
-               np->ipp_off  = 0x04000;
-               np->pcs_off  = 0x0e000;
-               np->xpcs_off = ~0UL;
-               break;
-
-       case 3:
-               np->mac_regs = np->regs + BMAC_PORT3_OFF;
-               np->ipp_off  = 0x0c000;
-               np->pcs_off  = 0x12000;
-               np->xpcs_off = ~0UL;
-               break;
-
-       default:
-               dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
-{
-       struct msix_entry msi_vec[NIU_NUM_LDG];
-       struct niu_parent *parent = np->parent;
-       struct pci_dev *pdev = np->pdev;
-       int i, num_irqs, err;
-       u8 first_ldg;
-
-       first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
-       for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
-               ldg_num_map[i] = first_ldg + i;
-
-       num_irqs = (parent->rxchan_per_port[np->port] +
-                   parent->txchan_per_port[np->port] +
-                   (np->port == 0 ? 3 : 1));
-       BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
-
-retry:
-       for (i = 0; i < num_irqs; i++) {
-               msi_vec[i].vector = 0;
-               msi_vec[i].entry = i;
-       }
-
-       err = pci_enable_msix(pdev, msi_vec, num_irqs);
-       if (err < 0) {
-               np->flags &= ~NIU_FLAGS_MSIX;
-               return;
-       }
-       if (err > 0) {
-               num_irqs = err;
-               goto retry;
-       }
-
-       np->flags |= NIU_FLAGS_MSIX;
-       for (i = 0; i < num_irqs; i++)
-               np->ldg[i].irq = msi_vec[i].vector;
-       np->num_ldg = num_irqs;
-}
-
-static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
-{
-#ifdef CONFIG_SPARC64
-       struct platform_device *op = np->op;
-       const u32 *int_prop;
-       int i;
-
-       int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
-       if (!int_prop)
-               return -ENODEV;
-
-       for (i = 0; i < op->archdata.num_irqs; i++) {
-               ldg_num_map[i] = int_prop[i];
-               np->ldg[i].irq = op->archdata.irqs[i];
-       }
-
-       np->num_ldg = op->archdata.num_irqs;
-
-       return 0;
-#else
-       return -EINVAL;
-#endif
-}
-
-static int __devinit niu_ldg_init(struct niu *np)
-{
-       struct niu_parent *parent = np->parent;
-       u8 ldg_num_map[NIU_NUM_LDG];
-       int first_chan, num_chan;
-       int i, err, ldg_rotor;
-       u8 port;
-
-       np->num_ldg = 1;
-       np->ldg[0].irq = np->dev->irq;
-       if (parent->plat_type == PLAT_TYPE_NIU) {
-               err = niu_n2_irq_init(np, ldg_num_map);
-               if (err)
-                       return err;
-       } else
-               niu_try_msix(np, ldg_num_map);
-
-       port = np->port;
-       for (i = 0; i < np->num_ldg; i++) {
-               struct niu_ldg *lp = &np->ldg[i];
-
-               netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
-
-               lp->np = np;
-               lp->ldg_num = ldg_num_map[i];
-               lp->timer = 2; /* XXX */
-
-               /* On N2 NIU the firmware has setup the SID mappings so they go
-                * to the correct values that will route the LDG to the proper
-                * interrupt in the NCU interrupt table.
-                */
-               if (np->parent->plat_type != PLAT_TYPE_NIU) {
-                       err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
-                       if (err)
-                               return err;
-               }
-       }
-
-       /* We adopt the LDG assignment ordering used by the N2 NIU
-        * 'interrupt' properties because that simplifies a lot of
-        * things.  This ordering is:
-        *
-        *      MAC
-        *      MIF     (if port zero)
-        *      SYSERR  (if port zero)
-        *      RX channels
-        *      TX channels
-        */
-
-       ldg_rotor = 0;
-
-       err = niu_ldg_assign_ldn(np, parent, ldg_num_map[ldg_rotor],
-                                 LDN_MAC(port));
-       if (err)
-               return err;
-
-       ldg_rotor++;
-       if (ldg_rotor == np->num_ldg)
-               ldg_rotor = 0;
-
-       if (port == 0) {
-               err = niu_ldg_assign_ldn(np, parent,
-                                        ldg_num_map[ldg_rotor],
-                                        LDN_MIF);
-               if (err)
-                       return err;
-
-               ldg_rotor++;
-               if (ldg_rotor == np->num_ldg)
-                       ldg_rotor = 0;
-
-               err = niu_ldg_assign_ldn(np, parent,
-                                        ldg_num_map[ldg_rotor],
-                                        LDN_DEVICE_ERROR);
-               if (err)
-                       return err;
-
-               ldg_rotor++;
-               if (ldg_rotor == np->num_ldg)
-                       ldg_rotor = 0;
-
-       }
-
-       first_chan = 0;
-       for (i = 0; i < port; i++)
-               first_chan += parent->rxchan_per_port[i];
-       num_chan = parent->rxchan_per_port[port];
-
-       for (i = first_chan; i < (first_chan + num_chan); i++) {
-               err = niu_ldg_assign_ldn(np, parent,
-                                        ldg_num_map[ldg_rotor],
-                                        LDN_RXDMA(i));
-               if (err)
-                       return err;
-               ldg_rotor++;
-               if (ldg_rotor == np->num_ldg)
-                       ldg_rotor = 0;
-       }
-
-       first_chan = 0;
-       for (i = 0; i < port; i++)
-               first_chan += parent->txchan_per_port[i];
-       num_chan = parent->txchan_per_port[port];
-       for (i = first_chan; i < (first_chan + num_chan); i++) {
-               err = niu_ldg_assign_ldn(np, parent,
-                                        ldg_num_map[ldg_rotor],
-                                        LDN_TXDMA(i));
-               if (err)
-                       return err;
-               ldg_rotor++;
-               if (ldg_rotor == np->num_ldg)
-                       ldg_rotor = 0;
-       }
-
-       return 0;
-}
-
-static void __devexit niu_ldg_free(struct niu *np)
-{
-       if (np->flags & NIU_FLAGS_MSIX)
-               pci_disable_msix(np->pdev);
-}
-
-static int __devinit niu_get_of_props(struct niu *np)
-{
-#ifdef CONFIG_SPARC64
-       struct net_device *dev = np->dev;
-       struct device_node *dp;
-       const char *phy_type;
-       const u8 *mac_addr;
-       const char *model;
-       int prop_len;
-
-       if (np->parent->plat_type == PLAT_TYPE_NIU)
-               dp = np->op->dev.of_node;
-       else
-               dp = pci_device_to_OF_node(np->pdev);
-
-       phy_type = of_get_property(dp, "phy-type", &prop_len);
-       if (!phy_type) {
-               netdev_err(dev, "%s: OF node lacks phy-type property\n",
-                          dp->full_name);
-               return -EINVAL;
-       }
-
-       if (!strcmp(phy_type, "none"))
-               return -ENODEV;
-
-       strcpy(np->vpd.phy_type, phy_type);
-
-       if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
-               netdev_err(dev, "%s: Illegal phy string [%s]\n",
-                          dp->full_name, np->vpd.phy_type);
-               return -EINVAL;
-       }
-
-       mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
-       if (!mac_addr) {
-               netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
-                          dp->full_name);
-               return -EINVAL;
-       }
-       if (prop_len != dev->addr_len) {
-               netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
-                          dp->full_name, prop_len);
-       }
-       memcpy(dev->perm_addr, mac_addr, dev->addr_len);
-       if (!is_valid_ether_addr(&dev->perm_addr[0])) {
-               netdev_err(dev, "%s: OF MAC address is invalid\n",
-                          dp->full_name);
-               netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
-               return -EINVAL;
-       }
-
-       memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
-
-       model = of_get_property(dp, "model", &prop_len);
-
-       if (model)
-               strcpy(np->vpd.model, model);
-
-       if (of_find_property(dp, "hot-swappable-phy", &prop_len)) {
-               np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
-                       NIU_FLAGS_HOTPLUG_PHY);
-       }
-
-       return 0;
-#else
-       return -EINVAL;
-#endif
-}
-
-static int __devinit niu_get_invariants(struct niu *np)
-{
-       int err, have_props;
-       u32 offset;
-
-       err = niu_get_of_props(np);
-       if (err == -ENODEV)
-               return err;
-
-       have_props = !err;
-
-       err = niu_init_mac_ipp_pcs_base(np);
-       if (err)
-               return err;
-
-       if (have_props) {
-               err = niu_get_and_validate_port(np);
-               if (err)
-                       return err;
-
-       } else  {
-               if (np->parent->plat_type == PLAT_TYPE_NIU)
-                       return -EINVAL;
-
-               nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
-               offset = niu_pci_vpd_offset(np);
-               netif_printk(np, probe, KERN_DEBUG, np->dev,
-                            "%s() VPD offset [%08x]\n", __func__, offset);
-               if (offset)
-                       niu_pci_vpd_fetch(np, offset);
-               nw64(ESPC_PIO_EN, 0);
-
-               if (np->flags & NIU_FLAGS_VPD_VALID) {
-                       niu_pci_vpd_validate(np);
-                       err = niu_get_and_validate_port(np);
-                       if (err)
-                               return err;
-               }
-
-               if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
-                       err = niu_get_and_validate_port(np);
-                       if (err)
-                               return err;
-                       err = niu_pci_probe_sprom(np);
-                       if (err)
-                               return err;
-               }
-       }
-
-       err = niu_probe_ports(np);
-       if (err)
-               return err;
-
-       niu_ldg_init(np);
-
-       niu_classifier_swstate_init(np);
-       niu_link_config_init(np);
-
-       err = niu_determine_phy_disposition(np);
-       if (!err)
-               err = niu_init_link(np);
-
-       return err;
-}
-
-static LIST_HEAD(niu_parent_list);
-static DEFINE_MUTEX(niu_parent_lock);
-static int niu_parent_index;
-
-static ssize_t show_port_phy(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-       struct platform_device *plat_dev = to_platform_device(dev);
-       struct niu_parent *p = plat_dev->dev.platform_data;
-       u32 port_phy = p->port_phy;
-       char *orig_buf = buf;
-       int i;
-
-       if (port_phy == PORT_PHY_UNKNOWN ||
-           port_phy == PORT_PHY_INVALID)
-               return 0;
-
-       for (i = 0; i < p->num_ports; i++) {
-               const char *type_str;
-               int type;
-
-               type = phy_decode(port_phy, i);
-               if (type == PORT_TYPE_10G)
-                       type_str = "10G";
-               else
-                       type_str = "1G";
-               buf += sprintf(buf,
-                              (i == 0) ? "%s" : " %s",
-                              type_str);
-       }
-       buf += sprintf(buf, "\n");
-       return buf - orig_buf;
-}
-
-static ssize_t show_plat_type(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-       struct platform_device *plat_dev = to_platform_device(dev);
-       struct niu_parent *p = plat_dev->dev.platform_data;
-       const char *type_str;
-
-       switch (p->plat_type) {
-       case PLAT_TYPE_ATLAS:
-               type_str = "atlas";
-               break;
-       case PLAT_TYPE_NIU:
-               type_str = "niu";
-               break;
-       case PLAT_TYPE_VF_P0:
-               type_str = "vf_p0";
-               break;
-       case PLAT_TYPE_VF_P1:
-               type_str = "vf_p1";
-               break;
-       default:
-               type_str = "unknown";
-               break;
-       }
-
-       return sprintf(buf, "%s\n", type_str);
-}
-
-static ssize_t __show_chan_per_port(struct device *dev,
-                                   struct device_attribute *attr, char *buf,
-                                   int rx)
-{
-       struct platform_device *plat_dev = to_platform_device(dev);
-       struct niu_parent *p = plat_dev->dev.platform_data;
-       char *orig_buf = buf;
-       u8 *arr;
-       int i;
-
-       arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
-
-       for (i = 0; i < p->num_ports; i++) {
-               buf += sprintf(buf,
-                              (i == 0) ? "%d" : " %d",
-                              arr[i]);
-       }
-       buf += sprintf(buf, "\n");
-
-       return buf - orig_buf;
-}
-
-static ssize_t show_rxchan_per_port(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-       return __show_chan_per_port(dev, attr, buf, 1);
-}
-
-static ssize_t show_txchan_per_port(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-       return __show_chan_per_port(dev, attr, buf, 1);
-}
-
-static ssize_t show_num_ports(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-       struct platform_device *plat_dev = to_platform_device(dev);
-       struct niu_parent *p = plat_dev->dev.platform_data;
-
-       return sprintf(buf, "%d\n", p->num_ports);
-}
-
-static struct device_attribute niu_parent_attributes[] = {
-       __ATTR(port_phy, S_IRUGO, show_port_phy, NULL),
-       __ATTR(plat_type, S_IRUGO, show_plat_type, NULL),
-       __ATTR(rxchan_per_port, S_IRUGO, show_rxchan_per_port, NULL),
-       __ATTR(txchan_per_port, S_IRUGO, show_txchan_per_port, NULL),
-       __ATTR(num_ports, S_IRUGO, show_num_ports, NULL),
-       {}
-};
-
-static struct niu_parent * __devinit niu_new_parent(struct niu *np,
-                                                   union niu_parent_id *id,
-                                                   u8 ptype)
-{
-       struct platform_device *plat_dev;
-       struct niu_parent *p;
-       int i;
-
-       plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
-                                                  NULL, 0);
-       if (IS_ERR(plat_dev))
-               return NULL;
-
-       for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
-               int err = device_create_file(&plat_dev->dev,
-                                            &niu_parent_attributes[i]);
-               if (err)
-                       goto fail_unregister;
-       }
-
-       p = kzalloc(sizeof(*p), GFP_KERNEL);
-       if (!p)
-               goto fail_unregister;
-
-       p->index = niu_parent_index++;
-
-       plat_dev->dev.platform_data = p;
-       p->plat_dev = plat_dev;
-
-       memcpy(&p->id, id, sizeof(*id));
-       p->plat_type = ptype;
-       INIT_LIST_HEAD(&p->list);
-       atomic_set(&p->refcnt, 0);
-       list_add(&p->list, &niu_parent_list);
-       spin_lock_init(&p->lock);
-
-       p->rxdma_clock_divider = 7500;
-
-       p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
-       if (p->plat_type == PLAT_TYPE_NIU)
-               p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
-
-       for (i = CLASS_CODE_USER_PROG1; i <= CLASS_CODE_SCTP_IPV6; i++) {
-               int index = i - CLASS_CODE_USER_PROG1;
-
-               p->tcam_key[index] = TCAM_KEY_TSEL;
-               p->flow_key[index] = (FLOW_KEY_IPSA |
-                                     FLOW_KEY_IPDA |
-                                     FLOW_KEY_PROTO |
-                                     (FLOW_KEY_L4_BYTE12 <<
-                                      FLOW_KEY_L4_0_SHIFT) |
-                                     (FLOW_KEY_L4_BYTE12 <<
-                                      FLOW_KEY_L4_1_SHIFT));
-       }
-
-       for (i = 0; i < LDN_MAX + 1; i++)
-               p->ldg_map[i] = LDG_INVALID;
-
-       return p;
-
-fail_unregister:
-       platform_device_unregister(plat_dev);
-       return NULL;
-}
-
-static struct niu_parent * __devinit niu_get_parent(struct niu *np,
-                                                   union niu_parent_id *id,
-                                                   u8 ptype)
-{
-       struct niu_parent *p, *tmp;
-       int port = np->port;
-
-       mutex_lock(&niu_parent_lock);
-       p = NULL;
-       list_for_each_entry(tmp, &niu_parent_list, list) {
-               if (!memcmp(id, &tmp->id, sizeof(*id))) {
-                       p = tmp;
-                       break;
-               }
-       }
-       if (!p)
-               p = niu_new_parent(np, id, ptype);
-
-       if (p) {
-               char port_name[6];
-               int err;
-
-               sprintf(port_name, "port%d", port);
-               err = sysfs_create_link(&p->plat_dev->dev.kobj,
-                                       &np->device->kobj,
-                                       port_name);
-               if (!err) {
-                       p->ports[port] = np;
-                       atomic_inc(&p->refcnt);
-               }
-       }
-       mutex_unlock(&niu_parent_lock);
-
-       return p;
-}
-
-static void niu_put_parent(struct niu *np)
-{
-       struct niu_parent *p = np->parent;
-       u8 port = np->port;
-       char port_name[6];
-
-       BUG_ON(!p || p->ports[port] != np);
-
-       netif_printk(np, probe, KERN_DEBUG, np->dev,
-                    "%s() port[%u]\n", __func__, port);
-
-       sprintf(port_name, "port%d", port);
-
-       mutex_lock(&niu_parent_lock);
-
-       sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);
-
-       p->ports[port] = NULL;
-       np->parent = NULL;
-
-       if (atomic_dec_and_test(&p->refcnt)) {
-               list_del(&p->list);
-               platform_device_unregister(p->plat_dev);
-       }
-
-       mutex_unlock(&niu_parent_lock);
-}
-
-static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
-                                   u64 *handle, gfp_t flag)
-{
-       dma_addr_t dh;
-       void *ret;
-
-       ret = dma_alloc_coherent(dev, size, &dh, flag);
-       if (ret)
-               *handle = dh;
-       return ret;
-}
-
-static void niu_pci_free_coherent(struct device *dev, size_t size,
-                                 void *cpu_addr, u64 handle)
-{
-       dma_free_coherent(dev, size, cpu_addr, handle);
-}
-
-static u64 niu_pci_map_page(struct device *dev, struct page *page,
-                           unsigned long offset, size_t size,
-                           enum dma_data_direction direction)
-{
-       return dma_map_page(dev, page, offset, size, direction);
-}
-
-static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
-                              size_t size, enum dma_data_direction direction)
-{
-       dma_unmap_page(dev, dma_address, size, direction);
-}
-
-static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
-                             size_t size,
-                             enum dma_data_direction direction)
-{
-       return dma_map_single(dev, cpu_addr, size, direction);
-}
-
-static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
-                                size_t size,
-                                enum dma_data_direction direction)
-{
-       dma_unmap_single(dev, dma_address, size, direction);
-}
-
-static const struct niu_ops niu_pci_ops = {
-       .alloc_coherent = niu_pci_alloc_coherent,
-       .free_coherent  = niu_pci_free_coherent,
-       .map_page       = niu_pci_map_page,
-       .unmap_page     = niu_pci_unmap_page,
-       .map_single     = niu_pci_map_single,
-       .unmap_single   = niu_pci_unmap_single,
-};
-
-static void __devinit niu_driver_version(void)
-{
-       static int niu_version_printed;
-
-       if (niu_version_printed++ == 0)
-               pr_info("%s", version);
-}
-
-static struct net_device * __devinit niu_alloc_and_init(
-       struct device *gen_dev, struct pci_dev *pdev,
-       struct platform_device *op, const struct niu_ops *ops,
-       u8 port)
-{
-       struct net_device *dev;
-       struct niu *np;
-
-       dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
-       if (!dev) {
-               dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
-               return NULL;
-       }
-
-       SET_NETDEV_DEV(dev, gen_dev);
-
-       np = netdev_priv(dev);
-       np->dev = dev;
-       np->pdev = pdev;
-       np->op = op;
-       np->device = gen_dev;
-       np->ops = ops;
-
-       np->msg_enable = niu_debug;
-
-       spin_lock_init(&np->lock);
-       INIT_WORK(&np->reset_task, niu_reset_task);
-
-       np->port = port;
-
-       return dev;
-}
-
-static const struct net_device_ops niu_netdev_ops = {
-       .ndo_open               = niu_open,
-       .ndo_stop               = niu_close,
-       .ndo_start_xmit         = niu_start_xmit,
-       .ndo_get_stats64        = niu_get_stats,
-       .ndo_set_multicast_list = niu_set_rx_mode,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = niu_set_mac_addr,
-       .ndo_do_ioctl           = niu_ioctl,
-       .ndo_tx_timeout         = niu_tx_timeout,
-       .ndo_change_mtu         = niu_change_mtu,
-};
-
-static void __devinit niu_assign_netdev_ops(struct net_device *dev)
-{
-       dev->netdev_ops = &niu_netdev_ops;
-       dev->ethtool_ops = &niu_ethtool_ops;
-       dev->watchdog_timeo = NIU_TX_TIMEOUT;
-}
-
-static void __devinit niu_device_announce(struct niu *np)
-{
-       struct net_device *dev = np->dev;
-
-       pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
-
-       if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
-               pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
-                               dev->name,
-                               (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
-                               (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
-                               (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
-                               (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
-                                (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
-                               np->vpd.phy_type);
-       } else {
-               pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
-                               dev->name,
-                               (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
-                               (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
-                               (np->flags & NIU_FLAGS_FIBER ? "FIBER" :
-                                (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
-                                 "COPPER")),
-                               (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
-                                (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
-                               np->vpd.phy_type);
-       }
-}
-
-static void __devinit niu_set_basic_features(struct net_device *dev)
-{
-       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
-       dev->features |= dev->hw_features | NETIF_F_RXCSUM;
-}
-
-static int __devinit niu_pci_init_one(struct pci_dev *pdev,
-                                     const struct pci_device_id *ent)
-{
-       union niu_parent_id parent_id;
-       struct net_device *dev;
-       struct niu *np;
-       int err, pos;
-       u64 dma_mask;
-       u16 val16;
-
-       niu_driver_version();
-
-       err = pci_enable_device(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
-               return err;
-       }
-
-       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
-           !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
-               dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
-               err = -ENODEV;
-               goto err_out_disable_pdev;
-       }
-
-       err = pci_request_regions(pdev, DRV_MODULE_NAME);
-       if (err) {
-               dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
-               goto err_out_disable_pdev;
-       }
-
-       pos = pci_pcie_cap(pdev);
-       if (pos <= 0) {
-               dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
-               goto err_out_free_res;
-       }
-
-       dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
-                                &niu_pci_ops, PCI_FUNC(pdev->devfn));
-       if (!dev) {
-               err = -ENOMEM;
-               goto err_out_free_res;
-       }
-       np = netdev_priv(dev);
-
-       memset(&parent_id, 0, sizeof(parent_id));
-       parent_id.pci.domain = pci_domain_nr(pdev->bus);
-       parent_id.pci.bus = pdev->bus->number;
-       parent_id.pci.device = PCI_SLOT(pdev->devfn);
-
-       np->parent = niu_get_parent(np, &parent_id,
-                                   PLAT_TYPE_ATLAS);
-       if (!np->parent) {
-               err = -ENOMEM;
-               goto err_out_free_dev;
-       }
-
-       pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
-       val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
-       val16 |= (PCI_EXP_DEVCTL_CERE |
-                 PCI_EXP_DEVCTL_NFERE |
-                 PCI_EXP_DEVCTL_FERE |
-                 PCI_EXP_DEVCTL_URRE |
-                 PCI_EXP_DEVCTL_RELAX_EN);
-       pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
-
-       dma_mask = DMA_BIT_MASK(44);
-       err = pci_set_dma_mask(pdev, dma_mask);
-       if (!err) {
-               dev->features |= NETIF_F_HIGHDMA;
-               err = pci_set_consistent_dma_mask(pdev, dma_mask);
-               if (err) {
-                       dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
-                       goto err_out_release_parent;
-               }
-       }
-       if (err || dma_mask == DMA_BIT_MASK(32)) {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (err) {
-                       dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
-                       goto err_out_release_parent;
-               }
-       }
-
-       niu_set_basic_features(dev);
-
-       np->regs = pci_ioremap_bar(pdev, 0);
-       if (!np->regs) {
-               dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
-               err = -ENOMEM;
-               goto err_out_release_parent;
-       }
-
-       pci_set_master(pdev);
-       pci_save_state(pdev);
-
-       dev->irq = pdev->irq;
-
-       niu_assign_netdev_ops(dev);
-
-       err = niu_get_invariants(np);
-       if (err) {
-               if (err != -ENODEV)
-                       dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
-               goto err_out_iounmap;
-       }
-
-       err = register_netdev(dev);
-       if (err) {
-               dev_err(&pdev->dev, "Cannot register net device, aborting\n");
-               goto err_out_iounmap;
-       }
-
-       pci_set_drvdata(pdev, dev);
-
-       niu_device_announce(np);
-
-       return 0;
-
-err_out_iounmap:
-       if (np->regs) {
-               iounmap(np->regs);
-               np->regs = NULL;
-       }
-
-err_out_release_parent:
-       niu_put_parent(np);
-
-err_out_free_dev:
-       free_netdev(dev);
-
-err_out_free_res:
-       pci_release_regions(pdev);
-
-err_out_disable_pdev:
-       pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
-
-       return err;
-}
-
-static void __devexit niu_pci_remove_one(struct pci_dev *pdev)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-
-       if (dev) {
-               struct niu *np = netdev_priv(dev);
-
-               unregister_netdev(dev);
-               if (np->regs) {
-                       iounmap(np->regs);
-                       np->regs = NULL;
-               }
-
-               niu_ldg_free(np);
-
-               niu_put_parent(np);
-
-               free_netdev(dev);
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
-       }
-}
-
-static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct niu *np = netdev_priv(dev);
-       unsigned long flags;
-
-       if (!netif_running(dev))
-               return 0;
-
-       flush_work_sync(&np->reset_task);
-       niu_netif_stop(np);
-
-       del_timer_sync(&np->timer);
-
-       spin_lock_irqsave(&np->lock, flags);
-       niu_enable_interrupts(np, 0);
-       spin_unlock_irqrestore(&np->lock, flags);
-
-       netif_device_detach(dev);
-
-       spin_lock_irqsave(&np->lock, flags);
-       niu_stop_hw(np);
-       spin_unlock_irqrestore(&np->lock, flags);
-
-       pci_save_state(pdev);
-
-       return 0;
-}
-
-static int niu_resume(struct pci_dev *pdev)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct niu *np = netdev_priv(dev);
-       unsigned long flags;
-       int err;
-
-       if (!netif_running(dev))
-               return 0;
-
-       pci_restore_state(pdev);
-
-       netif_device_attach(dev);
-
-       spin_lock_irqsave(&np->lock, flags);
-
-       err = niu_init_hw(np);
-       if (!err) {
-               np->timer.expires = jiffies + HZ;
-               add_timer(&np->timer);
-               niu_netif_start(np);
-       }
-
-       spin_unlock_irqrestore(&np->lock, flags);
-
-       return err;
-}
-
-static struct pci_driver niu_pci_driver = {
-       .name           = DRV_MODULE_NAME,
-       .id_table       = niu_pci_tbl,
-       .probe          = niu_pci_init_one,
-       .remove         = __devexit_p(niu_pci_remove_one),
-       .suspend        = niu_suspend,
-       .resume         = niu_resume,
-};
-
-#ifdef CONFIG_SPARC64
-static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
-                                    u64 *dma_addr, gfp_t flag)
-{
-       unsigned long order = get_order(size);
-       unsigned long page = __get_free_pages(flag, order);
-
-       if (page == 0UL)
-               return NULL;
-       memset((char *)page, 0, PAGE_SIZE << order);
-       *dma_addr = __pa(page);
-
-       return (void *) page;
-}
-
-static void niu_phys_free_coherent(struct device *dev, size_t size,
-                                  void *cpu_addr, u64 handle)
-{
-       unsigned long order = get_order(size);
-
-       free_pages((unsigned long) cpu_addr, order);
-}
-
-static u64 niu_phys_map_page(struct device *dev, struct page *page,
-                            unsigned long offset, size_t size,
-                            enum dma_data_direction direction)
-{
-       return page_to_phys(page) + offset;
-}
-
-static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
-                               size_t size, enum dma_data_direction direction)
-{
-       /* Nothing to do.  */
-}
-
-static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
-                              size_t size,
-                              enum dma_data_direction direction)
-{
-       return __pa(cpu_addr);
-}
-
-static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
-                                 size_t size,
-                                 enum dma_data_direction direction)
-{
-       /* Nothing to do.  */
-}
-
-static const struct niu_ops niu_phys_ops = {
-       .alloc_coherent = niu_phys_alloc_coherent,
-       .free_coherent  = niu_phys_free_coherent,
-       .map_page       = niu_phys_map_page,
-       .unmap_page     = niu_phys_unmap_page,
-       .map_single     = niu_phys_map_single,
-       .unmap_single   = niu_phys_unmap_single,
-};
-
-static int __devinit niu_of_probe(struct platform_device *op)
-{
-       union niu_parent_id parent_id;
-       struct net_device *dev;
-       struct niu *np;
-       const u32 *reg;
-       int err;
-
-       niu_driver_version();
-
-       reg = of_get_property(op->dev.of_node, "reg", NULL);
-       if (!reg) {
-               dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
-                       op->dev.of_node->full_name);
-               return -ENODEV;
-       }
-
-       dev = niu_alloc_and_init(&op->dev, NULL, op,
-                                &niu_phys_ops, reg[0] & 0x1);
-       if (!dev) {
-               err = -ENOMEM;
-               goto err_out;
-       }
-       np = netdev_priv(dev);
-
-       memset(&parent_id, 0, sizeof(parent_id));
-       parent_id.of = of_get_parent(op->dev.of_node);
-
-       np->parent = niu_get_parent(np, &parent_id,
-                                   PLAT_TYPE_NIU);
-       if (!np->parent) {
-               err = -ENOMEM;
-               goto err_out_free_dev;
-       }
-
-       niu_set_basic_features(dev);
-
-       np->regs = of_ioremap(&op->resource[1], 0,
-                             resource_size(&op->resource[1]),
-                             "niu regs");
-       if (!np->regs) {
-               dev_err(&op->dev, "Cannot map device registers, aborting\n");
-               err = -ENOMEM;
-               goto err_out_release_parent;
-       }
-
-       np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
-                                   resource_size(&op->resource[2]),
-                                   "niu vregs-1");
-       if (!np->vir_regs_1) {
-               dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
-               err = -ENOMEM;
-               goto err_out_iounmap;
-       }
-
-       np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
-                                   resource_size(&op->resource[3]),
-                                   "niu vregs-2");
-       if (!np->vir_regs_2) {
-               dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
-               err = -ENOMEM;
-               goto err_out_iounmap;
-       }
-
-       niu_assign_netdev_ops(dev);
-
-       err = niu_get_invariants(np);
-       if (err) {
-               if (err != -ENODEV)
-                       dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
-               goto err_out_iounmap;
-       }
-
-       err = register_netdev(dev);
-       if (err) {
-               dev_err(&op->dev, "Cannot register net device, aborting\n");
-               goto err_out_iounmap;
-       }
-
-       dev_set_drvdata(&op->dev, dev);
-
-       niu_device_announce(np);
-
-       return 0;
-
-err_out_iounmap:
-       if (np->vir_regs_1) {
-               of_iounmap(&op->resource[2], np->vir_regs_1,
-                          resource_size(&op->resource[2]));
-               np->vir_regs_1 = NULL;
-       }
-
-       if (np->vir_regs_2) {
-               of_iounmap(&op->resource[3], np->vir_regs_2,
-                          resource_size(&op->resource[3]));
-               np->vir_regs_2 = NULL;
-       }
-
-       if (np->regs) {
-               of_iounmap(&op->resource[1], np->regs,
-                          resource_size(&op->resource[1]));
-               np->regs = NULL;
-       }
-
-err_out_release_parent:
-       niu_put_parent(np);
-
-err_out_free_dev:
-       free_netdev(dev);
-
-err_out:
-       return err;
-}
-
-static int __devexit niu_of_remove(struct platform_device *op)
-{
-       struct net_device *dev = dev_get_drvdata(&op->dev);
-
-       if (dev) {
-               struct niu *np = netdev_priv(dev);
-
-               unregister_netdev(dev);
-
-               if (np->vir_regs_1) {
-                       of_iounmap(&op->resource[2], np->vir_regs_1,
-                                  resource_size(&op->resource[2]));
-                       np->vir_regs_1 = NULL;
-               }
-
-               if (np->vir_regs_2) {
-                       of_iounmap(&op->resource[3], np->vir_regs_2,
-                                  resource_size(&op->resource[3]));
-                       np->vir_regs_2 = NULL;
-               }
-
-               if (np->regs) {
-                       of_iounmap(&op->resource[1], np->regs,
-                                  resource_size(&op->resource[1]));
-                       np->regs = NULL;
-               }
-
-               niu_ldg_free(np);
-
-               niu_put_parent(np);
-
-               free_netdev(dev);
-               dev_set_drvdata(&op->dev, NULL);
-       }
-       return 0;
-}
-
-static const struct of_device_id niu_match[] = {
-       {
-               .name = "network",
-               .compatible = "SUNW,niusl",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, niu_match);
-
-static struct platform_driver niu_of_driver = {
-       .driver = {
-               .name = "niu",
-               .owner = THIS_MODULE,
-               .of_match_table = niu_match,
-       },
-       .probe          = niu_of_probe,
-       .remove         = __devexit_p(niu_of_remove),
-};
-
-#endif /* CONFIG_SPARC64 */
-
-static int __init niu_init(void)
-{
-       int err = 0;
-
-       BUILD_BUG_ON(PAGE_SIZE < 4 * 1024);
-
-       niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT);
-
-#ifdef CONFIG_SPARC64
-       err = platform_driver_register(&niu_of_driver);
-#endif
-
-       if (!err) {
-               err = pci_register_driver(&niu_pci_driver);
-#ifdef CONFIG_SPARC64
-               if (err)
-                       platform_driver_unregister(&niu_of_driver);
-#endif
-       }
-
-       return err;
-}
-
-static void __exit niu_exit(void)
-{
-       pci_unregister_driver(&niu_pci_driver);
-#ifdef CONFIG_SPARC64
-       platform_driver_unregister(&niu_of_driver);
-#endif
-}
-
-module_init(niu_init);
-module_exit(niu_exit);
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
deleted file mode 100644 (file)
index 51e177e..0000000
+++ /dev/null
@@ -1,3306 +0,0 @@
-/* niu.h: Definitions for Neptune ethernet driver.
- *
- * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
- */
-
-#ifndef _NIU_H
-#define _NIU_H
-
-#define PIO                    0x000000UL
-#define FZC_PIO                        0x080000UL
-#define FZC_MAC                        0x180000UL
-#define FZC_IPP                        0x280000UL
-#define FFLP                   0x300000UL
-#define FZC_FFLP               0x380000UL
-#define PIO_VADDR              0x400000UL
-#define ZCP                    0x500000UL
-#define FZC_ZCP                        0x580000UL
-#define DMC                    0x600000UL
-#define FZC_DMC                        0x680000UL
-#define TXC                    0x700000UL
-#define FZC_TXC                        0x780000UL
-#define PIO_LDSV               0x800000UL
-#define PIO_PIO_LDGIM          0x900000UL
-#define PIO_IMASK0             0xa00000UL
-#define PIO_IMASK1             0xb00000UL
-#define FZC_PROM               0xc80000UL
-#define FZC_PIM                        0xd80000UL
-
-#define LDSV0(LDG)             (PIO_LDSV + 0x00000UL + (LDG) * 0x2000UL)
-#define LDSV1(LDG)             (PIO_LDSV + 0x00008UL + (LDG) * 0x2000UL)
-#define LDSV2(LDG)             (PIO_LDSV + 0x00010UL + (LDG) * 0x2000UL)
-
-#define LDG_IMGMT(LDG)         (PIO_LDSV + 0x00018UL + (LDG) * 0x2000UL)
-#define  LDG_IMGMT_ARM         0x0000000080000000ULL
-#define  LDG_IMGMT_TIMER       0x000000000000003fULL
-
-#define LD_IM0(IDX)            (PIO_IMASK0 + 0x00000UL + (IDX) * 0x2000UL)
-#define  LD_IM0_MASK           0x0000000000000003ULL
-
-#define LD_IM1(IDX)            (PIO_IMASK1 + 0x00000UL + (IDX) * 0x2000UL)
-#define  LD_IM1_MASK           0x0000000000000003ULL
-
-#define LDG_TIMER_RES          (FZC_PIO + 0x00008UL)
-#define  LDG_TIMER_RES_VAL     0x00000000000fffffULL
-
-#define DIRTY_TID_CTL          (FZC_PIO + 0x00010UL)
-#define  DIRTY_TID_CTL_NPTHRED 0x00000000003f0000ULL
-#define  DIRTY_TID_CTL_RDTHRED 0x00000000000003f0ULL
-#define  DIRTY_TID_CTL_DTIDCLR 0x0000000000000002ULL
-#define  DIRTY_TID_CTL_DTIDENAB        0x0000000000000001ULL
-
-#define DIRTY_TID_STAT         (FZC_PIO + 0x00018UL)
-#define  DIRTY_TID_STAT_NPWSTAT        0x0000000000003f00ULL
-#define  DIRTY_TID_STAT_RDSTAT 0x000000000000003fULL
-
-#define RST_CTL                        (FZC_PIO + 0x00038UL)
-#define  RST_CTL_MAC_RST3      0x0000000000400000ULL
-#define  RST_CTL_MAC_RST2      0x0000000000200000ULL
-#define  RST_CTL_MAC_RST1      0x0000000000100000ULL
-#define  RST_CTL_MAC_RST0      0x0000000000080000ULL
-#define  RST_CTL_ACK_TO_EN     0x0000000000000800ULL
-#define  RST_CTL_ACK_TO_VAL    0x00000000000007feULL
-
-#define SMX_CFIG_DAT           (FZC_PIO + 0x00040UL)
-#define  SMX_CFIG_DAT_RAS_DET  0x0000000080000000ULL
-#define  SMX_CFIG_DAT_RAS_INJ  0x0000000040000000ULL
-#define  SMX_CFIG_DAT_XACT_TO  0x000000000fffffffULL
-
-#define SMX_INT_STAT           (FZC_PIO + 0x00048UL)
-#define  SMX_INT_STAT_STAT     0x00000000ffffffffULL
-
-#define SMX_CTL                        (FZC_PIO + 0x00050UL)
-#define  SMX_CTL_CTL           0x00000000ffffffffULL
-
-#define SMX_DBG_VEC            (FZC_PIO + 0x00058UL)
-#define  SMX_DBG_VEC_VEC       0x00000000ffffffffULL
-
-#define PIO_DBG_SEL            (FZC_PIO + 0x00060UL)
-#define  PIO_DBG_SEL_SEL       0x000000000000003fULL
-
-#define PIO_TRAIN_VEC          (FZC_PIO + 0x00068UL)
-#define  PIO_TRAIN_VEC_VEC     0x00000000ffffffffULL
-
-#define PIO_ARB_CTL            (FZC_PIO + 0x00070UL)
-#define  PIO_ARB_CTL_CTL       0x00000000ffffffffULL
-
-#define PIO_ARB_DBG_VEC                (FZC_PIO + 0x00078UL)
-#define  PIO_ARB_DBG_VEC_VEC   0x00000000ffffffffULL
-
-#define SYS_ERR_MASK           (FZC_PIO + 0x00090UL)
-#define  SYS_ERR_MASK_META2    0x0000000000000400ULL
-#define  SYS_ERR_MASK_META1    0x0000000000000200ULL
-#define  SYS_ERR_MASK_PEU      0x0000000000000100ULL
-#define  SYS_ERR_MASK_TXC      0x0000000000000080ULL
-#define  SYS_ERR_MASK_RDMC     0x0000000000000040ULL
-#define  SYS_ERR_MASK_TDMC     0x0000000000000020ULL
-#define  SYS_ERR_MASK_ZCP      0x0000000000000010ULL
-#define  SYS_ERR_MASK_FFLP     0x0000000000000008ULL
-#define  SYS_ERR_MASK_IPP      0x0000000000000004ULL
-#define  SYS_ERR_MASK_MAC      0x0000000000000002ULL
-#define  SYS_ERR_MASK_SMX      0x0000000000000001ULL
-
-#define SYS_ERR_STAT                   (FZC_PIO + 0x00098UL)
-#define  SYS_ERR_STAT_META2            0x0000000000000400ULL
-#define  SYS_ERR_STAT_META1            0x0000000000000200ULL
-#define  SYS_ERR_STAT_PEU              0x0000000000000100ULL
-#define  SYS_ERR_STAT_TXC              0x0000000000000080ULL
-#define  SYS_ERR_STAT_RDMC             0x0000000000000040ULL
-#define  SYS_ERR_STAT_TDMC             0x0000000000000020ULL
-#define  SYS_ERR_STAT_ZCP              0x0000000000000010ULL
-#define  SYS_ERR_STAT_FFLP             0x0000000000000008ULL
-#define  SYS_ERR_STAT_IPP              0x0000000000000004ULL
-#define  SYS_ERR_STAT_MAC              0x0000000000000002ULL
-#define  SYS_ERR_STAT_SMX              0x0000000000000001ULL
-
-#define SID(LDG)                       (FZC_PIO + 0x10200UL + (LDG) * 8UL)
-#define  SID_FUNC                      0x0000000000000060ULL
-#define  SID_FUNC_SHIFT                        5
-#define  SID_VECTOR                    0x000000000000001fULL
-#define  SID_VECTOR_SHIFT              0
-
-#define LDG_NUM(LDN)                   (FZC_PIO + 0x20000UL + (LDN) * 8UL)
-
-#define XMAC_PORT0_OFF                 (FZC_MAC + 0x000000)
-#define XMAC_PORT1_OFF                 (FZC_MAC + 0x006000)
-#define BMAC_PORT2_OFF                 (FZC_MAC + 0x00c000)
-#define BMAC_PORT3_OFF                 (FZC_MAC + 0x010000)
-
-/* XMAC registers, offset from np->mac_regs  */
-
-#define XTXMAC_SW_RST                  0x00000UL
-#define  XTXMAC_SW_RST_REG_RS          0x0000000000000002ULL
-#define  XTXMAC_SW_RST_SOFT_RST                0x0000000000000001ULL
-
-#define XRXMAC_SW_RST                  0x00008UL
-#define  XRXMAC_SW_RST_REG_RS          0x0000000000000002ULL
-#define  XRXMAC_SW_RST_SOFT_RST                0x0000000000000001ULL
-
-#define XTXMAC_STATUS                  0x00020UL
-#define  XTXMAC_STATUS_FRAME_CNT_EXP   0x0000000000000800ULL
-#define  XTXMAC_STATUS_BYTE_CNT_EXP    0x0000000000000400ULL
-#define  XTXMAC_STATUS_TXFIFO_XFR_ERR  0x0000000000000010ULL
-#define  XTXMAC_STATUS_TXMAC_OFLOW     0x0000000000000008ULL
-#define  XTXMAC_STATUS_MAX_PSIZE_ERR   0x0000000000000004ULL
-#define  XTXMAC_STATUS_TXMAC_UFLOW     0x0000000000000002ULL
-#define  XTXMAC_STATUS_FRAME_XMITED    0x0000000000000001ULL
-
-#define XRXMAC_STATUS                  0x00028UL
-#define  XRXMAC_STATUS_RXHIST7_CNT_EXP 0x0000000000100000ULL
-#define  XRXMAC_STATUS_LCL_FLT_STATUS  0x0000000000080000ULL
-#define  XRXMAC_STATUS_RFLT_DET                0x0000000000040000ULL
-#define  XRXMAC_STATUS_LFLT_CNT_EXP    0x0000000000020000ULL
-#define  XRXMAC_STATUS_PHY_MDINT       0x0000000000010000ULL
-#define  XRXMAC_STATUS_ALIGNERR_CNT_EXP        0x0000000000010000ULL
-#define  XRXMAC_STATUS_RXFRAG_CNT_EXP  0x0000000000008000ULL
-#define  XRXMAC_STATUS_RXMULTF_CNT_EXP 0x0000000000004000ULL
-#define  XRXMAC_STATUS_RXBCAST_CNT_EXP 0x0000000000002000ULL
-#define  XRXMAC_STATUS_RXHIST6_CNT_EXP 0x0000000000001000ULL
-#define  XRXMAC_STATUS_RXHIST5_CNT_EXP 0x0000000000000800ULL
-#define  XRXMAC_STATUS_RXHIST4_CNT_EXP 0x0000000000000400ULL
-#define  XRXMAC_STATUS_RXHIST3_CNT_EXP 0x0000000000000200ULL
-#define  XRXMAC_STATUS_RXHIST2_CNT_EXP 0x0000000000000100ULL
-#define  XRXMAC_STATUS_RXHIST1_CNT_EXP 0x0000000000000080ULL
-#define  XRXMAC_STATUS_RXOCTET_CNT_EXP 0x0000000000000040ULL
-#define  XRXMAC_STATUS_CVIOLERR_CNT_EXP        0x0000000000000020ULL
-#define  XRXMAC_STATUS_LENERR_CNT_EXP  0x0000000000000010ULL
-#define  XRXMAC_STATUS_CRCERR_CNT_EXP  0x0000000000000008ULL
-#define  XRXMAC_STATUS_RXUFLOW         0x0000000000000004ULL
-#define  XRXMAC_STATUS_RXOFLOW         0x0000000000000002ULL
-#define  XRXMAC_STATUS_FRAME_RCVD      0x0000000000000001ULL
-
-#define XMAC_FC_STAT                   0x00030UL
-#define  XMAC_FC_STAT_RX_RCV_PAUSE_TIME        0x00000000ffff0000ULL
-#define  XMAC_FC_STAT_TX_MAC_NPAUSE    0x0000000000000004ULL
-#define  XMAC_FC_STAT_TX_MAC_PAUSE     0x0000000000000002ULL
-#define  XMAC_FC_STAT_RX_MAC_RPAUSE    0x0000000000000001ULL
-
-#define XTXMAC_STAT_MSK                        0x00040UL
-#define  XTXMAC_STAT_MSK_FRAME_CNT_EXP 0x0000000000000800ULL
-#define  XTXMAC_STAT_MSK_BYTE_CNT_EXP  0x0000000000000400ULL
-#define  XTXMAC_STAT_MSK_TXFIFO_XFR_ERR        0x0000000000000010ULL
-#define  XTXMAC_STAT_MSK_TXMAC_OFLOW   0x0000000000000008ULL
-#define  XTXMAC_STAT_MSK_MAX_PSIZE_ERR 0x0000000000000004ULL
-#define  XTXMAC_STAT_MSK_TXMAC_UFLOW   0x0000000000000002ULL
-#define  XTXMAC_STAT_MSK_FRAME_XMITED  0x0000000000000001ULL
-
-#define XRXMAC_STAT_MSK                                0x00048UL
-#define  XRXMAC_STAT_MSK_LCL_FLT_STAT_MSK      0x0000000000080000ULL
-#define  XRXMAC_STAT_MSK_RFLT_DET              0x0000000000040000ULL
-#define  XRXMAC_STAT_MSK_LFLT_CNT_EXP          0x0000000000020000ULL
-#define  XRXMAC_STAT_MSK_PHY_MDINT             0x0000000000010000ULL
-#define  XRXMAC_STAT_MSK_RXFRAG_CNT_EXP                0x0000000000008000ULL
-#define  XRXMAC_STAT_MSK_RXMULTF_CNT_EXP       0x0000000000004000ULL
-#define  XRXMAC_STAT_MSK_RXBCAST_CNT_EXP       0x0000000000002000ULL
-#define  XRXMAC_STAT_MSK_RXHIST6_CNT_EXP       0x0000000000001000ULL
-#define  XRXMAC_STAT_MSK_RXHIST5_CNT_EXP       0x0000000000000800ULL
-#define  XRXMAC_STAT_MSK_RXHIST4_CNT_EXP       0x0000000000000400ULL
-#define  XRXMAC_STAT_MSK_RXHIST3_CNT_EXP       0x0000000000000200ULL
-#define  XRXMAC_STAT_MSK_RXHIST2_CNT_EXP       0x0000000000000100ULL
-#define  XRXMAC_STAT_MSK_RXHIST1_CNT_EXP       0x0000000000000080ULL
-#define  XRXMAC_STAT_MSK_RXOCTET_CNT_EXP       0x0000000000000040ULL
-#define  XRXMAC_STAT_MSK_CVIOLERR_CNT_EXP      0x0000000000000020ULL
-#define  XRXMAC_STAT_MSK_LENERR_CNT_EXP                0x0000000000000010ULL
-#define  XRXMAC_STAT_MSK_CRCERR_CNT_EXP                0x0000000000000008ULL
-#define  XRXMAC_STAT_MSK_RXUFLOW_CNT_EXP       0x0000000000000004ULL
-#define  XRXMAC_STAT_MSK_RXOFLOW_CNT_EXP       0x0000000000000002ULL
-#define  XRXMAC_STAT_MSK_FRAME_RCVD            0x0000000000000001ULL
-
-#define XMAC_FC_MSK                    0x00050UL
-#define  XMAC_FC_MSK_TX_MAC_NPAUSE     0x0000000000000004ULL
-#define  XMAC_FC_MSK_TX_MAC_PAUSE      0x0000000000000002ULL
-#define  XMAC_FC_MSK_RX_MAC_RPAUSE     0x0000000000000001ULL
-
-#define XMAC_CONFIG                    0x00060UL
-#define  XMAC_CONFIG_SEL_CLK_25MHZ     0x0000000080000000ULL
-#define  XMAC_CONFIG_1G_PCS_BYPASS     0x0000000040000000ULL
-#define  XMAC_CONFIG_10G_XPCS_BYPASS   0x0000000020000000ULL
-#define  XMAC_CONFIG_MODE_MASK         0x0000000018000000ULL
-#define  XMAC_CONFIG_MODE_XGMII                0x0000000000000000ULL
-#define  XMAC_CONFIG_MODE_GMII         0x0000000008000000ULL
-#define  XMAC_CONFIG_MODE_MII          0x0000000010000000ULL
-#define  XMAC_CONFIG_LFS_DISABLE       0x0000000004000000ULL
-#define  XMAC_CONFIG_LOOPBACK          0x0000000002000000ULL
-#define  XMAC_CONFIG_TX_OUTPUT_EN      0x0000000001000000ULL
-#define  XMAC_CONFIG_SEL_POR_CLK_SRC   0x0000000000800000ULL
-#define  XMAC_CONFIG_LED_POLARITY      0x0000000000400000ULL
-#define  XMAC_CONFIG_FORCE_LED_ON      0x0000000000200000ULL
-#define  XMAC_CONFIG_PASS_FLOW_CTRL    0x0000000000100000ULL
-#define  XMAC_CONFIG_RCV_PAUSE_ENABLE  0x0000000000080000ULL
-#define  XMAC_CONFIG_MAC2IPP_PKT_CNT_EN        0x0000000000040000ULL
-#define  XMAC_CONFIG_STRIP_CRC         0x0000000000020000ULL
-#define  XMAC_CONFIG_ADDR_FILTER_EN    0x0000000000010000ULL
-#define  XMAC_CONFIG_HASH_FILTER_EN    0x0000000000008000ULL
-#define  XMAC_CONFIG_RX_CODEV_CHK_DIS  0x0000000000004000ULL
-#define  XMAC_CONFIG_RESERVED_MULTICAST        0x0000000000002000ULL
-#define  XMAC_CONFIG_RX_CRC_CHK_DIS    0x0000000000001000ULL
-#define  XMAC_CONFIG_ERR_CHK_DIS       0x0000000000000800ULL
-#define  XMAC_CONFIG_PROMISC_GROUP     0x0000000000000400ULL
-#define  XMAC_CONFIG_PROMISCUOUS       0x0000000000000200ULL
-#define  XMAC_CONFIG_RX_MAC_ENABLE     0x0000000000000100ULL
-#define  XMAC_CONFIG_WARNING_MSG_EN    0x0000000000000080ULL
-#define  XMAC_CONFIG_ALWAYS_NO_CRC     0x0000000000000008ULL
-#define  XMAC_CONFIG_VAR_MIN_IPG_EN    0x0000000000000004ULL
-#define  XMAC_CONFIG_STRETCH_MODE      0x0000000000000002ULL
-#define  XMAC_CONFIG_TX_ENABLE         0x0000000000000001ULL
-
-#define XMAC_IPG                       0x00080UL
-#define  XMAC_IPG_STRETCH_CONST                0x0000000000e00000ULL
-#define  XMAC_IPG_STRETCH_CONST_SHIFT  21
-#define  XMAC_IPG_STRETCH_RATIO                0x00000000001f0000ULL
-#define  XMAC_IPG_STRETCH_RATIO_SHIFT  16
-#define  XMAC_IPG_IPG_MII_GMII         0x000000000000ff00ULL
-#define  XMAC_IPG_IPG_MII_GMII_SHIFT   8
-#define  XMAC_IPG_IPG_XGMII            0x0000000000000007ULL
-#define  XMAC_IPG_IPG_XGMII_SHIFT      0
-
-#define IPG_12_15_XGMII                        3
-#define IPG_16_19_XGMII                        4
-#define IPG_20_23_XGMII                        5
-#define IPG_12_MII_GMII                        10
-#define IPG_13_MII_GMII                        11
-#define IPG_14_MII_GMII                        12
-#define IPG_15_MII_GMII                        13
-#define IPG_16_MII_GMII                        14
-
-#define XMAC_MIN                       0x00088UL
-#define  XMAC_MIN_RX_MIN_PKT_SIZE      0x000000003ff00000ULL
-#define  XMAC_MIN_RX_MIN_PKT_SIZE_SHFT 20
-#define  XMAC_MIN_SLOT_TIME            0x000000000003fc00ULL
-#define  XMAC_MIN_SLOT_TIME_SHFT       10
-#define  XMAC_MIN_TX_MIN_PKT_SIZE      0x00000000000003ffULL
-#define  XMAC_MIN_TX_MIN_PKT_SIZE_SHFT 0
-
-#define XMAC_MAX                       0x00090UL
-#define  XMAC_MAX_FRAME_SIZE           0x0000000000003fffULL
-#define  XMAC_MAX_FRAME_SIZE_SHFT      0
-
-#define XMAC_ADDR0                     0x000a0UL
-#define  XMAC_ADDR0_ADDR0              0x000000000000ffffULL
-
-#define XMAC_ADDR1                     0x000a8UL
-#define  XMAC_ADDR1_ADDR1              0x000000000000ffffULL
-
-#define XMAC_ADDR2                     0x000b0UL
-#define  XMAC_ADDR2_ADDR2              0x000000000000ffffULL
-
-#define XMAC_ADDR_CMPEN                        0x00208UL
-#define  XMAC_ADDR_CMPEN_EN15          0x0000000000008000ULL
-#define  XMAC_ADDR_CMPEN_EN14          0x0000000000004000ULL
-#define  XMAC_ADDR_CMPEN_EN13          0x0000000000002000ULL
-#define  XMAC_ADDR_CMPEN_EN12          0x0000000000001000ULL
-#define  XMAC_ADDR_CMPEN_EN11          0x0000000000000800ULL
-#define  XMAC_ADDR_CMPEN_EN10          0x0000000000000400ULL
-#define  XMAC_ADDR_CMPEN_EN9           0x0000000000000200ULL
-#define  XMAC_ADDR_CMPEN_EN8           0x0000000000000100ULL
-#define  XMAC_ADDR_CMPEN_EN7           0x0000000000000080ULL
-#define  XMAC_ADDR_CMPEN_EN6           0x0000000000000040ULL
-#define  XMAC_ADDR_CMPEN_EN5           0x0000000000000020ULL
-#define  XMAC_ADDR_CMPEN_EN4           0x0000000000000010ULL
-#define  XMAC_ADDR_CMPEN_EN3           0x0000000000000008ULL
-#define  XMAC_ADDR_CMPEN_EN2           0x0000000000000004ULL
-#define  XMAC_ADDR_CMPEN_EN1           0x0000000000000002ULL
-#define  XMAC_ADDR_CMPEN_EN0           0x0000000000000001ULL
-
-#define XMAC_NUM_ALT_ADDR              16
-
-#define XMAC_ALT_ADDR0(NUM)            (0x00218UL + (NUM)*0x18UL)
-#define  XMAC_ALT_ADDR0_ADDR0          0x000000000000ffffULL
-
-#define XMAC_ALT_ADDR1(NUM)            (0x00220UL + (NUM)*0x18UL)
-#define  XMAC_ALT_ADDR1_ADDR1          0x000000000000ffffULL
-
-#define XMAC_ALT_ADDR2(NUM)            (0x00228UL + (NUM)*0x18UL)
-#define  XMAC_ALT_ADDR2_ADDR2          0x000000000000ffffULL
-
-#define XMAC_ADD_FILT0                 0x00818UL
-#define  XMAC_ADD_FILT0_FILT0          0x000000000000ffffULL
-
-#define XMAC_ADD_FILT1                 0x00820UL
-#define  XMAC_ADD_FILT1_FILT1          0x000000000000ffffULL
-
-#define XMAC_ADD_FILT2                 0x00828UL
-#define  XMAC_ADD_FILT2_FILT2          0x000000000000ffffULL
-
-#define XMAC_ADD_FILT12_MASK           0x00830UL
-#define  XMAC_ADD_FILT12_MASK_VAL      0x00000000000000ffULL
-
-#define XMAC_ADD_FILT00_MASK           0x00838UL
-#define  XMAC_ADD_FILT00_MASK_VAL      0x000000000000ffffULL
-
-#define XMAC_HASH_TBL(NUM)             (0x00840UL + (NUM) * 0x8UL)
-#define XMAC_HASH_TBL_VAL              0x000000000000ffffULL
-
-#define XMAC_NUM_HOST_INFO             20
-
-#define XMAC_HOST_INFO(NUM)            (0x00900UL + (NUM) * 0x8UL)
-
-#define XMAC_PA_DATA0                  0x00b80UL
-#define XMAC_PA_DATA0_VAL              0x00000000ffffffffULL
-
-#define XMAC_PA_DATA1                  0x00b88UL
-#define XMAC_PA_DATA1_VAL              0x00000000ffffffffULL
-
-#define XMAC_DEBUG_SEL                 0x00b90UL
-#define  XMAC_DEBUG_SEL_XMAC           0x0000000000000078ULL
-#define  XMAC_DEBUG_SEL_MAC            0x0000000000000007ULL
-
-#define XMAC_TRAIN_VEC                 0x00b98UL
-#define  XMAC_TRAIN_VEC_VAL            0x00000000ffffffffULL
-
-#define RXMAC_BT_CNT                   0x00100UL
-#define  RXMAC_BT_CNT_COUNT            0x00000000ffffffffULL
-
-#define RXMAC_BC_FRM_CNT               0x00108UL
-#define  RXMAC_BC_FRM_CNT_COUNT                0x00000000001fffffULL
-
-#define RXMAC_MC_FRM_CNT               0x00110UL
-#define  RXMAC_MC_FRM_CNT_COUNT                0x00000000001fffffULL
-
-#define RXMAC_FRAG_CNT                 0x00118UL
-#define  RXMAC_FRAG_CNT_COUNT          0x00000000001fffffULL
-
-#define RXMAC_HIST_CNT1                        0x00120UL
-#define  RXMAC_HIST_CNT1_COUNT         0x00000000001fffffULL
-
-#define RXMAC_HIST_CNT2                        0x00128UL
-#define  RXMAC_HIST_CNT2_COUNT         0x00000000001fffffULL
-
-#define RXMAC_HIST_CNT3                        0x00130UL
-#define  RXMAC_HIST_CNT3_COUNT         0x00000000000fffffULL
-
-#define RXMAC_HIST_CNT4                        0x00138UL
-#define  RXMAC_HIST_CNT4_COUNT         0x000000000007ffffULL
-
-#define RXMAC_HIST_CNT5                        0x00140UL
-#define  RXMAC_HIST_CNT5_COUNT         0x000000000003ffffULL
-
-#define RXMAC_HIST_CNT6                        0x00148UL
-#define  RXMAC_HIST_CNT6_COUNT         0x000000000000ffffULL
-
-#define RXMAC_MPSZER_CNT               0x00150UL
-#define  RXMAC_MPSZER_CNT_COUNT                0x00000000000000ffULL
-
-#define RXMAC_CRC_ER_CNT               0x00158UL
-#define  RXMAC_CRC_ER_CNT_COUNT                0x00000000000000ffULL
-
-#define RXMAC_CD_VIO_CNT               0x00160UL
-#define  RXMAC_CD_VIO_CNT_COUNT                0x00000000000000ffULL
-
-#define RXMAC_ALIGN_ERR_CNT            0x00168UL
-#define  RXMAC_ALIGN_ERR_CNT_COUNT     0x00000000000000ffULL
-
-#define TXMAC_FRM_CNT                  0x00170UL
-#define  TXMAC_FRM_CNT_COUNT           0x00000000ffffffffULL
-
-#define TXMAC_BYTE_CNT                 0x00178UL
-#define  TXMAC_BYTE_CNT_COUNT          0x00000000ffffffffULL
-
-#define LINK_FAULT_CNT                 0x00180UL
-#define  LINK_FAULT_CNT_COUNT          0x00000000000000ffULL
-
-#define RXMAC_HIST_CNT7                        0x00188UL
-#define  RXMAC_HIST_CNT7_COUNT         0x0000000007ffffffULL
-
-#define XMAC_SM_REG                    0x001a8UL
-#define  XMAC_SM_REG_STATE             0x00000000ffffffffULL
-
-#define XMAC_INTER1                    0x001b0UL
-#define  XMAC_INTERN1_SIGNALS1         0x00000000ffffffffULL
-
-#define XMAC_INTER2                    0x001b8UL
-#define  XMAC_INTERN2_SIGNALS2         0x00000000ffffffffULL
-
-/* BMAC registers, offset from np->mac_regs  */
-
-#define BTXMAC_SW_RST                  0x00000UL
-#define  BTXMAC_SW_RST_RESET           0x0000000000000001ULL
-
-#define BRXMAC_SW_RST                  0x00008UL
-#define  BRXMAC_SW_RST_RESET           0x0000000000000001ULL
-
-#define BMAC_SEND_PAUSE                        0x00010UL
-#define  BMAC_SEND_PAUSE_SEND          0x0000000000010000ULL
-#define  BMAC_SEND_PAUSE_TIME          0x000000000000ffffULL
-
-#define BTXMAC_STATUS                  0x00020UL
-#define  BTXMAC_STATUS_XMIT            0x0000000000000001ULL
-#define  BTXMAC_STATUS_UNDERRUN                0x0000000000000002ULL
-#define  BTXMAC_STATUS_MAX_PKT_ERR     0x0000000000000004ULL
-#define  BTXMAC_STATUS_BYTE_CNT_EXP    0x0000000000000400ULL
-#define  BTXMAC_STATUS_FRAME_CNT_EXP   0x0000000000000800ULL
-
-#define BRXMAC_STATUS                  0x00028UL
-#define  BRXMAC_STATUS_RX_PKT          0x0000000000000001ULL
-#define  BRXMAC_STATUS_OVERFLOW                0x0000000000000002ULL
-#define  BRXMAC_STATUS_FRAME_CNT_EXP   0x0000000000000004ULL
-#define  BRXMAC_STATUS_ALIGN_ERR_EXP   0x0000000000000008ULL
-#define  BRXMAC_STATUS_CRC_ERR_EXP     0x0000000000000010ULL
-#define  BRXMAC_STATUS_LEN_ERR_EXP     0x0000000000000020ULL
-
-#define BMAC_CTRL_STATUS               0x00030UL
-#define  BMAC_CTRL_STATUS_PAUSE_RECV   0x0000000000000001ULL
-#define  BMAC_CTRL_STATUS_PAUSE                0x0000000000000002ULL
-#define  BMAC_CTRL_STATUS_NOPAUSE      0x0000000000000004ULL
-#define  BMAC_CTRL_STATUS_TIME         0x00000000ffff0000ULL
-#define  BMAC_CTRL_STATUS_TIME_SHIFT   16
-
-#define BTXMAC_STATUS_MASK             0x00040UL
-#define BRXMAC_STATUS_MASK             0x00048UL
-#define BMAC_CTRL_STATUS_MASK          0x00050UL
-
-#define BTXMAC_CONFIG                  0x00060UL
-#define  BTXMAC_CONFIG_ENABLE          0x0000000000000001ULL
-#define  BTXMAC_CONFIG_FCS_DISABLE     0x0000000000000002ULL
-
-#define BRXMAC_CONFIG                  0x00068UL
-#define  BRXMAC_CONFIG_DISCARD_DIS     0x0000000000000080ULL
-#define  BRXMAC_CONFIG_ADDR_FILT_EN    0x0000000000000040ULL
-#define  BRXMAC_CONFIG_HASH_FILT_EN    0x0000000000000020ULL
-#define  BRXMAC_CONFIG_PROMISC_GRP     0x0000000000000010ULL
-#define  BRXMAC_CONFIG_PROMISC         0x0000000000000008ULL
-#define  BRXMAC_CONFIG_STRIP_FCS       0x0000000000000004ULL
-#define  BRXMAC_CONFIG_STRIP_PAD       0x0000000000000002ULL
-#define  BRXMAC_CONFIG_ENABLE          0x0000000000000001ULL
-
-#define BMAC_CTRL_CONFIG               0x00070UL
-#define  BMAC_CTRL_CONFIG_TX_PAUSE_EN  0x0000000000000001ULL
-#define  BMAC_CTRL_CONFIG_RX_PAUSE_EN  0x0000000000000002ULL
-#define  BMAC_CTRL_CONFIG_PASS_CTRL    0x0000000000000004ULL
-
-#define BMAC_XIF_CONFIG                        0x00078UL
-#define  BMAC_XIF_CONFIG_TX_OUTPUT_EN  0x0000000000000001ULL
-#define  BMAC_XIF_CONFIG_MII_LOOPBACK  0x0000000000000002ULL
-#define  BMAC_XIF_CONFIG_GMII_MODE     0x0000000000000008ULL
-#define  BMAC_XIF_CONFIG_LINK_LED      0x0000000000000020ULL
-#define  BMAC_XIF_CONFIG_LED_POLARITY  0x0000000000000040ULL
-#define  BMAC_XIF_CONFIG_25MHZ_CLOCK   0x0000000000000080ULL
-
-#define BMAC_MIN_FRAME                 0x000a0UL
-#define  BMAC_MIN_FRAME_VAL            0x00000000000003ffULL
-
-#define BMAC_MAX_FRAME                 0x000a8UL
-#define  BMAC_MAX_FRAME_MAX_BURST      0x000000003fff0000ULL
-#define  BMAC_MAX_FRAME_MAX_BURST_SHIFT        16
/* BMAC (Sun Neptune 10/100/1000 MAC) register offsets and bit masks.
 * Register offsets carry a UL suffix; field macros (two spaces after
 * #define) give the valid-bit mask of the 64-bit register.
 * NOTE(review): the BMAC_MAX_FRAME register define itself sits above
 * this chunk; only its field macros are visible here.
 */
#define  BMAC_MAX_FRAME_MAX_FRAME      0x0000000000003fffULL
#define  BMAC_MAX_FRAME_MAX_FRAME_SHIFT        0

#define BMAC_PREAMBLE_SIZE             0x000b0UL
#define  BMAC_PREAMBLE_SIZE_VAL                0x00000000000003ffULL

#define BMAC_CTRL_TYPE                 0x000c8UL

/* Station MAC address, 16 bits per register (ADDR0/1/2 = 48 bits). */
#define BMAC_ADDR0                     0x00100UL
#define  BMAC_ADDR0_ADDR0              0x000000000000ffffULL

#define BMAC_ADDR1                     0x00108UL
#define  BMAC_ADDR1_ADDR1              0x000000000000ffffULL

#define BMAC_ADDR2                     0x00110UL
#define  BMAC_ADDR2_ADDR2              0x000000000000ffffULL

/* Alternate (filter) MAC addresses; each entry is a 0x18-byte stride
 * of three 16-bit registers, NUM in [0, BMAC_NUM_ALT_ADDR).
 */
#define BMAC_NUM_ALT_ADDR              6

#define BMAC_ALT_ADDR0(NUM)            (0x00118UL + (NUM)*0x18UL)
#define  BMAC_ALT_ADDR0_ADDR0          0x000000000000ffffULL

#define BMAC_ALT_ADDR1(NUM)            (0x00120UL + (NUM)*0x18UL)
#define  BMAC_ALT_ADDR1_ADDR1          0x000000000000ffffULL

#define BMAC_ALT_ADDR2(NUM)            (0x00128UL + (NUM)*0x18UL)
#define  BMAC_ALT_ADDR2_ADDR2          0x000000000000ffffULL

/* Flow-control (pause frame) destination address. */
#define BMAC_FC_ADDR0                  0x00268UL
#define  BMAC_FC_ADDR0_ADDR0           0x000000000000ffffULL

#define BMAC_FC_ADDR1                  0x00270UL
#define  BMAC_FC_ADDR1_ADDR1           0x000000000000ffffULL

#define BMAC_FC_ADDR2                  0x00278UL
#define  BMAC_FC_ADDR2_ADDR2           0x000000000000ffffULL

/* Address filter registers and their nibble/byte masks. */
#define BMAC_ADD_FILT0                 0x00298UL
#define  BMAC_ADD_FILT0_FILT0          0x000000000000ffffULL

#define BMAC_ADD_FILT1                 0x002a0UL
#define  BMAC_ADD_FILT1_FILT1          0x000000000000ffffULL

#define BMAC_ADD_FILT2                 0x002a8UL
#define  BMAC_ADD_FILT2_FILT2          0x000000000000ffffULL

#define BMAC_ADD_FILT12_MASK           0x002b0UL
#define  BMAC_ADD_FILT12_MASK_VAL      0x00000000000000ffULL

#define BMAC_ADD_FILT00_MASK           0x002b8UL
#define  BMAC_ADD_FILT00_MASK_VAL      0x000000000000ffffULL

/* Multicast hash table, 16 bits per entry. */
#define BMAC_HASH_TBL(NUM)             (0x002c0UL + (NUM) * 0x8UL)
#define BMAC_HASH_TBL_VAL              0x000000000000ffffULL

/* RX statistics counters (16-bit fields).
 * NOTE(review): these 0x003xx offsets lack the UL suffix used by the
 * other offsets in this header -- harmless, but inconsistent.
 */
#define BRXMAC_FRAME_CNT               0x00370
#define  BRXMAC_FRAME_CNT_COUNT                0x000000000000ffffULL

/* NOTE(review): no field-mask macro is defined for this counter. */
#define BRXMAC_MAX_LEN_ERR_CNT         0x00378

#define BRXMAC_ALIGN_ERR_CNT           0x00380
#define  BRXMAC_ALIGN_ERR_CNT_COUNT    0x000000000000ffffULL

/* RX CRC error counter; the count occupies the low 16 bits.
 * The field macro was previously a copy-paste duplicate of
 * BRXMAC_ALIGN_ERR_CNT_COUNT (defined just above with the same value);
 * it is renamed here to match its own register.  The old name remains
 * available from its original definition, so no caller breaks.
 */
#define BRXMAC_CRC_ERR_CNT             0x00388
#define  BRXMAC_CRC_ERR_CNT_COUNT      0x000000000000ffffULL
-
/* Remaining BMAC counters, address-compare enables, and host-info
 * (RDC table steering) entries.
 */
#define BRXMAC_CODE_VIOL_ERR_CNT       0x00390
#define  BRXMAC_CODE_VIOL_ERR_CNT_COUNT        0x000000000000ffffULL

#define BMAC_STATE_MACHINE             0x003a0

/* Per-address-slot compare enables, one bit per slot 0..15. */
#define BMAC_ADDR_CMPEN                        0x003f8UL
#define  BMAC_ADDR_CMPEN_EN15          0x0000000000008000ULL
#define  BMAC_ADDR_CMPEN_EN14          0x0000000000004000ULL
#define  BMAC_ADDR_CMPEN_EN13          0x0000000000002000ULL
#define  BMAC_ADDR_CMPEN_EN12          0x0000000000001000ULL
#define  BMAC_ADDR_CMPEN_EN11          0x0000000000000800ULL
#define  BMAC_ADDR_CMPEN_EN10          0x0000000000000400ULL
#define  BMAC_ADDR_CMPEN_EN9           0x0000000000000200ULL
#define  BMAC_ADDR_CMPEN_EN8           0x0000000000000100ULL
#define  BMAC_ADDR_CMPEN_EN7           0x0000000000000080ULL
#define  BMAC_ADDR_CMPEN_EN6           0x0000000000000040ULL
#define  BMAC_ADDR_CMPEN_EN5           0x0000000000000020ULL
#define  BMAC_ADDR_CMPEN_EN4           0x0000000000000010ULL
#define  BMAC_ADDR_CMPEN_EN3           0x0000000000000008ULL
#define  BMAC_ADDR_CMPEN_EN2           0x0000000000000004ULL
#define  BMAC_ADDR_CMPEN_EN1           0x0000000000000002ULL
#define  BMAC_ADDR_CMPEN_EN0           0x0000000000000001ULL

/* Host-info entries: NUM in [0, BMAC_NUM_HOST_INFO). */
#define BMAC_NUM_HOST_INFO             9

#define BMAC_HOST_INFO(NUM)            (0x00400UL + (NUM) * 0x8UL)

/* 32-bit TX/RX byte and frame counters. */
#define BTXMAC_BYTE_CNT                        0x00448UL
#define  BTXMAC_BYTE_CNT_COUNT         0x00000000ffffffffULL

#define BTXMAC_FRM_CNT                 0x00450UL
#define  BTXMAC_FRM_CNT_COUNT          0x00000000ffffffffULL

#define BRXMAC_BYTE_CNT                        0x00458UL
#define  BRXMAC_BYTE_CNT_COUNT         0x00000000ffffffffULL

/* Fields within a BMAC_HOST_INFO entry. */
#define HOST_INFO_MPR                  0x0000000000000100ULL
#define HOST_INFO_MACRDCTBLN           0x0000000000000007ULL
-
/* XPCS registers, offset from np->regs + np->xpcs_off  */

#define XPCS_CONTROL1                  (FZC_MAC + 0x00000UL)
#define  XPCS_CONTROL1_RESET           0x0000000000008000ULL
#define  XPCS_CONTROL1_LOOPBACK                0x0000000000004000ULL
#define  XPCS_CONTROL1_SPEED_SELECT3   0x0000000000002000ULL
#define  XPCS_CONTROL1_CSR_LOW_PWR     0x0000000000000800ULL
#define  XPCS_CONTROL1_CSR_SPEED1      0x0000000000000040ULL
#define  XPCS_CONTROL1_CSR_SPEED0      0x000000000000003cULL

#define XPCS_STATUS1                   (FZC_MAC + 0x00008UL)
#define  XPCS_STATUS1_CSR_FAULT                0x0000000000000080ULL
#define  XPCS_STATUS1_CSR_RXLNK_STAT   0x0000000000000004ULL
#define  XPCS_STATUS1_CSR_LPWR_ABLE    0x0000000000000002ULL

#define XPCS_DEVICE_IDENTIFIER         (FZC_MAC + 0x00010UL)
#define  XPCS_DEVICE_IDENTIFIER_VAL    0x00000000ffffffffULL

#define XPCS_SPEED_ABILITY             (FZC_MAC + 0x00018UL)
#define  XPCS_SPEED_ABILITY_10GIG      0x0000000000000001ULL

#define XPCS_DEV_IN_PKG                        (FZC_MAC + 0x00020UL)
#define  XPCS_DEV_IN_PKG_CSR_VEND2     0x0000000080000000ULL
#define  XPCS_DEV_IN_PKG_CSR_VEND1     0x0000000040000000ULL
#define  XPCS_DEV_IN_PKG_DTE_XS                0x0000000000000020ULL
#define  XPCS_DEV_IN_PKG_PHY_XS                0x0000000000000010ULL
#define  XPCS_DEV_IN_PKG_PCS           0x0000000000000008ULL
#define  XPCS_DEV_IN_PKG_WIS           0x0000000000000004ULL
#define  XPCS_DEV_IN_PKG_PMD_PMA       0x0000000000000002ULL
#define  XPCS_DEV_IN_PKG_CLS22         0x0000000000000001ULL

#define XPCS_CONTROL2                  (FZC_MAC + 0x00028UL)
#define  XPCS_CONTROL2_CSR_PSC_SEL     0x0000000000000003ULL

#define XPCS_STATUS2                   (FZC_MAC + 0x00030UL)
#define  XPCS_STATUS2_CSR_DEV_PRES     0x000000000000c000ULL
#define  XPCS_STATUS2_CSR_TX_FAULT     0x0000000000000800ULL
#define  XPCS_STATUS2_CSR_RCV_FAULT    0x0000000000000400ULL
#define  XPCS_STATUS2_TEN_GBASE_W      0x0000000000000004ULL
#define  XPCS_STATUS2_TEN_GBASE_X      0x0000000000000002ULL
#define  XPCS_STATUS2_TEN_GBASE_R      0x0000000000000001ULL

#define XPCS_PKG_ID                    (FZC_MAC + 0x00038UL)
#define  XPCS_PKG_ID_VAL               0x00000000ffffffffULL

/* NOTE(review): IDX is accepted but unused -- every caller gets the
 * same 0x00040 offset.  Confirm against the PRM whether per-lane
 * status registers exist or the parameter is vestigial.
 */
#define XPCS_STATUS(IDX)               (FZC_MAC + 0x00040UL)
#define  XPCS_STATUS_CSR_LANE_ALIGN    0x0000000000001000ULL
#define  XPCS_STATUS_CSR_PATTEST_CAP   0x0000000000000800ULL
#define  XPCS_STATUS_CSR_LANE3_SYNC    0x0000000000000008ULL
#define  XPCS_STATUS_CSR_LANE2_SYNC    0x0000000000000004ULL
#define  XPCS_STATUS_CSR_LANE1_SYNC    0x0000000000000002ULL
#define  XPCS_STATUS_CSR_LANE0_SYNC    0x0000000000000001ULL

#define XPCS_TEST_CONTROL              (FZC_MAC + 0x00048UL)
#define  XPCS_TEST_CONTROL_TXTST_EN    0x0000000000000004ULL
#define  XPCS_TEST_CONTROL_TPAT_SEL    0x0000000000000003ULL

#define XPCS_CFG_VENDOR1               (FZC_MAC + 0x00050UL)
#define  XPCS_CFG_VENDOR1_DBG_IOTST    0x0000000000000080ULL
#define  XPCS_CFG_VENDOR1_DBG_SEL      0x0000000000000078ULL
#define  XPCS_CFG_VENDOR1_BYPASS_DET   0x0000000000000004ULL
#define  XPCS_CFG_VENDOR1_TXBUF_EN     0x0000000000000002ULL
#define  XPCS_CFG_VENDOR1_XPCS_EN      0x0000000000000001ULL

#define XPCS_DIAG_VENDOR2              (FZC_MAC + 0x00058UL)
#define  XPCS_DIAG_VENDOR2_SSM_LANE3   0x0000000001e00000ULL
#define  XPCS_DIAG_VENDOR2_SSM_LANE2   0x00000000001e0000ULL
#define  XPCS_DIAG_VENDOR2_SSM_LANE1   0x000000000001e000ULL
#define  XPCS_DIAG_VENDOR2_SSM_LANE0   0x0000000000001e00ULL
#define  XPCS_DIAG_VENDOR2_EBUF_SM     0x00000000000001feULL
#define  XPCS_DIAG_VENDOR2_RCV_SM      0x0000000000000001ULL

#define XPCS_MASK1                     (FZC_MAC + 0x00060UL)
#define  XPCS_MASK1_FAULT_MASK         0x0000000000000080ULL
#define  XPCS_MASK1_RXALIGN_STAT_MSK   0x0000000000000004ULL

/* TX count in the high 16 bits, RX count in the low 16. */
#define XPCS_PKT_COUNT                 (FZC_MAC + 0x00068UL)
#define  XPCS_PKT_COUNT_TX             0x00000000ffff0000ULL
#define  XPCS_PKT_COUNT_RX             0x000000000000ffffULL

#define XPCS_TX_SM                     (FZC_MAC + 0x00070UL)
#define  XPCS_TX_SM_VAL                        0x000000000000000fULL

#define XPCS_DESKEW_ERR_CNT            (FZC_MAC + 0x00078UL)
#define  XPCS_DESKEW_ERR_CNT_VAL       0x00000000000000ffULL

/* Per-lane symbol error counters, two 16-bit lanes per register. */
#define XPCS_SYMERR_CNT01              (FZC_MAC + 0x00080UL)
#define  XPCS_SYMERR_CNT01_LANE1       0x00000000ffff0000ULL
#define  XPCS_SYMERR_CNT01_LANE0       0x000000000000ffffULL

#define XPCS_SYMERR_CNT23              (FZC_MAC + 0x00088UL)
#define  XPCS_SYMERR_CNT23_LANE3       0x00000000ffff0000ULL
#define  XPCS_SYMERR_CNT23_LANE2       0x000000000000ffffULL

#define XPCS_TRAINING_VECTOR           (FZC_MAC + 0x00090UL)
#define  XPCS_TRAINING_VECTOR_VAL      0x00000000ffffffffULL
-
/* PCS registers, offset from np->regs + np->pcs_off  */

/* NOTE(review): field names below PCS_MII_CTL_10_100_SPEED drop the
 * _CTL_ infix (PCS_MII_AUTONEG_EN vs PCS_MII_CTL_RST) -- naming
 * inconsistency only; the values belong to PCS_MII_CTL.
 */
#define PCS_MII_CTL                    (FZC_MAC + 0x00000UL)
#define  PCS_MII_CTL_RST               0x0000000000008000ULL
#define  PCS_MII_CTL_10_100_SPEED      0x0000000000002000ULL
#define  PCS_MII_AUTONEG_EN            0x0000000000001000ULL
#define  PCS_MII_PWR_DOWN              0x0000000000000800ULL
#define  PCS_MII_ISOLATE               0x0000000000000400ULL
#define  PCS_MII_AUTONEG_RESTART       0x0000000000000200ULL
#define  PCS_MII_DUPLEX                        0x0000000000000100ULL
#define  PCS_MII_COLL_TEST             0x0000000000000080ULL
#define  PCS_MII_1000MB_SPEED          0x0000000000000040ULL

#define PCS_MII_STAT                   (FZC_MAC + 0x00008UL)
#define  PCS_MII_STAT_EXT_STATUS       0x0000000000000100ULL
#define  PCS_MII_STAT_AUTONEG_DONE     0x0000000000000020ULL
#define  PCS_MII_STAT_REMOTE_FAULT     0x0000000000000010ULL
#define  PCS_MII_STAT_AUTONEG_ABLE     0x0000000000000008ULL
#define  PCS_MII_STAT_LINK_STATUS      0x0000000000000004ULL
#define  PCS_MII_STAT_JABBER_DET       0x0000000000000002ULL
#define  PCS_MII_STAT_EXT_CAP          0x0000000000000001ULL

/* Autonegotiation advertisement register. */
#define PCS_MII_ADV                    (FZC_MAC + 0x00010UL)
#define  PCS_MII_ADV_NEXT_PAGE         0x0000000000008000ULL
#define  PCS_MII_ADV_ACK               0x0000000000004000ULL
#define  PCS_MII_ADV_REMOTE_FAULT      0x0000000000003000ULL
#define  PCS_MII_ADV_ASM_DIR           0x0000000000000100ULL
#define  PCS_MII_ADV_PAUSE             0x0000000000000080ULL
#define  PCS_MII_ADV_HALF_DUPLEX       0x0000000000000040ULL
#define  PCS_MII_ADV_FULL_DUPLEX       0x0000000000000020ULL

/* Link-partner ability register. */
#define PCS_MII_PARTNER                        (FZC_MAC + 0x00018UL)
#define  PCS_MII_PARTNER_NEXT_PAGE     0x0000000000008000ULL
#define  PCS_MII_PARTNER_ACK           0x0000000000004000ULL
#define  PCS_MII_PARTNER_REMOTE_FAULT  0x0000000000002000ULL
#define  PCS_MII_PARTNER_PAUSE         0x0000000000000180ULL
#define  PCS_MII_PARTNER_HALF_DUPLEX   0x0000000000000040ULL
#define  PCS_MII_PARTNER_FULL_DUPLEX   0x0000000000000020ULL

#define PCS_CONF                       (FZC_MAC + 0x00020UL)
#define  PCS_CONF_MASK                 0x0000000000000040ULL
#define  PCS_CONF_10MS_TMR_OVERRIDE    0x0000000000000020ULL
#define  PCS_CONF_JITTER_STUDY         0x0000000000000018ULL
#define  PCS_CONF_SIGDET_ACTIVE_LOW    0x0000000000000004ULL
#define  PCS_CONF_SIGDET_OVERRIDE      0x0000000000000002ULL
#define  PCS_CONF_ENABLE               0x0000000000000001ULL

/* PCS state-machine debug/status bits. */
#define PCS_STATE                      (FZC_MAC + 0x00028UL)
#define  PCS_STATE_D_PARTNER_FAIL      0x0000000020000000ULL
#define  PCS_STATE_D_WAIT_C_CODES_ACK  0x0000000010000000ULL
#define  PCS_STATE_D_SYNC_LOSS         0x0000000008000000ULL
#define  PCS_STATE_D_NO_GOOD_C_CODES   0x0000000004000000ULL
#define  PCS_STATE_D_SERDES            0x0000000002000000ULL
#define  PCS_STATE_D_BREAKLINK_C_CODES 0x0000000001000000ULL
#define  PCS_STATE_L_SIGDET            0x0000000000400000ULL
#define  PCS_STATE_L_SYNC_LOSS         0x0000000000200000ULL
#define  PCS_STATE_L_C_CODES           0x0000000000100000ULL
#define  PCS_STATE_LINK_CFG_STATE      0x000000000001e000ULL
#define  PCS_STATE_SEQ_DET_STATE       0x0000000000001800ULL
#define  PCS_STATE_WORD_SYNC_STATE     0x0000000000000700ULL
#define  PCS_STATE_NO_IDLE             0x000000000000000fULL

#define PCS_INTERRUPT                  (FZC_MAC + 0x00030UL)
#define  PCS_INTERRUPT_LSTATUS         0x0000000000000004ULL

/* Datapath selection: PCS, MII, or forced-linkup mode. */
#define PCS_DPATH_MODE                 (FZC_MAC + 0x000a0UL)
#define  PCS_DPATH_MODE_PCS            0x0000000000000000ULL
#define  PCS_DPATH_MODE_MII            0x0000000000000002ULL
#define  PCS_DPATH_MODE_LINKUP_F_ENAB  0x0000000000000001ULL

/* 11-bit RX/TX packet counters. */
#define PCS_PKT_CNT                    (FZC_MAC + 0x000c0UL)
#define  PCS_PKT_CNT_RX                        0x0000000007ff0000ULL
#define  PCS_PKT_CNT_TX                        0x00000000000007ffULL
-
/* MIF (Management Interface) bit-bang registers, the MIF_FRAME_OUTPUT
 * register layout, and helper macros that assemble management frames
 * for it.  MDIO_* macros build ST=0 frames carrying a device address
 * (clause-45 style); MII_* macros build ST=1 frames addressing a PHY
 * register directly (clause-22 style).
 */
#define MIF_BB_MDC                     (FZC_MAC + 0x16000UL)
#define  MIF_BB_MDC_CLK                        0x0000000000000001ULL

#define MIF_BB_MDO                     (FZC_MAC + 0x16008UL)
#define  MIF_BB_MDO_DAT                        0x0000000000000001ULL

#define MIF_BB_MDO_EN                  (FZC_MAC + 0x16010UL)
#define  MIF_BB_MDO_EN_VAL             0x0000000000000001ULL

/* Frame register fields: ST (start), OP (opcode), PORT (phy/port
 * address), REG (register/device address), TA (turnaround), DATA.
 */
#define MIF_FRAME_OUTPUT               (FZC_MAC + 0x16018UL)
#define  MIF_FRAME_OUTPUT_ST           0x00000000c0000000ULL
#define  MIF_FRAME_OUTPUT_ST_SHIFT     30
#define  MIF_FRAME_OUTPUT_OP_ADDR      0x0000000000000000ULL
#define  MIF_FRAME_OUTPUT_OP_WRITE     0x0000000010000000ULL
#define  MIF_FRAME_OUTPUT_OP_READ_INC  0x0000000020000000ULL
#define  MIF_FRAME_OUTPUT_OP_READ      0x0000000030000000ULL
#define  MIF_FRAME_OUTPUT_OP_SHIFT     28
#define  MIF_FRAME_OUTPUT_PORT         0x000000000f800000ULL
#define  MIF_FRAME_OUTPUT_PORT_SHIFT   23
#define  MIF_FRAME_OUTPUT_REG          0x00000000007c0000ULL
#define  MIF_FRAME_OUTPUT_REG_SHIFT    18
#define  MIF_FRAME_OUTPUT_TA           0x0000000000030000ULL
#define  MIF_FRAME_OUTPUT_TA_SHIFT     16
#define  MIF_FRAME_OUTPUT_DATA         0x000000000000ffffULL
#define  MIF_FRAME_OUTPUT_DATA_SHIFT   0

/* Arguments are parenthesized so that expression arguments (e.g.
 * "base + n") bind correctly against << and | (CERT PRE01-C); the
 * previous versions expanded e.g. "port << SHIFT" unparenthesized.
 */
#define MDIO_ADDR_OP(port, dev, reg) \
       ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
        MIF_FRAME_OUTPUT_OP_ADDR | \
        ((port) << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
        ((dev) << MIF_FRAME_OUTPUT_REG_SHIFT) | \
        (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \
        ((reg) << MIF_FRAME_OUTPUT_DATA_SHIFT))

#define MDIO_READ_OP(port, dev) \
       ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
        MIF_FRAME_OUTPUT_OP_READ | \
        ((port) << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
        ((dev) << MIF_FRAME_OUTPUT_REG_SHIFT) | \
        (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT))

#define MDIO_WRITE_OP(port, dev, data) \
       ((0 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
        MIF_FRAME_OUTPUT_OP_WRITE | \
        ((port) << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
        ((dev) << MIF_FRAME_OUTPUT_REG_SHIFT) | \
        (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \
        ((data) << MIF_FRAME_OUTPUT_DATA_SHIFT))

#define MII_READ_OP(port, reg) \
       ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
        (2 << MIF_FRAME_OUTPUT_OP_SHIFT) | \
        ((port) << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
        ((reg) << MIF_FRAME_OUTPUT_REG_SHIFT) | \
        (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT))

#define MII_WRITE_OP(port, reg, data) \
       ((1 << MIF_FRAME_OUTPUT_ST_SHIFT) | \
        (1 << MIF_FRAME_OUTPUT_OP_SHIFT) | \
        ((port) << MIF_FRAME_OUTPUT_PORT_SHIFT) | \
        ((reg) << MIF_FRAME_OUTPUT_REG_SHIFT) | \
        (0x2 << MIF_FRAME_OUTPUT_TA_SHIFT) | \
        ((data) << MIF_FRAME_OUTPUT_DATA_SHIFT))
-
/* MIF configuration, polling, state-machine and interrupt registers,
 * plus the Ethernet SerDes reset register.
 */
#define MIF_CONFIG                     (FZC_MAC + 0x16020UL)
#define  MIF_CONFIG_ATCA_GE            0x0000000000010000ULL
#define  MIF_CONFIG_INDIRECT_MODE      0x0000000000008000ULL
#define  MIF_CONFIG_POLL_PRT_PHYADDR   0x0000000000003c00ULL
#define  MIF_CONFIG_POLL_DEV_REG_ADDR  0x00000000000003e0ULL
#define  MIF_CONFIG_BB_MODE            0x0000000000000010ULL
#define  MIF_CONFIG_POLL_EN            0x0000000000000008ULL
#define  MIF_CONFIG_BB_SER_SEL         0x0000000000000006ULL
#define  MIF_CONFIG_MANUAL_MODE                0x0000000000000001ULL

/* Last polled data in the high 16 bits, status in the low 16. */
#define MIF_POLL_STATUS                        (FZC_MAC + 0x16028UL)
#define  MIF_POLL_STATUS_DATA          0x00000000ffff0000ULL
#define  MIF_POLL_STATUS_STAT          0x000000000000ffffULL

#define MIF_POLL_MASK                  (FZC_MAC + 0x16030UL)
#define  MIF_POLL_MASK_VAL             0x000000000000ffffULL

/* NOTE(review): MIF_SM_MDI_0 (0x2400) has two bits set while MIF_SM_MDI_1
 * is a single bit (0x4000) -- verify against the hardware doc.
 */
#define MIF_SM                         (FZC_MAC + 0x16038UL)
#define  MIF_SM_PORT_ADDR              0x00000000001f0000ULL
#define  MIF_SM_MDI_1                  0x0000000000004000ULL
#define  MIF_SM_MDI_0                  0x0000000000002400ULL
#define  MIF_SM_MDCLK                  0x0000000000001000ULL
#define  MIF_SM_MDO_EN                 0x0000000000000800ULL
#define  MIF_SM_MDO                    0x0000000000000400ULL
#define  MIF_SM_MDI                    0x0000000000000200ULL
#define  MIF_SM_CTL                    0x00000000000001c0ULL
#define  MIF_SM_EX                     0x000000000000003fULL

#define MIF_STATUS                     (FZC_MAC + 0x16040UL)
#define  MIF_STATUS_MDINT1             0x0000000000000020ULL
#define  MIF_STATUS_MDINT0             0x0000000000000010ULL

#define MIF_MASK                       (FZC_MAC + 0x16048UL)
#define  MIF_MASK_MDINT1               0x0000000000000020ULL
#define  MIF_MASK_MDINT0               0x0000000000000010ULL
#define  MIF_MASK_PEU_ERR              0x0000000000000008ULL
#define  MIF_MASK_YC                   0x0000000000000004ULL
#define  MIF_MASK_XGE_ERR0             0x0000000000000002ULL
#define  MIF_MASK_MIF_INIT_DONE                0x0000000000000001ULL

/* Per-SerDes reset bits. */
#define ENET_SERDES_RESET              (FZC_MAC + 0x14000UL)
#define  ENET_SERDES_RESET_1           0x0000000000000002ULL
#define  ENET_SERDES_RESET_0           0x0000000000000001ULL
-
/* Ethernet SerDes configuration: PLL setup, per-lane control (signal
 * detect, emphasis, level adjust, RX termination) and test modes.
 */
#define ENET_SERDES_CFG                        (FZC_MAC + 0x14008UL)
#define  ENET_SERDES_BE_LOOPBACK       0x0000000000000002ULL
#define  ENET_SERDES_CFG_FORCE_RDY     0x0000000000000001ULL

#define ENET_SERDES_0_PLL_CFG          (FZC_MAC + 0x14010UL)
#define  ENET_SERDES_PLL_FBDIV0                0x0000000000000001ULL
#define  ENET_SERDES_PLL_FBDIV1                0x0000000000000002ULL
#define  ENET_SERDES_PLL_FBDIV2                0x0000000000000004ULL
#define  ENET_SERDES_PLL_HRATE0                0x0000000000000008ULL
#define  ENET_SERDES_PLL_HRATE1                0x0000000000000010ULL
#define  ENET_SERDES_PLL_HRATE2                0x0000000000000020ULL
#define  ENET_SERDES_PLL_HRATE3                0x0000000000000040ULL

/* Per-channel (0-3) fields: SDET, 3-bit EMPH and LADJ, RXITERM. */
#define ENET_SERDES_0_CTRL_CFG         (FZC_MAC + 0x14018UL)
#define  ENET_SERDES_CTRL_SDET_0       0x0000000000000001ULL
#define  ENET_SERDES_CTRL_SDET_1       0x0000000000000002ULL
#define  ENET_SERDES_CTRL_SDET_2       0x0000000000000004ULL
#define  ENET_SERDES_CTRL_SDET_3       0x0000000000000008ULL
#define  ENET_SERDES_CTRL_EMPH_0       0x0000000000000070ULL
#define  ENET_SERDES_CTRL_EMPH_0_SHIFT 4
#define  ENET_SERDES_CTRL_EMPH_1       0x0000000000000380ULL
#define  ENET_SERDES_CTRL_EMPH_1_SHIFT 7
#define  ENET_SERDES_CTRL_EMPH_2       0x0000000000001c00ULL
#define  ENET_SERDES_CTRL_EMPH_2_SHIFT 10
#define  ENET_SERDES_CTRL_EMPH_3       0x000000000000e000ULL
#define  ENET_SERDES_CTRL_EMPH_3_SHIFT 13
#define  ENET_SERDES_CTRL_LADJ_0       0x0000000000070000ULL
#define  ENET_SERDES_CTRL_LADJ_0_SHIFT 16
#define  ENET_SERDES_CTRL_LADJ_1       0x0000000000380000ULL
#define  ENET_SERDES_CTRL_LADJ_1_SHIFT 19
#define  ENET_SERDES_CTRL_LADJ_2       0x0000000001c00000ULL
#define  ENET_SERDES_CTRL_LADJ_2_SHIFT 22
#define  ENET_SERDES_CTRL_LADJ_3       0x000000000e000000ULL
#define  ENET_SERDES_CTRL_LADJ_3_SHIFT 25
#define  ENET_SERDES_CTRL_RXITERM_0    0x0000000010000000ULL
#define  ENET_SERDES_CTRL_RXITERM_1    0x0000000020000000ULL
#define  ENET_SERDES_CTRL_RXITERM_2    0x0000000040000000ULL
#define  ENET_SERDES_CTRL_RXITERM_3    0x0000000080000000ULL

/* 2-bit test-mode select per channel; values below. */
#define ENET_SERDES_0_TEST_CFG         (FZC_MAC + 0x14020UL)
#define  ENET_SERDES_TEST_MD_0         0x0000000000000003ULL
#define  ENET_SERDES_TEST_MD_0_SHIFT   0
#define  ENET_SERDES_TEST_MD_1         0x000000000000000cULL
#define  ENET_SERDES_TEST_MD_1_SHIFT   2
#define  ENET_SERDES_TEST_MD_2         0x0000000000000030ULL
#define  ENET_SERDES_TEST_MD_2_SHIFT   4
#define  ENET_SERDES_TEST_MD_3         0x00000000000000c0ULL
#define  ENET_SERDES_TEST_MD_3_SHIFT   6

/* Values for the TEST_MD_* fields above. */
#define ENET_TEST_MD_NO_LOOPBACK       0x0
#define ENET_TEST_MD_EWRAP             0x1
#define ENET_TEST_MD_PAD_LOOPBACK      0x2
#define ENET_TEST_MD_REV_LOOPBACK      0x3

/* Second SerDes instance: same layouts as the _0_ registers above. */
#define ENET_SERDES_1_PLL_CFG          (FZC_MAC + 0x14028UL)
#define ENET_SERDES_1_CTRL_CFG         (FZC_MAC + 0x14030UL)
#define ENET_SERDES_1_TEST_CFG         (FZC_MAC + 0x14038UL)

#define ENET_RGMII_CFG_REG             (FZC_MAC + 0x14040UL)
-
/* SerDes interrupt-signal register: per-port (P0/P1) ready, detect,
 * and per-channel signal-loss bits; aggregate masks provided per port.
 */
#define ESR_INT_SIGNALS                        (FZC_MAC + 0x14800UL)
#define  ESR_INT_SIGNALS_ALL           0x00000000ffffffffULL
#define  ESR_INT_SIGNALS_P0_BITS       0x0000000033e0000fULL
#define  ESR_INT_SIGNALS_P1_BITS       0x000000000c1f00f0ULL
#define  ESR_INT_SRDY0_P0              0x0000000020000000ULL
#define  ESR_INT_DET0_P0               0x0000000010000000ULL
#define  ESR_INT_SRDY0_P1              0x0000000008000000ULL
#define  ESR_INT_DET0_P1               0x0000000004000000ULL
#define  ESR_INT_XSRDY_P0              0x0000000002000000ULL
#define  ESR_INT_XDP_P0_CH3            0x0000000001000000ULL
#define  ESR_INT_XDP_P0_CH2            0x0000000000800000ULL
#define  ESR_INT_XDP_P0_CH1            0x0000000000400000ULL
#define  ESR_INT_XDP_P0_CH0            0x0000000000200000ULL
#define  ESR_INT_XSRDY_P1              0x0000000000100000ULL
#define  ESR_INT_XDP_P1_CH3            0x0000000000080000ULL
#define  ESR_INT_XDP_P1_CH2            0x0000000000040000ULL
#define  ESR_INT_XDP_P1_CH1            0x0000000000020000ULL
#define  ESR_INT_XDP_P1_CH0            0x0000000000010000ULL
#define  ESR_INT_SLOSS_P1_CH3          0x0000000000000080ULL
#define  ESR_INT_SLOSS_P1_CH2          0x0000000000000040ULL
#define  ESR_INT_SLOSS_P1_CH1          0x0000000000000020ULL
#define  ESR_INT_SLOSS_P1_CH0          0x0000000000000010ULL
#define  ESR_INT_SLOSS_P0_CH3          0x0000000000000008ULL
#define  ESR_INT_SLOSS_P0_CH2          0x0000000000000004ULL
#define  ESR_INT_SLOSS_P0_CH1          0x0000000000000002ULL
#define  ESR_INT_SLOSS_P0_CH0          0x0000000000000001ULL

#define ESR_DEBUG_SEL                  (FZC_MAC + 0x14808UL)
#define  ESR_DEBUG_SEL_VAL             0x000000000000003fULL

/* SerDes registers behind MIF */
/* MDIO device address and register base for the ESR SerDes block. */
#define NIU_ESR_DEV_ADDR               0x1e
#define ESR_BASE                       0x0000
-
/* ESR SerDes registers (accessed via MDIO at NIU_ESR_DEV_ADDR); each
 * 32-bit register is split into _L/_H 16-bit halves at consecutive
 * MDIO addresses, with a 0x10 stride per channel for CHAN-indexed ones.
 */
#define ESR_RXTX_COMM_CTRL_L           (ESR_BASE + 0x0000)
#define ESR_RXTX_COMM_CTRL_H           (ESR_BASE + 0x0001)

#define ESR_RXTX_RESET_CTRL_L          (ESR_BASE + 0x0002)
#define ESR_RXTX_RESET_CTRL_H          (ESR_BASE + 0x0003)

#define ESR_RX_POWER_CTRL_L            (ESR_BASE + 0x0004)
#define ESR_RX_POWER_CTRL_H            (ESR_BASE + 0x0005)

#define ESR_TX_POWER_CTRL_L            (ESR_BASE + 0x0006)
#define ESR_TX_POWER_CTRL_H            (ESR_BASE + 0x0007)

#define ESR_MISC_POWER_CTRL_L          (ESR_BASE + 0x0008)
#define ESR_MISC_POWER_CTRL_H          (ESR_BASE + 0x0009)

/* Per-channel RX/TX control; masks below describe the combined 32-bit
 * register value.
 */
#define ESR_RXTX_CTRL_L(CHAN)          (ESR_BASE + 0x0080 + (CHAN) * 0x10)
#define ESR_RXTX_CTRL_H(CHAN)          (ESR_BASE + 0x0081 + (CHAN) * 0x10)
#define  ESR_RXTX_CTRL_BIASCNTL                0x80000000
#define  ESR_RXTX_CTRL_RESV1           0x7c000000
#define  ESR_RXTX_CTRL_TDENFIFO                0x02000000
#define  ESR_RXTX_CTRL_TDWS20          0x01000000
#define  ESR_RXTX_CTRL_VMUXLO          0x00c00000
#define  ESR_RXTX_CTRL_VMUXLO_SHIFT    22
#define  ESR_RXTX_CTRL_VPULSELO                0x00300000
#define  ESR_RXTX_CTRL_VPULSELO_SHIFT  20
#define  ESR_RXTX_CTRL_RESV2           0x000f0000
#define  ESR_RXTX_CTRL_RESV3           0x0000c000
#define  ESR_RXTX_CTRL_RXPRESWIN       0x00003000
#define  ESR_RXTX_CTRL_RXPRESWIN_SHIFT 12
#define  ESR_RXTX_CTRL_RESV4           0x00000800
#define  ESR_RXTX_CTRL_RISEFALL                0x00000700
#define  ESR_RXTX_CTRL_RISEFALL_SHIFT  8
#define  ESR_RXTX_CTRL_RESV5           0x000000fe
#define  ESR_RXTX_CTRL_ENSTRETCH       0x00000001

#define ESR_RXTX_TUNING_L(CHAN)                (ESR_BASE + 0x0082 + (CHAN) * 0x10)
#define ESR_RXTX_TUNING_H(CHAN)                (ESR_BASE + 0x0083 + (CHAN) * 0x10)

#define ESR_RX_SYNCCHAR_L(CHAN)                (ESR_BASE + 0x0084 + (CHAN) * 0x10)
#define ESR_RX_SYNCCHAR_H(CHAN)                (ESR_BASE + 0x0085 + (CHAN) * 0x10)

#define ESR_RXTX_TEST_L(CHAN)          (ESR_BASE + 0x0086 + (CHAN) * 0x10)
#define ESR_RXTX_TEST_H(CHAN)          (ESR_BASE + 0x0087 + (CHAN) * 0x10)

#define ESR_GLUE_CTRL0_L(CHAN)         (ESR_BASE + 0x0088 + (CHAN) * 0x10)
#define ESR_GLUE_CTRL0_H(CHAN)         (ESR_BASE + 0x0089 + (CHAN) * 0x10)
#define  ESR_GLUE_CTRL0_RESV1          0xf8000000
#define  ESR_GLUE_CTRL0_BLTIME         0x07000000
#define  ESR_GLUE_CTRL0_BLTIME_SHIFT   24
#define  ESR_GLUE_CTRL0_RESV2          0x00ff0000
#define  ESR_GLUE_CTRL0_RXLOS_TEST     0x00008000
#define  ESR_GLUE_CTRL0_RESV3          0x00004000
#define  ESR_GLUE_CTRL0_RXLOSENAB      0x00002000
#define  ESR_GLUE_CTRL0_FASTRESYNC     0x00001000
#define  ESR_GLUE_CTRL0_SRATE          0x00000f00
#define  ESR_GLUE_CTRL0_SRATE_SHIFT    8
#define  ESR_GLUE_CTRL0_THCNT          0x000000ff
#define  ESR_GLUE_CTRL0_THCNT_SHIFT    0

/* Values for the BLTIME field of ESR_GLUE_CTRL0. */
#define BLTIME_64_CYCLES               0
#define BLTIME_128_CYCLES              1
#define BLTIME_256_CYCLES              2
#define BLTIME_300_CYCLES              3
#define BLTIME_384_CYCLES              4
#define BLTIME_512_CYCLES              5
#define BLTIME_1024_CYCLES             6
#define BLTIME_2048_CYCLES             7

/* NOTE(review): the TUNING1/2/3 _H macros expand to the SAME address as
 * their _L twins, unlike every other _L/_H pair in this block (which
 * differ by 1, e.g. TUNING_L/_H at 0x0082/0x0083).  Possible copy-paste
 * slip -- confirm against the SerDes register map before relying on _H.
 */
#define ESR_GLUE_CTRL1_L(CHAN)         (ESR_BASE + 0x008a + (CHAN) * 0x10)
#define ESR_GLUE_CTRL1_H(CHAN)         (ESR_BASE + 0x008b + (CHAN) * 0x10)
#define ESR_RXTX_TUNING1_L(CHAN)       (ESR_BASE + 0x00c2 + (CHAN) * 0x10)
#define ESR_RXTX_TUNING1_H(CHAN)       (ESR_BASE + 0x00c2 + (CHAN) * 0x10)
#define ESR_RXTX_TUNING2_L(CHAN)       (ESR_BASE + 0x0102 + (CHAN) * 0x10)
#define ESR_RXTX_TUNING2_H(CHAN)       (ESR_BASE + 0x0102 + (CHAN) * 0x10)
#define ESR_RXTX_TUNING3_L(CHAN)       (ESR_BASE + 0x0142 + (CHAN) * 0x10)
#define ESR_RXTX_TUNING3_H(CHAN)       (ESR_BASE + 0x0142 + (CHAN) * 0x10)
-
/* ESR2 (TI SerDes) registers, also behind MDIO at device 0x1e but at
 * base 0x8000; registers are _L/_H 16-bit halves, channel stride 4.
 */
#define NIU_ESR2_DEV_ADDR              0x1e
#define ESR2_BASE                      0x8000

/* PLL configuration: standard select, loop divider, multiplier, enable. */
#define ESR2_TI_PLL_CFG_L              (ESR2_BASE + 0x000)
#define ESR2_TI_PLL_CFG_H              (ESR2_BASE + 0x001)
#define  PLL_CFG_STD                   0x00000c00
#define  PLL_CFG_STD_SHIFT             10
#define  PLL_CFG_LD                    0x00000300
#define  PLL_CFG_LD_SHIFT              8
#define  PLL_CFG_MPY                   0x0000001e
#define  PLL_CFG_MPY_SHIFT             1
#define  PLL_CFG_MPY_4X                0x0
#define  PLL_CFG_MPY_5X                0x00000002
#define  PLL_CFG_MPY_6X                0x00000004
#define  PLL_CFG_MPY_8X                0x00000008
#define  PLL_CFG_MPY_10X               0x0000000a
#define  PLL_CFG_MPY_12X               0x0000000c
#define  PLL_CFG_MPY_12P5X             0x0000000e
#define  PLL_CFG_ENPLL                 0x00000001

#define ESR2_TI_PLL_STS_L              (ESR2_BASE + 0x002)
#define ESR2_TI_PLL_STS_H              (ESR2_BASE + 0x003)
#define  PLL_STS_LOCK                  0x00000001

/* PLL test configuration: BIST enables, loopback select, clock bypass,
 * pattern enables/select.
 */
#define ESR2_TI_PLL_TEST_CFG_L         (ESR2_BASE + 0x004)
#define ESR2_TI_PLL_TEST_CFG_H         (ESR2_BASE + 0x005)
#define  PLL_TEST_INVPATT              0x00004000
#define  PLL_TEST_RATE                 0x00003000
#define  PLL_TEST_RATE_SHIFT           12
#define  PLL_TEST_CFG_ENBSAC           0x00000400
#define  PLL_TEST_CFG_ENBSRX           0x00000200
#define  PLL_TEST_CFG_ENBSTX           0x00000100
#define  PLL_TEST_CFG_LOOPBACK_PAD     0x00000040
#define  PLL_TEST_CFG_LOOPBACK_CML_DIS 0x00000080
#define  PLL_TEST_CFG_LOOPBACK_CML_EN  0x000000c0
#define  PLL_TEST_CFG_CLKBYP           0x00000030
#define  PLL_TEST_CFG_CLKBYP_SHIFT     4
#define  PLL_TEST_CFG_EN_RXPATT                0x00000008
#define  PLL_TEST_CFG_EN_TXPATT                0x00000004
#define  PLL_TEST_CFG_TPATT            0x00000003
#define  PLL_TEST_CFG_TPATT_SHIFT      0

/* Per-channel TX configuration: swing, de-emphasis, rate, bus width. */
#define ESR2_TI_PLL_TX_CFG_L(CHAN)     (ESR2_BASE + 0x100 + (CHAN) * 4)
#define ESR2_TI_PLL_TX_CFG_H(CHAN)     (ESR2_BASE + 0x101 + (CHAN) * 4)
#define  PLL_TX_CFG_RDTCT              0x00600000
#define  PLL_TX_CFG_RDTCT_SHIFT                21
#define  PLL_TX_CFG_ENIDL              0x00100000
#define  PLL_TX_CFG_BSTX               0x00020000
#define  PLL_TX_CFG_ENFTP              0x00010000
#define  PLL_TX_CFG_DE                 0x0000f000
#define  PLL_TX_CFG_DE_SHIFT           12
#define  PLL_TX_CFG_SWING_125MV                0x00000000
#define  PLL_TX_CFG_SWING_250MV                0x00000200
#define  PLL_TX_CFG_SWING_500MV                0x00000400
#define  PLL_TX_CFG_SWING_625MV                0x00000600
#define  PLL_TX_CFG_SWING_750MV                0x00000800
#define  PLL_TX_CFG_SWING_1000MV       0x00000a00
#define  PLL_TX_CFG_SWING_1250MV       0x00000c00
#define  PLL_TX_CFG_SWING_1375MV       0x00000e00
#define  PLL_TX_CFG_CM                 0x00000100
#define  PLL_TX_CFG_INVPAIR            0x00000080
#define  PLL_TX_CFG_RATE               0x00000060
#define  PLL_TX_CFG_RATE_SHIFT         5
#define  PLL_TX_CFG_RATE_FULL          0x0
#define  PLL_TX_CFG_RATE_HALF          0x20
#define  PLL_TX_CFG_RATE_QUAD          0x40
#define  PLL_TX_CFG_BUSWIDTH           0x0000001c
#define  PLL_TX_CFG_BUSWIDTH_SHIFT     2
#define  PLL_TX_CFG_ENTEST             0x00000002
#define  PLL_TX_CFG_ENTX               0x00000001

#define ESR2_TI_PLL_TX_STS_L(CHAN)     (ESR2_BASE + 0x102 + (CHAN) * 4)
#define ESR2_TI_PLL_TX_STS_H(CHAN)     (ESR2_BASE + 0x103 + (CHAN) * 4)
#define  PLL_TX_STS_RDTCTIP            0x00000002
#define  PLL_TX_STS_TESTFAIL           0x00000001

/* Per-channel RX configuration: equalizer, CDR, LOS thresholds,
 * alignment, termination, rate, bus width.
 */
#define ESR2_TI_PLL_RX_CFG_L(CHAN)     (ESR2_BASE + 0x120 + (CHAN) * 4)
#define ESR2_TI_PLL_RX_CFG_H(CHAN)     (ESR2_BASE + 0x121 + (CHAN) * 4)
#define  PLL_RX_CFG_BSINRXN            0x02000000
#define  PLL_RX_CFG_BSINRXP            0x01000000
#define  PLL_RX_CFG_EQ_MAX_LF          0x00000000
#define  PLL_RX_CFG_EQ_LP_ADAPTIVE     0x00080000
#define  PLL_RX_CFG_EQ_LP_1084MHZ      0x00400000
#define  PLL_RX_CFG_EQ_LP_805MHZ       0x00480000
#define  PLL_RX_CFG_EQ_LP_573MHZ       0x00500000
#define  PLL_RX_CFG_EQ_LP_402MHZ       0x00580000
#define  PLL_RX_CFG_EQ_LP_304MHZ       0x00600000
#define  PLL_RX_CFG_EQ_LP_216MHZ       0x00680000
#define  PLL_RX_CFG_EQ_LP_156MHZ       0x00700000
#define  PLL_RX_CFG_EQ_LP_135MHZ       0x00780000
#define  PLL_RX_CFG_EQ_SHIFT           19
#define  PLL_RX_CFG_CDR                        0x00070000
#define  PLL_RX_CFG_CDR_SHIFT          16
#define  PLL_RX_CFG_LOS_DIS            0x00000000
#define  PLL_RX_CFG_LOS_HTHRESH                0x00004000
#define  PLL_RX_CFG_LOS_LTHRESH                0x00008000
#define  PLL_RX_CFG_ALIGN_DIS          0x00000000
#define  PLL_RX_CFG_ALIGN_ENA          0x00001000
#define  PLL_RX_CFG_ALIGN_JOG          0x00002000
#define  PLL_RX_CFG_TERM_VDDT          0x00000000
#define  PLL_RX_CFG_TERM_0P8VDDT       0x00000100
#define  PLL_RX_CFG_TERM_FLOAT         0x00000300
#define  PLL_RX_CFG_INVPAIR            0x00000080
#define  PLL_RX_CFG_RATE               0x00000060
#define  PLL_RX_CFG_RATE_SHIFT         5
#define  PLL_RX_CFG_RATE_FULL          0x0
#define  PLL_RX_CFG_RATE_HALF          0x20
#define  PLL_RX_CFG_RATE_QUAD          0x40
#define  PLL_RX_CFG_BUSWIDTH           0x0000001c
#define  PLL_RX_CFG_BUSWIDTH_SHIFT     2
#define  PLL_RX_CFG_ENTEST             0x00000002
#define  PLL_RX_CFG_ENRX               0x00000001

#define ESR2_TI_PLL_RX_STS_L(CHAN)     (ESR2_BASE + 0x122 + (CHAN) * 4)
#define ESR2_TI_PLL_RX_STS_H(CHAN)     (ESR2_BASE + 0x123 + (CHAN) * 4)
#define  PLL_RX_STS_CRCIDTCT           0x00000200
#define  PLL_RX_STS_CWDTCT             0x00000100
#define  PLL_RX_STS_BSRXN              0x00000020
#define  PLL_RX_STS_BSRXP              0x00000010
#define  PLL_RX_STS_LOSDTCT            0x00000008
#define  PLL_RX_STS_ODDCG              0x00000004
#define  PLL_RX_STS_SYNC               0x00000002
#define  PLL_RX_STS_TESTFAIL           0x00000001
-
/* FFLP (flow classifier) tables: VLAN table, L2/L3 class registers,
 * and the TCAM key/mask registers (200-bit keys split over four
 * 64-bit registers; bit ranges noted per register).
 */
#define ENET_VLAN_TBL(IDX)             (FZC_FFLP + 0x00000UL + (IDX) * 8UL)
#define  ENET_VLAN_TBL_PARITY1         0x0000000000020000ULL
#define  ENET_VLAN_TBL_PARITY0         0x0000000000010000ULL
#define  ENET_VLAN_TBL_VPR             0x0000000000000008ULL
#define  ENET_VLAN_TBL_VLANRDCTBLN     0x0000000000000007ULL
#define  ENET_VLAN_TBL_SHIFT(PORT)     ((PORT) * 4)

#define ENET_VLAN_TBL_NUM_ENTRIES      4096

#define FFLP_VLAN_PAR_ERR              (FZC_FFLP + 0x0800UL)
#define  FFLP_VLAN_PAR_ERR_ERR         0x0000000080000000ULL
#define  FFLP_VLAN_PAR_ERR_M_ERR       0x0000000040000000ULL
#define  FFLP_VLAN_PAR_ERR_ADDR                0x000000003ffc0000ULL
#define  FFLP_VLAN_PAR_ERR_DATA                0x000000000003ffffULL

/* L2 class: match on ethertype. */
#define L2_CLS(IDX)                    (FZC_FFLP + 0x20000UL + (IDX) * 8UL)
#define  L2_CLS_VLD                    0x0000000000010000ULL
#define  L2_CLS_ETYPE                  0x000000000000ffffULL
#define  L2_CLS_ETYPE_SHIFT            0

/* L3 class: match on IP version, protocol ID, and TOS (with mask). */
#define L3_CLS(IDX)                    (FZC_FFLP + 0x20010UL + (IDX) * 8UL)
#define  L3_CLS_VALID                  0x0000000002000000ULL
#define  L3_CLS_IPVER                  0x0000000001000000ULL
#define  L3_CLS_PID                    0x0000000000ff0000ULL
#define  L3_CLS_PID_SHIFT              16
#define  L3_CLS_TOSMASK                        0x000000000000ff00ULL
#define  L3_CLS_TOSMASK_SHIFT          8
#define  L3_CLS_TOS                    0x00000000000000ffULL
#define  L3_CLS_TOS_SHIFT              0

#define TCAM_KEY(IDX)                  (FZC_FFLP + 0x20030UL + (IDX) * 8UL)
#define  TCAM_KEY_DISC                 0x0000000000000008ULL
#define  TCAM_KEY_TSEL                 0x0000000000000004ULL
#define  TCAM_KEY_IPADDR               0x0000000000000001ULL

#define TCAM_KEY_0                     (FZC_FFLP + 0x20090UL)
#define  TCAM_KEY_0_KEY                        0x00000000000000ffULL /* bits 192-199 */

#define TCAM_KEY_1                     (FZC_FFLP + 0x20098UL)
#define  TCAM_KEY_1_KEY                        0xffffffffffffffffULL /* bits 128-191 */

#define TCAM_KEY_2                     (FZC_FFLP + 0x200a0UL)
#define  TCAM_KEY_2_KEY                        0xffffffffffffffffULL /* bits 64-127 */

#define TCAM_KEY_3                     (FZC_FFLP + 0x200a8UL)
#define  TCAM_KEY_3_KEY                        0xffffffffffffffffULL /* bits 0-63 */

#define TCAM_KEY_MASK_0                        (FZC_FFLP + 0x200b0UL)
#define  TCAM_KEY_MASK_0_KEY_SEL       0x00000000000000ffULL /* bits 192-199 */

#define TCAM_KEY_MASK_1                        (FZC_FFLP + 0x200b8UL)
#define  TCAM_KEY_MASK_1_KEY_SEL       0xffffffffffffffffULL /* bits 128-191 */

#define TCAM_KEY_MASK_2                        (FZC_FFLP + 0x200c0UL)
#define  TCAM_KEY_MASK_2_KEY_SEL       0xffffffffffffffffULL /* bits 64-127 */

#define TCAM_KEY_MASK_3                        (FZC_FFLP + 0x200c8UL)
#define  TCAM_KEY_MASK_3_KEY_SEL       0xffffffffffffffffULL /* bits 0-63 */
-
-#define TCAM_CTL                       (FZC_FFLP + 0x200d0UL)
-#define  TCAM_CTL_RWC                  0x00000000001c0000ULL
-#define  TCAM_CTL_RWC_TCAM_WRITE       0x0000000000000000ULL
-#define  TCAM_CTL_RWC_TCAM_READ                0x0000000000040000ULL
-#define  TCAM_CTL_RWC_TCAM_COMPARE     0x0000000000080000ULL
-#define  TCAM_CTL_RWC_RAM_WRITE                0x0000000000100000ULL
-#define  TCAM_CTL_RWC_RAM_READ         0x0000000000140000ULL
-#define  TCAM_CTL_STAT                 0x0000000000020000ULL
-#define  TCAM_CTL_MATCH                        0x0000000000010000ULL
-#define  TCAM_CTL_LOC                  0x00000000000003ffULL
-
-#define TCAM_ERR                       (FZC_FFLP + 0x200d8UL)
-#define  TCAM_ERR_ERR                  0x0000000080000000ULL
-#define  TCAM_ERR_P_ECC                        0x0000000040000000ULL
-#define  TCAM_ERR_MULT                 0x0000000020000000ULL
-#define  TCAM_ERR_ADDR                 0x0000000000ff0000ULL
-#define  TCAM_ERR_SYNDROME             0x000000000000ffffULL
-
-#define HASH_LOOKUP_ERR_LOG1           (FZC_FFLP + 0x200e0UL)
-#define  HASH_LOOKUP_ERR_LOG1_ERR      0x0000000000000008ULL
-#define  HASH_LOOKUP_ERR_LOG1_MULT_LK  0x0000000000000004ULL
-#define  HASH_LOOKUP_ERR_LOG1_CU       0x0000000000000002ULL
-#define  HASH_LOOKUP_ERR_LOG1_MULT_BIT 0x0000000000000001ULL
-
-#define HASH_LOOKUP_ERR_LOG2           (FZC_FFLP + 0x200e8UL)
-#define  HASH_LOOKUP_ERR_LOG2_H1       0x000000007ffff800ULL
-#define  HASH_LOOKUP_ERR_LOG2_SUBAREA  0x0000000000000700ULL
-#define  HASH_LOOKUP_ERR_LOG2_SYNDROME 0x00000000000000ffULL
-
-#define FFLP_CFG_1                     (FZC_FFLP + 0x20100UL)
-#define  FFLP_CFG_1_TCAM_DIS           0x0000000004000000ULL
-#define  FFLP_CFG_1_PIO_DBG_SEL                0x0000000003800000ULL
-#define  FFLP_CFG_1_PIO_FIO_RST                0x0000000000400000ULL
-#define  FFLP_CFG_1_PIO_FIO_LAT                0x0000000000300000ULL
-#define  FFLP_CFG_1_CAMLAT             0x00000000000f0000ULL
-#define  FFLP_CFG_1_CAMLAT_SHIFT       16
-#define  FFLP_CFG_1_CAMRATIO           0x000000000000f000ULL
-#define  FFLP_CFG_1_CAMRATIO_SHIFT     12
-#define  FFLP_CFG_1_FCRAMRATIO         0x0000000000000f00ULL
-#define  FFLP_CFG_1_FCRAMRATIO_SHIFT   8
-#define  FFLP_CFG_1_FCRAMOUTDR_MASK    0x00000000000000f0ULL
-#define  FFLP_CFG_1_FCRAMOUTDR_NORMAL  0x0000000000000000ULL
-#define  FFLP_CFG_1_FCRAMOUTDR_STRONG  0x0000000000000050ULL
-#define  FFLP_CFG_1_FCRAMOUTDR_WEAK    0x00000000000000a0ULL
-#define  FFLP_CFG_1_FCRAMQS            0x0000000000000008ULL
-#define  FFLP_CFG_1_ERRORDIS           0x0000000000000004ULL
-#define  FFLP_CFG_1_FFLPINITDONE       0x0000000000000002ULL
-#define  FFLP_CFG_1_LLCSNAP            0x0000000000000001ULL
-
-#define DEFAULT_FCRAMRATIO             10
-
-#define DEFAULT_TCAM_LATENCY           4
-#define DEFAULT_TCAM_ACCESS_RATIO      10
-
-#define TCP_CFLAG_MSK                  (FZC_FFLP + 0x20108UL)
-#define  TCP_CFLAG_MSK_MASK            0x0000000000000fffULL
-
-#define FCRAM_REF_TMR                  (FZC_FFLP + 0x20110UL)
-#define  FCRAM_REF_TMR_MAX             0x00000000ffff0000ULL
-#define  FCRAM_REF_TMR_MAX_SHIFT       16
-#define  FCRAM_REF_TMR_MIN             0x000000000000ffffULL
-#define  FCRAM_REF_TMR_MIN_SHIFT       0
-
-#define DEFAULT_FCRAM_REFRESH_MAX      512
-#define DEFAULT_FCRAM_REFRESH_MIN      512
-
-#define FCRAM_FIO_ADDR                 (FZC_FFLP + 0x20118UL)
-#define  FCRAM_FIO_ADDR_ADDR           0x00000000000000ffULL
-
-#define FCRAM_FIO_DAT                  (FZC_FFLP + 0x20120UL)
-#define  FCRAM_FIO_DAT_DATA            0x000000000000ffffULL
-
-#define FCRAM_ERR_TST0                 (FZC_FFLP + 0x20128UL)
-#define  FCRAM_ERR_TST0_SYND           0x00000000000000ffULL
-
-#define FCRAM_ERR_TST1                 (FZC_FFLP + 0x20130UL)
-#define  FCRAM_ERR_TST1_DAT            0x00000000ffffffffULL
-
-#define FCRAM_ERR_TST2                 (FZC_FFLP + 0x20138UL)
-#define  FCRAM_ERR_TST2_DAT            0x00000000ffffffffULL
-
-#define FFLP_ERR_MASK                  (FZC_FFLP + 0x20140UL)
-#define  FFLP_ERR_MASK_HSH_TBL_DAT     0x00000000000007f8ULL
-#define  FFLP_ERR_MASK_HSH_TBL_LKUP    0x0000000000000004ULL
-#define  FFLP_ERR_MASK_TCAM            0x0000000000000002ULL
-#define  FFLP_ERR_MASK_VLAN            0x0000000000000001ULL
-
-#define FFLP_DBG_TRAIN_VCT             (FZC_FFLP + 0x20148UL)
-#define  FFLP_DBG_TRAIN_VCT_VECTOR     0x00000000ffffffffULL
-
-#define FCRAM_PHY_RD_LAT               (FZC_FFLP + 0x20150UL)
-#define  FCRAM_PHY_RD_LAT_LAT          0x00000000000000ffULL
-
-/* Ethernet TCAM format */
-#define TCAM_ETHKEY0_RESV1             0xffffffffffffff00ULL
-#define TCAM_ETHKEY0_CLASS_CODE                0x00000000000000f8ULL
-#define TCAM_ETHKEY0_CLASS_CODE_SHIFT  3
-#define TCAM_ETHKEY0_RESV2             0x0000000000000007ULL
-#define TCAM_ETHKEY1_FRAME_BYTE0_7(NUM)        (0xff << ((7 - NUM) * 8))
-#define TCAM_ETHKEY2_FRAME_BYTE8       0xff00000000000000ULL
-#define TCAM_ETHKEY2_FRAME_BYTE8_SHIFT 56
-#define TCAM_ETHKEY2_FRAME_BYTE9       0x00ff000000000000ULL
-#define TCAM_ETHKEY2_FRAME_BYTE9_SHIFT 48
-#define TCAM_ETHKEY2_FRAME_BYTE10      0x0000ff0000000000ULL
-#define TCAM_ETHKEY2_FRAME_BYTE10_SHIFT        40
-#define TCAM_ETHKEY2_FRAME_RESV                0x000000ffffffffffULL
-#define TCAM_ETHKEY3_FRAME_RESV                0xffffffffffffffffULL
-
-/* IPV4 TCAM format */
-#define TCAM_V4KEY0_RESV1              0xffffffffffffff00ULL
-#define TCAM_V4KEY0_CLASS_CODE         0x00000000000000f8ULL
-#define TCAM_V4KEY0_CLASS_CODE_SHIFT   3
-#define TCAM_V4KEY0_RESV2              0x0000000000000007ULL
-#define TCAM_V4KEY1_L2RDCNUM           0xf800000000000000ULL
-#define TCAM_V4KEY1_L2RDCNUM_SHIFT     59
-#define TCAM_V4KEY1_NOPORT             0x0400000000000000ULL
-#define TCAM_V4KEY1_RESV               0x03ffffffffffffffULL
-#define TCAM_V4KEY2_RESV               0xffff000000000000ULL
-#define TCAM_V4KEY2_TOS                        0x0000ff0000000000ULL
-#define TCAM_V4KEY2_TOS_SHIFT          40
-#define TCAM_V4KEY2_PROTO              0x000000ff00000000ULL
-#define TCAM_V4KEY2_PROTO_SHIFT                32
-#define TCAM_V4KEY2_PORT_SPI           0x00000000ffffffffULL
-#define TCAM_V4KEY2_PORT_SPI_SHIFT     0
-#define TCAM_V4KEY3_SADDR              0xffffffff00000000ULL
-#define TCAM_V4KEY3_SADDR_SHIFT                32
-#define TCAM_V4KEY3_DADDR              0x00000000ffffffffULL
-#define TCAM_V4KEY3_DADDR_SHIFT                0
-
-/* IPV6 TCAM format */
-#define TCAM_V6KEY0_RESV1              0xffffffffffffff00ULL
-#define TCAM_V6KEY0_CLASS_CODE         0x00000000000000f8ULL
-#define TCAM_V6KEY0_CLASS_CODE_SHIFT   3
-#define TCAM_V6KEY0_RESV2              0x0000000000000007ULL
-#define TCAM_V6KEY1_L2RDCNUM           0xf800000000000000ULL
-#define TCAM_V6KEY1_L2RDCNUM_SHIFT     59
-#define TCAM_V6KEY1_NOPORT             0x0400000000000000ULL
-#define TCAM_V6KEY1_RESV               0x03ff000000000000ULL
-#define TCAM_V6KEY1_TOS                        0x0000ff0000000000ULL
-#define TCAM_V6KEY1_TOS_SHIFT          40
-#define TCAM_V6KEY1_NEXT_HDR           0x000000ff00000000ULL
-#define TCAM_V6KEY1_NEXT_HDR_SHIFT     32
-#define TCAM_V6KEY1_PORT_SPI           0x00000000ffffffffULL
-#define TCAM_V6KEY1_PORT_SPI_SHIFT     0
-#define TCAM_V6KEY2_ADDR_HIGH          0xffffffffffffffffULL
-#define TCAM_V6KEY3_ADDR_LOW           0xffffffffffffffffULL
-
-#define TCAM_ASSOCDATA_SYNDROME                0x000003fffc000000ULL
-#define TCAM_ASSOCDATA_SYNDROME_SHIFT  26
-#define TCAM_ASSOCDATA_ZFID            0x0000000003ffc000ULL
-#define TCAM_ASSOCDATA_ZFID_SHIFT      14
-#define TCAM_ASSOCDATA_V4_ECC_OK       0x0000000000002000ULL
-#define TCAM_ASSOCDATA_DISC            0x0000000000001000ULL
-#define TCAM_ASSOCDATA_TRES_MASK       0x0000000000000c00ULL
-#define TCAM_ASSOCDATA_TRES_USE_L2RDC  0x0000000000000000ULL
-#define TCAM_ASSOCDATA_TRES_USE_OFFSET 0x0000000000000400ULL
-#define TCAM_ASSOCDATA_TRES_OVR_RDC    0x0000000000000800ULL
-#define TCAM_ASSOCDATA_TRES_OVR_RDC_OFF        0x0000000000000c00ULL
-#define TCAM_ASSOCDATA_RDCTBL          0x0000000000000380ULL
-#define TCAM_ASSOCDATA_RDCTBL_SHIFT    7
-#define TCAM_ASSOCDATA_OFFSET          0x000000000000007cULL
-#define TCAM_ASSOCDATA_OFFSET_SHIFT    2
-#define TCAM_ASSOCDATA_ZFVLD           0x0000000000000002ULL
-#define TCAM_ASSOCDATA_AGE             0x0000000000000001ULL
-
-#define FLOW_KEY(IDX)                  (FZC_FFLP + 0x40000UL + (IDX) * 8UL)
-#define  FLOW_KEY_PORT                 0x0000000000000200ULL
-#define  FLOW_KEY_L2DA                 0x0000000000000100ULL
-#define  FLOW_KEY_VLAN                 0x0000000000000080ULL
-#define  FLOW_KEY_IPSA                 0x0000000000000040ULL
-#define  FLOW_KEY_IPDA                 0x0000000000000020ULL
-#define  FLOW_KEY_PROTO                        0x0000000000000010ULL
-#define  FLOW_KEY_L4_0                 0x000000000000000cULL
-#define  FLOW_KEY_L4_0_SHIFT           2
-#define  FLOW_KEY_L4_1                 0x0000000000000003ULL
-#define  FLOW_KEY_L4_1_SHIFT           0
-
-#define  FLOW_KEY_L4_NONE              0x0
-#define  FLOW_KEY_L4_RESV              0x1
-#define  FLOW_KEY_L4_BYTE12            0x2
-#define  FLOW_KEY_L4_BYTE56            0x3
-
-#define H1POLY                         (FZC_FFLP + 0x40060UL)
-#define  H1POLY_INITVAL                        0x00000000ffffffffULL
-
-#define H2POLY                         (FZC_FFLP + 0x40068UL)
-#define  H2POLY_INITVAL                        0x000000000000ffffULL
-
-#define FLW_PRT_SEL(IDX)               (FZC_FFLP + 0x40070UL + (IDX) * 8UL)
-#define  FLW_PRT_SEL_EXT               0x0000000000010000ULL
-#define  FLW_PRT_SEL_MASK              0x0000000000001f00ULL
-#define  FLW_PRT_SEL_MASK_SHIFT                8
-#define  FLW_PRT_SEL_BASE              0x000000000000001fULL
-#define  FLW_PRT_SEL_BASE_SHIFT                0
-
-#define HASH_TBL_ADDR(IDX)             (FFLP + 0x00000UL + (IDX) * 8192UL)
-#define  HASH_TBL_ADDR_AUTOINC         0x0000000000800000ULL
-#define  HASH_TBL_ADDR_ADDR            0x00000000007fffffULL
-
-#define HASH_TBL_DATA(IDX)             (FFLP + 0x00008UL + (IDX) * 8192UL)
-#define  HASH_TBL_DATA_DATA            0xffffffffffffffffULL
-
-/* FCRAM hash table entries are up to 8 64-bit words in size.
- * The layout of each entry is determined by the settings in the
- * first word, which is the header.
- *
- * The indexing is controllable per partition (there is one partition
- * per RDC group, thus a total of eight) using the BASE and MASK fields
- * of FLW_PRT_SEL above.
- */
-#define FCRAM_SIZE                     0x800000
-#define FCRAM_NUM_PARTITIONS           8
-
-/* Generic HASH entry header, used for all non-optimized formats.  */
-#define HASH_HEADER_FMT                        0x8000000000000000ULL
-#define HASH_HEADER_EXT                        0x4000000000000000ULL
-#define HASH_HEADER_VALID              0x2000000000000000ULL
-#define HASH_HEADER_RESVD              0x1000000000000000ULL
-#define HASH_HEADER_L2_DADDR           0x0ffffffffffff000ULL
-#define HASH_HEADER_L2_DADDR_SHIFT     12
-#define HASH_HEADER_VLAN               0x0000000000000fffULL
-#define HASH_HEADER_VLAN_SHIFT         0
-
-/* Optimized format, just a header with a special layout defined below.
- * Set FMT and EXT both to zero to indicate this layout is being used.
- */
-#define HASH_OPT_HEADER_FMT            0x8000000000000000ULL
-#define HASH_OPT_HEADER_EXT            0x4000000000000000ULL
-#define HASH_OPT_HEADER_VALID          0x2000000000000000ULL
-#define HASH_OPT_HEADER_RDCOFF         0x1f00000000000000ULL
-#define HASH_OPT_HEADER_RDCOFF_SHIFT   56
-#define HASH_OPT_HEADER_HASH2          0x00ffff0000000000ULL
-#define HASH_OPT_HEADER_HASH2_SHIFT    40
-#define HASH_OPT_HEADER_RESVD          0x000000ff00000000ULL
-#define HASH_OPT_HEADER_USERINFO       0x00000000ffffffffULL
-#define HASH_OPT_HEADER_USERINFO_SHIFT 0
-
-/* Port and protocol word used for ipv4 and ipv6 layouts.  */
-#define HASH_PORT_DPORT                        0xffff000000000000ULL
-#define HASH_PORT_DPORT_SHIFT          48
-#define HASH_PORT_SPORT                        0x0000ffff00000000ULL
-#define HASH_PORT_SPORT_SHIFT          32
-#define HASH_PORT_PROTO                        0x00000000ff000000ULL
-#define HASH_PORT_PROTO_SHIFT          24
-#define HASH_PORT_PORT_OFF             0x0000000000c00000ULL
-#define HASH_PORT_PORT_OFF_SHIFT       22
-#define HASH_PORT_PORT_RESV            0x00000000003fffffULL
-
-/* Action word used for ipv4 and ipv6 layouts.  */
-#define HASH_ACTION_RESV1              0xe000000000000000ULL
-#define HASH_ACTION_RDCOFF             0x1f00000000000000ULL
-#define HASH_ACTION_RDCOFF_SHIFT       56
-#define HASH_ACTION_ZFVALID            0x0080000000000000ULL
-#define HASH_ACTION_RESV2              0x0070000000000000ULL
-#define HASH_ACTION_ZFID               0x000fff0000000000ULL
-#define HASH_ACTION_ZFID_SHIFT         40
-#define HASH_ACTION_RESV3              0x000000ff00000000ULL
-#define HASH_ACTION_USERINFO           0x00000000ffffffffULL
-#define HASH_ACTION_USERINFO_SHIFT     0
-
-/* IPV4 address word.  Addresses are in network endian. */
-#define HASH_IP4ADDR_SADDR             0xffffffff00000000ULL
-#define HASH_IP4ADDR_SADDR_SHIFT       32
-#define HASH_IP4ADDR_DADDR             0x00000000ffffffffULL
-#define HASH_IP4ADDR_DADDR_SHIFT       0
-
-/* IPV6 address layout is 4 words, first two are saddr, next two
- * are daddr.  Addresses are in network endian.
- */
-
-struct fcram_hash_opt {
-       u64     header;
-};
-
-/* EXT=1, FMT=0 */
-struct fcram_hash_ipv4 {
-       u64     header;
-       u64     addrs;
-       u64     ports;
-       u64     action;
-};
-
-/* EXT=1, FMT=1 */
-struct fcram_hash_ipv6 {
-       u64     header;
-       u64     addrs[4];
-       u64     ports;
-       u64     action;
-};
-
-#define HASH_TBL_DATA_LOG(IDX)         (FFLP + 0x00010UL + (IDX) * 8192UL)
-#define  HASH_TBL_DATA_LOG_ERR         0x0000000080000000ULL
-#define  HASH_TBL_DATA_LOG_ADDR                0x000000007fffff00ULL
-#define  HASH_TBL_DATA_LOG_SYNDROME    0x00000000000000ffULL
-
-#define RX_DMA_CK_DIV                  (FZC_DMC + 0x00000UL)
-#define  RX_DMA_CK_DIV_CNT             0x000000000000ffffULL
-
-#define DEF_RDC(IDX)                   (FZC_DMC + 0x00008UL + (IDX) * 0x8UL)
-#define  DEF_RDC_VAL                   0x000000000000001fULL
-
-#define PT_DRR_WT(IDX)                 (FZC_DMC + 0x00028UL + (IDX) * 0x8UL)
-#define  PT_DRR_WT_VAL                 0x000000000000ffffULL
-
-#define PT_DRR_WEIGHT_DEFAULT_10G      0x0400
-#define PT_DRR_WEIGHT_DEFAULT_1G       0x0066
-
-#define PT_USE(IDX)                    (FZC_DMC + 0x00048UL + (IDX) * 0x8UL)
-#define  PT_USE_CNT                    0x00000000000fffffULL
-
-#define RED_RAN_INIT                   (FZC_DMC + 0x00068UL)
-#define  RED_RAN_INIT_OPMODE           0x0000000000010000ULL
-#define  RED_RAN_INIT_VAL              0x000000000000ffffULL
-
-#define RX_ADDR_MD                     (FZC_DMC + 0x00070UL)
-#define  RX_ADDR_MD_DBG_PT_MUX_SEL     0x000000000000000cULL
-#define  RX_ADDR_MD_RAM_ACC            0x0000000000000002ULL
-#define  RX_ADDR_MD_MODE32             0x0000000000000001ULL
-
-#define RDMC_PRE_PAR_ERR               (FZC_DMC + 0x00078UL)
-#define  RDMC_PRE_PAR_ERR_ERR          0x0000000000008000ULL
-#define  RDMC_PRE_PAR_ERR_MERR         0x0000000000004000ULL
-#define  RDMC_PRE_PAR_ERR_ADDR         0x00000000000000ffULL
-
-#define RDMC_SHA_PAR_ERR               (FZC_DMC + 0x00080UL)
-#define  RDMC_SHA_PAR_ERR_ERR          0x0000000000008000ULL
-#define  RDMC_SHA_PAR_ERR_MERR         0x0000000000004000ULL
-#define  RDMC_SHA_PAR_ERR_ADDR         0x00000000000000ffULL
-
-#define RDMC_MEM_ADDR                  (FZC_DMC + 0x00088UL)
-#define  RDMC_MEM_ADDR_PRE_SHAD                0x0000000000000100ULL
-#define  RDMC_MEM_ADDR_ADDR            0x00000000000000ffULL
-
-#define RDMC_MEM_DAT0                  (FZC_DMC + 0x00090UL)
-#define  RDMC_MEM_DAT0_DATA            0x00000000ffffffffULL /* bits 31:0 */
-
-#define RDMC_MEM_DAT1                  (FZC_DMC + 0x00098UL)
-#define  RDMC_MEM_DAT1_DATA            0x00000000ffffffffULL /* bits 63:32 */
-
-#define RDMC_MEM_DAT2                  (FZC_DMC + 0x000a0UL)
-#define  RDMC_MEM_DAT2_DATA            0x00000000ffffffffULL /* bits 95:64 */
-
-#define RDMC_MEM_DAT3                  (FZC_DMC + 0x000a8UL)
-#define  RDMC_MEM_DAT3_DATA            0x00000000ffffffffULL /* bits 127:96 */
-
-#define RDMC_MEM_DAT4                  (FZC_DMC + 0x000b0UL)
-#define  RDMC_MEM_DAT4_DATA            0x00000000000fffffULL /* bits 147:128 */
-
-#define RX_CTL_DAT_FIFO_STAT                   (FZC_DMC + 0x000b8UL)
-#define  RX_CTL_DAT_FIFO_STAT_ID_MISMATCH      0x0000000000000100ULL
-#define  RX_CTL_DAT_FIFO_STAT_ZCP_EOP_ERR      0x00000000000000f0ULL
-#define  RX_CTL_DAT_FIFO_STAT_IPP_EOP_ERR      0x000000000000000fULL
-
-#define RX_CTL_DAT_FIFO_MASK                   (FZC_DMC + 0x000c0UL)
-#define  RX_CTL_DAT_FIFO_MASK_ID_MISMATCH      0x0000000000000100ULL
-#define  RX_CTL_DAT_FIFO_MASK_ZCP_EOP_ERR      0x00000000000000f0ULL
-#define  RX_CTL_DAT_FIFO_MASK_IPP_EOP_ERR      0x000000000000000fULL
-
-#define RDMC_TRAINING_VECTOR                   (FZC_DMC + 0x000c8UL)
-#define  RDMC_TRAINING_VECTOR_TRAINING_VECTOR  0x00000000ffffffffULL
-
-#define RX_CTL_DAT_FIFO_STAT_DBG               (FZC_DMC + 0x000d0UL)
-#define  RX_CTL_DAT_FIFO_STAT_DBG_ID_MISMATCH  0x0000000000000100ULL
-#define  RX_CTL_DAT_FIFO_STAT_DBG_ZCP_EOP_ERR  0x00000000000000f0ULL
-#define  RX_CTL_DAT_FIFO_STAT_DBG_IPP_EOP_ERR  0x000000000000000fULL
-
-#define RDC_TBL(TBL,SLOT)              (FZC_ZCP + 0x10000UL + \
-                                        (TBL) * (8UL * 16UL) + \
-                                        (SLOT) * 8UL)
-#define  RDC_TBL_RDC                   0x000000000000000fULL
-
-#define RX_LOG_PAGE_VLD(IDX)           (FZC_DMC + 0x20000UL + (IDX) * 0x40UL)
-#define  RX_LOG_PAGE_VLD_FUNC          0x000000000000000cULL
-#define  RX_LOG_PAGE_VLD_FUNC_SHIFT    2
-#define  RX_LOG_PAGE_VLD_PAGE1         0x0000000000000002ULL
-#define  RX_LOG_PAGE_VLD_PAGE0         0x0000000000000001ULL
-
-#define RX_LOG_MASK1(IDX)              (FZC_DMC + 0x20008UL + (IDX) * 0x40UL)
-#define  RX_LOG_MASK1_MASK             0x00000000ffffffffULL
-
-#define RX_LOG_VAL1(IDX)               (FZC_DMC + 0x20010UL + (IDX) * 0x40UL)
-#define  RX_LOG_VAL1_VALUE             0x00000000ffffffffULL
-
-#define RX_LOG_MASK2(IDX)              (FZC_DMC + 0x20018UL + (IDX) * 0x40UL)
-#define  RX_LOG_MASK2_MASK             0x00000000ffffffffULL
-
-#define RX_LOG_VAL2(IDX)               (FZC_DMC + 0x20020UL + (IDX) * 0x40UL)
-#define  RX_LOG_VAL2_VALUE             0x00000000ffffffffULL
-
-#define RX_LOG_PAGE_RELO1(IDX)         (FZC_DMC + 0x20028UL + (IDX) * 0x40UL)
-#define  RX_LOG_PAGE_RELO1_RELO                0x00000000ffffffffULL
-
-#define RX_LOG_PAGE_RELO2(IDX)         (FZC_DMC + 0x20030UL + (IDX) * 0x40UL)
-#define  RX_LOG_PAGE_RELO2_RELO                0x00000000ffffffffULL
-
-#define RX_LOG_PAGE_HDL(IDX)           (FZC_DMC + 0x20038UL + (IDX) * 0x40UL)
-#define  RX_LOG_PAGE_HDL_HANDLE                0x00000000000fffffULL
-
-#define TX_LOG_PAGE_VLD(IDX)           (FZC_DMC + 0x40000UL + (IDX) * 0x200UL)
-#define  TX_LOG_PAGE_VLD_FUNC          0x000000000000000cULL
-#define  TX_LOG_PAGE_VLD_FUNC_SHIFT    2
-#define  TX_LOG_PAGE_VLD_PAGE1         0x0000000000000002ULL
-#define  TX_LOG_PAGE_VLD_PAGE0         0x0000000000000001ULL
-
-#define TX_LOG_MASK1(IDX)              (FZC_DMC + 0x40008UL + (IDX) * 0x200UL)
-#define  TX_LOG_MASK1_MASK             0x00000000ffffffffULL
-
-#define TX_LOG_VAL1(IDX)               (FZC_DMC + 0x40010UL + (IDX) * 0x200UL)
-#define  TX_LOG_VAL1_VALUE             0x00000000ffffffffULL
-
-#define TX_LOG_MASK2(IDX)              (FZC_DMC + 0x40018UL + (IDX) * 0x200UL)
-#define  TX_LOG_MASK2_MASK             0x00000000ffffffffULL
-
-#define TX_LOG_VAL2(IDX)               (FZC_DMC + 0x40020UL + (IDX) * 0x200UL)
-#define  TX_LOG_VAL2_VALUE             0x00000000ffffffffULL
-
-#define TX_LOG_PAGE_RELO1(IDX)         (FZC_DMC + 0x40028UL + (IDX) * 0x200UL)
-#define  TX_LOG_PAGE_RELO1_RELO                0x00000000ffffffffULL
-
-#define TX_LOG_PAGE_RELO2(IDX)         (FZC_DMC + 0x40030UL + (IDX) * 0x200UL)
-#define  TX_LOG_PAGE_RELO2_RELO                0x00000000ffffffffULL
-
-#define TX_LOG_PAGE_HDL(IDX)           (FZC_DMC + 0x40038UL + (IDX) * 0x200UL)
-#define  TX_LOG_PAGE_HDL_HANDLE                0x00000000000fffffULL
-
-#define TX_ADDR_MD                     (FZC_DMC + 0x45000UL)
-#define  TX_ADDR_MD_MODE32             0x0000000000000001ULL
-
-#define RDC_RED_PARA(IDX)              (FZC_DMC + 0x30000UL + (IDX) * 0x40UL)
-#define  RDC_RED_PARA_THRE_SYN         0x00000000fff00000ULL
-#define  RDC_RED_PARA_THRE_SYN_SHIFT   20
-#define  RDC_RED_PARA_WIN_SYN          0x00000000000f0000ULL
-#define  RDC_RED_PARA_WIN_SYN_SHIFT    16
-#define  RDC_RED_PARA_THRE             0x000000000000fff0ULL
-#define  RDC_RED_PARA_THRE_SHIFT       4
-#define  RDC_RED_PARA_WIN              0x000000000000000fULL
-#define  RDC_RED_PARA_WIN_SHIFT                0
-
-#define RED_DIS_CNT(IDX)               (FZC_DMC + 0x30008UL + (IDX) * 0x40UL)
-#define  RED_DIS_CNT_OFLOW             0x0000000000010000ULL
-#define  RED_DIS_CNT_COUNT             0x000000000000ffffULL
-
-#define IPP_CFIG                       (FZC_IPP + 0x00000UL)
-#define  IPP_CFIG_SOFT_RST             0x0000000080000000ULL
-#define  IPP_CFIG_IP_MAX_PKT           0x0000000001ffff00ULL
-#define  IPP_CFIG_IP_MAX_PKT_SHIFT     8
-#define  IPP_CFIG_FFLP_CS_PIO_W                0x0000000000000080ULL
-#define  IPP_CFIG_PFIFO_PIO_W          0x0000000000000040ULL
-#define  IPP_CFIG_DFIFO_PIO_W          0x0000000000000020ULL
-#define  IPP_CFIG_CKSUM_EN             0x0000000000000010ULL
-#define  IPP_CFIG_DROP_BAD_CRC         0x0000000000000008ULL
-#define  IPP_CFIG_DFIFO_ECC_EN         0x0000000000000004ULL
-#define  IPP_CFIG_DEBUG_BUS_OUT_EN     0x0000000000000002ULL
-#define  IPP_CFIG_IPP_ENABLE           0x0000000000000001ULL
-
-#define IPP_PKT_DIS                    (FZC_IPP + 0x00020UL)
-#define  IPP_PKT_DIS_COUNT             0x0000000000003fffULL
-
-#define IPP_BAD_CS_CNT                 (FZC_IPP + 0x00028UL)
-#define  IPP_BAD_CS_CNT_COUNT          0x0000000000003fffULL
-
-#define IPP_ECC                                (FZC_IPP + 0x00030UL)
-#define  IPP_ECC_COUNT                 0x00000000000000ffULL
-
-#define IPP_INT_STAT                   (FZC_IPP + 0x00040UL)
-#define  IPP_INT_STAT_SOP_MISS         0x0000000080000000ULL
-#define  IPP_INT_STAT_EOP_MISS         0x0000000040000000ULL
-#define  IPP_INT_STAT_DFIFO_UE         0x0000000030000000ULL
-#define  IPP_INT_STAT_DFIFO_CE         0x000000000c000000ULL
-#define  IPP_INT_STAT_DFIFO_ECC                0x0000000003000000ULL
-#define  IPP_INT_STAT_DFIFO_ECC_IDX    0x00000000007ff000ULL
-#define  IPP_INT_STAT_PFIFO_PERR       0x0000000000000800ULL
-#define  IPP_INT_STAT_ECC_ERR_MAX      0x0000000000000400ULL
-#define  IPP_INT_STAT_PFIFO_ERR_IDX    0x00000000000003f0ULL
-#define  IPP_INT_STAT_PFIFO_OVER       0x0000000000000008ULL
-#define  IPP_INT_STAT_PFIFO_UND                0x0000000000000004ULL
-#define  IPP_INT_STAT_BAD_CS_MX                0x0000000000000002ULL
-#define  IPP_INT_STAT_PKT_DIS_MX       0x0000000000000001ULL
-#define  IPP_INT_STAT_ALL              0x00000000ff7fffffULL
-
-#define IPP_MSK                                (FZC_IPP + 0x00048UL)
-#define  IPP_MSK_ECC_ERR_MX            0x0000000000000080ULL
-#define  IPP_MSK_DFIFO_EOP_SOP         0x0000000000000040ULL
-#define  IPP_MSK_DFIFO_UC              0x0000000000000020ULL
-#define  IPP_MSK_PFIFO_PAR             0x0000000000000010ULL
-#define  IPP_MSK_PFIFO_OVER            0x0000000000000008ULL
-#define  IPP_MSK_PFIFO_UND             0x0000000000000004ULL
-#define  IPP_MSK_BAD_CS                        0x0000000000000002ULL
-#define  IPP_MSK_PKT_DIS_CNT           0x0000000000000001ULL
-#define  IPP_MSK_ALL                   0x00000000000000ffULL
-
-#define IPP_PFIFO_RD0                  (FZC_IPP + 0x00060UL)
-#define  IPP_PFIFO_RD0_DATA            0x00000000ffffffffULL /* bits 31:0 */
-
-#define IPP_PFIFO_RD1                  (FZC_IPP + 0x00068UL)
-#define  IPP_PFIFO_RD1_DATA            0x00000000ffffffffULL /* bits 63:32 */
-
-#define IPP_PFIFO_RD2                  (FZC_IPP + 0x00070UL)
-#define  IPP_PFIFO_RD2_DATA            0x00000000ffffffffULL /* bits 95:64 */
-
-#define IPP_PFIFO_RD3                  (FZC_IPP + 0x00078UL)
-#define  IPP_PFIFO_RD3_DATA            0x00000000ffffffffULL /* bits 127:96 */
-
-#define IPP_PFIFO_RD4                  (FZC_IPP + 0x00080UL)
-#define  IPP_PFIFO_RD4_DATA            0x00000000ffffffffULL /* bits 145:128 */
-
-#define IPP_PFIFO_WR0                  (FZC_IPP + 0x00088UL)
-#define  IPP_PFIFO_WR0_DATA            0x00000000ffffffffULL /* bits 31:0 */
-
-#define IPP_PFIFO_WR1                  (FZC_IPP + 0x00090UL)
-#define  IPP_PFIFO_WR1_DATA            0x00000000ffffffffULL /* bits 63:32 */
-
-#define IPP_PFIFO_WR2                  (FZC_IPP + 0x00098UL)
-#define  IPP_PFIFO_WR2_DATA            0x00000000ffffffffULL /* bits 95:64 */
-
-#define IPP_PFIFO_WR3                  (FZC_IPP + 0x000a0UL)
-#define  IPP_PFIFO_WR3_DATA            0x00000000ffffffffULL /* bits 127:96 */
-
-#define IPP_PFIFO_WR4                  (FZC_IPP + 0x000a8UL)
-#define  IPP_PFIFO_WR4_DATA            0x00000000ffffffffULL /* bits 145:128 */
-
-#define IPP_PFIFO_RD_PTR               (FZC_IPP + 0x000b0UL)
-#define  IPP_PFIFO_RD_PTR_PTR          0x000000000000003fULL
-
-#define IPP_PFIFO_WR_PTR               (FZC_IPP + 0x000b8UL)
-#define  IPP_PFIFO_WR_PTR_PTR          0x000000000000007fULL
-
-#define IPP_DFIFO_RD0                  (FZC_IPP + 0x000c0UL)
-#define  IPP_DFIFO_RD0_DATA            0x00000000ffffffffULL /* bits 31:0 */
-
-#define IPP_DFIFO_RD1                  (FZC_IPP + 0x000c8UL)
-#define  IPP_DFIFO_RD1_DATA            0x00000000ffffffffULL /* bits 63:32 */
-
-#define IPP_DFIFO_RD2                  (FZC_IPP + 0x000d0UL)
-#define  IPP_DFIFO_RD2_DATA            0x00000000ffffffffULL /* bits 95:64 */
-
-#define IPP_DFIFO_RD3                  (FZC_IPP + 0x000d8UL)
-#define  IPP_DFIFO_RD3_DATA            0x00000000ffffffffULL /* bits 127:96 */
-
-#define IPP_DFIFO_RD4                  (FZC_IPP + 0x000e0UL)
-#define  IPP_DFIFO_RD4_DATA            0x00000000ffffffffULL /* bits 145:128 */
-
-#define IPP_DFIFO_WR0                  (FZC_IPP + 0x000e8UL)
-#define  IPP_DFIFO_WR0_DATA            0x00000000ffffffffULL /* bits 31:0 */
-
-#define IPP_DFIFO_WR1                  (FZC_IPP + 0x000f0UL)
-#define  IPP_DFIFO_WR1_DATA            0x00000000ffffffffULL /* bits 63:32 */
-
-#define IPP_DFIFO_WR2                  (FZC_IPP + 0x000f8UL)
-#define  IPP_DFIFO_WR2_DATA            0x00000000ffffffffULL /* bits 95:64 */
-
-#define IPP_DFIFO_WR3                  (FZC_IPP + 0x00100UL)
-#define  IPP_DFIFO_WR3_DATA            0x00000000ffffffffULL /* bits 127:96 */
-
-#define IPP_DFIFO_WR4                  (FZC_IPP + 0x00108UL)
-#define  IPP_DFIFO_WR4_DATA            0x00000000ffffffffULL /* bits 145:128 */
-
-#define IPP_DFIFO_RD_PTR               (FZC_IPP + 0x00110UL)
-#define  IPP_DFIFO_RD_PTR_PTR          0x0000000000000fffULL
-
-#define IPP_DFIFO_WR_PTR               (FZC_IPP + 0x00118UL)
-#define  IPP_DFIFO_WR_PTR_PTR          0x0000000000000fffULL
-
-#define IPP_SM                         (FZC_IPP + 0x00120UL)
-#define  IPP_SM_SM                     0x00000000ffffffffULL
-
-#define IPP_CS_STAT                    (FZC_IPP + 0x00128UL)
-#define  IPP_CS_STAT_BCYC_CNT          0x00000000ff000000ULL
-#define  IPP_CS_STAT_IP_LEN            0x0000000000fff000ULL
-#define  IPP_CS_STAT_CS_FAIL           0x0000000000000800ULL
-#define  IPP_CS_STAT_TERM              0x0000000000000400ULL
-#define  IPP_CS_STAT_BAD_NUM           0x0000000000000200ULL
-#define  IPP_CS_STAT_CS_STATE          0x00000000000001ffULL
-
-#define IPP_FFLP_CS_INFO               (FZC_IPP + 0x00130UL)
-#define  IPP_FFLP_CS_INFO_PKT_ID       0x0000000000003c00ULL
-#define  IPP_FFLP_CS_INFO_L4_PROTO     0x0000000000000300ULL
-#define  IPP_FFLP_CS_INFO_V4_HD_LEN    0x00000000000000f0ULL
-#define  IPP_FFLP_CS_INFO_L3_VER       0x000000000000000cULL
-#define  IPP_FFLP_CS_INFO_L2_OP                0x0000000000000003ULL
-
-#define IPP_DBG_SEL                    (FZC_IPP + 0x00138UL)
-#define  IPP_DBG_SEL_SEL               0x000000000000000fULL
-
-#define IPP_DFIFO_ECC_SYND             (FZC_IPP + 0x00140UL)
-#define  IPP_DFIFO_ECC_SYND_SYND       0x000000000000ffffULL
-
-#define IPP_DFIFO_EOP_RD_PTR           (FZC_IPP + 0x00148UL)
-#define  IPP_DFIFO_EOP_RD_PTR_PTR      0x0000000000000fffULL
-
-#define IPP_ECC_CTL                    (FZC_IPP + 0x00150UL)
-#define  IPP_ECC_CTL_DIS_DBL           0x0000000080000000ULL
-#define  IPP_ECC_CTL_COR_DBL           0x0000000000020000ULL
-#define  IPP_ECC_CTL_COR_SNG           0x0000000000010000ULL
-#define  IPP_ECC_CTL_COR_ALL           0x0000000000000400ULL
-#define  IPP_ECC_CTL_COR_1             0x0000000000000100ULL
-#define  IPP_ECC_CTL_COR_LST           0x0000000000000004ULL
-#define  IPP_ECC_CTL_COR_SND           0x0000000000000002ULL
-#define  IPP_ECC_CTL_COR_FSR           0x0000000000000001ULL
-
-#define NIU_DFIFO_ENTRIES              1024
-#define ATLAS_P0_P1_DFIFO_ENTRIES      2048
-#define ATLAS_P2_P3_DFIFO_ENTRIES      1024
-
-#define ZCP_CFIG                       (FZC_ZCP + 0x00000UL)
-#define  ZCP_CFIG_ZCP_32BIT_MODE       0x0000000001000000ULL
-#define  ZCP_CFIG_ZCP_DEBUG_SEL                0x0000000000ff0000ULL
-#define  ZCP_CFIG_DMA_TH               0x000000000000ffe0ULL
-#define  ZCP_CFIG_ECC_CHK_DIS          0x0000000000000010ULL
-#define  ZCP_CFIG_PAR_CHK_DIS          0x0000000000000008ULL
-#define  ZCP_CFIG_DIS_BUFF_RSP_IF      0x0000000000000004ULL
-#define  ZCP_CFIG_DIS_BUFF_REQ_IF      0x0000000000000002ULL
-#define  ZCP_CFIG_ZC_ENABLE            0x0000000000000001ULL
-
-#define ZCP_INT_STAT                   (FZC_ZCP + 0x00008UL)
-#define  ZCP_INT_STAT_RRFIFO_UNDERRUN  0x0000000000008000ULL
-#define  ZCP_INT_STAT_RRFIFO_OVERRUN   0x0000000000004000ULL
-#define  ZCP_INT_STAT_RSPFIFO_UNCOR_ERR        0x0000000000001000ULL
-#define  ZCP_INT_STAT_BUFFER_OVERFLOW  0x0000000000000800ULL
-#define  ZCP_INT_STAT_STAT_TBL_PERR    0x0000000000000400ULL
-#define  ZCP_INT_STAT_DYN_TBL_PERR     0x0000000000000200ULL
-#define  ZCP_INT_STAT_BUF_TBL_PERR     0x0000000000000100ULL
-#define  ZCP_INT_STAT_TT_PROGRAM_ERR   0x0000000000000080ULL
-#define  ZCP_INT_STAT_RSP_TT_INDEX_ERR 0x0000000000000040ULL
-#define  ZCP_INT_STAT_SLV_TT_INDEX_ERR 0x0000000000000020ULL
-#define  ZCP_INT_STAT_ZCP_TT_INDEX_ERR 0x0000000000000010ULL
-#define  ZCP_INT_STAT_CFIFO_ECC3       0x0000000000000008ULL
-#define  ZCP_INT_STAT_CFIFO_ECC2       0x0000000000000004ULL
-#define  ZCP_INT_STAT_CFIFO_ECC1       0x0000000000000002ULL
-#define  ZCP_INT_STAT_CFIFO_ECC0       0x0000000000000001ULL
-#define  ZCP_INT_STAT_ALL              0x000000000000ffffULL
-
-#define ZCP_INT_MASK                   (FZC_ZCP + 0x00010UL)
-#define  ZCP_INT_MASK_RRFIFO_UNDERRUN  0x0000000000008000ULL
-#define  ZCP_INT_MASK_RRFIFO_OVERRUN   0x0000000000004000ULL
-#define  ZCP_INT_MASK_LOJ              0x0000000000002000ULL
-#define  ZCP_INT_MASK_RSPFIFO_UNCOR_ERR        0x0000000000001000ULL
-#define  ZCP_INT_MASK_BUFFER_OVERFLOW  0x0000000000000800ULL
-#define  ZCP_INT_MASK_STAT_TBL_PERR    0x0000000000000400ULL
-#define  ZCP_INT_MASK_DYN_TBL_PERR     0x0000000000000200ULL
-#define  ZCP_INT_MASK_BUF_TBL_PERR     0x0000000000000100ULL
-#define  ZCP_INT_MASK_TT_PROGRAM_ERR   0x0000000000000080ULL
-#define  ZCP_INT_MASK_RSP_TT_INDEX_ERR 0x0000000000000040ULL
-#define  ZCP_INT_MASK_SLV_TT_INDEX_ERR 0x0000000000000020ULL
-#define  ZCP_INT_MASK_ZCP_TT_INDEX_ERR 0x0000000000000010ULL
-#define  ZCP_INT_MASK_CFIFO_ECC3       0x0000000000000008ULL
-#define  ZCP_INT_MASK_CFIFO_ECC2       0x0000000000000004ULL
-#define  ZCP_INT_MASK_CFIFO_ECC1       0x0000000000000002ULL
-#define  ZCP_INT_MASK_CFIFO_ECC0       0x0000000000000001ULL
-#define  ZCP_INT_MASK_ALL              0x000000000000ffffULL
-
-#define BAM4BUF                                (FZC_ZCP + 0x00018UL)
-#define  BAM4BUF_LOJ                   0x0000000080000000ULL
-#define  BAM4BUF_EN_CK                 0x0000000040000000ULL
-#define  BAM4BUF_IDX_END0              0x000000003ff00000ULL
-#define  BAM4BUF_IDX_ST0               0x00000000000ffc00ULL
-#define  BAM4BUF_OFFSET0               0x00000000000003ffULL
-
-#define BAM8BUF                                (FZC_ZCP + 0x00020UL)
-#define  BAM8BUF_LOJ                   0x0000000080000000ULL
-#define  BAM8BUF_EN_CK                 0x0000000040000000ULL
-#define  BAM8BUF_IDX_END1              0x000000003ff00000ULL
-#define  BAM8BUF_IDX_ST1               0x00000000000ffc00ULL
-#define  BAM8BUF_OFFSET1               0x00000000000003ffULL
-
-#define BAM16BUF                       (FZC_ZCP + 0x00028UL)
-#define  BAM16BUF_LOJ                  0x0000000080000000ULL
-#define  BAM16BUF_EN_CK                        0x0000000040000000ULL
-#define  BAM16BUF_IDX_END2             0x000000003ff00000ULL
-#define  BAM16BUF_IDX_ST2              0x00000000000ffc00ULL
-#define  BAM16BUF_OFFSET2              0x00000000000003ffULL
-
-#define BAM32BUF                       (FZC_ZCP + 0x00030UL)
-#define  BAM32BUF_LOJ                  0x0000000080000000ULL
-#define  BAM32BUF_EN_CK                        0x0000000040000000ULL
-#define  BAM32BUF_IDX_END3             0x000000003ff00000ULL
-#define  BAM32BUF_IDX_ST3              0x00000000000ffc00ULL
-#define  BAM32BUF_OFFSET3              0x00000000000003ffULL
-
-#define DST4BUF                                (FZC_ZCP + 0x00038UL)
-#define  DST4BUF_DS_OFFSET0            0x00000000000003ffULL
-
-#define DST8BUF                                (FZC_ZCP + 0x00040UL)
-#define  DST8BUF_DS_OFFSET1            0x00000000000003ffULL
-
-#define DST16BUF                       (FZC_ZCP + 0x00048UL)
-#define  DST16BUF_DS_OFFSET2           0x00000000000003ffULL
-
-#define DST32BUF                       (FZC_ZCP + 0x00050UL)
-#define  DST32BUF_DS_OFFSET3           0x00000000000003ffULL
-
-#define ZCP_RAM_DATA0                  (FZC_ZCP + 0x00058UL)
-#define  ZCP_RAM_DATA0_DAT0            0x00000000ffffffffULL
-
-#define ZCP_RAM_DATA1                  (FZC_ZCP + 0x00060UL)
-#define  ZCP_RAM_DAT10_DAT1            0x00000000ffffffffULL
-
-#define ZCP_RAM_DATA2                  (FZC_ZCP + 0x00068UL)
-#define  ZCP_RAM_DATA2_DAT2            0x00000000ffffffffULL
-
-#define ZCP_RAM_DATA3                  (FZC_ZCP + 0x00070UL)
-#define  ZCP_RAM_DATA3_DAT3            0x00000000ffffffffULL
-
-#define ZCP_RAM_DATA4                  (FZC_ZCP + 0x00078UL)
-#define  ZCP_RAM_DATA4_DAT4            0x00000000000000ffULL
-
-#define ZCP_RAM_BE                     (FZC_ZCP + 0x00080UL)
-#define  ZCP_RAM_BE_VAL                        0x000000000001ffffULL
-
-#define ZCP_RAM_ACC                    (FZC_ZCP + 0x00088UL)
-#define  ZCP_RAM_ACC_BUSY              0x0000000080000000ULL
-#define  ZCP_RAM_ACC_READ              0x0000000040000000ULL
-#define  ZCP_RAM_ACC_WRITE             0x0000000000000000ULL
-#define  ZCP_RAM_ACC_LOJ               0x0000000020000000ULL
-#define  ZCP_RAM_ACC_ZFCID             0x000000001ffe0000ULL
-#define  ZCP_RAM_ACC_ZFCID_SHIFT       17
-#define  ZCP_RAM_ACC_RAM_SEL           0x000000000001f000ULL
-#define  ZCP_RAM_ACC_RAM_SEL_SHIFT     12
-#define  ZCP_RAM_ACC_CFIFOADDR         0x0000000000000fffULL
-#define  ZCP_RAM_ACC_CFIFOADDR_SHIFT   0
-
-#define ZCP_RAM_SEL_BAM(INDEX)         (0x00 + (INDEX))
-#define ZCP_RAM_SEL_TT_STATIC          0x08
-#define ZCP_RAM_SEL_TT_DYNAMIC         0x09
-#define ZCP_RAM_SEL_CFIFO(PORT)                (0x10 + (PORT))
-
-#define NIU_CFIFO_ENTRIES              1024
-#define ATLAS_P0_P1_CFIFO_ENTRIES      2048
-#define ATLAS_P2_P3_CFIFO_ENTRIES      1024
-
-#define CHK_BIT_DATA                   (FZC_ZCP + 0x00090UL)
-#define  CHK_BIT_DATA_DATA             0x000000000000ffffULL
-
-#define RESET_CFIFO                    (FZC_ZCP + 0x00098UL)
-#define  RESET_CFIFO_RST(PORT)         (0x1 << (PORT))
-
-#define CFIFO_ECC(PORT)                        (FZC_ZCP + 0x000a0UL + (PORT) * 8UL)
-#define  CFIFO_ECC_DIS_DBLBIT_ERR      0x0000000080000000ULL
-#define  CFIFO_ECC_DBLBIT_ERR          0x0000000000020000ULL
-#define  CFIFO_ECC_SINGLEBIT_ERR       0x0000000000010000ULL
-#define  CFIFO_ECC_ALL_PKT             0x0000000000000400ULL
-#define  CFIFO_ECC_LAST_LINE           0x0000000000000004ULL
-#define  CFIFO_ECC_2ND_LINE            0x0000000000000002ULL
-#define  CFIFO_ECC_1ST_LINE            0x0000000000000001ULL
-
-#define ZCP_TRAINING_VECTOR            (FZC_ZCP + 0x000c0UL)
-#define  ZCP_TRAINING_VECTOR_VECTOR    0x00000000ffffffffULL
-
-#define ZCP_STATE_MACHINE              (FZC_ZCP + 0x000c8UL)
-#define  ZCP_STATE_MACHINE_SM          0x00000000ffffffffULL
-
-/* Same bits as ZCP_INT_STAT */
-#define ZCP_INT_STAT_TEST              (FZC_ZCP + 0x00108UL)
-
-#define RXDMA_CFIG1(IDX)               (DMC + 0x00000UL + (IDX) * 0x200UL)
-#define  RXDMA_CFIG1_EN                        0x0000000080000000ULL
-#define  RXDMA_CFIG1_RST               0x0000000040000000ULL
-#define  RXDMA_CFIG1_QST               0x0000000020000000ULL
-#define  RXDMA_CFIG1_MBADDR_H          0x0000000000000fffULL /* mboxaddr 43:32 */
-
-#define RXDMA_CFIG2(IDX)               (DMC + 0x00008UL + (IDX) * 0x200UL)
-#define  RXDMA_CFIG2_MBADDR_L          0x00000000ffffffc0ULL /* mboxaddr 31:6 */
-#define  RXDMA_CFIG2_OFFSET            0x0000000000000006ULL
-#define  RXDMA_CFIG2_OFFSET_SHIFT      1
-#define  RXDMA_CFIG2_FULL_HDR          0x0000000000000001ULL
-
-#define RBR_CFIG_A(IDX)                        (DMC + 0x00010UL + (IDX) * 0x200UL)
-#define  RBR_CFIG_A_LEN                        0xffff000000000000ULL
-#define  RBR_CFIG_A_LEN_SHIFT          48
-#define  RBR_CFIG_A_STADDR_BASE                0x00000ffffffc0000ULL
-#define  RBR_CFIG_A_STADDR             0x000000000003ffc0ULL
-
-#define RBR_CFIG_B(IDX)                        (DMC + 0x00018UL + (IDX) * 0x200UL)
-#define  RBR_CFIG_B_BLKSIZE            0x0000000003000000ULL
-#define  RBR_CFIG_B_BLKSIZE_SHIFT      24
-#define  RBR_CFIG_B_VLD2               0x0000000000800000ULL
-#define  RBR_CFIG_B_BUFSZ2             0x0000000000030000ULL
-#define  RBR_CFIG_B_BUFSZ2_SHIFT       16
-#define  RBR_CFIG_B_VLD1               0x0000000000008000ULL
-#define  RBR_CFIG_B_BUFSZ1             0x0000000000000300ULL
-#define  RBR_CFIG_B_BUFSZ1_SHIFT       8
-#define  RBR_CFIG_B_VLD0               0x0000000000000080ULL
-#define  RBR_CFIG_B_BUFSZ0             0x0000000000000003ULL
-#define  RBR_CFIG_B_BUFSZ0_SHIFT       0
-
-#define RBR_BLKSIZE_4K                 0x0
-#define RBR_BLKSIZE_8K                 0x1
-#define RBR_BLKSIZE_16K                        0x2
-#define RBR_BLKSIZE_32K                        0x3
-#define RBR_BUFSZ2_2K                  0x0
-#define RBR_BUFSZ2_4K                  0x1
-#define RBR_BUFSZ2_8K                  0x2
-#define RBR_BUFSZ2_16K                 0x3
-#define RBR_BUFSZ1_1K                  0x0
-#define RBR_BUFSZ1_2K                  0x1
-#define RBR_BUFSZ1_4K                  0x2
-#define RBR_BUFSZ1_8K                  0x3
-#define RBR_BUFSZ0_256                 0x0
-#define RBR_BUFSZ0_512                 0x1
-#define RBR_BUFSZ0_1K                  0x2
-#define RBR_BUFSZ0_2K                  0x3
-
-#define RBR_KICK(IDX)                  (DMC + 0x00020UL + (IDX) * 0x200UL)
-#define  RBR_KICK_BKADD                        0x000000000000ffffULL
-
-#define RBR_STAT(IDX)                  (DMC + 0x00028UL + (IDX) * 0x200UL)
-#define  RBR_STAT_QLEN                 0x000000000000ffffULL
-
-#define RBR_HDH(IDX)                   (DMC + 0x00030UL + (IDX) * 0x200UL)
-#define  RBR_HDH_HEAD_H                        0x0000000000000fffULL
-
-#define RBR_HDL(IDX)                   (DMC + 0x00038UL + (IDX) * 0x200UL)
-#define  RBR_HDL_HEAD_L                        0x00000000fffffffcULL
-
-#define RCRCFIG_A(IDX)                 (DMC + 0x00040UL + (IDX) * 0x200UL)
-#define  RCRCFIG_A_LEN                 0xffff000000000000ULL
-#define  RCRCFIG_A_LEN_SHIFT           48
-#define  RCRCFIG_A_STADDR_BASE         0x00000ffffff80000ULL
-#define  RCRCFIG_A_STADDR              0x000000000007ffc0ULL
-
-#define RCRCFIG_B(IDX)                 (DMC + 0x00048UL + (IDX) * 0x200UL)
-#define  RCRCFIG_B_PTHRES              0x00000000ffff0000ULL
-#define  RCRCFIG_B_PTHRES_SHIFT                16
-#define  RCRCFIG_B_ENTOUT              0x0000000000008000ULL
-#define  RCRCFIG_B_TIMEOUT             0x000000000000003fULL
-#define  RCRCFIG_B_TIMEOUT_SHIFT       0
-
-#define RCRSTAT_A(IDX)                 (DMC + 0x00050UL + (IDX) * 0x200UL)
-#define  RCRSTAT_A_QLEN                        0x000000000000ffffULL
-
-#define RCRSTAT_B(IDX)                 (DMC + 0x00058UL + (IDX) * 0x200UL)
-#define  RCRSTAT_B_TIPTR_H             0x0000000000000fffULL
-
-#define RCRSTAT_C(IDX)                 (DMC + 0x00060UL + (IDX) * 0x200UL)
-#define  RCRSTAT_C_TIPTR_L             0x00000000fffffff8ULL
-
-#define RX_DMA_CTL_STAT(IDX)           (DMC + 0x00070UL + (IDX) * 0x200UL)
-#define  RX_DMA_CTL_STAT_RBR_TMOUT     0x0020000000000000ULL
-#define  RX_DMA_CTL_STAT_RSP_CNT_ERR   0x0010000000000000ULL
-#define  RX_DMA_CTL_STAT_BYTE_EN_BUS   0x0008000000000000ULL
-#define  RX_DMA_CTL_STAT_RSP_DAT_ERR   0x0004000000000000ULL
-#define  RX_DMA_CTL_STAT_RCR_ACK_ERR   0x0002000000000000ULL
-#define  RX_DMA_CTL_STAT_DC_FIFO_ERR   0x0001000000000000ULL
-#define  RX_DMA_CTL_STAT_MEX           0x0000800000000000ULL
-#define  RX_DMA_CTL_STAT_RCRTHRES      0x0000400000000000ULL
-#define  RX_DMA_CTL_STAT_RCRTO         0x0000200000000000ULL
-#define  RX_DMA_CTL_STAT_RCR_SHA_PAR   0x0000100000000000ULL
-#define  RX_DMA_CTL_STAT_RBR_PRE_PAR   0x0000080000000000ULL
-#define  RX_DMA_CTL_STAT_PORT_DROP_PKT 0x0000040000000000ULL
-#define  RX_DMA_CTL_STAT_WRED_DROP     0x0000020000000000ULL
-#define  RX_DMA_CTL_STAT_RBR_PRE_EMTY  0x0000010000000000ULL
-#define  RX_DMA_CTL_STAT_RCRSHADOW_FULL        0x0000008000000000ULL
-#define  RX_DMA_CTL_STAT_CONFIG_ERR    0x0000004000000000ULL
-#define  RX_DMA_CTL_STAT_RCRINCON      0x0000002000000000ULL
-#define  RX_DMA_CTL_STAT_RCRFULL       0x0000001000000000ULL
-#define  RX_DMA_CTL_STAT_RBR_EMPTY     0x0000000800000000ULL
-#define  RX_DMA_CTL_STAT_RBRFULL       0x0000000400000000ULL
-#define  RX_DMA_CTL_STAT_RBRLOGPAGE    0x0000000200000000ULL
-#define  RX_DMA_CTL_STAT_CFIGLOGPAGE   0x0000000100000000ULL
-#define  RX_DMA_CTL_STAT_PTRREAD       0x00000000ffff0000ULL
-#define  RX_DMA_CTL_STAT_PTRREAD_SHIFT 16
-#define  RX_DMA_CTL_STAT_PKTREAD       0x000000000000ffffULL
-#define  RX_DMA_CTL_STAT_PKTREAD_SHIFT 0
-
-#define  RX_DMA_CTL_STAT_CHAN_FATAL    (RX_DMA_CTL_STAT_RBR_TMOUT | \
-                                        RX_DMA_CTL_STAT_RSP_CNT_ERR | \
-                                        RX_DMA_CTL_STAT_BYTE_EN_BUS | \
-                                        RX_DMA_CTL_STAT_RSP_DAT_ERR | \
-                                        RX_DMA_CTL_STAT_RCR_ACK_ERR | \
-                                        RX_DMA_CTL_STAT_RCR_SHA_PAR | \
-                                        RX_DMA_CTL_STAT_RBR_PRE_PAR | \
-                                        RX_DMA_CTL_STAT_CONFIG_ERR | \
-                                        RX_DMA_CTL_STAT_RCRINCON | \
-                                        RX_DMA_CTL_STAT_RCRFULL | \
-                                        RX_DMA_CTL_STAT_RBRFULL | \
-                                        RX_DMA_CTL_STAT_RBRLOGPAGE | \
-                                        RX_DMA_CTL_STAT_CFIGLOGPAGE)
-
-#define RX_DMA_CTL_STAT_PORT_FATAL     (RX_DMA_CTL_STAT_DC_FIFO_ERR)
-
-#define RX_DMA_CTL_WRITE_CLEAR_ERRS    (RX_DMA_CTL_STAT_RBR_EMPTY | \
-                                        RX_DMA_CTL_STAT_RCRSHADOW_FULL | \
-                                        RX_DMA_CTL_STAT_RBR_PRE_EMTY | \
-                                        RX_DMA_CTL_STAT_WRED_DROP | \
-                                        RX_DMA_CTL_STAT_PORT_DROP_PKT | \
-                                        RX_DMA_CTL_STAT_RCRTO | \
-                                        RX_DMA_CTL_STAT_RCRTHRES | \
-                                        RX_DMA_CTL_STAT_DC_FIFO_ERR)
-
-#define RCR_FLSH(IDX)                  (DMC + 0x00078UL + (IDX) * 0x200UL)
-#define  RCR_FLSH_FLSH                 0x0000000000000001ULL
-
-#define RXMISC(IDX)                    (DMC + 0x00090UL + (IDX) * 0x200UL)
-#define  RXMISC_OFLOW                  0x0000000000010000ULL
-#define  RXMISC_COUNT                  0x000000000000ffffULL
-
-#define RX_DMA_CTL_STAT_DBG(IDX)       (DMC + 0x00098UL + (IDX) * 0x200UL)
-#define  RX_DMA_CTL_STAT_DBG_RBR_TMOUT         0x0020000000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RSP_CNT_ERR       0x0010000000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_BYTE_EN_BUS       0x0008000000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RSP_DAT_ERR       0x0004000000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RCR_ACK_ERR       0x0002000000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_DC_FIFO_ERR       0x0001000000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_MEX               0x0000800000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RCRTHRES          0x0000400000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RCRTO             0x0000200000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RCR_SHA_PAR       0x0000100000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RBR_PRE_PAR       0x0000080000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_PORT_DROP_PKT     0x0000040000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_WRED_DROP         0x0000020000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RBR_PRE_EMTY      0x0000010000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RCRSHADOW_FULL    0x0000008000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_CONFIG_ERR                0x0000004000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RCRINCON          0x0000002000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RCRFULL           0x0000001000000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RBR_EMPTY         0x0000000800000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RBRFULL           0x0000000400000000ULL
-#define  RX_DMA_CTL_STAT_DBG_RBRLOGPAGE                0x0000000200000000ULL
-#define  RX_DMA_CTL_STAT_DBG_CFIGLOGPAGE       0x0000000100000000ULL
-#define  RX_DMA_CTL_STAT_DBG_PTRREAD           0x00000000ffff0000ULL
-#define  RX_DMA_CTL_STAT_DBG_PKTREAD           0x000000000000ffffULL
-
-#define RX_DMA_ENT_MSK(IDX)            (DMC + 0x00068UL + (IDX) * 0x200UL)
-#define  RX_DMA_ENT_MSK_RBR_TMOUT      0x0000000000200000ULL
-#define  RX_DMA_ENT_MSK_RSP_CNT_ERR    0x0000000000100000ULL
-#define  RX_DMA_ENT_MSK_BYTE_EN_BUS    0x0000000000080000ULL
-#define  RX_DMA_ENT_MSK_RSP_DAT_ERR    0x0000000000040000ULL
-#define  RX_DMA_ENT_MSK_RCR_ACK_ERR    0x0000000000020000ULL
-#define  RX_DMA_ENT_MSK_DC_FIFO_ERR    0x0000000000010000ULL
-#define  RX_DMA_ENT_MSK_RCRTHRES       0x0000000000004000ULL
-#define  RX_DMA_ENT_MSK_RCRTO          0x0000000000002000ULL
-#define  RX_DMA_ENT_MSK_RCR_SHA_PAR    0x0000000000001000ULL
-#define  RX_DMA_ENT_MSK_RBR_PRE_PAR    0x0000000000000800ULL
-#define  RX_DMA_ENT_MSK_PORT_DROP_PKT  0x0000000000000400ULL
-#define  RX_DMA_ENT_MSK_WRED_DROP      0x0000000000000200ULL
-#define  RX_DMA_ENT_MSK_RBR_PRE_EMTY   0x0000000000000100ULL
-#define  RX_DMA_ENT_MSK_RCR_SHADOW_FULL        0x0000000000000080ULL
-#define  RX_DMA_ENT_MSK_CONFIG_ERR     0x0000000000000040ULL
-#define  RX_DMA_ENT_MSK_RCRINCON       0x0000000000000020ULL
-#define  RX_DMA_ENT_MSK_RCRFULL                0x0000000000000010ULL
-#define  RX_DMA_ENT_MSK_RBR_EMPTY      0x0000000000000008ULL
-#define  RX_DMA_ENT_MSK_RBRFULL                0x0000000000000004ULL
-#define  RX_DMA_ENT_MSK_RBRLOGPAGE     0x0000000000000002ULL
-#define  RX_DMA_ENT_MSK_CFIGLOGPAGE    0x0000000000000001ULL
-#define  RX_DMA_ENT_MSK_ALL            0x00000000003f7fffULL
-
-#define TX_RNG_CFIG(IDX)               (DMC + 0x40000UL + (IDX) * 0x200UL)
-#define  TX_RNG_CFIG_LEN               0x1fff000000000000ULL
-#define  TX_RNG_CFIG_LEN_SHIFT         48
-#define  TX_RNG_CFIG_STADDR_BASE       0x00000ffffff80000ULL
-#define  TX_RNG_CFIG_STADDR            0x000000000007ffc0ULL
-
-#define TX_RING_HDL(IDX)               (DMC + 0x40010UL + (IDX) * 0x200UL)
-#define  TX_RING_HDL_WRAP              0x0000000000080000ULL
-#define  TX_RING_HDL_HEAD              0x000000000007fff8ULL
-#define  TX_RING_HDL_HEAD_SHIFT                3
-
-#define TX_RING_KICK(IDX)              (DMC + 0x40018UL + (IDX) * 0x200UL)
-#define  TX_RING_KICK_WRAP             0x0000000000080000ULL
-#define  TX_RING_KICK_TAIL             0x000000000007fff8ULL
-
-#define TX_ENT_MSK(IDX)                        (DMC + 0x40020UL + (IDX) * 0x200UL)
-#define  TX_ENT_MSK_MK                 0x0000000000008000ULL
-#define  TX_ENT_MSK_MBOX_ERR           0x0000000000000080ULL
-#define  TX_ENT_MSK_PKT_SIZE_ERR       0x0000000000000040ULL
-#define  TX_ENT_MSK_TX_RING_OFLOW      0x0000000000000020ULL
-#define  TX_ENT_MSK_PREF_BUF_ECC_ERR   0x0000000000000010ULL
-#define  TX_ENT_MSK_NACK_PREF          0x0000000000000008ULL
-#define  TX_ENT_MSK_NACK_PKT_RD                0x0000000000000004ULL
-#define  TX_ENT_MSK_CONF_PART_ERR      0x0000000000000002ULL
-#define  TX_ENT_MSK_PKT_PRT_ERR                0x0000000000000001ULL
-
-#define TX_CS(IDX)                     (DMC + 0x40028UL + (IDX)*0x200UL)
-#define  TX_CS_PKT_CNT                 0x0fff000000000000ULL
-#define  TX_CS_PKT_CNT_SHIFT           48
-#define  TX_CS_LASTMARK                        0x00000fff00000000ULL
-#define  TX_CS_LASTMARK_SHIFT          32
-#define  TX_CS_RST                     0x0000000080000000ULL
-#define  TX_CS_RST_STATE               0x0000000040000000ULL
-#define  TX_CS_MB                      0x0000000020000000ULL
-#define  TX_CS_STOP_N_GO               0x0000000010000000ULL
-#define  TX_CS_SNG_STATE               0x0000000008000000ULL
-#define  TX_CS_MK                      0x0000000000008000ULL
-#define  TX_CS_MMK                     0x0000000000004000ULL
-#define  TX_CS_MBOX_ERR                        0x0000000000000080ULL
-#define  TX_CS_PKT_SIZE_ERR            0x0000000000000040ULL
-#define  TX_CS_TX_RING_OFLOW           0x0000000000000020ULL
-#define  TX_CS_PREF_BUF_PAR_ERR                0x0000000000000010ULL
-#define  TX_CS_NACK_PREF               0x0000000000000008ULL
-#define  TX_CS_NACK_PKT_RD             0x0000000000000004ULL
-#define  TX_CS_CONF_PART_ERR           0x0000000000000002ULL
-#define  TX_CS_PKT_PRT_ERR             0x0000000000000001ULL
-
-#define TXDMA_MBH(IDX)                 (DMC + 0x40030UL + (IDX) * 0x200UL)
-#define  TXDMA_MBH_MBADDR              0x0000000000000fffULL
-
-#define TXDMA_MBL(IDX)                 (DMC + 0x40038UL + (IDX) * 0x200UL)
-#define  TXDMA_MBL_MBADDR              0x00000000ffffffc0ULL
-
-#define TX_DMA_PRE_ST(IDX)             (DMC + 0x40040UL + (IDX) * 0x200UL)
-#define  TX_DMA_PRE_ST_SHADOW_HD       0x000000000007ffffULL
-
-#define TX_RNG_ERR_LOGH(IDX)           (DMC + 0x40048UL + (IDX) * 0x200UL)
-#define  TX_RNG_ERR_LOGH_ERR           0x0000000080000000ULL
-#define  TX_RNG_ERR_LOGH_MERR          0x0000000040000000ULL
-#define  TX_RNG_ERR_LOGH_ERRCODE       0x0000000038000000ULL
-#define  TX_RNG_ERR_LOGH_ERRADDR       0x0000000000000fffULL
-
-#define TX_RNG_ERR_LOGL(IDX)           (DMC + 0x40050UL + (IDX) * 0x200UL)
-#define  TX_RNG_ERR_LOGL_ERRADDR       0x00000000ffffffffULL
-
-#define TDMC_INTR_DBG(IDX)             (DMC + 0x40060UL + (IDX) * 0x200UL)
-#define  TDMC_INTR_DBG_MK              0x0000000000008000ULL
-#define  TDMC_INTR_DBG_MBOX_ERR                0x0000000000000080ULL
-#define  TDMC_INTR_DBG_PKT_SIZE_ERR    0x0000000000000040ULL
-#define  TDMC_INTR_DBG_TX_RING_OFLOW   0x0000000000000020ULL
-#define  TDMC_INTR_DBG_PREF_BUF_PAR_ERR        0x0000000000000010ULL
-#define  TDMC_INTR_DBG_NACK_PREF       0x0000000000000008ULL
-#define  TDMC_INTR_DBG_NACK_PKT_RD     0x0000000000000004ULL
-#define  TDMC_INTR_DBG_CONF_PART_ERR   0x0000000000000002ULL
-#define  TDMC_INTR_DBG_PKT_PART_ERR    0x0000000000000001ULL
-
-#define TX_CS_DBG(IDX)                 (DMC + 0x40068UL + (IDX) * 0x200UL)
-#define  TX_CS_DBG_PKT_CNT             0x0fff000000000000ULL
-
-#define TDMC_INJ_PAR_ERR(IDX)          (DMC + 0x45040UL + (IDX) * 0x200UL)
-#define  TDMC_INJ_PAR_ERR_VAL          0x000000000000ffffULL
-
-#define TDMC_DBG_SEL(IDX)              (DMC + 0x45080UL + (IDX) * 0x200UL)
-#define  TDMC_DBG_SEL_DBG_SEL          0x000000000000003fULL
-
-#define TDMC_TRAINING_VECTOR(IDX)      (DMC + 0x45088UL + (IDX) * 0x200UL)
-#define  TDMC_TRAINING_VECTOR_VEC      0x00000000ffffffffULL
-
-#define TXC_DMA_MAX(CHAN)              (FZC_TXC + 0x00000UL + (CHAN)*0x1000UL)
-#define TXC_DMA_MAX_LEN(CHAN)          (FZC_TXC + 0x00008UL + (CHAN)*0x1000UL)
-
-#define TXC_CONTROL                    (FZC_TXC + 0x20000UL)
-#define  TXC_CONTROL_ENABLE            0x0000000000000010ULL
-#define  TXC_CONTROL_PORT_ENABLE(X)    (1 << (X))
-
-#define TXC_TRAINING_VEC               (FZC_TXC + 0x20008UL)
-#define  TXC_TRAINING_VEC_MASK         0x00000000ffffffffULL
-
-#define TXC_DEBUG                      (FZC_TXC + 0x20010UL)
-#define  TXC_DEBUG_SELECT              0x000000000000003fULL
-
-#define TXC_MAX_REORDER                        (FZC_TXC + 0x20018UL)
-#define  TXC_MAX_REORDER_PORT3         0x000000000f000000ULL
-#define  TXC_MAX_REORDER_PORT2         0x00000000000f0000ULL
-#define  TXC_MAX_REORDER_PORT1         0x0000000000000f00ULL
-#define  TXC_MAX_REORDER_PORT0         0x000000000000000fULL
-
-#define TXC_PORT_CTL(PORT)             (FZC_TXC + 0x20020UL + (PORT)*0x100UL)
-#define  TXC_PORT_CTL_CLR_ALL_STAT     0x0000000000000001ULL
-
-#define TXC_PKT_STUFFED(PORT)          (FZC_TXC + 0x20030UL + (PORT)*0x100UL)
-#define  TXC_PKT_STUFFED_PP_REORDER    0x00000000ffff0000ULL
-#define  TXC_PKT_STUFFED_PP_PACKETASSY 0x000000000000ffffULL
-
-#define TXC_PKT_XMIT(PORT)             (FZC_TXC + 0x20038UL + (PORT)*0x100UL)
-#define  TXC_PKT_XMIT_BYTES            0x00000000ffff0000ULL
-#define  TXC_PKT_XMIT_PKTS             0x000000000000ffffULL
-
-#define TXC_ROECC_CTL(PORT)            (FZC_TXC + 0x20040UL + (PORT)*0x100UL)
-#define  TXC_ROECC_CTL_DISABLE_UE      0x0000000080000000ULL
-#define  TXC_ROECC_CTL_DBL_BIT_ERR     0x0000000000020000ULL
-#define  TXC_ROECC_CTL_SNGL_BIT_ERR    0x0000000000010000ULL
-#define  TXC_ROECC_CTL_ALL_PKTS                0x0000000000000400ULL
-#define  TXC_ROECC_CTL_ALT_PKTS                0x0000000000000200ULL
-#define  TXC_ROECC_CTL_ONE_PKT_ONLY    0x0000000000000100ULL
-#define  TXC_ROECC_CTL_LST_PKT_LINE    0x0000000000000004ULL
-#define  TXC_ROECC_CTL_2ND_PKT_LINE    0x0000000000000002ULL
-#define  TXC_ROECC_CTL_1ST_PKT_LINE    0x0000000000000001ULL
-
-#define TXC_ROECC_ST(PORT)             (FZC_TXC + 0x20048UL + (PORT)*0x100UL)
-#define  TXC_ROECC_CLR_ST              0x0000000080000000ULL
-#define  TXC_ROECC_CE                  0x0000000000020000ULL
-#define  TXC_ROECC_UE                  0x0000000000010000ULL
-#define  TXC_ROECC_ST_ECC_ADDR         0x00000000000003ffULL
-
-#define TXC_RO_DATA0(PORT)             (FZC_TXC + 0x20050UL + (PORT)*0x100UL)
-#define  TXC_RO_DATA0_DATA0            0x00000000ffffffffULL /* bits 31:0 */
-
-#define TXC_RO_DATA1(PORT)             (FZC_TXC + 0x20058UL + (PORT)*0x100UL)
-#define  TXC_RO_DATA1_DATA1            0x00000000ffffffffULL /* bits 63:32 */
-
-#define TXC_RO_DATA2(PORT)             (FZC_TXC + 0x20060UL + (PORT)*0x100UL)
-#define  TXC_RO_DATA2_DATA2            0x00000000ffffffffULL /* bits 95:64 */
-
-#define TXC_RO_DATA3(PORT)             (FZC_TXC + 0x20068UL + (PORT)*0x100UL)
-#define  TXC_RO_DATA3_DATA3            0x00000000ffffffffULL /* bits 127:96 */
-
-#define TXC_RO_DATA4(PORT)             (FZC_TXC + 0x20070UL + (PORT)*0x100UL)
-#define  TXC_RO_DATA4_DATA4            0x0000000000ffffffULL /* bits 151:128 */
-
-#define TXC_SFECC_CTL(PORT)            (FZC_TXC + 0x20078UL + (PORT)*0x100UL)
-#define  TXC_SFECC_CTL_DISABLE_UE      0x0000000080000000ULL
-#define  TXC_SFECC_CTL_DBL_BIT_ERR     0x0000000000020000ULL
-#define  TXC_SFECC_CTL_SNGL_BIT_ERR    0x0000000000010000ULL
-#define  TXC_SFECC_CTL_ALL_PKTS                0x0000000000000400ULL
-#define  TXC_SFECC_CTL_ALT_PKTS                0x0000000000000200ULL
-#define  TXC_SFECC_CTL_ONE_PKT_ONLY    0x0000000000000100ULL
-#define  TXC_SFECC_CTL_LST_PKT_LINE    0x0000000000000004ULL
-#define  TXC_SFECC_CTL_2ND_PKT_LINE    0x0000000000000002ULL
-#define  TXC_SFECC_CTL_1ST_PKT_LINE    0x0000000000000001ULL
-
-#define TXC_SFECC_ST(PORT)             (FZC_TXC + 0x20080UL + (PORT)*0x100UL)
-#define  TXC_SFECC_ST_CLR_ST           0x0000000080000000ULL
-#define  TXC_SFECC_ST_CE               0x0000000000020000ULL
-#define  TXC_SFECC_ST_UE               0x0000000000010000ULL
-#define  TXC_SFECC_ST_ECC_ADDR         0x00000000000003ffULL
-
-#define TXC_SF_DATA0(PORT)             (FZC_TXC + 0x20088UL + (PORT)*0x100UL)
-#define  TXC_SF_DATA0_DATA0            0x00000000ffffffffULL /* bits 31:0 */
-
-#define TXC_SF_DATA1(PORT)             (FZC_TXC + 0x20090UL + (PORT)*0x100UL)
-#define  TXC_SF_DATA1_DATA1            0x00000000ffffffffULL /* bits 63:32 */
-
-#define TXC_SF_DATA2(PORT)             (FZC_TXC + 0x20098UL + (PORT)*0x100UL)
-#define  TXC_SF_DATA2_DATA2            0x00000000ffffffffULL /* bits 95:64 */
-
-#define TXC_SF_DATA3(PORT)             (FZC_TXC + 0x200a0UL + (PORT)*0x100UL)
-#define  TXC_SF_DATA3_DATA3            0x00000000ffffffffULL /* bits 127:96 */
-
-#define TXC_SF_DATA4(PORT)             (FZC_TXC + 0x200a8UL + (PORT)*0x100UL)
-#define  TXC_SF_DATA4_DATA4            0x0000000000ffffffULL /* bits 151:128 */
-
-#define TXC_RO_TIDS(PORT)              (FZC_TXC + 0x200b0UL + (PORT)*0x100UL)
-#define  TXC_RO_TIDS_IN_USE            0x00000000ffffffffULL
-
-#define TXC_RO_STATE0(PORT)            (FZC_TXC + 0x200b8UL + (PORT)*0x100UL)
-#define  TXC_RO_STATE0_DUPLICATE_TID   0x00000000ffffffffULL
-
-#define TXC_RO_STATE1(PORT)            (FZC_TXC + 0x200c0UL + (PORT)*0x100UL)
-#define  TXC_RO_STATE1_UNUSED_TID      0x00000000ffffffffULL
-
-#define TXC_RO_STATE2(PORT)            (FZC_TXC + 0x200c8UL + (PORT)*0x100UL)
-#define  TXC_RO_STATE2_TRANS_TIMEOUT   0x00000000ffffffffULL
-
-#define TXC_RO_STATE3(PORT)            (FZC_TXC + 0x200d0UL + (PORT)*0x100UL)
-#define  TXC_RO_STATE3_ENAB_SPC_WMARK  0x0000000080000000ULL
-#define  TXC_RO_STATE3_RO_SPC_WMARK    0x000000007fe00000ULL
-#define  TXC_RO_STATE3_ROFIFO_SPC_AVAIL        0x00000000001ff800ULL
-#define  TXC_RO_STATE3_ENAB_RO_WMARK   0x0000000000000100ULL
-#define  TXC_RO_STATE3_HIGH_RO_USED    0x00000000000000f0ULL
-#define  TXC_RO_STATE3_NUM_RO_USED     0x000000000000000fULL
-
-#define TXC_RO_CTL(PORT)               (FZC_TXC + 0x200d8UL + (PORT)*0x100UL)
-#define  TXC_RO_CTL_CLR_FAIL_STATE     0x0000000080000000ULL
-#define  TXC_RO_CTL_RO_ADDR            0x000000000f000000ULL
-#define  TXC_RO_CTL_ADDR_FAILED                0x0000000000400000ULL
-#define  TXC_RO_CTL_DMA_FAILED         0x0000000000200000ULL
-#define  TXC_RO_CTL_LEN_FAILED         0x0000000000100000ULL
-#define  TXC_RO_CTL_CAPT_ADDR_FAILED   0x0000000000040000ULL
-#define  TXC_RO_CTL_CAPT_DMA_FAILED    0x0000000000020000ULL
-#define  TXC_RO_CTL_CAPT_LEN_FAILED    0x0000000000010000ULL
-#define  TXC_RO_CTL_RO_STATE_RD_DONE   0x0000000000000080ULL
-#define  TXC_RO_CTL_RO_STATE_WR_DONE   0x0000000000000040ULL
-#define  TXC_RO_CTL_RO_STATE_RD                0x0000000000000020ULL
-#define  TXC_RO_CTL_RO_STATE_WR                0x0000000000000010ULL
-#define  TXC_RO_CTL_RO_STATE_ADDR      0x000000000000000fULL
-
-#define TXC_RO_ST_DATA0(PORT)          (FZC_TXC + 0x200e0UL + (PORT)*0x100UL)
-#define  TXC_RO_ST_DATA0_DATA0         0x00000000ffffffffULL
-
-#define TXC_RO_ST_DATA1(PORT)          (FZC_TXC + 0x200e8UL + (PORT)*0x100UL)
-#define  TXC_RO_ST_DATA1_DATA1         0x00000000ffffffffULL
-
-#define TXC_RO_ST_DATA2(PORT)          (FZC_TXC + 0x200f0UL + (PORT)*0x100UL)
-#define  TXC_RO_ST_DATA2_DATA2         0x00000000ffffffffULL
-
-#define TXC_RO_ST_DATA3(PORT)          (FZC_TXC + 0x200f8UL + (PORT)*0x100UL)
-#define  TXC_RO_ST_DATA3_DATA3         0x00000000ffffffffULL
-
-#define TXC_PORT_PACKET_REQ(PORT)      (FZC_TXC + 0x20100UL + (PORT)*0x100UL)
-#define  TXC_PORT_PACKET_REQ_GATHER_REQ        0x00000000f0000000ULL
-#define  TXC_PORT_PACKET_REQ_PKT_REQ   0x000000000fff0000ULL
-#define  TXC_PORT_PACKET_REQ_PERR_ABRT 0x000000000000ffffULL
-
-       /* bits are same as TXC_INT_STAT */
-#define TXC_INT_STAT_DBG               (FZC_TXC + 0x20420UL)
-
-#define TXC_INT_STAT                   (FZC_TXC + 0x20428UL)
-#define  TXC_INT_STAT_VAL_SHIFT(PORT)  ((PORT) * 8)
-#define  TXC_INT_STAT_VAL(PORT)                (0x3f << TXC_INT_STAT_VAL_SHIFT(PORT))
-#define  TXC_INT_STAT_SF_CE(PORT)      (0x01 << TXC_INT_STAT_VAL_SHIFT(PORT))
-#define  TXC_INT_STAT_SF_UE(PORT)      (0x02 << TXC_INT_STAT_VAL_SHIFT(PORT))
-#define  TXC_INT_STAT_RO_CE(PORT)      (0x04 << TXC_INT_STAT_VAL_SHIFT(PORT))
-#define  TXC_INT_STAT_RO_UE(PORT)      (0x08 << TXC_INT_STAT_VAL_SHIFT(PORT))
-#define  TXC_INT_STAT_REORDER_ERR(PORT)        (0x10 << TXC_INT_STAT_VAL_SHIFT(PORT))
-#define  TXC_INT_STAT_PKTASM_DEAD(PORT)        (0x20 << TXC_INT_STAT_VAL_SHIFT(PORT))
-
-#define TXC_INT_MASK                   (FZC_TXC + 0x20430UL)
-#define  TXC_INT_MASK_VAL_SHIFT(PORT)  ((PORT) * 8)
-#define  TXC_INT_MASK_VAL(PORT)                (0x3f << TXC_INT_STAT_VAL_SHIFT(PORT))
-
-#define TXC_INT_MASK_SF_CE             0x01
-#define TXC_INT_MASK_SF_UE             0x02
-#define TXC_INT_MASK_RO_CE             0x04
-#define TXC_INT_MASK_RO_UE             0x08
-#define TXC_INT_MASK_REORDER_ERR       0x10
-#define TXC_INT_MASK_PKTASM_DEAD       0x20
-#define TXC_INT_MASK_ALL               0x3f
-
-#define TXC_PORT_DMA(IDX)              (FZC_TXC + 0x20028UL + (IDX)*0x100UL)
-
-#define ESPC_PIO_EN                    (FZC_PROM + 0x40000UL)
-#define  ESPC_PIO_EN_ENABLE            0x0000000000000001ULL
-
-#define ESPC_PIO_STAT                  (FZC_PROM + 0x40008UL)
-#define  ESPC_PIO_STAT_READ_START      0x0000000080000000ULL
-#define  ESPC_PIO_STAT_READ_END                0x0000000040000000ULL
-#define  ESPC_PIO_STAT_WRITE_INIT      0x0000000020000000ULL
-#define  ESPC_PIO_STAT_WRITE_END       0x0000000010000000ULL
-#define  ESPC_PIO_STAT_ADDR            0x0000000003ffff00ULL
-#define  ESPC_PIO_STAT_ADDR_SHIFT      8
-#define  ESPC_PIO_STAT_DATA            0x00000000000000ffULL
-#define  ESPC_PIO_STAT_DATA_SHIFT      0
-
-#define ESPC_NCR(IDX)                  (FZC_PROM + 0x40020UL + (IDX)*0x8UL)
-#define  ESPC_NCR_VAL                  0x00000000ffffffffULL
-
-#define ESPC_MAC_ADDR0                 ESPC_NCR(0)
-#define ESPC_MAC_ADDR1                 ESPC_NCR(1)
-#define ESPC_NUM_PORTS_MACS            ESPC_NCR(2)
-#define  ESPC_NUM_PORTS_MACS_VAL       0x00000000000000ffULL
-#define ESPC_MOD_STR_LEN               ESPC_NCR(4)
-#define ESPC_MOD_STR_1                 ESPC_NCR(5)
-#define ESPC_MOD_STR_2                 ESPC_NCR(6)
-#define ESPC_MOD_STR_3                 ESPC_NCR(7)
-#define ESPC_MOD_STR_4                 ESPC_NCR(8)
-#define ESPC_MOD_STR_5                 ESPC_NCR(9)
-#define ESPC_MOD_STR_6                 ESPC_NCR(10)
-#define ESPC_MOD_STR_7                 ESPC_NCR(11)
-#define ESPC_MOD_STR_8                 ESPC_NCR(12)
-#define ESPC_BD_MOD_STR_LEN            ESPC_NCR(13)
-#define ESPC_BD_MOD_STR_1              ESPC_NCR(14)
-#define ESPC_BD_MOD_STR_2              ESPC_NCR(15)
-#define ESPC_BD_MOD_STR_3              ESPC_NCR(16)
-#define ESPC_BD_MOD_STR_4              ESPC_NCR(17)
-
-#define ESPC_PHY_TYPE                  ESPC_NCR(18)
-#define  ESPC_PHY_TYPE_PORT0           0x00000000ff000000ULL
-#define  ESPC_PHY_TYPE_PORT0_SHIFT     24
-#define  ESPC_PHY_TYPE_PORT1           0x0000000000ff0000ULL
-#define  ESPC_PHY_TYPE_PORT1_SHIFT     16
-#define  ESPC_PHY_TYPE_PORT2           0x000000000000ff00ULL
-#define  ESPC_PHY_TYPE_PORT2_SHIFT     8
-#define  ESPC_PHY_TYPE_PORT3           0x00000000000000ffULL
-#define  ESPC_PHY_TYPE_PORT3_SHIFT     0
-
-#define  ESPC_PHY_TYPE_1G_COPPER       3
-#define  ESPC_PHY_TYPE_1G_FIBER                2
-#define  ESPC_PHY_TYPE_10G_COPPER      1
-#define  ESPC_PHY_TYPE_10G_FIBER       0
-
-#define ESPC_MAX_FM_SZ                 ESPC_NCR(19)
-
-#define ESPC_INTR_NUM                  ESPC_NCR(20)
-#define  ESPC_INTR_NUM_PORT0           0x00000000ff000000ULL
-#define  ESPC_INTR_NUM_PORT1           0x0000000000ff0000ULL
-#define  ESPC_INTR_NUM_PORT2           0x000000000000ff00ULL
-#define  ESPC_INTR_NUM_PORT3           0x00000000000000ffULL
-
-#define ESPC_VER_IMGSZ                 ESPC_NCR(21)
-#define  ESPC_VER_IMGSZ_IMGSZ          0x00000000ffff0000ULL
-#define  ESPC_VER_IMGSZ_IMGSZ_SHIFT    16
-#define  ESPC_VER_IMGSZ_VER            0x000000000000ffffULL
-#define  ESPC_VER_IMGSZ_VER_SHIFT      0
-
-#define ESPC_CHKSUM                    ESPC_NCR(22)
-#define  ESPC_CHKSUM_SUM               0x00000000000000ffULL
-
-#define ESPC_EEPROM_SIZE               0x100000
-
-#define CLASS_CODE_UNRECOG             0x00
-#define CLASS_CODE_DUMMY1              0x01
-#define CLASS_CODE_ETHERTYPE1          0x02
-#define CLASS_CODE_ETHERTYPE2          0x03
-#define CLASS_CODE_USER_PROG1          0x04
-#define CLASS_CODE_USER_PROG2          0x05
-#define CLASS_CODE_USER_PROG3          0x06
-#define CLASS_CODE_USER_PROG4          0x07
-#define CLASS_CODE_TCP_IPV4            0x08
-#define CLASS_CODE_UDP_IPV4            0x09
-#define CLASS_CODE_AH_ESP_IPV4         0x0a
-#define CLASS_CODE_SCTP_IPV4           0x0b
-#define CLASS_CODE_TCP_IPV6            0x0c
-#define CLASS_CODE_UDP_IPV6            0x0d
-#define CLASS_CODE_AH_ESP_IPV6         0x0e
-#define CLASS_CODE_SCTP_IPV6           0x0f
-#define CLASS_CODE_ARP                 0x10
-#define CLASS_CODE_RARP                        0x11
-#define CLASS_CODE_DUMMY2              0x12
-#define CLASS_CODE_DUMMY3              0x13
-#define CLASS_CODE_DUMMY4              0x14
-#define CLASS_CODE_DUMMY5              0x15
-#define CLASS_CODE_DUMMY6              0x16
-#define CLASS_CODE_DUMMY7              0x17
-#define CLASS_CODE_DUMMY8              0x18
-#define CLASS_CODE_DUMMY9              0x19
-#define CLASS_CODE_DUMMY10             0x1a
-#define CLASS_CODE_DUMMY11             0x1b
-#define CLASS_CODE_DUMMY12             0x1c
-#define CLASS_CODE_DUMMY13             0x1d
-#define CLASS_CODE_DUMMY14             0x1e
-#define CLASS_CODE_DUMMY15             0x1f
-
-/* Logical devices and device groups */
-#define LDN_RXDMA(CHAN)                        (0 + (CHAN))
-#define LDN_RESV1(OFF)                 (16 + (OFF))
-#define LDN_TXDMA(CHAN)                        (32 + (CHAN))
-#define LDN_RESV2(OFF)                 (56 + (OFF))
-#define LDN_MIF                                63
-#define LDN_MAC(PORT)                  (64 + (PORT))
-#define LDN_DEVICE_ERROR               68
-#define LDN_MAX                                LDN_DEVICE_ERROR
-
-#define NIU_LDG_MIN                    0
-#define NIU_LDG_MAX                    63
-#define NIU_NUM_LDG                    64
-#define LDG_INVALID                    0xff
-
-/* PHY stuff */
-#define NIU_PMA_PMD_DEV_ADDR           1
-#define NIU_PCS_DEV_ADDR               3
-
-#define NIU_PHY_ID_MASK                        0xfffff0f0
-#define NIU_PHY_ID_BCM8704             0x00206030
-#define NIU_PHY_ID_BCM8706             0x00206035
-#define NIU_PHY_ID_BCM5464R            0x002060b0
-#define NIU_PHY_ID_MRVL88X2011         0x01410020
-
-/* MRVL88X2011 register addresses */
-#define MRVL88X2011_USER_DEV1_ADDR     1
-#define MRVL88X2011_USER_DEV2_ADDR     2
-#define MRVL88X2011_USER_DEV3_ADDR     3
-#define MRVL88X2011_USER_DEV4_ADDR     4
-#define MRVL88X2011_PMA_PMD_CTL_1      0x0000
-#define MRVL88X2011_PMA_PMD_STATUS_1   0x0001
-#define MRVL88X2011_10G_PMD_STATUS_2   0x0008
-#define MRVL88X2011_10G_PMD_TX_DIS     0x0009
-#define MRVL88X2011_10G_XGXS_LANE_STAT 0x0018
-#define MRVL88X2011_GENERAL_CTL                0x8300
-#define MRVL88X2011_LED_BLINK_CTL      0x8303
-#define MRVL88X2011_LED_8_TO_11_CTL    0x8306
-
-/* MRVL88X2011 register control */
-#define MRVL88X2011_ENA_XFPREFCLK      0x0001
-#define MRVL88X2011_ENA_PMDTX          0x0000
-#define MRVL88X2011_LOOPBACK            0x1
-#define MRVL88X2011_LED_ACT            0x1
-#define MRVL88X2011_LNK_STATUS_OK      0x4
-#define MRVL88X2011_LED_BLKRATE_MASK   0x70
-#define MRVL88X2011_LED_BLKRATE_034MS  0x0
-#define MRVL88X2011_LED_BLKRATE_067MS  0x1
-#define MRVL88X2011_LED_BLKRATE_134MS  0x2
-#define MRVL88X2011_LED_BLKRATE_269MS  0x3
-#define MRVL88X2011_LED_BLKRATE_538MS  0x4
-#define MRVL88X2011_LED_CTL_OFF                0x0
-#define MRVL88X2011_LED_CTL_PCS_ACT    0x5
-#define MRVL88X2011_LED_CTL_MASK       0x7
-#define MRVL88X2011_LED(n,v)           ((v)<<((n)*4))
-#define MRVL88X2011_LED_STAT(n,v)      ((v)>>((n)*4))
-
-#define BCM8704_PMA_PMD_DEV_ADDR       1
-#define BCM8704_PCS_DEV_ADDR           2
-#define BCM8704_USER_DEV3_ADDR         3
-#define BCM8704_PHYXS_DEV_ADDR         4
-#define BCM8704_USER_DEV4_ADDR         4
-
-#define BCM8704_PMD_RCV_SIGDET         0x000a
-#define  PMD_RCV_SIGDET_LANE3          0x0010
-#define  PMD_RCV_SIGDET_LANE2          0x0008
-#define  PMD_RCV_SIGDET_LANE1          0x0004
-#define  PMD_RCV_SIGDET_LANE0          0x0002
-#define  PMD_RCV_SIGDET_GLOBAL         0x0001
-
-#define BCM8704_PCS_10G_R_STATUS       0x0020
-#define  PCS_10G_R_STATUS_LINKSTAT     0x1000
-#define  PCS_10G_R_STATUS_PRBS31_ABLE  0x0004
-#define  PCS_10G_R_STATUS_HI_BER       0x0002
-#define  PCS_10G_R_STATUS_BLK_LOCK     0x0001
-
-#define BCM8704_USER_CONTROL           0xc800
-#define  USER_CONTROL_OPTXENB_LVL      0x8000
-#define  USER_CONTROL_OPTXRST_LVL      0x4000
-#define  USER_CONTROL_OPBIASFLT_LVL    0x2000
-#define  USER_CONTROL_OBTMPFLT_LVL     0x1000
-#define  USER_CONTROL_OPPRFLT_LVL      0x0800
-#define  USER_CONTROL_OPTXFLT_LVL      0x0400
-#define  USER_CONTROL_OPRXLOS_LVL      0x0200
-#define  USER_CONTROL_OPRXFLT_LVL      0x0100
-#define  USER_CONTROL_OPTXON_LVL       0x0080
-#define  USER_CONTROL_RES1             0x007f
-#define  USER_CONTROL_RES1_SHIFT       0
-
-#define BCM8704_USER_ANALOG_CLK                0xc801
-#define BCM8704_USER_PMD_RX_CONTROL    0xc802
-
-#define BCM8704_USER_PMD_TX_CONTROL    0xc803
-#define  USER_PMD_TX_CTL_RES1          0xfe00
-#define  USER_PMD_TX_CTL_XFP_CLKEN     0x0100
-#define  USER_PMD_TX_CTL_TX_DAC_TXD    0x00c0
-#define  USER_PMD_TX_CTL_TX_DAC_TXD_SH 6
-#define  USER_PMD_TX_CTL_TX_DAC_TXCK   0x0030
-#define  USER_PMD_TX_CTL_TX_DAC_TXCK_SH        4
-#define  USER_PMD_TX_CTL_TSD_LPWREN    0x0008
-#define  USER_PMD_TX_CTL_TSCK_LPWREN   0x0004
-#define  USER_PMD_TX_CTL_CMU_LPWREN    0x0002
-#define  USER_PMD_TX_CTL_SFIFORST      0x0001
-
-#define BCM8704_USER_ANALOG_STATUS0    0xc804
-#define BCM8704_USER_OPT_DIGITAL_CTRL  0xc808
-#define BCM8704_USER_TX_ALARM_STATUS   0x9004
-
-#define  USER_ODIG_CTRL_FMODE          0x8000
-#define  USER_ODIG_CTRL_TX_PDOWN       0x4000
-#define  USER_ODIG_CTRL_RX_PDOWN       0x2000
-#define  USER_ODIG_CTRL_EFILT_EN       0x1000
-#define  USER_ODIG_CTRL_OPT_RST                0x0800
-#define  USER_ODIG_CTRL_PCS_TIB                0x0400
-#define  USER_ODIG_CTRL_PCS_RI         0x0200
-#define  USER_ODIG_CTRL_RESV1          0x0180
-#define  USER_ODIG_CTRL_GPIOS          0x0060
-#define  USER_ODIG_CTRL_GPIOS_SHIFT    5
-#define  USER_ODIG_CTRL_RESV2          0x0010
-#define  USER_ODIG_CTRL_LB_ERR_DIS     0x0008
-#define  USER_ODIG_CTRL_RESV3          0x0006
-#define  USER_ODIG_CTRL_TXONOFF_PD_DIS 0x0001
-
-#define BCM8704_PHYXS_XGXS_LANE_STAT   0x0018
-#define  PHYXS_XGXS_LANE_STAT_ALINGED  0x1000
-#define  PHYXS_XGXS_LANE_STAT_PATTEST  0x0800
-#define  PHYXS_XGXS_LANE_STAT_MAGIC    0x0400
-#define  PHYXS_XGXS_LANE_STAT_LANE3    0x0008
-#define  PHYXS_XGXS_LANE_STAT_LANE2    0x0004
-#define  PHYXS_XGXS_LANE_STAT_LANE1    0x0002
-#define  PHYXS_XGXS_LANE_STAT_LANE0    0x0001
-
-#define BCM5464R_AUX_CTL               24
-#define  BCM5464R_AUX_CTL_EXT_LB       0x8000
-#define  BCM5464R_AUX_CTL_EXT_PLEN     0x4000
-#define  BCM5464R_AUX_CTL_ER1000       0x3000
-#define  BCM5464R_AUX_CTL_ER1000_SHIFT 12
-#define  BCM5464R_AUX_CTL_RESV1                0x0800
-#define  BCM5464R_AUX_CTL_WRITE_1      0x0400
-#define  BCM5464R_AUX_CTL_RESV2                0x0300
-#define  BCM5464R_AUX_CTL_PRESP_DIS    0x0080
-#define  BCM5464R_AUX_CTL_RESV3                0x0040
-#define  BCM5464R_AUX_CTL_ER100                0x0030
-#define  BCM5464R_AUX_CTL_ER100_SHIFT  4
-#define  BCM5464R_AUX_CTL_DIAG_MODE    0x0008
-#define  BCM5464R_AUX_CTL_SR_SEL       0x0007
-#define  BCM5464R_AUX_CTL_SR_SEL_SHIFT 0
-
-#define  BCM5464R_CTRL1000_AS_MASTER           0x0800
-#define  BCM5464R_CTRL1000_ENABLE_AS_MASTER    0x1000
-
-#define RCR_ENTRY_MULTI                        0x8000000000000000ULL
-#define RCR_ENTRY_PKT_TYPE             0x6000000000000000ULL
-#define RCR_ENTRY_PKT_TYPE_SHIFT       61
-#define RCR_ENTRY_ZERO_COPY            0x1000000000000000ULL
-#define RCR_ENTRY_NOPORT               0x0800000000000000ULL
-#define RCR_ENTRY_PROMISC              0x0400000000000000ULL
-#define RCR_ENTRY_ERROR                        0x0380000000000000ULL
-#define RCR_ENTRY_DCF_ERR              0x0040000000000000ULL
-#define RCR_ENTRY_L2_LEN               0x003fff0000000000ULL
-#define RCR_ENTRY_L2_LEN_SHIFT         40
-#define RCR_ENTRY_PKTBUFSZ             0x000000c000000000ULL
-#define RCR_ENTRY_PKTBUFSZ_SHIFT       38
-#define RCR_ENTRY_PKT_BUF_ADDR         0x0000003fffffffffULL /* bits 43:6 */
-#define RCR_ENTRY_PKT_BUF_ADDR_SHIFT   6
-
-#define RCR_PKT_TYPE_OTHER             0x0
-#define RCR_PKT_TYPE_TCP               0x1
-#define RCR_PKT_TYPE_UDP               0x2
-#define RCR_PKT_TYPE_SCTP              0x3
-
-#define NIU_RXPULL_MAX                 ETH_HLEN
-
-struct rx_pkt_hdr0 {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-       u8      inputport:2,
-               maccheck:1,
-               class:5;
-       u8      vlan:1,
-               llcsnap:1,
-               noport:1,
-               badip:1,
-               tcamhit:1,
-               tres:2,
-               tzfvld:1;
-#elif defined(__BIG_ENDIAN_BITFIELD)
-       u8      class:5,
-               maccheck:1,
-               inputport:2;
-       u8      tzfvld:1,
-               tres:2,
-               tcamhit:1,
-               badip:1,
-               noport:1,
-               llcsnap:1,
-               vlan:1;
-#endif
-};
-
-struct rx_pkt_hdr1 {
-       u8      hwrsvd1;
-       u8      tcammatch;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-       u8      hwrsvd2:2,
-               hashit:1,
-               exact:1,
-               hzfvld:1,
-               hashsidx:3;
-#elif defined(__BIG_ENDIAN_BITFIELD)
-       u8      hashsidx:3,
-               hzfvld:1,
-               exact:1,
-               hashit:1,
-               hwrsvd2:2;
-#endif
-       u8      zcrsvd;
-
-       /* Bits 11:8 of zero copy flow ID.  */
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-       u8      hwrsvd3:4, zflowid0:4;
-#elif defined(__BIG_ENDIAN_BITFIELD)
-       u8      zflowid0:4, hwrsvd3:4;
-#endif
-
-       /* Bits 7:0 of zero copy flow ID.  */
-       u8      zflowid1;
-
-       /* Bits 15:8 of hash value, H2.  */
-       u8      hashval2_0;
-
-       /* Bits 7:0 of hash value, H2.  */
-       u8      hashval2_1;
-
-       /* Bits 19:16 of hash value, H1.  */
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-       u8      hwrsvd4:4, hashval1_0:4;
-#elif defined(__BIG_ENDIAN_BITFIELD)
-       u8      hashval1_0:4, hwrsvd4:4;
-#endif
-
-       /* Bits 15:8 of hash value, H1.  */
-       u8      hashval1_1;
-
-       /* Bits 7:0 of hash value, H1.  */
-       u8      hashval1_2;
-
-       u8      hwrsvd5;
-       u8      hwrsvd6;
-
-       u8      usrdata_0;      /* Bits 39:32 of user data.  */
-       u8      usrdata_1;      /* Bits 31:24 of user data.  */
-       u8      usrdata_2;      /* Bits 23:16 of user data.  */
-       u8      usrdata_3;      /* Bits 15:8 of user data.  */
-       u8      usrdata_4;      /* Bits 7:0 of user data.  */
-};
-
-struct tx_dma_mbox {
-       u64     tx_dma_pre_st;
-       u64     tx_cs;
-       u64     tx_ring_kick;
-       u64     tx_ring_hdl;
-       u64     resv1;
-       u32     tx_rng_err_logl;
-       u32     tx_rng_err_logh;
-       u64     resv2;
-       u64     resv3;
-};
-
-struct tx_pkt_hdr {
-       __le64  flags;
-#define TXHDR_PAD              0x0000000000000007ULL
-#define  TXHDR_PAD_SHIFT       0
-#define TXHDR_LEN              0x000000003fff0000ULL
-#define  TXHDR_LEN_SHIFT       16
-#define TXHDR_L4STUFF          0x0000003f00000000ULL
-#define  TXHDR_L4STUFF_SHIFT   32
-#define TXHDR_L4START          0x00003f0000000000ULL
-#define  TXHDR_L4START_SHIFT   40
-#define TXHDR_L3START          0x000f000000000000ULL
-#define  TXHDR_L3START_SHIFT   48
-#define TXHDR_IHL              0x00f0000000000000ULL
-#define  TXHDR_IHL_SHIFT       52
-#define TXHDR_VLAN             0x0100000000000000ULL
-#define TXHDR_LLC              0x0200000000000000ULL
-#define TXHDR_IP_VER           0x2000000000000000ULL
-#define TXHDR_CSUM_NONE                0x0000000000000000ULL
-#define TXHDR_CSUM_TCP         0x4000000000000000ULL
-#define TXHDR_CSUM_UDP         0x8000000000000000ULL
-#define TXHDR_CSUM_SCTP                0xc000000000000000ULL
-       __le64  resv;
-};
-
-#define TX_DESC_SOP            0x8000000000000000ULL
-#define TX_DESC_MARK           0x4000000000000000ULL
-#define TX_DESC_NUM_PTR                0x3c00000000000000ULL
-#define TX_DESC_NUM_PTR_SHIFT  58
-#define TX_DESC_TR_LEN         0x01fff00000000000ULL
-#define TX_DESC_TR_LEN_SHIFT   44
-#define TX_DESC_SAD            0x00000fffffffffffULL
-#define TX_DESC_SAD_SHIFT      0
-
-struct tx_buff_info {
-       struct sk_buff *skb;
-       u64 mapping;
-};
-
-struct txdma_mailbox {
-       __le64  tx_dma_pre_st;
-       __le64  tx_cs;
-       __le64  tx_ring_kick;
-       __le64  tx_ring_hdl;
-       __le64  resv1;
-       __le32  tx_rng_err_logl;
-       __le32  tx_rng_err_logh;
-       __le64  resv2[2];
-} __attribute__((aligned(64)));
-
-#define MAX_TX_RING_SIZE       256
-#define MAX_TX_DESC_LEN                4076
-
-struct tx_ring_info {
-       struct tx_buff_info     tx_buffs[MAX_TX_RING_SIZE];
-       struct niu              *np;
-       u64                     tx_cs;
-       int                     pending;
-       int                     prod;
-       int                     cons;
-       int                     wrap_bit;
-       u16                     last_pkt_cnt;
-       u16                     tx_channel;
-       u16                     mark_counter;
-       u16                     mark_freq;
-       u16                     mark_pending;
-       u16                     __pad;
-       struct txdma_mailbox    *mbox;
-       __le64                  *descr;
-
-       u64                     tx_packets;
-       u64                     tx_bytes;
-       u64                     tx_errors;
-
-       u64                     mbox_dma;
-       u64                     descr_dma;
-       int                     max_burst;
-};
-
-#define NEXT_TX(tp, index) \
-       (((index) + 1) < (tp)->pending ? ((index) + 1) : 0)
-
-static inline u32 niu_tx_avail(struct tx_ring_info *tp)
-{
-       return (tp->pending -
-               ((tp->prod - tp->cons) & (MAX_TX_RING_SIZE - 1)));
-}
-
-struct rxdma_mailbox {
-       __le64  rx_dma_ctl_stat;
-       __le64  rbr_stat;
-       __le32  rbr_hdl;
-       __le32  rbr_hdh;
-       __le64  resv1;
-       __le32  rcrstat_c;
-       __le32  rcrstat_b;
-       __le64  rcrstat_a;
-       __le64  resv2[2];
-} __attribute__((aligned(64)));
-
-#define MAX_RBR_RING_SIZE      128
-#define MAX_RCR_RING_SIZE      (MAX_RBR_RING_SIZE * 2)
-
-#define RBR_REFILL_MIN         16
-
-#define RX_SKB_ALLOC_SIZE      128 + NET_IP_ALIGN
-
-struct rx_ring_info {
-       struct niu              *np;
-       int                     rx_channel;
-       u16                     rbr_block_size;
-       u16                     rbr_blocks_per_page;
-       u16                     rbr_sizes[4];
-       unsigned int            rcr_index;
-       unsigned int            rcr_table_size;
-       unsigned int            rbr_index;
-       unsigned int            rbr_pending;
-       unsigned int            rbr_refill_pending;
-       unsigned int            rbr_kick_thresh;
-       unsigned int            rbr_table_size;
-       struct page             **rxhash;
-       struct rxdma_mailbox    *mbox;
-       __le64                  *rcr;
-       __le32                  *rbr;
-#define RBR_DESCR_ADDR_SHIFT   12
-
-       u64                     rx_packets;
-       u64                     rx_bytes;
-       u64                     rx_dropped;
-       u64                     rx_errors;
-
-       u64                     mbox_dma;
-       u64                     rcr_dma;
-       u64                     rbr_dma;
-
-       /* WRED */
-       int                     nonsyn_window;
-       int                     nonsyn_threshold;
-       int                     syn_window;
-       int                     syn_threshold;
-
-       /* interrupt mitigation */
-       int                     rcr_pkt_threshold;
-       int                     rcr_timeout;
-};
-
-#define NEXT_RCR(rp, index) \
-       (((index) + 1) < (rp)->rcr_table_size ? ((index) + 1) : 0)
-#define NEXT_RBR(rp, index) \
-       (((index) + 1) < (rp)->rbr_table_size ? ((index) + 1) : 0)
-
-#define NIU_MAX_PORTS          4
-#define NIU_NUM_RXCHAN         16
-#define NIU_NUM_TXCHAN         24
-#define MAC_NUM_HASH           16
-
-#define NIU_MAX_MTU            9216
-
-/* VPD strings */
-#define        NIU_QGC_LP_BM_STR       "501-7606"
-#define        NIU_2XGF_LP_BM_STR      "501-7283"
-#define        NIU_QGC_PEM_BM_STR      "501-7765"
-#define        NIU_2XGF_PEM_BM_STR     "501-7626"
-#define        NIU_ALONSO_BM_STR       "373-0202"
-#define        NIU_FOXXY_BM_STR        "501-7961"
-#define        NIU_2XGF_MRVL_BM_STR    "SK-6E82"
-#define        NIU_QGC_LP_MDL_STR      "SUNW,pcie-qgc"
-#define        NIU_2XGF_LP_MDL_STR     "SUNW,pcie-2xgf"
-#define        NIU_QGC_PEM_MDL_STR     "SUNW,pcie-qgc-pem"
-#define        NIU_2XGF_PEM_MDL_STR    "SUNW,pcie-2xgf-pem"
-#define        NIU_ALONSO_MDL_STR      "SUNW,CP3220"
-#define        NIU_KIMI_MDL_STR        "SUNW,CP3260"
-#define        NIU_MARAMBA_MDL_STR     "SUNW,pcie-neptune"
-#define        NIU_FOXXY_MDL_STR       "SUNW,pcie-rfem"
-#define        NIU_2XGF_MRVL_MDL_STR   "SysKonnect,pcie-2xgf"
-
-#define NIU_VPD_MIN_MAJOR      3
-#define NIU_VPD_MIN_MINOR      4
-
-#define NIU_VPD_MODEL_MAX      32
-#define NIU_VPD_BD_MODEL_MAX   16
-#define NIU_VPD_VERSION_MAX    64
-#define NIU_VPD_PHY_TYPE_MAX   8
-
-struct niu_vpd {
-       char                    model[NIU_VPD_MODEL_MAX];
-       char                    board_model[NIU_VPD_BD_MODEL_MAX];
-       char                    version[NIU_VPD_VERSION_MAX];
-       char                    phy_type[NIU_VPD_PHY_TYPE_MAX];
-       u8                      mac_num;
-       u8                      __pad;
-       u8                      local_mac[6];
-       int                     fcode_major;
-       int                     fcode_minor;
-};
-
-struct niu_altmac_rdc {
-       u8                      alt_mac_num;
-       u8                      rdc_num;
-       u8                      mac_pref;
-};
-
-struct niu_vlan_rdc {
-       u8                      rdc_num;
-       u8                      vlan_pref;
-};
-
-struct niu_classifier {
-       struct niu_altmac_rdc   alt_mac_mappings[16];
-       struct niu_vlan_rdc     vlan_mappings[ENET_VLAN_TBL_NUM_ENTRIES];
-
-       u16                     tcam_top;
-       u16                     tcam_sz;
-       u16                     tcam_valid_entries;
-       u16                     num_alt_mac_mappings;
-
-       u32                     h1_init;
-       u16                     h2_init;
-};
-
-#define NIU_NUM_RDC_TABLES     8
-#define NIU_RDC_TABLE_SLOTS    16
-
-struct rdc_table {
-       u8                      rxdma_channel[NIU_RDC_TABLE_SLOTS];
-};
-
-struct niu_rdc_tables {
-       struct rdc_table        tables[NIU_NUM_RDC_TABLES];
-       int                     first_table_num;
-       int                     num_tables;
-};
-
-#define PHY_TYPE_PMA_PMD       0
-#define PHY_TYPE_PCS           1
-#define PHY_TYPE_MII           2
-#define PHY_TYPE_MAX           3
-
-struct phy_probe_info {
-       u32     phy_id[PHY_TYPE_MAX][NIU_MAX_PORTS];
-       u8      phy_port[PHY_TYPE_MAX][NIU_MAX_PORTS];
-       u8      cur[PHY_TYPE_MAX];
-
-       struct device_attribute phy_port_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS];
-       struct device_attribute phy_type_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS];
-       struct device_attribute phy_id_attrs[PHY_TYPE_MAX * NIU_MAX_PORTS];
-};
-
-struct niu_tcam_entry {
-       u8                      valid;
-       u64                     key[4];
-       u64                     key_mask[4];
-       u64                     assoc_data;
-};
-
-struct device_node;
-union niu_parent_id {
-       struct {
-               int             domain;
-               int             bus;
-               int             device;
-       } pci;
-       struct device_node      *of;
-};
-
-struct niu;
-struct niu_parent {
-       struct platform_device  *plat_dev;
-       int                     index;
-
-       union niu_parent_id     id;
-
-       struct niu              *ports[NIU_MAX_PORTS];
-
-       atomic_t                refcnt;
-       struct list_head        list;
-
-       spinlock_t              lock;
-
-       u32                     flags;
-#define PARENT_FLGS_CLS_HWINIT 0x00000001
-
-       u32                     port_phy;
-#define PORT_PHY_UNKNOWN       0x00000000
-#define PORT_PHY_INVALID       0xffffffff
-#define PORT_TYPE_10G          0x01
-#define PORT_TYPE_1G           0x02
-#define PORT_TYPE_MASK         0x03
-
-       u8                      rxchan_per_port[NIU_MAX_PORTS];
-       u8                      txchan_per_port[NIU_MAX_PORTS];
-
-       struct niu_rdc_tables   rdc_group_cfg[NIU_MAX_PORTS];
-       u8                      rdc_default[NIU_MAX_PORTS];
-
-       u8                      ldg_map[LDN_MAX + 1];
-
-       u8                      plat_type;
-#define PLAT_TYPE_INVALID      0x00
-#define PLAT_TYPE_ATLAS                0x01
-#define PLAT_TYPE_NIU          0x02
-#define PLAT_TYPE_VF_P0                0x03
-#define PLAT_TYPE_VF_P1                0x04
-#define PLAT_TYPE_ATCA_CP3220  0x08
-
-       u8                      num_ports;
-
-       u16                     tcam_num_entries;
-#define NIU_PCI_TCAM_ENTRIES   256
-#define NIU_NONPCI_TCAM_ENTRIES        128
-#define NIU_TCAM_ENTRIES_MAX   256
-
-       int                     rxdma_clock_divider;
-
-       struct phy_probe_info   phy_probe_info;
-
-       struct niu_tcam_entry   tcam[NIU_TCAM_ENTRIES_MAX];
-
-#define        NIU_L2_PROG_CLS         2
-#define        NIU_L3_PROG_CLS         4
-       u64                     l2_cls[NIU_L2_PROG_CLS];
-       u64                     l3_cls[NIU_L3_PROG_CLS];
-       u64                     tcam_key[12];
-       u64                     flow_key[12];
-       u16                     l3_cls_refcnt[NIU_L3_PROG_CLS];
-       u8                      l3_cls_pid[NIU_L3_PROG_CLS];
-};
-
-struct niu_ops {
-       void *(*alloc_coherent)(struct device *dev, size_t size,
-                               u64 *handle, gfp_t flag);
-       void (*free_coherent)(struct device *dev, size_t size,
-                             void *cpu_addr, u64 handle);
-       u64 (*map_page)(struct device *dev, struct page *page,
-                       unsigned long offset, size_t size,
-                       enum dma_data_direction direction);
-       void (*unmap_page)(struct device *dev, u64 dma_address,
-                          size_t size, enum dma_data_direction direction);
-       u64 (*map_single)(struct device *dev, void *cpu_addr,
-                         size_t size,
-                         enum dma_data_direction direction);
-       void (*unmap_single)(struct device *dev, u64 dma_address,
-                            size_t size, enum dma_data_direction direction);
-};
-
-struct niu_link_config {
-       u32                             supported;
-
-       /* Describes what we're trying to get. */
-       u32                             advertising;
-       u16                             speed;
-       u8                              duplex;
-       u8                              autoneg;
-
-       /* Describes what we actually have. */
-       u32                             active_advertising;
-       u16                             active_speed;
-       u8                              active_duplex;
-       u8                              active_autoneg;
-#define SPEED_INVALID          0xffff
-#define DUPLEX_INVALID         0xff
-#define AUTONEG_INVALID                0xff
-
-       u8                              loopback_mode;
-#define LOOPBACK_DISABLED      0x00
-#define LOOPBACK_PHY           0x01
-#define LOOPBACK_MAC           0x02
-};
-
-struct niu_ldg {
-       struct napi_struct      napi;
-       struct niu      *np;
-       u8              ldg_num;
-       u8              timer;
-       u64             v0, v1, v2;
-       unsigned int    irq;
-};
-
-struct niu_xmac_stats {
-       u64     tx_frames;
-       u64     tx_bytes;
-       u64     tx_fifo_errors;
-       u64     tx_overflow_errors;
-       u64     tx_max_pkt_size_errors;
-       u64     tx_underflow_errors;
-
-       u64     rx_local_faults;
-       u64     rx_remote_faults;
-       u64     rx_link_faults;
-       u64     rx_align_errors;
-       u64     rx_frags;
-       u64     rx_mcasts;
-       u64     rx_bcasts;
-       u64     rx_hist_cnt1;
-       u64     rx_hist_cnt2;
-       u64     rx_hist_cnt3;
-       u64     rx_hist_cnt4;
-       u64     rx_hist_cnt5;
-       u64     rx_hist_cnt6;
-       u64     rx_hist_cnt7;
-       u64     rx_octets;
-       u64     rx_code_violations;
-       u64     rx_len_errors;
-       u64     rx_crc_errors;
-       u64     rx_underflows;
-       u64     rx_overflows;
-
-       u64     pause_off_state;
-       u64     pause_on_state;
-       u64     pause_received;
-};
-
-struct niu_bmac_stats {
-       u64     tx_underflow_errors;
-       u64     tx_max_pkt_size_errors;
-       u64     tx_bytes;
-       u64     tx_frames;
-
-       u64     rx_overflows;
-       u64     rx_frames;
-       u64     rx_align_errors;
-       u64     rx_crc_errors;
-       u64     rx_len_errors;
-
-       u64     pause_off_state;
-       u64     pause_on_state;
-       u64     pause_received;
-};
-
-union niu_mac_stats {
-       struct niu_xmac_stats   xmac;
-       struct niu_bmac_stats   bmac;
-};
-
-struct niu_phy_ops {
-       int (*serdes_init)(struct niu *np);
-       int (*xcvr_init)(struct niu *np);
-       int (*link_status)(struct niu *np, int *);
-};
-
-struct platform_device;
-struct niu {
-       void __iomem                    *regs;
-       struct net_device               *dev;
-       struct pci_dev                  *pdev;
-       struct device                   *device;
-       struct niu_parent               *parent;
-
-       u32                             flags;
-#define NIU_FLAGS_HOTPLUG_PHY_PRESENT  0x02000000 /* Removeable PHY detected*/
-#define NIU_FLAGS_HOTPLUG_PHY          0x01000000 /* Removeable PHY */
-#define NIU_FLAGS_VPD_VALID            0x00800000 /* VPD has valid version */
-#define NIU_FLAGS_MSIX                 0x00400000 /* MSI-X in use */
-#define NIU_FLAGS_MCAST                        0x00200000 /* multicast filter enabled */
-#define NIU_FLAGS_PROMISC              0x00100000 /* PROMISC enabled */
-#define NIU_FLAGS_XCVR_SERDES          0x00080000 /* 0=PHY 1=SERDES */
-#define NIU_FLAGS_10G                  0x00040000 /* 0=1G 1=10G */
-#define NIU_FLAGS_FIBER                        0x00020000 /* 0=COPPER 1=FIBER */
-#define NIU_FLAGS_XMAC                 0x00010000 /* 0=BMAC 1=XMAC */
-
-       u32                             msg_enable;
-       char                            irq_name[NIU_NUM_RXCHAN+NIU_NUM_TXCHAN+3][IFNAMSIZ + 6];
-
-       /* Protects hw programming, and ring state.  */
-       spinlock_t                      lock;
-
-       const struct niu_ops            *ops;
-       union niu_mac_stats             mac_stats;
-
-       struct rx_ring_info             *rx_rings;
-       struct tx_ring_info             *tx_rings;
-       int                             num_rx_rings;
-       int                             num_tx_rings;
-
-       struct niu_ldg                  ldg[NIU_NUM_LDG];
-       int                             num_ldg;
-
-       void __iomem                    *mac_regs;
-       unsigned long                   ipp_off;
-       unsigned long                   pcs_off;
-       unsigned long                   xpcs_off;
-
-       struct timer_list               timer;
-       u64                             orig_led_state;
-       const struct niu_phy_ops        *phy_ops;
-       int                             phy_addr;
-
-       struct niu_link_config          link_config;
-
-       struct work_struct              reset_task;
-
-       u8                              port;
-       u8                              mac_xcvr;
-#define MAC_XCVR_MII                   1
-#define MAC_XCVR_PCS                   2
-#define MAC_XCVR_XPCS                  3
-
-       struct niu_classifier           clas;
-
-       struct niu_vpd                  vpd;
-       u32                             eeprom_len;
-
-       struct platform_device          *op;
-       void __iomem                    *vir_regs_1;
-       void __iomem                    *vir_regs_2;
-};
-
-#endif /* _NIU_H */
index 020f64a8fcf79a97bfa26828be3303f7a6cbfbdf..a891ad00054bc55beaf0bcbf19d2c4f53f7a7e99 100644 (file)
@@ -27,7 +27,7 @@
 
 #define VERSION "2.0 B"
 
-#include "sungem_phy.h"
+#include "./ethernet/sun/sungem_phy.h"
 
 extern int spider_net_stop(struct net_device *netdev);
 extern int spider_net_open(struct net_device *netdev);
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
deleted file mode 100644 (file)
index 297a424..0000000
+++ /dev/null
@@ -1,1306 +0,0 @@
-/* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
- *
- * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net)
- */
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/errno.h>
-#include <linux/ethtool.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <linux/dma-mapping.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/gfp.h>
-
-#include <asm/auxio.h>
-#include <asm/byteorder.h>
-#include <asm/dma.h>
-#include <asm/idprom.h>
-#include <asm/io.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-
-#include "sunbmac.h"
-
-#define DRV_NAME       "sunbmac"
-#define DRV_VERSION    "2.1"
-#define DRV_RELDATE    "August 26, 2008"
-#define DRV_AUTHOR     "David S. Miller (davem@davemloft.net)"
-
-static char version[] =
-       DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
-
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_AUTHOR);
-MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver");
-MODULE_LICENSE("GPL");
-
-#undef DEBUG_PROBE
-#undef DEBUG_TX
-#undef DEBUG_IRQ
-
-#ifdef DEBUG_PROBE
-#define DP(x)  printk x
-#else
-#define DP(x)
-#endif
-
-#ifdef DEBUG_TX
-#define DTX(x)  printk x
-#else
-#define DTX(x)
-#endif
-
-#ifdef DEBUG_IRQ
-#define DIRQ(x)  printk x
-#else
-#define DIRQ(x)
-#endif
-
-#define DEFAULT_JAMSIZE    4 /* Toe jam */
-
-#define QEC_RESET_TRIES 200
-
-static int qec_global_reset(void __iomem *gregs)
-{
-       int tries = QEC_RESET_TRIES;
-
-       sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
-       while (--tries) {
-               if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) {
-                       udelay(20);
-                       continue;
-               }
-               break;
-       }
-       if (tries)
-               return 0;
-       printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n");
-       return -1;
-}
-
-static void qec_init(struct bigmac *bp)
-{
-       struct platform_device *qec_op = bp->qec_op;
-       void __iomem *gregs = bp->gregs;
-       u8 bsizes = bp->bigmac_bursts;
-       u32 regval;
-
-       /* 64byte bursts do not work at the moment, do
-        * not even try to enable them.  -DaveM
-        */
-       if (bsizes & DMA_BURST32)
-               regval = GLOB_CTRL_B32;
-       else
-               regval = GLOB_CTRL_B16;
-       sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL);
-       sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE);
-
-       /* All of memsize is given to bigmac. */
-       sbus_writel(resource_size(&qec_op->resource[1]),
-                   gregs + GLOB_MSIZE);
-
-       /* Half to the transmitter, half to the receiver. */
-       sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
-                   gregs + GLOB_TSIZE);
-       sbus_writel(resource_size(&qec_op->resource[1]) >> 1,
-                   gregs + GLOB_RSIZE);
-}
-
-#define TX_RESET_TRIES     32
-#define RX_RESET_TRIES     32
-
-static void bigmac_tx_reset(void __iomem *bregs)
-{
-       int tries = TX_RESET_TRIES;
-
-       sbus_writel(0, bregs + BMAC_TXCFG);
-
-       /* The fifo threshold bit is read-only and does
-        * not clear.  -DaveM
-        */
-       while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 &&
-              --tries != 0)
-               udelay(20);
-
-       if (!tries) {
-               printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n");
-               printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n",
-                      sbus_readl(bregs + BMAC_TXCFG));
-       }
-}
-
-static void bigmac_rx_reset(void __iomem *bregs)
-{
-       int tries = RX_RESET_TRIES;
-
-       sbus_writel(0, bregs + BMAC_RXCFG);
-       while (sbus_readl(bregs + BMAC_RXCFG) && --tries)
-               udelay(20);
-
-       if (!tries) {
-               printk(KERN_ERR "BIGMAC: Receiver will not reset.\n");
-               printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n",
-                      sbus_readl(bregs + BMAC_RXCFG));
-       }
-}
-
-/* Reset the transmitter and receiver. */
-static void bigmac_stop(struct bigmac *bp)
-{
-       bigmac_tx_reset(bp->bregs);
-       bigmac_rx_reset(bp->bregs);
-}
-
-static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
-{
-       struct net_device_stats *stats = &bp->enet_stats;
-
-       stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
-       sbus_writel(0, bregs + BMAC_RCRCECTR);
-
-       stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR);
-       sbus_writel(0, bregs + BMAC_UNALECTR);
-
-       stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR);
-       sbus_writel(0, bregs + BMAC_GLECTR);
-
-       stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR);
-
-       stats->collisions +=
-               (sbus_readl(bregs + BMAC_EXCTR) +
-                sbus_readl(bregs + BMAC_LTCTR));
-       sbus_writel(0, bregs + BMAC_EXCTR);
-       sbus_writel(0, bregs + BMAC_LTCTR);
-}
-
-static void bigmac_clean_rings(struct bigmac *bp)
-{
-       int i;
-
-       for (i = 0; i < RX_RING_SIZE; i++) {
-               if (bp->rx_skbs[i] != NULL) {
-                       dev_kfree_skb_any(bp->rx_skbs[i]);
-                       bp->rx_skbs[i] = NULL;
-               }
-       }
-
-       for (i = 0; i < TX_RING_SIZE; i++) {
-               if (bp->tx_skbs[i] != NULL) {
-                       dev_kfree_skb_any(bp->tx_skbs[i]);
-                       bp->tx_skbs[i] = NULL;
-               }
-       }
-}
-
-static void bigmac_init_rings(struct bigmac *bp, int from_irq)
-{
-       struct bmac_init_block *bb = bp->bmac_block;
-       struct net_device *dev = bp->dev;
-       int i;
-       gfp_t gfp_flags = GFP_KERNEL;
-
-       if (from_irq || in_interrupt())
-               gfp_flags = GFP_ATOMIC;
-
-       bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;
-
-       /* Free any skippy bufs left around in the rings. */
-       bigmac_clean_rings(bp);
-
-       /* Now get new skbufs for the receive ring. */
-       for (i = 0; i < RX_RING_SIZE; i++) {
-               struct sk_buff *skb;
-
-               skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags);
-               if (!skb)
-                       continue;
-
-               bp->rx_skbs[i] = skb;
-               skb->dev = dev;
-
-               /* Because we reserve afterwards. */
-               skb_put(skb, ETH_FRAME_LEN);
-               skb_reserve(skb, 34);
-
-               bb->be_rxd[i].rx_addr =
-                       dma_map_single(&bp->bigmac_op->dev,
-                                      skb->data,
-                                      RX_BUF_ALLOC_SIZE - 34,
-                                      DMA_FROM_DEVICE);
-               bb->be_rxd[i].rx_flags =
-                       (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
-       }
-
-       for (i = 0; i < TX_RING_SIZE; i++)
-               bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0;
-}
-
-#define MGMT_CLKON  (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK)
-#define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB)
-
-static void idle_transceiver(void __iomem *tregs)
-{
-       int i = 20;
-
-       while (i--) {
-               sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-               sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-       }
-}
-
-static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit)
-{
-       if (bp->tcvr_type == internal) {
-               bit = (bit & 1) << 3;
-               sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO),
-                           tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-               sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
-                           tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-       } else if (bp->tcvr_type == external) {
-               bit = (bit & 1) << 2;
-               sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB,
-                           tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-               sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK,
-                           tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-       } else {
-               printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n");
-       }
-}
-
-static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs)
-{
-       int retval = 0;
-
-       if (bp->tcvr_type == internal) {
-               sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-               sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
-                           tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-               retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
-       } else if (bp->tcvr_type == external) {
-               sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-               sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-               retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
-       } else {
-               printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n");
-       }
-       return retval;
-}
-
-static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs)
-{
-       int retval = 0;
-
-       if (bp->tcvr_type == internal) {
-               sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-               retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3;
-               sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-       } else if (bp->tcvr_type == external) {
-               sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-               retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2;
-               sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL);
-               sbus_readl(tregs + TCVR_MPAL);
-       } else {
-               printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n");
-       }
-       return retval;
-}
-
-static void put_tcvr_byte(struct bigmac *bp,
-                         void __iomem *tregs,
-                         unsigned int byte)
-{
-       int shift = 4;
-
-       do {
-               write_tcvr_bit(bp, tregs, ((byte >> shift) & 1));
-               shift -= 1;
-       } while (shift >= 0);
-}
-
-static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
-                             int reg, unsigned short val)
-{
-       int shift;
-
-       reg &= 0xff;
-       val &= 0xffff;
-       switch(bp->tcvr_type) {
-       case internal:
-       case external:
-               break;
-
-       default:
-               printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
-               return;
-       }
-
-       idle_transceiver(tregs);
-       write_tcvr_bit(bp, tregs, 0);
-       write_tcvr_bit(bp, tregs, 1);
-       write_tcvr_bit(bp, tregs, 0);
-       write_tcvr_bit(bp, tregs, 1);
-
-       put_tcvr_byte(bp, tregs,
-                     ((bp->tcvr_type == internal) ?
-                      BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));
-
-       put_tcvr_byte(bp, tregs, reg);
-
-       write_tcvr_bit(bp, tregs, 1);
-       write_tcvr_bit(bp, tregs, 0);
-
-       shift = 15;
-       do {
-               write_tcvr_bit(bp, tregs, (val >> shift) & 1);
-               shift -= 1;
-       } while (shift >= 0);
-}
-
-static unsigned short bigmac_tcvr_read(struct bigmac *bp,
-                                      void __iomem *tregs,
-                                      int reg)
-{
-       unsigned short retval = 0;
-
-       reg &= 0xff;
-       switch(bp->tcvr_type) {
-       case internal:
-       case external:
-               break;
-
-       default:
-               printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
-               return 0xffff;
-       }
-
-       idle_transceiver(tregs);
-       write_tcvr_bit(bp, tregs, 0);
-       write_tcvr_bit(bp, tregs, 1);
-       write_tcvr_bit(bp, tregs, 1);
-       write_tcvr_bit(bp, tregs, 0);
-
-       put_tcvr_byte(bp, tregs,
-                     ((bp->tcvr_type == internal) ?
-                      BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL));
-
-       put_tcvr_byte(bp, tregs, reg);
-
-       if (bp->tcvr_type == external) {
-               int shift = 15;
-
-               (void) read_tcvr_bit2(bp, tregs);
-               (void) read_tcvr_bit2(bp, tregs);
-
-               do {
-                       int tmp;
-
-                       tmp = read_tcvr_bit2(bp, tregs);
-                       retval |= ((tmp & 1) << shift);
-                       shift -= 1;
-               } while (shift >= 0);
-
-               (void) read_tcvr_bit2(bp, tregs);
-               (void) read_tcvr_bit2(bp, tregs);
-               (void) read_tcvr_bit2(bp, tregs);
-       } else {
-               int shift = 15;
-
-               (void) read_tcvr_bit(bp, tregs);
-               (void) read_tcvr_bit(bp, tregs);
-
-               do {
-                       int tmp;
-
-                       tmp = read_tcvr_bit(bp, tregs);
-                       retval |= ((tmp & 1) << shift);
-                       shift -= 1;
-               } while (shift >= 0);
-
-               (void) read_tcvr_bit(bp, tregs);
-               (void) read_tcvr_bit(bp, tregs);
-               (void) read_tcvr_bit(bp, tregs);
-       }
-       return retval;
-}
-
-static void bigmac_tcvr_init(struct bigmac *bp)
-{
-       void __iomem *tregs = bp->tregs;
-       u32 mpal;
-
-       idle_transceiver(tregs);
-       sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK,
-                   tregs + TCVR_MPAL);
-       sbus_readl(tregs + TCVR_MPAL);
-
-       /* Only the bit for the present transceiver (internal or
-        * external) will stick, set them both and see what stays.
-        */
-       sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL);
-       sbus_readl(tregs + TCVR_MPAL);
-       udelay(20);
-
-       mpal = sbus_readl(tregs + TCVR_MPAL);
-       if (mpal & MGMT_PAL_EXT_MDIO) {
-               bp->tcvr_type = external;
-               sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
-                           tregs + TCVR_TPAL);
-               sbus_readl(tregs + TCVR_TPAL);
-       } else if (mpal & MGMT_PAL_INT_MDIO) {
-               bp->tcvr_type = internal;
-               sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK |
-                             TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE),
-                           tregs + TCVR_TPAL);
-               sbus_readl(tregs + TCVR_TPAL);
-       } else {
-               printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor "
-                      "external MDIO available!\n");
-               printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n",
-                      sbus_readl(tregs + TCVR_MPAL),
-                      sbus_readl(tregs + TCVR_TPAL));
-       }
-}
-
-static int bigmac_init_hw(struct bigmac *, int);
-
-static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
-{
-       if (bp->sw_bmcr & BMCR_SPEED100) {
-               int timeout;
-
-               /* Reset the PHY. */
-               bp->sw_bmcr     = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
-               bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
-               bp->sw_bmcr     = (BMCR_RESET);
-               bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
-
-               timeout = 64;
-               while (--timeout) {
-                       bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
-                       if ((bp->sw_bmcr & BMCR_RESET) == 0)
-                               break;
-                       udelay(20);
-               }
-               if (timeout == 0)
-                       printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
-
-               bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
-
-               /* Now we try 10baseT. */
-               bp->sw_bmcr &= ~(BMCR_SPEED100);
-               bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
-               return 0;
-       }
-
-       /* We've tried them all. */
-       return -1;
-}
-
-static void bigmac_timer(unsigned long data)
-{
-       struct bigmac *bp = (struct bigmac *) data;
-       void __iomem *tregs = bp->tregs;
-       int restart_timer = 0;
-
-       bp->timer_ticks++;
-       if (bp->timer_state == ltrywait) {
-               bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR);
-               bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
-               if (bp->sw_bmsr & BMSR_LSTATUS) {
-                       printk(KERN_INFO "%s: Link is now up at %s.\n",
-                              bp->dev->name,
-                              (bp->sw_bmcr & BMCR_SPEED100) ?
-                              "100baseT" : "10baseT");
-                       bp->timer_state = asleep;
-                       restart_timer = 0;
-               } else {
-                       if (bp->timer_ticks >= 4) {
-                               int ret;
-
-                               ret = try_next_permutation(bp, tregs);
-                               if (ret == -1) {
-                                       printk(KERN_ERR "%s: Link down, cable problem?\n",
-                                              bp->dev->name);
-                                       ret = bigmac_init_hw(bp, 0);
-                                       if (ret) {
-                                               printk(KERN_ERR "%s: Error, cannot re-init the "
-                                                      "BigMAC.\n", bp->dev->name);
-                                       }
-                                       return;
-                               }
-                               bp->timer_ticks = 0;
-                               restart_timer = 1;
-                       } else {
-                               restart_timer = 1;
-                       }
-               }
-       } else {
-               /* Can't happens.... */
-               printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
-                      bp->dev->name);
-               restart_timer = 0;
-               bp->timer_ticks = 0;
-               bp->timer_state = asleep; /* foo on you */
-       }
-
-       if (restart_timer != 0) {
-               bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
-               add_timer(&bp->bigmac_timer);
-       }
-}
-
-/* Well, really we just force the chip into 100baseT then
- * 10baseT, each time checking for a link status.
- */
-static void bigmac_begin_auto_negotiation(struct bigmac *bp)
-{
-       void __iomem *tregs = bp->tregs;
-       int timeout;
-
-       /* Grab new software copies of PHY registers. */
-       bp->sw_bmsr     = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR);
-       bp->sw_bmcr     = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
-
-       /* Reset the PHY. */
-       bp->sw_bmcr     = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK);
-       bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
-       bp->sw_bmcr     = (BMCR_RESET);
-       bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
-
-       timeout = 64;
-       while (--timeout) {
-               bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
-               if ((bp->sw_bmcr & BMCR_RESET) == 0)
-                       break;
-               udelay(20);
-       }
-       if (timeout == 0)
-               printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name);
-
-       bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR);
-
-       /* First we try 100baseT. */
-       bp->sw_bmcr |= BMCR_SPEED100;
-       bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr);
-
-       bp->timer_state = ltrywait;
-       bp->timer_ticks = 0;
-       bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
-       bp->bigmac_timer.data = (unsigned long) bp;
-       bp->bigmac_timer.function = bigmac_timer;
-       add_timer(&bp->bigmac_timer);
-}
-
-static int bigmac_init_hw(struct bigmac *bp, int from_irq)
-{
-       void __iomem *gregs        = bp->gregs;
-       void __iomem *cregs        = bp->creg;
-       void __iomem *bregs        = bp->bregs;
-       unsigned char *e = &bp->dev->dev_addr[0];
-
-       /* Latch current counters into statistics. */
-       bigmac_get_counters(bp, bregs);
-
-       /* Reset QEC. */
-       qec_global_reset(gregs);
-
-       /* Init QEC. */
-       qec_init(bp);
-
-       /* Alloc and reset the tx/rx descriptor chains. */
-       bigmac_init_rings(bp, from_irq);
-
-       /* Initialize the PHY. */
-       bigmac_tcvr_init(bp);
-
-       /* Stop transmitter and receiver. */
-       bigmac_stop(bp);
-
-       /* Set hardware ethernet address. */
-       sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2);
-       sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1);
-       sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0);
-
-       /* Clear the hash table until mc upload occurs. */
-       sbus_writel(0, bregs + BMAC_HTABLE3);
-       sbus_writel(0, bregs + BMAC_HTABLE2);
-       sbus_writel(0, bregs + BMAC_HTABLE1);
-       sbus_writel(0, bregs + BMAC_HTABLE0);
-
-       /* Enable Big Mac hash table filter. */
-       sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO,
-                   bregs + BMAC_RXCFG);
-       udelay(20);
-
-       /* Ok, configure the Big Mac transmitter. */
-       sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG);
-
-       /* The HME docs recommend to use the 10LSB of our MAC here. */
-       sbus_writel(((e[5] | e[4] << 8) & 0x3ff),
-                   bregs + BMAC_RSEED);
-
-       /* Enable the output drivers no matter what. */
-       sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV,
-                   bregs + BMAC_XIFCFG);
-
-       /* Tell the QEC where the ring descriptors are. */
-       sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
-                   cregs + CREG_RXDS);
-       sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
-                   cregs + CREG_TXDS);
-
-       /* Setup the FIFO pointers into QEC local memory. */
-       sbus_writel(0, cregs + CREG_RXRBUFPTR);
-       sbus_writel(0, cregs + CREG_RXWBUFPTR);
-       sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
-                   cregs + CREG_TXRBUFPTR);
-       sbus_writel(sbus_readl(gregs + GLOB_RSIZE),
-                   cregs + CREG_TXWBUFPTR);
-
-       /* Tell bigmac what interrupts we don't want to hear about. */
-       sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME,
-                   bregs + BMAC_IMASK);
-
-       /* Enable the various other irq's. */
-       sbus_writel(0, cregs + CREG_RIMASK);
-       sbus_writel(0, cregs + CREG_TIMASK);
-       sbus_writel(0, cregs + CREG_QMASK);
-       sbus_writel(0, cregs + CREG_BMASK);
-
-       /* Set jam size to a reasonable default. */
-       sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE);
-
-       /* Clear collision counter. */
-       sbus_writel(0, cregs + CREG_CCNT);
-
-       /* Enable transmitter and receiver. */
-       sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE,
-                   bregs + BMAC_TXCFG);
-       sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE,
-                   bregs + BMAC_RXCFG);
-
-       /* Ok, start detecting link speed/duplex. */
-       bigmac_begin_auto_negotiation(bp);
-
-       /* Success. */
-       return 0;
-}
-
-/* Error interrupts get sent here. */
-static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status)
-{
-       printk(KERN_ERR "bigmac_is_medium_rare: ");
-       if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) {
-               if (qec_status & GLOB_STAT_ER)
-                       printk("QEC_ERROR, ");
-               if (qec_status & GLOB_STAT_BM)
-                       printk("QEC_BMAC_ERROR, ");
-       }
-       if (bmac_status & CREG_STAT_ERRORS) {
-               if (bmac_status & CREG_STAT_BERROR)
-                       printk("BMAC_ERROR, ");
-               if (bmac_status & CREG_STAT_TXDERROR)
-                       printk("TXD_ERROR, ");
-               if (bmac_status & CREG_STAT_TXLERR)
-                       printk("TX_LATE_ERROR, ");
-               if (bmac_status & CREG_STAT_TXPERR)
-                       printk("TX_PARITY_ERROR, ");
-               if (bmac_status & CREG_STAT_TXSERR)
-                       printk("TX_SBUS_ERROR, ");
-
-               if (bmac_status & CREG_STAT_RXDROP)
-                       printk("RX_DROP_ERROR, ");
-
-               if (bmac_status & CREG_STAT_RXSMALL)
-                       printk("RX_SMALL_ERROR, ");
-               if (bmac_status & CREG_STAT_RXLERR)
-                       printk("RX_LATE_ERROR, ");
-               if (bmac_status & CREG_STAT_RXPERR)
-                       printk("RX_PARITY_ERROR, ");
-               if (bmac_status & CREG_STAT_RXSERR)
-                       printk("RX_SBUS_ERROR, ");
-       }
-
-       printk(" RESET\n");
-       bigmac_init_hw(bp, 1);
-}
-
-/* BigMAC transmit complete service routines. */
-static void bigmac_tx(struct bigmac *bp)
-{
-       struct be_txd *txbase = &bp->bmac_block->be_txd[0];
-       struct net_device *dev = bp->dev;
-       int elem;
-
-       spin_lock(&bp->lock);
-
-       elem = bp->tx_old;
-       DTX(("bigmac_tx: tx_old[%d] ", elem));
-       while (elem != bp->tx_new) {
-               struct sk_buff *skb;
-               struct be_txd *this = &txbase[elem];
-
-               DTX(("this(%p) [flags(%08x)addr(%08x)]",
-                    this, this->tx_flags, this->tx_addr));
-
-               if (this->tx_flags & TXD_OWN)
-                       break;
-               skb = bp->tx_skbs[elem];
-               bp->enet_stats.tx_packets++;
-               bp->enet_stats.tx_bytes += skb->len;
-               dma_unmap_single(&bp->bigmac_op->dev,
-                                this->tx_addr, skb->len,
-                                DMA_TO_DEVICE);
-
-               DTX(("skb(%p) ", skb));
-               bp->tx_skbs[elem] = NULL;
-               dev_kfree_skb_irq(skb);
-
-               elem = NEXT_TX(elem);
-       }
-       DTX((" DONE, tx_old=%d\n", elem));
-       bp->tx_old = elem;
-
-       if (netif_queue_stopped(dev) &&
-           TX_BUFFS_AVAIL(bp) > 0)
-               netif_wake_queue(bp->dev);
-
-       spin_unlock(&bp->lock);
-}
-
-/* BigMAC receive complete service routines. */
-static void bigmac_rx(struct bigmac *bp)
-{
-       struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0];
-       struct be_rxd *this;
-       int elem = bp->rx_new, drops = 0;
-       u32 flags;
-
-       this = &rxbase[elem];
-       while (!((flags = this->rx_flags) & RXD_OWN)) {
-               struct sk_buff *skb;
-               int len = (flags & RXD_LENGTH); /* FCS not included */
-
-               /* Check for errors. */
-               if (len < ETH_ZLEN) {
-                       bp->enet_stats.rx_errors++;
-                       bp->enet_stats.rx_length_errors++;
-
-       drop_it:
-                       /* Return it to the BigMAC. */
-                       bp->enet_stats.rx_dropped++;
-                       this->rx_flags =
-                               (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
-                       goto next;
-               }
-               skb = bp->rx_skbs[elem];
-               if (len > RX_COPY_THRESHOLD) {
-                       struct sk_buff *new_skb;
-
-                       /* Now refill the entry, if we can. */
-                       new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
-                       if (new_skb == NULL) {
-                               drops++;
-                               goto drop_it;
-                       }
-                       dma_unmap_single(&bp->bigmac_op->dev,
-                                        this->rx_addr,
-                                        RX_BUF_ALLOC_SIZE - 34,
-                                        DMA_FROM_DEVICE);
-                       bp->rx_skbs[elem] = new_skb;
-                       new_skb->dev = bp->dev;
-                       skb_put(new_skb, ETH_FRAME_LEN);
-                       skb_reserve(new_skb, 34);
-                       this->rx_addr =
-                               dma_map_single(&bp->bigmac_op->dev,
-                                              new_skb->data,
-                                              RX_BUF_ALLOC_SIZE - 34,
-                                              DMA_FROM_DEVICE);
-                       this->rx_flags =
-                               (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
-
-                       /* Trim the original skb for the netif. */
-                       skb_trim(skb, len);
-               } else {
-                       struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
-
-                       if (copy_skb == NULL) {
-                               drops++;
-                               goto drop_it;
-                       }
-                       skb_reserve(copy_skb, 2);
-                       skb_put(copy_skb, len);
-                       dma_sync_single_for_cpu(&bp->bigmac_op->dev,
-                                               this->rx_addr, len,
-                                               DMA_FROM_DEVICE);
-                       skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len);
-                       dma_sync_single_for_device(&bp->bigmac_op->dev,
-                                                  this->rx_addr, len,
-                                                  DMA_FROM_DEVICE);
-
-                       /* Reuse original ring buffer. */
-                       this->rx_flags =
-                               (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
-
-                       skb = copy_skb;
-               }
-
-               /* No checksums done by the BigMAC ;-( */
-               skb->protocol = eth_type_trans(skb, bp->dev);
-               netif_rx(skb);
-               bp->enet_stats.rx_packets++;
-               bp->enet_stats.rx_bytes += len;
-       next:
-               elem = NEXT_RX(elem);
-               this = &rxbase[elem];
-       }
-       bp->rx_new = elem;
-       if (drops)
-               printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name);
-}
-
-static irqreturn_t bigmac_interrupt(int irq, void *dev_id)
-{
-       struct bigmac *bp = (struct bigmac *) dev_id;
-       u32 qec_status, bmac_status;
-
-       DIRQ(("bigmac_interrupt: "));
-
-       /* Latch status registers now. */
-       bmac_status = sbus_readl(bp->creg + CREG_STAT);
-       qec_status = sbus_readl(bp->gregs + GLOB_STAT);
-
-       DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status));
-       if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) ||
-          (bmac_status & CREG_STAT_ERRORS))
-               bigmac_is_medium_rare(bp, qec_status, bmac_status);
-
-       if (bmac_status & CREG_STAT_TXIRQ)
-               bigmac_tx(bp);
-
-       if (bmac_status & CREG_STAT_RXIRQ)
-               bigmac_rx(bp);
-
-       return IRQ_HANDLED;
-}
-
-static int bigmac_open(struct net_device *dev)
-{
-       struct bigmac *bp = netdev_priv(dev);
-       int ret;
-
-       ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp);
-       if (ret) {
-               printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
-               return ret;
-       }
-       init_timer(&bp->bigmac_timer);
-       ret = bigmac_init_hw(bp, 0);
-       if (ret)
-               free_irq(dev->irq, bp);
-       return ret;
-}
-
-static int bigmac_close(struct net_device *dev)
-{
-       struct bigmac *bp = netdev_priv(dev);
-
-       del_timer(&bp->bigmac_timer);
-       bp->timer_state = asleep;
-       bp->timer_ticks = 0;
-
-       bigmac_stop(bp);
-       bigmac_clean_rings(bp);
-       free_irq(dev->irq, bp);
-       return 0;
-}
-
-static void bigmac_tx_timeout(struct net_device *dev)
-{
-       struct bigmac *bp = netdev_priv(dev);
-
-       bigmac_init_hw(bp, 0);
-       netif_wake_queue(dev);
-}
-
-/* Put a packet on the wire. */
-static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct bigmac *bp = netdev_priv(dev);
-       int len, entry;
-       u32 mapping;
-
-       len = skb->len;
-       mapping = dma_map_single(&bp->bigmac_op->dev, skb->data,
-                                len, DMA_TO_DEVICE);
-
-       /* Avoid a race... */
-       spin_lock_irq(&bp->lock);
-       entry = bp->tx_new;
-       DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry));
-       bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE;
-       bp->tx_skbs[entry] = skb;
-       bp->bmac_block->be_txd[entry].tx_addr = mapping;
-       bp->bmac_block->be_txd[entry].tx_flags =
-               (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
-       bp->tx_new = NEXT_TX(entry);
-       if (TX_BUFFS_AVAIL(bp) <= 0)
-               netif_stop_queue(dev);
-       spin_unlock_irq(&bp->lock);
-
-       /* Get it going. */
-       sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);
-
-
-       return NETDEV_TX_OK;
-}
-
-static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
-{
-       struct bigmac *bp = netdev_priv(dev);
-
-       bigmac_get_counters(bp, bp->bregs);
-       return &bp->enet_stats;
-}
-
-static void bigmac_set_multicast(struct net_device *dev)
-{
-       struct bigmac *bp = netdev_priv(dev);
-       void __iomem *bregs = bp->bregs;
-       struct netdev_hw_addr *ha;
-       int i;
-       u32 tmp, crc;
-
-       /* Disable the receiver.  The bit self-clears when
-        * the operation is complete.
-        */
-       tmp = sbus_readl(bregs + BMAC_RXCFG);
-       tmp &= ~(BIGMAC_RXCFG_ENABLE);
-       sbus_writel(tmp, bregs + BMAC_RXCFG);
-       while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
-               udelay(20);
-
-       if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
-               sbus_writel(0xffff, bregs + BMAC_HTABLE0);
-               sbus_writel(0xffff, bregs + BMAC_HTABLE1);
-               sbus_writel(0xffff, bregs + BMAC_HTABLE2);
-               sbus_writel(0xffff, bregs + BMAC_HTABLE3);
-       } else if (dev->flags & IFF_PROMISC) {
-               tmp = sbus_readl(bregs + BMAC_RXCFG);
-               tmp |= BIGMAC_RXCFG_PMISC;
-               sbus_writel(tmp, bregs + BMAC_RXCFG);
-       } else {
-               u16 hash_table[4];
-
-               for (i = 0; i < 4; i++)
-                       hash_table[i] = 0;
-
-               netdev_for_each_mc_addr(ha, dev) {
-                       crc = ether_crc_le(6, ha->addr);
-                       crc >>= 26;
-                       hash_table[crc >> 4] |= 1 << (crc & 0xf);
-               }
-               sbus_writel(hash_table[0], bregs + BMAC_HTABLE0);
-               sbus_writel(hash_table[1], bregs + BMAC_HTABLE1);
-               sbus_writel(hash_table[2], bregs + BMAC_HTABLE2);
-               sbus_writel(hash_table[3], bregs + BMAC_HTABLE3);
-       }
-
-       /* Re-enable the receiver. */
-       tmp = sbus_readl(bregs + BMAC_RXCFG);
-       tmp |= BIGMAC_RXCFG_ENABLE;
-       sbus_writel(tmp, bregs + BMAC_RXCFG);
-}
-
-/* Ethtool support... */
-static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
-       strcpy(info->driver, "sunbmac");
-       strcpy(info->version, "2.0");
-}
-
-static u32 bigmac_get_link(struct net_device *dev)
-{
-       struct bigmac *bp = netdev_priv(dev);
-
-       spin_lock_irq(&bp->lock);
-       bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, BIGMAC_BMSR);
-       spin_unlock_irq(&bp->lock);
-
-       return (bp->sw_bmsr & BMSR_LSTATUS);
-}
-
-static const struct ethtool_ops bigmac_ethtool_ops = {
-       .get_drvinfo            = bigmac_get_drvinfo,
-       .get_link               = bigmac_get_link,
-};
-
-static const struct net_device_ops bigmac_ops = {
-       .ndo_open               = bigmac_open,
-       .ndo_stop               = bigmac_close,
-       .ndo_start_xmit         = bigmac_start_xmit,
-       .ndo_get_stats          = bigmac_get_stats,
-       .ndo_set_multicast_list = bigmac_set_multicast,
-       .ndo_tx_timeout         = bigmac_tx_timeout,
-       .ndo_change_mtu         = eth_change_mtu,
-       .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_validate_addr      = eth_validate_addr,
-};
-
-static int __devinit bigmac_ether_init(struct platform_device *op,
-                                      struct platform_device *qec_op)
-{
-       static int version_printed;
-       struct net_device *dev;
-       u8 bsizes, bsizes_more;
-       struct bigmac *bp;
-       int i;
-
-       /* Get a new device struct for this interface. */
-       dev = alloc_etherdev(sizeof(struct bigmac));
-       if (!dev)
-               return -ENOMEM;
-
-       if (version_printed++ == 0)
-               printk(KERN_INFO "%s", version);
-
-       for (i = 0; i < 6; i++)
-               dev->dev_addr[i] = idprom->id_ethaddr[i];
-
-       /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */
-       bp = netdev_priv(dev);
-       bp->qec_op = qec_op;
-       bp->bigmac_op = op;
-
-       SET_NETDEV_DEV(dev, &op->dev);
-
-       spin_lock_init(&bp->lock);
-
-       /* Map in QEC global control registers. */
-       bp->gregs = of_ioremap(&qec_op->resource[0], 0,
-                              GLOB_REG_SIZE, "BigMAC QEC GLobal Regs");
-       if (!bp->gregs) {
-               printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n");
-               goto fail_and_cleanup;
-       }
-
-       /* Make sure QEC is in BigMAC mode. */
-       if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) {
-               printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n");
-               goto fail_and_cleanup;
-       }
-
-       /* Reset the QEC. */
-       if (qec_global_reset(bp->gregs))
-               goto fail_and_cleanup;
-
-       /* Get supported SBUS burst sizes. */
-       bsizes = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
-       bsizes_more = of_getintprop_default(qec_op->dev.of_node, "burst-sizes", 0xff);
-
-       bsizes &= 0xff;
-       if (bsizes_more != 0xff)
-               bsizes &= bsizes_more;
-       if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
-           (bsizes & DMA_BURST32) == 0)
-               bsizes = (DMA_BURST32 - 1);
-       bp->bigmac_bursts = bsizes;
-
-       /* Perform QEC initialization. */
-       qec_init(bp);
-
-       /* Map in the BigMAC channel registers. */
-       bp->creg = of_ioremap(&op->resource[0], 0,
-                             CREG_REG_SIZE, "BigMAC QEC Channel Regs");
-       if (!bp->creg) {
-               printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n");
-               goto fail_and_cleanup;
-       }
-
-       /* Map in the BigMAC control registers. */
-       bp->bregs = of_ioremap(&op->resource[1], 0,
-                              BMAC_REG_SIZE, "BigMAC Primary Regs");
-       if (!bp->bregs) {
-               printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n");
-               goto fail_and_cleanup;
-       }
-
-       /* Map in the BigMAC transceiver registers, this is how you poke at
-        * the BigMAC's PHY.
-        */
-       bp->tregs = of_ioremap(&op->resource[2], 0,
-                              TCVR_REG_SIZE, "BigMAC Transceiver Regs");
-       if (!bp->tregs) {
-               printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n");
-               goto fail_and_cleanup;
-       }
-
-       /* Stop the BigMAC. */
-       bigmac_stop(bp);
-
-       /* Allocate transmit/receive descriptor DVMA block. */
-       bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
-                                           PAGE_SIZE,
-                                           &bp->bblock_dvma, GFP_ATOMIC);
-       if (bp->bmac_block == NULL || bp->bblock_dvma == 0) {
-               printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
-               goto fail_and_cleanup;
-       }
-
-       /* Get the board revision of this BigMAC. */
-       bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
-                                             "board-version", 1);
-
-       /* Init auto-negotiation timer state. */
-       init_timer(&bp->bigmac_timer);
-       bp->timer_state = asleep;
-       bp->timer_ticks = 0;
-
-       /* Backlink to generic net device struct. */
-       bp->dev = dev;
-
-       /* Set links to our BigMAC open and close routines. */
-       dev->ethtool_ops = &bigmac_ethtool_ops;
-       dev->netdev_ops = &bigmac_ops;
-       dev->watchdog_timeo = 5*HZ;
-
-       /* Finish net device registration. */
-       dev->irq = bp->bigmac_op->archdata.irqs[0];
-       dev->dma = 0;
-
-       if (register_netdev(dev)) {
-               printk(KERN_ERR "BIGMAC: Cannot register device.\n");
-               goto fail_and_cleanup;
-       }
-
-       dev_set_drvdata(&bp->bigmac_op->dev, bp);
-
-       printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n",
-              dev->name, dev->dev_addr);
-
-       return 0;
-
-fail_and_cleanup:
-       /* Something went wrong, undo whatever we did so far. */
-       /* Free register mappings if any. */
-       if (bp->gregs)
-               of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
-       if (bp->creg)
-               of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
-       if (bp->bregs)
-               of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
-       if (bp->tregs)
-               of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);
-
-       if (bp->bmac_block)
-               dma_free_coherent(&bp->bigmac_op->dev,
-                                 PAGE_SIZE,
-                                 bp->bmac_block,
-                                 bp->bblock_dvma);
-
-       /* This also frees the co-located private data */
-       free_netdev(dev);
-       return -ENODEV;
-}
-
-/* QEC can be the parent of either QuadEthernet or a BigMAC.  We want
- * the latter.
- */
-static int __devinit bigmac_sbus_probe(struct platform_device *op)
-{
-       struct device *parent = op->dev.parent;
-       struct platform_device *qec_op;
-
-       qec_op = to_platform_device(parent);
-
-       return bigmac_ether_init(op, qec_op);
-}
-
-static int __devexit bigmac_sbus_remove(struct platform_device *op)
-{
-       struct bigmac *bp = dev_get_drvdata(&op->dev);
-       struct device *parent = op->dev.parent;
-       struct net_device *net_dev = bp->dev;
-       struct platform_device *qec_op;
-
-       qec_op = to_platform_device(parent);
-
-       unregister_netdev(net_dev);
-
-       of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE);
-       of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE);
-       of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE);
-       of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE);
-       dma_free_coherent(&op->dev,
-                         PAGE_SIZE,
-                         bp->bmac_block,
-                         bp->bblock_dvma);
-
-       free_netdev(net_dev);
-
-       dev_set_drvdata(&op->dev, NULL);
-
-       return 0;
-}
-
-static const struct of_device_id bigmac_sbus_match[] = {
-       {
-               .name = "be",
-       },
-       {},
-};
-
-MODULE_DEVICE_TABLE(of, bigmac_sbus_match);
-
-static struct platform_driver bigmac_sbus_driver = {
-       .driver = {
-               .name = "sunbmac",
-               .owner = THIS_MODULE,
-               .of_match_table = bigmac_sbus_match,
-       },
-       .probe          = bigmac_sbus_probe,
-       .remove         = __devexit_p(bigmac_sbus_remove),
-};
-
-static int __init bigmac_init(void)
-{
-       return platform_driver_register(&bigmac_sbus_driver);
-}
-
-static void __exit bigmac_exit(void)
-{
-       platform_driver_unregister(&bigmac_sbus_driver);
-}
-
-module_init(bigmac_init);
-module_exit(bigmac_exit);
diff --git a/drivers/net/sunbmac.h b/drivers/net/sunbmac.h
deleted file mode 100644 (file)
index 4943e97..0000000
+++ /dev/null
@@ -1,355 +0,0 @@
-/* $Id: sunbmac.h,v 1.7 2000/07/11 22:35:22 davem Exp $
- * sunbmac.h: Defines for the Sun "Big MAC" 100baseT ethernet cards.
- *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef _SUNBMAC_H
-#define _SUNBMAC_H
-
-/* QEC global registers. */
-#define GLOB_CTRL      0x00UL  /* Control                  */
-#define GLOB_STAT      0x04UL  /* Status                   */
-#define GLOB_PSIZE     0x08UL  /* Packet Size              */
-#define GLOB_MSIZE     0x0cUL  /* Local-mem size (64K)     */
-#define GLOB_RSIZE     0x10UL  /* Receive partition size   */
-#define GLOB_TSIZE     0x14UL  /* Transmit partition size  */
-#define GLOB_REG_SIZE  0x18UL
-
-#define GLOB_CTRL_MMODE       0x40000000 /* MACE qec mode            */
-#define GLOB_CTRL_BMODE       0x10000000 /* BigMAC qec mode          */
-#define GLOB_CTRL_EPAR        0x00000020 /* Enable parity            */
-#define GLOB_CTRL_ACNTRL      0x00000018 /* SBUS arbitration control */
-#define GLOB_CTRL_B64         0x00000004 /* 64 byte dvma bursts      */
-#define GLOB_CTRL_B32         0x00000002 /* 32 byte dvma bursts      */
-#define GLOB_CTRL_B16         0x00000000 /* 16 byte dvma bursts      */
-#define GLOB_CTRL_RESET       0x00000001 /* Reset the QEC            */
-
-#define GLOB_STAT_TX          0x00000008 /* BigMAC Transmit IRQ      */
-#define GLOB_STAT_RX          0x00000004 /* BigMAC Receive IRQ       */
-#define GLOB_STAT_BM          0x00000002 /* BigMAC Global IRQ        */
-#define GLOB_STAT_ER          0x00000001 /* BigMAC Error IRQ         */
-
-#define GLOB_PSIZE_2048       0x00       /* 2k packet size           */
-#define GLOB_PSIZE_4096       0x01       /* 4k packet size           */
-#define GLOB_PSIZE_6144       0x10       /* 6k packet size           */
-#define GLOB_PSIZE_8192       0x11       /* 8k packet size           */
-
-/* QEC BigMAC channel registers. */
-#define CREG_CTRL      0x00UL  /* Control                   */
-#define CREG_STAT      0x04UL  /* Status                    */
-#define CREG_RXDS      0x08UL  /* RX descriptor ring ptr    */
-#define CREG_TXDS      0x0cUL  /* TX descriptor ring ptr    */
-#define CREG_RIMASK    0x10UL  /* RX Interrupt Mask         */
-#define CREG_TIMASK    0x14UL  /* TX Interrupt Mask         */
-#define CREG_QMASK     0x18UL  /* QEC Error Interrupt Mask  */
-#define CREG_BMASK     0x1cUL  /* BigMAC Error Interrupt Mask*/
-#define CREG_RXWBUFPTR 0x20UL  /* Local memory rx write ptr */
-#define CREG_RXRBUFPTR 0x24UL  /* Local memory rx read ptr  */
-#define CREG_TXWBUFPTR 0x28UL  /* Local memory tx write ptr */
-#define CREG_TXRBUFPTR 0x2cUL  /* Local memory tx read ptr  */
-#define CREG_CCNT      0x30UL  /* Collision Counter         */
-#define CREG_REG_SIZE  0x34UL
-
-#define CREG_CTRL_TWAKEUP     0x00000001  /* Transmitter Wakeup, 'go'. */
-
-#define CREG_STAT_BERROR      0x80000000  /* BigMAC error              */
-#define CREG_STAT_TXIRQ       0x00200000  /* Transmit Interrupt        */
-#define CREG_STAT_TXDERROR    0x00080000  /* TX Descriptor is bogus    */
-#define CREG_STAT_TXLERR      0x00040000  /* Late Transmit Error       */
-#define CREG_STAT_TXPERR      0x00020000  /* Transmit Parity Error     */
-#define CREG_STAT_TXSERR      0x00010000  /* Transmit SBUS error ack   */
-#define CREG_STAT_RXIRQ       0x00000020  /* Receive Interrupt         */
-#define CREG_STAT_RXDROP      0x00000010  /* Dropped a RX'd packet     */
-#define CREG_STAT_RXSMALL     0x00000008  /* Receive buffer too small  */
-#define CREG_STAT_RXLERR      0x00000004  /* Receive Late Error        */
-#define CREG_STAT_RXPERR      0x00000002  /* Receive Parity Error      */
-#define CREG_STAT_RXSERR      0x00000001  /* Receive SBUS Error ACK    */
-
-#define CREG_STAT_ERRORS      (CREG_STAT_BERROR|CREG_STAT_TXDERROR|CREG_STAT_TXLERR|   \
-                               CREG_STAT_TXPERR|CREG_STAT_TXSERR|CREG_STAT_RXDROP|     \
-                               CREG_STAT_RXSMALL|CREG_STAT_RXLERR|CREG_STAT_RXPERR|    \
-                               CREG_STAT_RXSERR)
-
-#define CREG_QMASK_TXDERROR   0x00080000  /* TXD error                 */
-#define CREG_QMASK_TXLERR     0x00040000  /* TX late error             */
-#define CREG_QMASK_TXPERR     0x00020000  /* TX parity error           */
-#define CREG_QMASK_TXSERR     0x00010000  /* TX sbus error ack         */
-#define CREG_QMASK_RXDROP     0x00000010  /* RX drop                   */
-#define CREG_QMASK_RXBERROR   0x00000008  /* RX buffer error           */
-#define CREG_QMASK_RXLEERR    0x00000004  /* RX late error             */
-#define CREG_QMASK_RXPERR     0x00000002  /* RX parity error           */
-#define CREG_QMASK_RXSERR     0x00000001  /* RX sbus error ack         */
-
-/* BIGMAC core registers */
-#define BMAC_XIFCFG    0x000UL /* XIF config register                */
-       /* 0x004-->0x0fc, reserved */
-#define BMAC_STATUS    0x100UL /* Status register, clear on read     */
-#define BMAC_IMASK     0x104UL /* Interrupt mask register            */
-       /* 0x108-->0x204, reserved */
-#define BMAC_TXSWRESET 0x208UL /* Transmitter software reset         */
-#define BMAC_TXCFG     0x20cUL /* Transmitter config register        */
-#define BMAC_IGAP1     0x210UL /* Inter-packet gap 1                 */
-#define BMAC_IGAP2     0x214UL /* Inter-packet gap 2                 */
-#define BMAC_ALIMIT    0x218UL /* Transmit attempt limit             */
-#define BMAC_STIME     0x21cUL /* Transmit slot time                 */
-#define BMAC_PLEN      0x220UL /* Size of transmit preamble          */
-#define BMAC_PPAT      0x224UL /* Pattern for transmit preamble      */
-#define BMAC_TXDELIM   0x228UL /* Transmit delimiter                 */
-#define BMAC_JSIZE     0x22cUL /* Toe jam...                         */
-#define BMAC_TXPMAX    0x230UL /* Transmit max pkt size              */
-#define BMAC_TXPMIN    0x234UL /* Transmit min pkt size              */
-#define BMAC_PATTEMPT  0x238UL /* Count of transmit peak attempts    */
-#define BMAC_DTCTR     0x23cUL /* Transmit defer timer               */
-#define BMAC_NCCTR     0x240UL /* Transmit normal-collision counter  */
-#define BMAC_FCCTR     0x244UL /* Transmit first-collision counter   */
-#define BMAC_EXCTR     0x248UL /* Transmit excess-collision counter  */
-#define BMAC_LTCTR     0x24cUL /* Transmit late-collision counter    */
-#define BMAC_RSEED     0x250UL /* Transmit random number seed        */
-#define BMAC_TXSMACHINE        0x254UL /* Transmit state machine             */
-       /* 0x258-->0x304, reserved */
-#define BMAC_RXSWRESET 0x308UL /* Receiver software reset            */
-#define BMAC_RXCFG     0x30cUL /* Receiver config register           */
-#define BMAC_RXPMAX    0x310UL /* Receive max pkt size               */
-#define BMAC_RXPMIN    0x314UL /* Receive min pkt size               */
-#define BMAC_MACADDR2  0x318UL /* Ether address register 2           */
-#define BMAC_MACADDR1  0x31cUL /* Ether address register 1           */
-#define BMAC_MACADDR0  0x320UL /* Ether address register 0           */
-#define BMAC_FRCTR     0x324UL /* Receive frame receive counter      */
-#define BMAC_GLECTR    0x328UL /* Receive giant-length error counter */
-#define BMAC_UNALECTR  0x32cUL /* Receive unaligned error counter    */
-#define BMAC_RCRCECTR  0x330UL /* Receive CRC error counter          */
-#define BMAC_RXSMACHINE        0x334UL /* Receiver state machine             */
-#define BMAC_RXCVALID  0x338UL /* Receiver code violation            */
-       /* 0x33c, reserved */
-#define BMAC_HTABLE3   0x340UL /* Hash table 3                       */
-#define BMAC_HTABLE2   0x344UL /* Hash table 2                       */
-#define BMAC_HTABLE1   0x348UL /* Hash table 1                       */
-#define BMAC_HTABLE0   0x34cUL /* Hash table 0                       */
-#define BMAC_AFILTER2  0x350UL /* Address filter 2                   */
-#define BMAC_AFILTER1  0x354UL /* Address filter 1                   */
-#define BMAC_AFILTER0  0x358UL /* Address filter 0                   */
-#define BMAC_AFMASK    0x35cUL /* Address filter mask                */
-#define BMAC_REG_SIZE  0x360UL
-
-/* BigMac XIF config register. */
-#define BIGMAC_XCFG_ODENABLE   0x00000001 /* Output driver enable                     */
-#define BIGMAC_XCFG_RESV       0x00000002 /* Reserved, write always as 1              */
-#define BIGMAC_XCFG_MLBACK     0x00000004 /* Loopback-mode MII enable                 */
-#define BIGMAC_XCFG_SMODE      0x00000008 /* Enable serial mode                       */
-
-/* BigMAC status register. */
-#define BIGMAC_STAT_GOTFRAME   0x00000001 /* Received a frame                         */
-#define BIGMAC_STAT_RCNTEXP    0x00000002 /* Receive frame counter expired            */
-#define BIGMAC_STAT_ACNTEXP    0x00000004 /* Align-error counter expired              */
-#define BIGMAC_STAT_CCNTEXP    0x00000008 /* CRC-error counter expired                */
-#define BIGMAC_STAT_LCNTEXP    0x00000010 /* Length-error counter expired             */
-#define BIGMAC_STAT_RFIFOVF    0x00000020 /* Receive FIFO overflow                    */
-#define BIGMAC_STAT_CVCNTEXP   0x00000040 /* Code-violation counter expired           */
-#define BIGMAC_STAT_SENTFRAME  0x00000100 /* Transmitted a frame                      */
-#define BIGMAC_STAT_TFIFO_UND  0x00000200 /* Transmit FIFO underrun                   */
-#define BIGMAC_STAT_MAXPKTERR  0x00000400 /* Max-packet size error                    */
-#define BIGMAC_STAT_NCNTEXP    0x00000800 /* Normal-collision counter expired         */
-#define BIGMAC_STAT_ECNTEXP    0x00001000 /* Excess-collision counter expired         */
-#define BIGMAC_STAT_LCCNTEXP   0x00002000 /* Late-collision counter expired           */
-#define BIGMAC_STAT_FCNTEXP    0x00004000 /* First-collision counter expired          */
-#define BIGMAC_STAT_DTIMEXP    0x00008000 /* Defer-timer expired                      */
-
-/* BigMAC interrupt mask register. */
-#define BIGMAC_IMASK_GOTFRAME  0x00000001 /* Received a frame                         */
-#define BIGMAC_IMASK_RCNTEXP   0x00000002 /* Receive frame counter expired            */
-#define BIGMAC_IMASK_ACNTEXP   0x00000004 /* Align-error counter expired              */
-#define BIGMAC_IMASK_CCNTEXP   0x00000008 /* CRC-error counter expired                */
-#define BIGMAC_IMASK_LCNTEXP   0x00000010 /* Length-error counter expired             */
-#define BIGMAC_IMASK_RFIFOVF   0x00000020 /* Receive FIFO overflow                    */
-#define BIGMAC_IMASK_CVCNTEXP  0x00000040 /* Code-violation counter expired           */
-#define BIGMAC_IMASK_SENTFRAME 0x00000100 /* Transmitted a frame                      */
-#define BIGMAC_IMASK_TFIFO_UND 0x00000200 /* Transmit FIFO underrun                   */
-#define BIGMAC_IMASK_MAXPKTERR 0x00000400 /* Max-packet size error                    */
-#define BIGMAC_IMASK_NCNTEXP   0x00000800 /* Normal-collision counter expired         */
-#define BIGMAC_IMASK_ECNTEXP   0x00001000 /* Excess-collision counter expired         */
-#define BIGMAC_IMASK_LCCNTEXP  0x00002000 /* Late-collision counter expired           */
-#define BIGMAC_IMASK_FCNTEXP   0x00004000 /* First-collision counter expired          */
-#define BIGMAC_IMASK_DTIMEXP   0x00008000 /* Defer-timer expired                      */
-
-/* BigMac transmit config register. */
-#define BIGMAC_TXCFG_ENABLE    0x00000001 /* Enable the transmitter                   */
-#define BIGMAC_TXCFG_FIFO      0x00000010 /* Default tx fthresh...                    */
-#define BIGMAC_TXCFG_SMODE     0x00000020 /* Enable slow transmit mode                */
-#define BIGMAC_TXCFG_CIGN      0x00000040 /* Ignore transmit collisions               */
-#define BIGMAC_TXCFG_FCSOFF    0x00000080 /* Do not emit FCS                          */
-#define BIGMAC_TXCFG_DBACKOFF  0x00000100 /* Disable backoff                          */
-#define BIGMAC_TXCFG_FULLDPLX  0x00000200 /* Enable full-duplex                       */
-
-/* BigMac receive config register. */
-#define BIGMAC_RXCFG_ENABLE    0x00000001 /* Enable the receiver                      */
-#define BIGMAC_RXCFG_FIFO      0x0000000e /* Default rx fthresh...                    */
-#define BIGMAC_RXCFG_PSTRIP    0x00000020 /* Pad byte strip enable                    */
-#define BIGMAC_RXCFG_PMISC     0x00000040 /* Enable promiscuous mode                   */
-#define BIGMAC_RXCFG_DERR      0x00000080 /* Disable error checking                   */
-#define BIGMAC_RXCFG_DCRCS     0x00000100 /* Disable CRC stripping                    */
-#define BIGMAC_RXCFG_ME        0x00000200 /* Receive packets addressed to me          */
-#define BIGMAC_RXCFG_PGRP      0x00000400 /* Enable promisc group mode                */
-#define BIGMAC_RXCFG_HENABLE   0x00000800 /* Enable the hash filter                   */
-#define BIGMAC_RXCFG_AENABLE   0x00001000 /* Enable the address filter                */
-
-/* The BigMAC PHY transceiver.  Not nearly as sophisticated as the happy meal
- * one.  But it does have the "bit banger", oh baby.
- */
-#define TCVR_TPAL      0x00UL
-#define TCVR_MPAL      0x04UL
-#define TCVR_REG_SIZE  0x08UL
-
-/* Frame commands. */
-#define FRAME_WRITE           0x50020000
-#define FRAME_READ            0x60020000
-
-/* Tranceiver registers. */
-#define TCVR_PAL_SERIAL       0x00000001 /* Enable serial mode              */
-#define TCVR_PAL_EXTLBACK     0x00000002 /* Enable external loopback        */
-#define TCVR_PAL_MSENSE       0x00000004 /* Media sense                     */
-#define TCVR_PAL_LTENABLE     0x00000008 /* Link test enable                */
-#define TCVR_PAL_LTSTATUS     0x00000010 /* Link test status  (P1 only)     */
-
-/* Management PAL. */
-#define MGMT_PAL_DCLOCK       0x00000001 /* Data clock                      */
-#define MGMT_PAL_OENAB        0x00000002 /* Output enabler                  */
-#define MGMT_PAL_MDIO         0x00000004 /* MDIO Data/attached              */
-#define MGMT_PAL_TIMEO        0x00000008 /* Transmit enable timeout error   */
-#define MGMT_PAL_EXT_MDIO     MGMT_PAL_MDIO
-#define MGMT_PAL_INT_MDIO     MGMT_PAL_TIMEO
-
-/* Here are some PHY addresses. */
-#define BIGMAC_PHY_EXTERNAL   0 /* External transceiver */
-#define BIGMAC_PHY_INTERNAL   1 /* Internal transceiver */
-
-/* PHY registers */
-#define BIGMAC_BMCR           0x00 /* Basic mode control register      */
-#define BIGMAC_BMSR           0x01 /* Basic mode status register       */
-
-/* BMCR bits */
-#define BMCR_ISOLATE            0x0400  /* Disconnect DP83840 from MII */
-#define BMCR_PDOWN              0x0800  /* Powerdown the DP83840       */
-#define BMCR_ANENABLE           0x1000  /* Enable auto negotiation     */
-#define BMCR_SPEED100           0x2000  /* Select 100Mbps              */
-#define BMCR_LOOPBACK           0x4000  /* TXD loopback bits           */
-#define BMCR_RESET              0x8000  /* Reset the DP83840           */
-
-/* BMSR bits */
-#define BMSR_ERCAP              0x0001  /* Ext-reg capability          */
-#define BMSR_JCD                0x0002  /* Jabber detected             */
-#define BMSR_LSTATUS            0x0004  /* Link status                 */
-
-/* Ring descriptors and such, same as Quad Ethernet. */
-struct be_rxd {
-       u32 rx_flags;
-       u32 rx_addr;
-};
-
-#define RXD_OWN      0x80000000 /* Ownership.      */
-#define RXD_UPDATE   0x10000000 /* Being Updated?  */
-#define RXD_LENGTH   0x000007ff /* Packet Length.  */
-
-struct be_txd {
-       u32 tx_flags;
-       u32 tx_addr;
-};
-
-#define TXD_OWN      0x80000000 /* Ownership.      */
-#define TXD_SOP      0x40000000 /* Start Of Packet */
-#define TXD_EOP      0x20000000 /* End Of Packet   */
-#define TXD_UPDATE   0x10000000 /* Being Updated?  */
-#define TXD_LENGTH   0x000007ff /* Packet Length.  */
-
-#define TX_RING_MAXSIZE   256
-#define RX_RING_MAXSIZE   256
-
-#define TX_RING_SIZE      256
-#define RX_RING_SIZE      256
-
-#define NEXT_RX(num)       (((num) + 1) & (RX_RING_SIZE - 1))
-#define NEXT_TX(num)       (((num) + 1) & (TX_RING_SIZE - 1))
-#define PREV_RX(num)       (((num) - 1) & (RX_RING_SIZE - 1))
-#define PREV_TX(num)       (((num) - 1) & (TX_RING_SIZE - 1))
-
-#define TX_BUFFS_AVAIL(bp)                                    \
-        (((bp)->tx_old <= (bp)->tx_new) ?                     \
-         (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new :  \
-                           (bp)->tx_old - (bp)->tx_new - 1)
-
-
-#define RX_COPY_THRESHOLD  256
-#define RX_BUF_ALLOC_SIZE  (ETH_FRAME_LEN + (64 * 3))
-
-struct bmac_init_block {
-       struct be_rxd be_rxd[RX_RING_MAXSIZE];
-       struct be_txd be_txd[TX_RING_MAXSIZE];
-};
-
-#define bib_offset(mem, elem) \
-((__u32)((unsigned long)(&(((struct bmac_init_block *)0)->mem[elem]))))
-
-/* Now software state stuff. */
-enum bigmac_transceiver {
-       external = 0,
-       internal = 1,
-       none     = 2,
-};
-
-/* Timer state engine. */
-enum bigmac_timer_state {
-       ltrywait = 1,  /* Forcing try of all modes, from fastest to slowest. */
-       asleep   = 2,  /* Timer inactive.                                    */
-};
-
-struct bigmac {
-       void __iomem    *gregs; /* QEC Global Registers               */
-       void __iomem    *creg;  /* QEC BigMAC Channel Registers       */
-       void __iomem    *bregs; /* BigMAC Registers                   */
-       void __iomem    *tregs; /* BigMAC Transceiver                 */
-       struct bmac_init_block  *bmac_block;    /* RX and TX descriptors */
-       __u32                    bblock_dvma;   /* RX and TX descriptors */
-
-       spinlock_t              lock;
-
-       struct sk_buff          *rx_skbs[RX_RING_SIZE];
-       struct sk_buff          *tx_skbs[TX_RING_SIZE];
-
-       int rx_new, tx_new, rx_old, tx_old;
-
-       int board_rev;                          /* BigMAC board revision.             */
-
-       enum bigmac_transceiver tcvr_type;
-       unsigned int            bigmac_bursts;
-       unsigned int            paddr;
-       unsigned short          sw_bmsr;         /* SW copy of PHY BMSR               */
-       unsigned short          sw_bmcr;         /* SW copy of PHY BMCR               */
-       struct timer_list       bigmac_timer;
-       enum bigmac_timer_state timer_state;
-       unsigned int            timer_ticks;
-
-       struct net_device_stats enet_stats;
-       struct platform_device  *qec_op;
-       struct platform_device  *bigmac_op;
-       struct net_device       *dev;
-};
-
-/* We use this to acquire receive skb's that we can DMA directly into. */
-#define ALIGNED_RX_SKB_ADDR(addr) \
-        ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
-
-static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, gfp_t gfp_flags)
-{
-       struct sk_buff *skb;
-
-       skb = alloc_skb(length + 64, gfp_flags);
-       if(skb) {
-               int offset = ALIGNED_RX_SKB_ADDR(skb->data);
-
-               if(offset)
-                       skb_reserve(skb, offset);
-       }
-       return skb;
-}
-
-#endif /* !(_SUNBMAC_H) */
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
deleted file mode 100644 (file)
index ade35dd..0000000
+++ /dev/null
@@ -1,3049 +0,0 @@
-/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
- * sungem.c: Sun GEM ethernet driver.
- *
- * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
- *
- * Support for Apple GMAC and assorted PHYs, WOL, Power Management
- * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org)
- * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp.
- *
- * NAPI and NETPOLL support
- * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/crc32.h>
-#include <linux/random.h>
-#include <linux/workqueue.h>
-#include <linux/if_vlan.h>
-#include <linux/bitops.h>
-#include <linux/mm.h>
-#include <linux/gfp.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/byteorder.h>
-#include <asm/uaccess.h>
-#include <asm/irq.h>
-
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
-
-#ifdef CONFIG_PPC_PMAC
-#include <asm/pci-bridge.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#endif
-
-#include "sungem_phy.h"
-#include "sungem.h"
-
-/* Stripping FCS is causing problems, disabled for now */
-#undef STRIP_FCS
-
-#define DEFAULT_MSG    (NETIF_MSG_DRV          | \
-                        NETIF_MSG_PROBE        | \
-                        NETIF_MSG_LINK)
-
-#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
-                        SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
-                        SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
-                        SUPPORTED_Pause | SUPPORTED_Autoneg)
-
-#define DRV_NAME       "sungem"
-#define DRV_VERSION    "1.0"
-#define DRV_AUTHOR     "David S. Miller <davem@redhat.com>"
-
-static char version[] __devinitdata =
-        DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";
-
-MODULE_AUTHOR(DRV_AUTHOR);
-MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
-MODULE_LICENSE("GPL");
-
-#define GEM_MODULE_NAME        "gem"
-
-static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
-       { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-
-       /* These models only differ from the original GEM in
-        * that their tx/rx fifos are of a different size and
-        * they only support 10/100 speeds. -DaveM
-        *
-        * Apple's GMAC does support gigabit on machines with
-        * the BCM54xx PHYs. -BenH
-        */
-       { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
-         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       {0, }
-};
-
-MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
-
-static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
-{
-       u32 cmd;
-       int limit = 10000;
-
-       cmd  = (1 << 30);
-       cmd |= (2 << 28);
-       cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
-       cmd |= (reg << 18) & MIF_FRAME_REGAD;
-       cmd |= (MIF_FRAME_TAMSB);
-       writel(cmd, gp->regs + MIF_FRAME);
-
-       while (--limit) {
-               cmd = readl(gp->regs + MIF_FRAME);
-               if (cmd & MIF_FRAME_TALSB)
-                       break;
-
-               udelay(10);
-       }
-
-       if (!limit)
-               cmd = 0xffff;
-
-       return cmd & MIF_FRAME_DATA;
-}
-
-static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
-{
-       struct gem *gp = netdev_priv(dev);
-       return __phy_read(gp, mii_id, reg);
-}
-
-static inline u16 phy_read(struct gem *gp, int reg)
-{
-       return __phy_read(gp, gp->mii_phy_addr, reg);
-}
-
-static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
-{
-       u32 cmd;
-       int limit = 10000;
-
-       cmd  = (1 << 30);
-       cmd |= (1 << 28);
-       cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
-       cmd |= (reg << 18) & MIF_FRAME_REGAD;
-       cmd |= (MIF_FRAME_TAMSB);
-       cmd |= (val & MIF_FRAME_DATA);
-       writel(cmd, gp->regs + MIF_FRAME);
-
-       while (limit--) {
-               cmd = readl(gp->regs + MIF_FRAME);
-               if (cmd & MIF_FRAME_TALSB)
-                       break;
-
-               udelay(10);
-       }
-}
-
-static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
-{
-       struct gem *gp = netdev_priv(dev);
-       __phy_write(gp, mii_id, reg, val & 0xffff);
-}
-
-static inline void phy_write(struct gem *gp, int reg, u16 val)
-{
-       __phy_write(gp, gp->mii_phy_addr, reg, val);
-}
-
-static inline void gem_enable_ints(struct gem *gp)
-{
-       /* Enable all interrupts but TXDONE */
-       writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
-}
-
-static inline void gem_disable_ints(struct gem *gp)
-{
-       /* Disable all interrupts, including TXDONE */
-       writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
-       (void)readl(gp->regs + GREG_IMASK); /* write posting */
-}
-
-static void gem_get_cell(struct gem *gp)
-{
-       BUG_ON(gp->cell_enabled < 0);
-       gp->cell_enabled++;
-#ifdef CONFIG_PPC_PMAC
-       if (gp->cell_enabled == 1) {
-               mb();
-               pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
-               udelay(10);
-       }
-#endif /* CONFIG_PPC_PMAC */
-}
-
-/* Turn off the chip's clock */
-static void gem_put_cell(struct gem *gp)
-{
-       BUG_ON(gp->cell_enabled <= 0);
-       gp->cell_enabled--;
-#ifdef CONFIG_PPC_PMAC
-       if (gp->cell_enabled == 0) {
-               mb();
-               pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
-               udelay(10);
-       }
-#endif /* CONFIG_PPC_PMAC */
-}
-
-static inline void gem_netif_stop(struct gem *gp)
-{
-       gp->dev->trans_start = jiffies; /* prevent tx timeout */
-       napi_disable(&gp->napi);
-       netif_tx_disable(gp->dev);
-}
-
-static inline void gem_netif_start(struct gem *gp)
-{
-       /* NOTE: unconditional netif_wake_queue is only
-        * appropriate so long as all callers are assured to
-        * have free tx slots.
-        */
-       netif_wake_queue(gp->dev);
-       napi_enable(&gp->napi);
-}
-
-static void gem_schedule_reset(struct gem *gp)
-{
-       gp->reset_task_pending = 1;
-       schedule_work(&gp->reset_task);
-}
-
-static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
-{
-       if (netif_msg_intr(gp))
-               printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
-}
-
-static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
-{
-       u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
-       u32 pcs_miistat;
-
-       if (netif_msg_intr(gp))
-               printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
-                       gp->dev->name, pcs_istat);
-
-       if (!(pcs_istat & PCS_ISTAT_LSC)) {
-               netdev_err(dev, "PCS irq but no link status change???\n");
-               return 0;
-       }
-
-       /* The link status bit latches on zero, so you must
-        * read it twice in such a case to see a transition
-        * to the link being up.
-        */
-       pcs_miistat = readl(gp->regs + PCS_MIISTAT);
-       if (!(pcs_miistat & PCS_MIISTAT_LS))
-               pcs_miistat |=
-                       (readl(gp->regs + PCS_MIISTAT) &
-                        PCS_MIISTAT_LS);
-
-       if (pcs_miistat & PCS_MIISTAT_ANC) {
-               /* The remote-fault indication is only valid
-                * when autoneg has completed.
-                */
-               if (pcs_miistat & PCS_MIISTAT_RF)
-                       netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
-               else
-                       netdev_info(dev, "PCS AutoNEG complete\n");
-       }
-
-       if (pcs_miistat & PCS_MIISTAT_LS) {
-               netdev_info(dev, "PCS link is now up\n");
-               netif_carrier_on(gp->dev);
-       } else {
-               netdev_info(dev, "PCS link is now down\n");
-               netif_carrier_off(gp->dev);
-               /* If this happens and the link timer is not running,
-                * reset so we re-negotiate.
-                */
-               if (!timer_pending(&gp->link_timer))
-                       return 1;
-       }
-
-       return 0;
-}
-
-static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
-{
-       u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);
-
-       if (netif_msg_intr(gp))
-               printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
-                       gp->dev->name, txmac_stat);
-
-       /* Defer timer expiration is quite normal,
-        * don't even log the event.
-        */
-       if ((txmac_stat & MAC_TXSTAT_DTE) &&
-           !(txmac_stat & ~MAC_TXSTAT_DTE))
-               return 0;
-
-       if (txmac_stat & MAC_TXSTAT_URUN) {
-               netdev_err(dev, "TX MAC xmit underrun\n");
-               dev->stats.tx_fifo_errors++;
-       }
-
-       if (txmac_stat & MAC_TXSTAT_MPE) {
-               netdev_err(dev, "TX MAC max packet size error\n");
-               dev->stats.tx_errors++;
-       }
-
-       /* The rest are all cases of one of the 16-bit TX
-        * counters expiring.
-        */
-       if (txmac_stat & MAC_TXSTAT_NCE)
-               dev->stats.collisions += 0x10000;
-
-       if (txmac_stat & MAC_TXSTAT_ECE) {
-               dev->stats.tx_aborted_errors += 0x10000;
-               dev->stats.collisions += 0x10000;
-       }
-
-       if (txmac_stat & MAC_TXSTAT_LCE) {
-               dev->stats.tx_aborted_errors += 0x10000;
-               dev->stats.collisions += 0x10000;
-       }
-
-       /* We do not keep track of MAC_TXSTAT_FCE and
-        * MAC_TXSTAT_PCE events.
-        */
-       return 0;
-}
-
-/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
- * so we do the following.
- *
- * If any part of the reset goes wrong, we return 1 and that causes the
- * whole chip to be reset.
- */
-static int gem_rxmac_reset(struct gem *gp)
-{
-       struct net_device *dev = gp->dev;
-       int limit, i;
-       u64 desc_dma;
-       u32 val;
-
-       /* First, reset & disable MAC RX. */
-       writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
-       for (limit = 0; limit < 5000; limit++) {
-               if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
-                       break;
-               udelay(10);
-       }
-       if (limit == 5000) {
-               netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
-               return 1;
-       }
-
-       writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
-              gp->regs + MAC_RXCFG);
-       for (limit = 0; limit < 5000; limit++) {
-               if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
-                       break;
-               udelay(10);
-       }
-       if (limit == 5000) {
-               netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
-               return 1;
-       }
-
-       /* Second, disable RX DMA. */
-       writel(0, gp->regs + RXDMA_CFG);
-       for (limit = 0; limit < 5000; limit++) {
-               if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
-                       break;
-               udelay(10);
-       }
-       if (limit == 5000) {
-               netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
-               return 1;
-       }
-
-       udelay(5000);
-
-       /* Execute RX reset command. */
-       writel(gp->swrst_base | GREG_SWRST_RXRST,
-              gp->regs + GREG_SWRST);
-       for (limit = 0; limit < 5000; limit++) {
-               if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
-                       break;
-               udelay(10);
-       }
-       if (limit == 5000) {
-               netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
-               return 1;
-       }
-
-       /* Refresh the RX ring. */
-       for (i = 0; i < RX_RING_SIZE; i++) {
-               struct gem_rxd *rxd = &gp->init_block->rxd[i];
-
-               if (gp->rx_skbs[i] == NULL) {
-                       netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
-                       return 1;
-               }
-
-               rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
-       }
-       gp->rx_new = gp->rx_old = 0;
-
-       /* Now we must reprogram the rest of RX unit. */
-       desc_dma = (u64) gp->gblock_dvma;
-       desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
-       writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
-       writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
-       writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
-       val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-              ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
-       writel(val, gp->regs + RXDMA_CFG);
-       if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
-               writel(((5 & RXDMA_BLANK_IPKTS) |
-                       ((8 << 12) & RXDMA_BLANK_ITIME)),
-                      gp->regs + RXDMA_BLANK);
-       else
-               writel(((5 & RXDMA_BLANK_IPKTS) |
-                       ((4 << 12) & RXDMA_BLANK_ITIME)),
-                      gp->regs + RXDMA_BLANK);
-       val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
-       val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
-       writel(val, gp->regs + RXDMA_PTHRESH);
-       val = readl(gp->regs + RXDMA_CFG);
-       writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
-       writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
-       val = readl(gp->regs + MAC_RXCFG);
-       writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
-
-       return 0;
-}
-
-static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
-{
-       u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
-       int ret = 0;
-
-       if (netif_msg_intr(gp))
-               printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
-                       gp->dev->name, rxmac_stat);
-
-       if (rxmac_stat & MAC_RXSTAT_OFLW) {
-               u32 smac = readl(gp->regs + MAC_SMACHINE);
-
-               netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
-               dev->stats.rx_over_errors++;
-               dev->stats.rx_fifo_errors++;
-
-               ret = gem_rxmac_reset(gp);
-       }
-
-       if (rxmac_stat & MAC_RXSTAT_ACE)
-               dev->stats.rx_frame_errors += 0x10000;
-
-       if (rxmac_stat & MAC_RXSTAT_CCE)
-               dev->stats.rx_crc_errors += 0x10000;
-
-       if (rxmac_stat & MAC_RXSTAT_LCE)
-               dev->stats.rx_length_errors += 0x10000;
-
-       /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
-        * events.
-        */
-       return ret;
-}
-
-static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
-{
-       u32 mac_cstat = readl(gp->regs + MAC_CSTAT);
-
-       if (netif_msg_intr(gp))
-               printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
-                       gp->dev->name, mac_cstat);
-
-       /* This interrupt is just for pause frame and pause
-        * tracking.  It is useful for diagnostics and debug
-        * but probably by default we will mask these events.
-        */
-       if (mac_cstat & MAC_CSTAT_PS)
-               gp->pause_entered++;
-
-       if (mac_cstat & MAC_CSTAT_PRCV)
-               gp->pause_last_time_recvd = (mac_cstat >> 16);
-
-       return 0;
-}
-
-static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
-{
-       u32 mif_status = readl(gp->regs + MIF_STATUS);
-       u32 reg_val, changed_bits;
-
-       reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
-       changed_bits = (mif_status & MIF_STATUS_STAT);
-
-       gem_handle_mif_event(gp, reg_val, changed_bits);
-
-       return 0;
-}
-
-static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
-{
-       u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);
-
-       if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
-           gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
-               netdev_err(dev, "PCI error [%04x]", pci_estat);
-
-               if (pci_estat & GREG_PCIESTAT_BADACK)
-                       pr_cont(" <No ACK64# during ABS64 cycle>");
-               if (pci_estat & GREG_PCIESTAT_DTRTO)
-                       pr_cont(" <Delayed transaction timeout>");
-               if (pci_estat & GREG_PCIESTAT_OTHER)
-                       pr_cont(" <other>");
-               pr_cont("\n");
-       } else {
-               pci_estat |= GREG_PCIESTAT_OTHER;
-               netdev_err(dev, "PCI error\n");
-       }
-
-       if (pci_estat & GREG_PCIESTAT_OTHER) {
-               u16 pci_cfg_stat;
-
-               /* Interrogate PCI config space for the
-                * true cause.
-                */
-               pci_read_config_word(gp->pdev, PCI_STATUS,
-                                    &pci_cfg_stat);
-               netdev_err(dev, "Read PCI cfg space status [%04x]\n",
-                          pci_cfg_stat);
-               if (pci_cfg_stat & PCI_STATUS_PARITY)
-                       netdev_err(dev, "PCI parity error detected\n");
-               if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
-                       netdev_err(dev, "PCI target abort\n");
-               if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
-                       netdev_err(dev, "PCI master acks target abort\n");
-               if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
-                       netdev_err(dev, "PCI master abort\n");
-               if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
-                       netdev_err(dev, "PCI system error SERR#\n");
-               if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
-                       netdev_err(dev, "PCI parity error\n");
-
-               /* Write the error bits back to clear them. */
-               pci_cfg_stat &= (PCI_STATUS_PARITY |
-                                PCI_STATUS_SIG_TARGET_ABORT |
-                                PCI_STATUS_REC_TARGET_ABORT |
-                                PCI_STATUS_REC_MASTER_ABORT |
-                                PCI_STATUS_SIG_SYSTEM_ERROR |
-                                PCI_STATUS_DETECTED_PARITY);
-               pci_write_config_word(gp->pdev,
-                                     PCI_STATUS, pci_cfg_stat);
-       }
-
-       /* For all PCI errors, we should reset the chip. */
-       return 1;
-}
-
-/* All non-normal interrupt conditions get serviced here.
- * Returns non-zero if we should just exit the interrupt
- * handler right now (ie. if we reset the card which invalidates
- * all of the other original irq status bits).
- */
-static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
-{
-       if (gem_status & GREG_STAT_RXNOBUF) {
-               /* Frame arrived, no free RX buffers available. */
-               if (netif_msg_rx_err(gp))
-                       printk(KERN_DEBUG "%s: no buffer for rx frame\n",
-                               gp->dev->name);
-               dev->stats.rx_dropped++;
-       }
-
-       if (gem_status & GREG_STAT_RXTAGERR) {
-               /* corrupt RX tag framing */
-               if (netif_msg_rx_err(gp))
-                       printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
-                               gp->dev->name);
-               dev->stats.rx_errors++;
-
-               return 1;
-       }
-
-       if (gem_status & GREG_STAT_PCS) {
-               if (gem_pcs_interrupt(dev, gp, gem_status))
-                       return 1;
-       }
-
-       if (gem_status & GREG_STAT_TXMAC) {
-               if (gem_txmac_interrupt(dev, gp, gem_status))
-                       return 1;
-       }
-
-       if (gem_status & GREG_STAT_RXMAC) {
-               if (gem_rxmac_interrupt(dev, gp, gem_status))
-                       return 1;
-       }
-
-       if (gem_status & GREG_STAT_MAC) {
-               if (gem_mac_interrupt(dev, gp, gem_status))
-                       return 1;
-       }
-
-       if (gem_status & GREG_STAT_MIF) {
-               if (gem_mif_interrupt(dev, gp, gem_status))
-                       return 1;
-       }
-
-       if (gem_status & GREG_STAT_PCIERR) {
-               if (gem_pci_interrupt(dev, gp, gem_status))
-                       return 1;
-       }
-
-       return 0;
-}
-
-static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
-{
-       int entry, limit;
-
-       entry = gp->tx_old;
-       limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
-       while (entry != limit) {
-               struct sk_buff *skb;
-               struct gem_txd *txd;
-               dma_addr_t dma_addr;
-               u32 dma_len;
-               int frag;
-
-               if (netif_msg_tx_done(gp))
-                       printk(KERN_DEBUG "%s: tx done, slot %d\n",
-                               gp->dev->name, entry);
-               skb = gp->tx_skbs[entry];
-               if (skb_shinfo(skb)->nr_frags) {
-                       int last = entry + skb_shinfo(skb)->nr_frags;
-                       int walk = entry;
-                       int incomplete = 0;
-
-                       last &= (TX_RING_SIZE - 1);
-                       for (;;) {
-                               walk = NEXT_TX(walk);
-                               if (walk == limit)
-                                       incomplete = 1;
-                               if (walk == last)
-                                       break;
-                       }
-                       if (incomplete)
-                               break;
-               }
-               gp->tx_skbs[entry] = NULL;
-               dev->stats.tx_bytes += skb->len;
-
-               for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
-                       txd = &gp->init_block->txd[entry];
-
-                       dma_addr = le64_to_cpu(txd->buffer);
-                       dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;
-
-                       pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
-                       entry = NEXT_TX(entry);
-               }
-
-               dev->stats.tx_packets++;
-               dev_kfree_skb(skb);
-       }
-       gp->tx_old = entry;
-
-       /* Need to make the tx_old update visible to gem_start_xmit()
-        * before checking for netif_queue_stopped().  Without the
-        * memory barrier, there is a small possibility that gem_start_xmit()
-        * will miss it and cause the queue to be stopped forever.
-        */
-       smp_mb();
-
-       if (unlikely(netif_queue_stopped(dev) &&
-                    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
-               struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
-
-               __netif_tx_lock(txq, smp_processor_id());
-               if (netif_queue_stopped(dev) &&
-                   TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
-                       netif_wake_queue(dev);
-               __netif_tx_unlock(txq);
-       }
-}
-
-static __inline__ void gem_post_rxds(struct gem *gp, int limit)
-{
-       int cluster_start, curr, count, kick;
-
-       cluster_start = curr = (gp->rx_new & ~(4 - 1));
-       count = 0;
-       kick = -1;
-       wmb();
-       while (curr != limit) {
-               curr = NEXT_RX(curr);
-               if (++count == 4) {
-                       struct gem_rxd *rxd =
-                               &gp->init_block->rxd[cluster_start];
-                       for (;;) {
-                               rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
-                               rxd++;
-                               cluster_start = NEXT_RX(cluster_start);
-                               if (cluster_start == curr)
-                                       break;
-                       }
-                       kick = curr;
-                       count = 0;
-               }
-       }
-       if (kick >= 0) {
-               mb();
-               writel(kick, gp->regs + RXDMA_KICK);
-       }
-}
-
-#define ALIGNED_RX_SKB_ADDR(addr) \
-        ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
-static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
-                                               gfp_t gfp_flags)
-{
-       struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
-
-       if (likely(skb)) {
-               unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
-               skb_reserve(skb, offset);
-               skb->dev = dev;
-       }
-       return skb;
-}
-
-static int gem_rx(struct gem *gp, int work_to_do)
-{
-       struct net_device *dev = gp->dev;
-       int entry, drops, work_done = 0;
-       u32 done;
-       __sum16 csum;
-
-       if (netif_msg_rx_status(gp))
-               printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
-                       gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);
-
-       entry = gp->rx_new;
-       drops = 0;
-       done = readl(gp->regs + RXDMA_DONE);
-       for (;;) {
-               struct gem_rxd *rxd = &gp->init_block->rxd[entry];
-               struct sk_buff *skb;
-               u64 status = le64_to_cpu(rxd->status_word);
-               dma_addr_t dma_addr;
-               int len;
-
-               if ((status & RXDCTRL_OWN) != 0)
-                       break;
-
-               if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
-                       break;
-
-               /* When writing back RX descriptor, GEM writes status
-                * then buffer address, possibly in separate transactions.
-                * If we don't wait for the chip to write both, we could
-                * post a new buffer to this descriptor then have GEM spam
-                * on the buffer address.  We sync on the RX completion
-                * register to prevent this from happening.
-                */
-               if (entry == done) {
-                       done = readl(gp->regs + RXDMA_DONE);
-                       if (entry == done)
-                               break;
-               }
-
-               /* We can now account for the work we're about to do */
-               work_done++;
-
-               skb = gp->rx_skbs[entry];
-
-               len = (status & RXDCTRL_BUFSZ) >> 16;
-               if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
-                       dev->stats.rx_errors++;
-                       if (len < ETH_ZLEN)
-                               dev->stats.rx_length_errors++;
-                       if (len & RXDCTRL_BAD)
-                               dev->stats.rx_crc_errors++;
-
-                       /* We'll just return it to GEM. */
-               drop_it:
-                       dev->stats.rx_dropped++;
-                       goto next;
-               }
-
-               dma_addr = le64_to_cpu(rxd->buffer);
-               if (len > RX_COPY_THRESHOLD) {
-                       struct sk_buff *new_skb;
-
-                       new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
-                       if (new_skb == NULL) {
-                               drops++;
-                               goto drop_it;
-                       }
-                       pci_unmap_page(gp->pdev, dma_addr,
-                                      RX_BUF_ALLOC_SIZE(gp),
-                                      PCI_DMA_FROMDEVICE);
-                       gp->rx_skbs[entry] = new_skb;
-                       skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
-                       rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
-                                                              virt_to_page(new_skb->data),
-                                                              offset_in_page(new_skb->data),
-                                                              RX_BUF_ALLOC_SIZE(gp),
-                                                              PCI_DMA_FROMDEVICE));
-                       skb_reserve(new_skb, RX_OFFSET);
-
-                       /* Trim the original skb for the netif. */
-                       skb_trim(skb, len);
-               } else {
-                       struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
-
-                       if (copy_skb == NULL) {
-                               drops++;
-                               goto drop_it;
-                       }
-
-                       skb_reserve(copy_skb, 2);
-                       skb_put(copy_skb, len);
-                       pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-                       skb_copy_from_linear_data(skb, copy_skb->data, len);
-                       pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-
-                       /* We'll reuse the original ring buffer. */
-                       skb = copy_skb;
-               }
-
-               csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
-               skb->csum = csum_unfold(csum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
-               skb->protocol = eth_type_trans(skb, gp->dev);
-
-               napi_gro_receive(&gp->napi, skb);
-
-               dev->stats.rx_packets++;
-               dev->stats.rx_bytes += len;
-
-       next:
-               entry = NEXT_RX(entry);
-       }
-
-       gem_post_rxds(gp, entry);
-
-       gp->rx_new = entry;
-
-       if (drops)
-               netdev_info(gp->dev, "Memory squeeze, deferring packet\n");
-
-       return work_done;
-}
-
-/* NAPI poll callback.  Handles any abnormal chip status (under the TX
- * queue lock), reaps TX completions, then processes up to @budget RX
- * frames.  Chip interrupts are re-enabled only once the status register
- * reports no further NAPI-relevant work pending.
- */
-static int gem_poll(struct napi_struct *napi, int budget)
-{
-       struct gem *gp = container_of(napi, struct gem, napi);
-       struct net_device *dev = gp->dev;
-       int work_done;
-
-       work_done = 0;
-       do {
-               /* Handle anomalies */
-               if (unlikely(gp->status & GREG_STAT_ABNORMAL)) {
-                       struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
-                       int reset;
-
-                       /* We run the abnormal interrupt handling code with
-                        * the Tx lock. It only resets the Rx portion of the
-                        * chip, but we need to guard it against DMA being
-                        * restarted by the link poll timer
-                        */
-                       __netif_tx_lock(txq, smp_processor_id());
-                       reset = gem_abnormal_irq(dev, gp, gp->status);
-                       __netif_tx_unlock(txq);
-                       if (reset) {
-                               gem_schedule_reset(gp);
-                               napi_complete(napi);
-                               return work_done;
-                       }
-               }
-
-               /* Run TX completion thread */
-               gem_tx(dev, gp, gp->status);
-
-               /* Run RX thread. We don't use any locking here,
-                * code willing to do bad things - like cleaning the
-                * rx ring - must call napi_disable(), which
-                * schedule_timeout()'s if polling is already disabled.
-                */
-               work_done += gem_rx(gp, budget - work_done);
-
-               if (work_done >= budget)
-                       return work_done;
-
-               /* Re-sample the status register and loop while the chip
-                * still reports NAPI work, avoiding an IRQ round-trip.
-                */
-               gp->status = readl(gp->regs + GREG_STAT);
-       } while (gp->status & GREG_STAT_NAPI);
-
-       napi_complete(napi);
-       gem_enable_ints(gp);
-
-       return work_done;
-}
-
-/* Hardware interrupt handler.  Latches the chip status into gp->status,
- * masks chip interrupts and defers all real work to gem_poll() via NAPI.
- */
-static irqreturn_t gem_interrupt(int irq, void *dev_id)
-{
-       struct net_device *dev = dev_id;
-       struct gem *gp = netdev_priv(dev);
-
-       if (napi_schedule_prep(&gp->napi)) {
-               u32 gem_status = readl(gp->regs + GREG_STAT);
-
-               if (unlikely(gem_status == 0)) {
-                       /* Nothing pending (spurious or shared IRQ): undo
-                        * the napi_schedule_prep() above and bail out.
-                        */
-                       napi_enable(&gp->napi);
-                       return IRQ_NONE;
-               }
-               if (netif_msg_intr(gp))
-                       printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n",
-                              gp->dev->name, gem_status);
-
-               gp->status = gem_status;
-               gem_disable_ints(gp);
-               __napi_schedule(&gp->napi);
-       }
-
-       /* If polling was disabled at the time we received that
-        * interrupt, we may return IRQ_HANDLED here while we
-        * should return IRQ_NONE. No big deal...
-        */
-       return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Netpoll hook: invoke the interrupt handler with the device IRQ line
- * masked, for netconsole/netpoll style polled operation.
- */
-static void gem_poll_controller(struct net_device *dev)
-{
-       struct gem *gp = netdev_priv(dev);
-
-       disable_irq(gp->pdev->irq);
-       gem_interrupt(gp->pdev->irq, dev);
-       enable_irq(gp->pdev->irq);
-}
-#endif
-
-/* ndo_tx_timeout hook: the stack detected a stalled TX queue.  Dump the
- * TX/RX DMA and MAC state registers for diagnosis, then schedule a full
- * chip reset.
- */
-static void gem_tx_timeout(struct net_device *dev)
-{
-       struct gem *gp = netdev_priv(dev);
-
-       netdev_err(dev, "transmit timed out, resetting\n");
-
-       netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
-                  readl(gp->regs + TXDMA_CFG),
-                  readl(gp->regs + MAC_TXSTAT),
-                  readl(gp->regs + MAC_TXCFG));
-       netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
-                  readl(gp->regs + RXDMA_CFG),
-                  readl(gp->regs + MAC_RXSTAT),
-                  readl(gp->regs + MAC_RXCFG));
-
-       gem_schedule_reset(gp);
-}
-
-/* Decide whether the TX descriptor at @entry should request a completion
- * interrupt.  An interrupt is requested once every TX_RING_SIZE/2 slots
- * so TX completions are batched rather than firing per packet.
- */
-static __inline__ int gem_intme(int entry)
-{
-       /* True exactly when entry is a multiple of TX_RING_SIZE/2. */
-       return (entry & ((TX_RING_SIZE >> 1) - 1)) == 0;
-}
-
-/* ndo_start_xmit: map the skb into one or more TX descriptors and kick
- * the TX DMA engine.  A linear skb uses a single SOF|EOF descriptor; for
- * a fragmented skb the head descriptor is handed to the device last so
- * the chip cannot start on a partially-built chain.  Returns
- * NETDEV_TX_BUSY only on the "ring full while queue awake" bug path.
- */
-static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
-                                 struct net_device *dev)
-{
-       struct gem *gp = netdev_priv(dev);
-       int entry;
-       u64 ctrl;
-
-       ctrl = 0;
-       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               /* Ask the chip to insert the checksum: encode the start
-                * and stuff byte offsets into the descriptor control word.
-                */
-               const u64 csum_start_off = skb_checksum_start_offset(skb);
-               const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
-
-               ctrl = (TXDCTRL_CENAB |
-                       (csum_start_off << 15) |
-                       (csum_stuff_off << 21));
-       }
-
-       if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
-               /* This is a hard error, log it. */
-               if (!netif_queue_stopped(dev)) {
-                       netif_stop_queue(dev);
-                       netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
-               }
-               return NETDEV_TX_BUSY;
-       }
-
-       entry = gp->tx_new;
-       gp->tx_skbs[entry] = skb;
-
-       if (skb_shinfo(skb)->nr_frags == 0) {
-               struct gem_txd *txd = &gp->init_block->txd[entry];
-               dma_addr_t mapping;
-               u32 len;
-
-               len = skb->len;
-               mapping = pci_map_page(gp->pdev,
-                                      virt_to_page(skb->data),
-                                      offset_in_page(skb->data),
-                                      len, PCI_DMA_TODEVICE);
-               ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
-               if (gem_intme(entry))
-                       ctrl |= TXDCTRL_INTME;
-               txd->buffer = cpu_to_le64(mapping);
-               /* Buffer address must be visible before the control word
-                * marks the descriptor valid for the chip.
-                */
-               wmb();
-               txd->control_word = cpu_to_le64(ctrl);
-               entry = NEXT_TX(entry);
-       } else {
-               struct gem_txd *txd;
-               u32 first_len;
-               u64 intme;
-               dma_addr_t first_mapping;
-               int frag, first_entry = entry;
-
-               intme = 0;
-               if (gem_intme(entry))
-                       intme |= TXDCTRL_INTME;
-
-               /* We must give this initial chunk to the device last.
-                * Otherwise we could race with the device.
-                */
-               first_len = skb_headlen(skb);
-               first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
-                                            offset_in_page(skb->data),
-                                            first_len, PCI_DMA_TODEVICE);
-               entry = NEXT_TX(entry);
-
-               for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
-                       skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
-                       u32 len;
-                       dma_addr_t mapping;
-                       u64 this_ctrl;
-
-                       len = this_frag->size;
-                       mapping = pci_map_page(gp->pdev,
-                                              this_frag->page,
-                                              this_frag->page_offset,
-                                              len, PCI_DMA_TODEVICE);
-                       this_ctrl = ctrl;
-                       if (frag == skb_shinfo(skb)->nr_frags - 1)
-                               this_ctrl |= TXDCTRL_EOF;
-
-                       txd = &gp->init_block->txd[entry];
-                       txd->buffer = cpu_to_le64(mapping);
-                       wmb();
-                       txd->control_word = cpu_to_le64(this_ctrl | len);
-
-                       if (gem_intme(entry))
-                               intme |= TXDCTRL_INTME;
-
-                       entry = NEXT_TX(entry);
-               }
-               /* Now publish the head (SOF) descriptor, completing the
-                * chain only after every fragment descriptor is in place.
-                */
-               txd = &gp->init_block->txd[first_entry];
-               txd->buffer = cpu_to_le64(first_mapping);
-               wmb();
-               txd->control_word =
-                       cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
-       }
-
-       gp->tx_new = entry;
-       if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
-               netif_stop_queue(dev);
-
-               /* netif_stop_queue() must be done before checking
-                * tx index in TX_BUFFS_AVAIL() below, because
-                * in gem_tx(), we update tx_old before checking for
-                * netif_queue_stopped().
-                */
-               smp_mb();
-               if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
-                       netif_wake_queue(dev);
-       }
-       if (netif_msg_tx_queued(gp))
-               printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
-                      dev->name, entry, skb->len);
-       mb();
-       writel(gp->tx_new, gp->regs + TXDMA_KICK);
-
-       return NETDEV_TX_OK;
-}
-
-/* Reset the PCS unit and poll (up to ~3.2ms) for the self-clearing
- * reset bit; warn if it never clears.
- */
-static void gem_pcs_reset(struct gem *gp)
-{
-       int limit;
-       u32 val;
-
-       /* Reset PCS unit. */
-       val = readl(gp->regs + PCS_MIICTRL);
-       val |= PCS_MIICTRL_RST;
-       writel(val, gp->regs + PCS_MIICTRL);
-
-       limit = 32;
-       while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
-               udelay(100);
-               if (limit-- <= 0)
-                       break;
-       }
-       if (limit < 0)
-               netdev_warn(gp->dev, "PCS reset bit would not clear\n");
-}
-
-/* Reprogram the PCS advertisement registers and restart PCS
- * auto-negotiation.  The PCS is disabled while the advertisement is
- * being changed, then re-enabled.
- */
-static void gem_pcs_reinit_adv(struct gem *gp)
-{
-       u32 val;
-
-       /* Make sure PCS is disabled while changing advertisement
-        * configuration.
-        */
-       val = readl(gp->regs + PCS_CFG);
-       val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
-       writel(val, gp->regs + PCS_CFG);
-
-       /* Advertise all capabilities except asymmetric
-        * pause.
-        */
-       val = readl(gp->regs + PCS_MIIADV);
-       val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
-               PCS_MIIADV_SP | PCS_MIIADV_AP);
-       writel(val, gp->regs + PCS_MIIADV);
-
-       /* Enable and restart auto-negotiation, disable wrapback/loopback,
-        * and re-enable PCS.
-        */
-       val = readl(gp->regs + PCS_MIICTRL);
-       val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
-       val &= ~PCS_MIICTRL_WB;
-       writel(val, gp->regs + PCS_MIICTRL);
-
-       val = readl(gp->regs + PCS_CFG);
-       val |= PCS_CFG_ENABLE;
-       writel(val, gp->regs + PCS_CFG);
-
-       /* Make sure serialink loopback is off.  The meaning
-        * of this bit is logically inverted based upon whether
-        * you are in Serialink or SERDES mode.
-        */
-       val = readl(gp->regs + PCS_SCTRL);
-       if (gp->phy_type == phy_serialink)
-               val &= ~PCS_SCTRL_LOOP;
-       else
-               val |= PCS_SCTRL_LOOP;
-       writel(val, gp->regs + PCS_SCTRL);
-}
-
-/* Maximum polls for the software reset bits to self-clear. */
-#define STOP_TRIES 32
-
-/* Perform a global software reset of the chip (TX and RX paths), then
- * re-initialize the PCS advertisement when running on a Serialink or
- * SERDES PHY.
- */
-static void gem_reset(struct gem *gp)
-{
-       int limit;
-       u32 val;
-
-       /* Make sure we won't get any more interrupts */
-       writel(0xffffffff, gp->regs + GREG_IMASK);
-
-       /* Reset the chip */
-       writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
-              gp->regs + GREG_SWRST);
-
-       limit = STOP_TRIES;
-
-       do {
-               udelay(20);
-               val = readl(gp->regs + GREG_SWRST);
-               if (limit-- <= 0)
-                       break;
-       } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
-
-       if (limit < 0)
-               netdev_err(gp->dev, "SW reset is ghetto\n");
-
-       if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
-               gem_pcs_reinit_adv(gp);
-}
-
-/* Enable the TX/RX DMA engines and the MAC TX/RX paths, unmask
- * interrupts, then prime the RX ring by kicking the RX DMA engine.
- */
-static void gem_start_dma(struct gem *gp)
-{
-       u32 val;
-
-       /* We are ready to rock, turn everything on. */
-       val = readl(gp->regs + TXDMA_CFG);
-       writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
-       val = readl(gp->regs + RXDMA_CFG);
-       writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
-       val = readl(gp->regs + MAC_TXCFG);
-       writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
-       val = readl(gp->regs + MAC_RXCFG);
-       writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
-
-       /* Read back to flush the posted writes before the delay. */
-       (void) readl(gp->regs + MAC_RXCFG);
-       udelay(100);
-
-       gem_enable_ints(gp);
-
-       writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
-}
-
-/* Disable the TX/RX DMA engines and the MAC TX/RX paths.  Note the DMA
- * engines won't actually be stopped before about 4ms; the required wait
- * is done by the caller.
- */
-static void gem_stop_dma(struct gem *gp)
-{
-       u32 val;
-
-       /* We are done rocking, turn everything off. */
-       val = readl(gp->regs + TXDMA_CFG);
-       writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
-       val = readl(gp->regs + RXDMA_CFG);
-       writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
-       val = readl(gp->regs + MAC_TXCFG);
-       writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
-       val = readl(gp->regs + MAC_RXCFG);
-       writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
-
-       /* Read back to flush the posted writes. */
-       (void) readl(gp->regs + MAC_RXCFG);
-
-       /* Need to wait a bit ... done by the caller */
-}
-
-
-/* Configure and (re)start link auto-negotiation, or force speed/duplex
- * when autoneg is disabled or unsupported.  @ep, when non-NULL, carries
- * new ethtool settings; otherwise the currently wanted settings are
- * reused.  If the device is asleep the settings are only stored.
- * Always re-arms the link poll timer (~1.2s).
- *
- * XXX dbl check what that function should do when called on PCS PHY
- */
-static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
-{
-       u32 advertise, features;
-       int autoneg;
-       int speed;
-       int duplex;
-
-       if (gp->phy_type != phy_mii_mdio0 &&
-           gp->phy_type != phy_mii_mdio1)
-               goto non_mii;
-
-       /* Setup advertise */
-       if (found_mii_phy(gp))
-               features = gp->phy_mii.def->features;
-       else
-               features = 0;
-
-       advertise = features & ADVERTISE_MASK;
-       if (gp->phy_mii.advertising != 0)
-               advertise &= gp->phy_mii.advertising;
-
-       autoneg = gp->want_autoneg;
-       speed = gp->phy_mii.speed;
-       duplex = gp->phy_mii.duplex;
-
-       /* Setup link parameters */
-       if (!ep)
-               goto start_aneg;
-       if (ep->autoneg == AUTONEG_ENABLE) {
-               advertise = ep->advertising;
-               autoneg = 1;
-       } else {
-               autoneg = 0;
-               speed = ethtool_cmd_speed(ep);
-               duplex = ep->duplex;
-       }
-
-start_aneg:
-       /* Sanitize settings based on PHY capabilities */
-       if ((features & SUPPORTED_Autoneg) == 0)
-               autoneg = 0;
-       if (speed == SPEED_1000 &&
-           !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
-               speed = SPEED_100;
-       if (speed == SPEED_100 &&
-           !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
-               speed = SPEED_10;
-       if (duplex == DUPLEX_FULL &&
-           !(features & (SUPPORTED_1000baseT_Full |
-                         SUPPORTED_100baseT_Full |
-                         SUPPORTED_10baseT_Full)))
-               duplex = DUPLEX_HALF;
-       if (speed == 0)
-               speed = SPEED_10;
-
-       /* If we are asleep, we don't try to actually setup the PHY, we
-        * just store the settings
-        */
-       if (!netif_device_present(gp->dev)) {
-               gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
-               gp->phy_mii.speed = speed;
-               gp->phy_mii.duplex = duplex;
-               return;
-       }
-
-       /* Configure PHY & start aneg */
-       gp->want_autoneg = autoneg;
-       if (autoneg) {
-               if (found_mii_phy(gp))
-                       gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
-               gp->lstate = link_aneg;
-       } else {
-               if (found_mii_phy(gp))
-                       gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
-               gp->lstate = link_force_ok;
-       }
-
-non_mii:
-       gp->timer_ticks = 0;
-       mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
-}
-
-/* A link-up condition has occurred, initialize and enable the
- * rest of the chip.  Reads the negotiated speed/duplex/pause from the
- * MII PHY or the PCS, programs the MAC TX/XIF/pause configuration
- * accordingly (under the TX queue lock) and starts DMA.
- *
- * Returns 1 when the PHY link could not be read (caller should restart
- * auto-negotiation), 0 on success.
- */
-static int gem_set_link_modes(struct gem *gp)
-{
-       struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0);
-       int full_duplex, speed, pause;
-       u32 val;
-
-       full_duplex = 0;
-       speed = SPEED_10;
-       pause = 0;
-
-       if (found_mii_phy(gp)) {
-               if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
-                       return 1;
-               full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
-               speed = gp->phy_mii.speed;
-               pause = gp->phy_mii.pause;
-       } else if (gp->phy_type == phy_serialink ||
-                  gp->phy_type == phy_serdes) {
-               u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
-
-               if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
-                       full_duplex = 1;
-               speed = SPEED_1000;
-       }
-
-       netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
-                  speed, (full_duplex ? "full" : "half"));
-
-
-       /* We take the tx queue lock to avoid collisions between
-        * this code, the tx path and the NAPI-driven error path
-        */
-       __netif_tx_lock(txq, smp_processor_id());
-
-       val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
-       if (full_duplex) {
-               val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
-       } else {
-               /* MAC_TXCFG_NBO must be zero. */
-       }
-       writel(val, gp->regs + MAC_TXCFG);
-
-       val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
-       if (!full_duplex &&
-           (gp->phy_type == phy_mii_mdio0 ||
-            gp->phy_type == phy_mii_mdio1)) {
-               val |= MAC_XIFCFG_DISE;
-       } else if (full_duplex) {
-               val |= MAC_XIFCFG_FLED;
-       }
-
-       if (speed == SPEED_1000)
-               val |= (MAC_XIFCFG_GMII);
-
-       writel(val, gp->regs + MAC_XIFCFG);
-
-       /* If gigabit and half-duplex, enable carrier extension
-        * mode.  Else, disable it.
-        */
-       if (speed == SPEED_1000 && !full_duplex) {
-               val = readl(gp->regs + MAC_TXCFG);
-               writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
-
-               val = readl(gp->regs + MAC_RXCFG);
-               writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
-       } else {
-               val = readl(gp->regs + MAC_TXCFG);
-               writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
-
-               val = readl(gp->regs + MAC_RXCFG);
-               writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
-       }
-
-       /* For PCS links, pause is taken from the link partner ability. */
-       if (gp->phy_type == phy_serialink ||
-           gp->phy_type == phy_serdes) {
-               u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
-
-               if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
-                       pause = 1;
-       }
-
-       if (!full_duplex)
-               writel(512, gp->regs + MAC_STIME);
-       else
-               writel(64, gp->regs + MAC_STIME);
-       val = readl(gp->regs + MAC_MCCFG);
-       if (pause)
-               val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
-       else
-               val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
-       writel(val, gp->regs + MAC_MCCFG);
-
-       gem_start_dma(gp);
-
-       __netif_tx_unlock(txq);
-
-       if (netif_msg_link(gp)) {
-               if (pause) {
-                       netdev_info(gp->dev,
-                                   "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
-                                   gp->rx_fifo_sz,
-                                   gp->rx_pause_off,
-                                   gp->rx_pause_on);
-               } else {
-                       netdev_info(gp->dev, "Pause is disabled\n");
-               }
-       }
-
-       return 0;
-}
-
-/* Link-down fallback state machine for MII PHYs, driven from the link
- * poll timer.  Steps through forced 100bt then forced 10bt when autoneg
- * keeps failing.  Returns 1 when the caller should restart
- * auto-negotiation, 0 when a forced mode was (re)applied.
- */
-static int gem_mdio_link_not_up(struct gem *gp)
-{
-       switch (gp->lstate) {
-       case link_force_ret:
-               netif_info(gp, link, gp->dev,
-                          "Autoneg failed again, keeping forced mode\n");
-               gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
-                       gp->last_forced_speed, DUPLEX_HALF);
-               gp->timer_ticks = 5;
-               gp->lstate = link_force_ok;
-               return 0;
-       case link_aneg:
-               /* We try forced modes after a failed aneg only on PHYs that don't
-                * have "magic_aneg" bit set, which means they internally do the
-                * while forced-mode thingy. On these, we just restart aneg
-                */
-               if (gp->phy_mii.def->magic_aneg)
-                       return 1;
-               netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
-               /* Try forced modes. */
-               gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
-                       DUPLEX_HALF);
-               gp->timer_ticks = 5;
-               gp->lstate = link_force_try;
-               return 0;
-       case link_force_try:
-               /* Downgrade from 100 to 10 Mbps if necessary.
-                * If already at 10Mbps, warn user about the
-                * situation every 10 ticks.
-                */
-               if (gp->phy_mii.speed == SPEED_100) {
-                       gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
-                               DUPLEX_HALF);
-                       gp->timer_ticks = 5;
-                       netif_info(gp, link, gp->dev,
-                                  "switching to forced 10bt\n");
-                       return 0;
-               } else
-                       return 1;
-       default:
-               return 0;
-       }
-}
-
-/* Link poll timer callback (fires every ~1.2s).  Tracks link state for
- * both PCS (Serialink/SERDES) and MII PHYs, drives netif_carrier_on/off,
- * configures the chip on link-up via gem_set_link_modes(), and runs the
- * forced-mode fallback machine (gem_mdio_link_not_up) when autoneg keeps
- * failing.  Self re-arms unless a reset has been scheduled.
- */
-static void gem_link_timer(unsigned long data)
-{
-       struct gem *gp = (struct gem *) data;
-       struct net_device *dev = gp->dev;
-       int restart_aneg = 0;
-
-       /* There's no point doing anything if we're going to be reset */
-       if (gp->reset_task_pending)
-               return;
-
-       if (gp->phy_type == phy_serialink ||
-           gp->phy_type == phy_serdes) {
-               u32 val = readl(gp->regs + PCS_MIISTAT);
-
-               /* Link status is latched-low; re-read once to get the
-                * current state.
-                */
-               if (!(val & PCS_MIISTAT_LS))
-                       val = readl(gp->regs + PCS_MIISTAT);
-
-               if ((val & PCS_MIISTAT_LS) != 0) {
-                       if (gp->lstate == link_up)
-                               goto restart;
-
-                       gp->lstate = link_up;
-                       netif_carrier_on(dev);
-                       (void)gem_set_link_modes(gp);
-               }
-               goto restart;
-       }
-       if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
-               /* Ok, here we got a link. If we had it due to a forced
-                * fallback, and we were configured for autoneg, we do
-                * retry a short autoneg pass. If you know your hub is
-                * broken, use ethtool ;)
-                */
-               if (gp->lstate == link_force_try && gp->want_autoneg) {
-                       gp->lstate = link_force_ret;
-                       gp->last_forced_speed = gp->phy_mii.speed;
-                       gp->timer_ticks = 5;
-                       if (netif_msg_link(gp))
-                               netdev_info(dev,
-                                           "Got link after fallback, retrying autoneg once...\n");
-                       gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
-               } else if (gp->lstate != link_up) {
-                       gp->lstate = link_up;
-                       netif_carrier_on(dev);
-                       if (gem_set_link_modes(gp))
-                               restart_aneg = 1;
-               }
-       } else {
-               /* If the link was previously up, we restart the
-                * whole process
-                */
-               if (gp->lstate == link_up) {
-                       gp->lstate = link_down;
-                       netif_info(gp, link, dev, "Link down\n");
-                       netif_carrier_off(dev);
-                       gem_schedule_reset(gp);
-                       /* The reset task will restart the timer */
-                       return;
-               } else if (++gp->timer_ticks > 10) {
-                       if (found_mii_phy(gp))
-                               restart_aneg = gem_mdio_link_not_up(gp);
-                       else
-                               restart_aneg = 1;
-               }
-       }
-       if (restart_aneg) {
-               gem_begin_auto_negotiation(gp, NULL);
-               return;
-       }
-restart:
-       mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
-}
-
-/* Unmap and free every skb still attached to the RX and TX rings and
- * clear the descriptors.  Caller must have stopped DMA beforehand.
- */
-static void gem_clean_rings(struct gem *gp)
-{
-       struct gem_init_block *gb = gp->init_block;
-       struct sk_buff *skb;
-       int i;
-       dma_addr_t dma_addr;
-
-       for (i = 0; i < RX_RING_SIZE; i++) {
-               struct gem_rxd *rxd;
-
-               rxd = &gb->rxd[i];
-               if (gp->rx_skbs[i] != NULL) {
-                       skb = gp->rx_skbs[i];
-                       dma_addr = le64_to_cpu(rxd->buffer);
-                       pci_unmap_page(gp->pdev, dma_addr,
-                                      RX_BUF_ALLOC_SIZE(gp),
-                                      PCI_DMA_FROMDEVICE);
-                       dev_kfree_skb_any(skb);
-                       gp->rx_skbs[i] = NULL;
-               }
-               rxd->status_word = 0;
-               wmb();
-               rxd->buffer = 0;
-       }
-
-       for (i = 0; i < TX_RING_SIZE; i++) {
-               if (gp->tx_skbs[i] != NULL) {
-                       struct gem_txd *txd;
-                       int frag;
-
-                       skb = gp->tx_skbs[i];
-                       gp->tx_skbs[i] = NULL;
-
-                       /* One descriptor per segment (head + frags); i is
-                        * advanced for each extra segment and ent wraps it
-                        * back into the ring.
-                        */
-                       for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
-                               int ent = i & (TX_RING_SIZE - 1);
-
-                               txd = &gb->txd[ent];
-                               dma_addr = le64_to_cpu(txd->buffer);
-                               pci_unmap_page(gp->pdev, dma_addr,
-                                              le64_to_cpu(txd->control_word) &
-                                              TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);
-
-                               if (frag != skb_shinfo(skb)->nr_frags)
-                                       i++;
-                       }
-                       dev_kfree_skb_any(skb);
-               }
-       }
-}
-
-static void gem_init_rings(struct gem *gp)
-{
-       struct gem_init_block *gb = gp->init_block;
-       struct net_device *dev = gp->dev;
-       int i;
-       dma_addr_t dma_addr;
-
-       gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;
-
-       gem_clean_rings(gp);
-
-       gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
-                           (unsigned)VLAN_ETH_FRAME_LEN);
-
-       for (i = 0; i < RX_RING_SIZE; i++) {
-               struct sk_buff *skb;
-               struct gem_rxd *rxd = &gb->rxd[i];
-
-               skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
-               if (!skb) {
-                       rxd->buffer = 0;
-                       rxd->status_word = 0;
-                       continue;
-               }
-
-               gp->rx_skbs[i] = skb;
-               skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
-               dma_addr = pci_map_page(gp->pdev,
-                                       virt_to_page(skb->data),
-                                       offset_in_page(skb->data),
-                                       RX_BUF_ALLOC_SIZE(gp),
-                                       PCI_DMA_FROMDEVICE);
-               rxd->buffer = cpu_to_le64(dma_addr);
-               wmb();
-               rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
-               skb_reserve(skb, RX_OFFSET);
-       }
-
-       for (i = 0; i < TX_RING_SIZE; i++) {
-               struct gem_txd *txd = &gb->txd[i];
-
-               txd->control_word = 0;
-               wmb();
-               txd->buffer = 0;
-       }
-       wmb();
-}
-
-/* Init PHY interface and start link poll state machine.  Handles the
- * Apple GMAC PHY-reset quirk, programs the PCS datapath mode on Sun GEM
- * cards, probes/initializes an MII PHY or resets the PCS, and kicks off
- * auto-negotiation.
- */
-static void gem_init_phy(struct gem *gp)
-{
-       u32 mifcfg;
-
-       /* Revert MIF CFG setting done on stop_phy */
-       mifcfg = readl(gp->regs + MIF_CFG);
-       mifcfg &= ~MIF_CFG_BBMODE;
-       writel(mifcfg, gp->regs + MIF_CFG);
-
-       if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
-               int i;
-
-               /* Those delay sucks, the HW seem to love them though, I'll
-                * serisouly consider breaking some locks here to be able
-                * to schedule instead
-                */
-               for (i = 0; i < 3; i++) {
-#ifdef CONFIG_PPC_PMAC
-                       pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
-                       msleep(20);
-#endif
-                       /* Some PHYs used by apple have problem getting back to us,
-                        * we do an additional reset here
-                        */
-                       phy_write(gp, MII_BMCR, BMCR_RESET);
-                       msleep(20);
-                       if (phy_read(gp, MII_BMCR) != 0xffff)
-                               break;
-                       if (i == 2)
-                               netdev_warn(gp->dev, "GMAC PHY not responding !\n");
-               }
-       }
-
-       if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
-           gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
-               u32 val;
-
-               /* Init datapath mode register. */
-               if (gp->phy_type == phy_mii_mdio0 ||
-                   gp->phy_type == phy_mii_mdio1) {
-                       val = PCS_DMODE_MGM;
-               } else if (gp->phy_type == phy_serialink) {
-                       val = PCS_DMODE_SM | PCS_DMODE_GMOE;
-               } else {
-                       val = PCS_DMODE_ESM;
-               }
-
-               writel(val, gp->regs + PCS_DMODE);
-       }
-
-       if (gp->phy_type == phy_mii_mdio0 ||
-           gp->phy_type == phy_mii_mdio1) {
-               /* Reset and detect MII PHY */
-               mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
-
-               /* Init PHY */
-               if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
-                       gp->phy_mii.def->ops->init(&gp->phy_mii);
-       } else {
-               gem_pcs_reset(gp);
-               gem_pcs_reinit_adv(gp);
-       }
-
-       /* Default aneg parameters */
-       gp->timer_ticks = 0;
-       gp->lstate = link_down;
-       netif_carrier_off(gp->dev);
-
-       /* Print things out */
-       if (gp->phy_type == phy_mii_mdio0 ||
-           gp->phy_type == phy_mii_mdio1)
-               netdev_info(gp->dev, "Found %s PHY\n",
-                           gp->phy_mii.def ? gp->phy_mii.def->name : "no");
-
-       gem_begin_auto_negotiation(gp, NULL);
-}
-
-/* Program the TX and RX DMA engines: descriptor ring base addresses
- * (TX ring followed by RX ring in the init block), pause thresholds and
- * RX interrupt blanking, then prime the RX ring.
- */
-static void gem_init_dma(struct gem *gp)
-{
-       u64 desc_dma = (u64) gp->gblock_dvma;
-       u32 val;
-
-       val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
-       writel(val, gp->regs + TXDMA_CFG);
-
-       writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
-       writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
-       /* RX descriptors sit right after the TX ring in the init block. */
-       desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
-
-       writel(0, gp->regs + TXDMA_KICK);
-
-       val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-              ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
-       writel(val, gp->regs + RXDMA_CFG);
-
-       writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
-       writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
-
-       writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
-
-       val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
-       val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
-       writel(val, gp->regs + RXDMA_PTHRESH);
-
-       /* Interrupt blanking time depends on the bus clock (M66EN). */
-       if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
-               writel(((5 & RXDMA_BLANK_IPKTS) |
-                       ((8 << 12) & RXDMA_BLANK_ITIME)),
-                      gp->regs + RXDMA_BLANK);
-       else
-               writel(((5 & RXDMA_BLANK_IPKTS) |
-                       ((4 << 12) & RXDMA_BLANK_ITIME)),
-                      gp->regs + RXDMA_BLANK);
-}
-
-/* Compute the MAC RX filter configuration from the device state:
- *  - IFF_ALLMULTI or more than 256 multicast entries: open all hash
- *    filter buckets;
- *  - IFF_PROMISC: promiscuous mode;
- *  - otherwise: program the 256-bit multicast hash filter from the
- *    current multicast list.
- * Returns the MAC_RXCFG bits to OR in (MAC_RXCFG_HFE and/or
- * MAC_RXCFG_PROM); the caller writes the register.
- */
-static u32 gem_setup_multicast(struct gem *gp)
-{
-       u32 rxcfg = 0;
-       int i;
-
-       if ((gp->dev->flags & IFF_ALLMULTI) ||
-           (netdev_mc_count(gp->dev) > 256)) {
-               for (i = 0; i < 16; i++)
-                       writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
-               rxcfg |= MAC_RXCFG_HFE;
-       } else if (gp->dev->flags & IFF_PROMISC) {
-               rxcfg |= MAC_RXCFG_PROM;
-       } else {
-               u16 hash_table[16];
-               u32 crc;
-               struct netdev_hw_addr *ha;
-
-               memset(hash_table, 0, sizeof(hash_table));
-               netdev_for_each_mc_addr(ha, gp->dev) {
-                       /* Top 8 bits of the little-endian CRC select one
-                        * of the 256 hash filter bits.
-                        */
-                       crc = ether_crc_le(6, ha->addr);
-                       crc >>= 24;
-                       hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
-               }
-               for (i = 0; i < 16; i++)
-                       writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
-               rxcfg |= MAC_RXCFG_HFE;
-       }
-
-       return rxcfg;
-}
-
/* Program the MAC block: pause parameters, inter-packet gaps, frame
 * size limits, station address, address filters, multicast hash,
 * statistics counters and interrupt masks.  RX/TX/XIF configs are
 * left disabled here; they are enabled once a link is established.
 */
static void gem_init_mac(struct gem *gp)
{
	unsigned char *e = &gp->dev->dev_addr[0];

	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

	writel(0x00, gp->regs + MAC_IPG0);
	writel(0x08, gp->regs + MAC_IPG1);
	writel(0x04, gp->regs + MAC_IPG2);
	writel(0x40, gp->regs + MAC_STIME);
	writel(0x40, gp->regs + MAC_MINFSZ);

	/* Ethernet payload + header + FCS + optional VLAN tag. */
	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);

	writel(0x07, gp->regs + MAC_PASIZE);
	writel(0x04, gp->regs + MAC_JAMSIZE);
	writel(0x10, gp->regs + MAC_ATTLIM);
	writel(0x8808, gp->regs + MAC_MCTYPE);

	/* Seed the backoff random generator from the MAC address low bits */
	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

	/* Station address, programmed as three 16-bit words, low word first */
	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	writel(0, gp->regs + MAC_ADDR3);
	writel(0, gp->regs + MAC_ADDR4);
	writel(0, gp->regs + MAC_ADDR5);

	/* 01:80:c2:00:00:01 -- the IEEE MAC control (PAUSE) multicast address */
	writel(0x0001, gp->regs + MAC_ADDR6);
	writel(0xc200, gp->regs + MAC_ADDR7);
	writel(0x0180, gp->regs + MAC_ADDR8);

	/* Clear the address filters and their masks */
	writel(0, gp->regs + MAC_AFILT0);
	writel(0, gp->regs + MAC_AFILT1);
	writel(0, gp->regs + MAC_AFILT2);
	writel(0, gp->regs + MAC_AF21MSK);
	writel(0, gp->regs + MAC_AF0MSK);

	gp->mac_rx_cfg = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
#endif
	/* Zero the hardware statistics counters */
	writel(0, gp->regs + MAC_NCOLL);
	writel(0, gp->regs + MAC_FASUCC);
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
	writel(0, gp->regs + MAC_DTIMER);
	writel(0, gp->regs + MAC_PATMPS);
	writel(0, gp->regs + MAC_RFCTR);
	writel(0, gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_RXCVERR);

	/* Clear RX/TX/MAC/XIF config, we will set these up and enable
	 * them once a link is established.
	 */
	writel(0, gp->regs + MAC_TXCFG);
	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
	writel(0, gp->regs + MAC_MCCFG);
	writel(0, gp->regs + MAC_XIFCFG);

	/* Setup MAC interrupts.  We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, gp->regs + MAC_MCMASK);

	/* Don't enable GEM's WOL in normal operations
	 */
	if (gp->has_wol)
		writel(0, gp->regs + WOL_WAKECSR);
}
-
-static void gem_init_pause_thresholds(struct gem *gp)
-{
-               u32 cfg;
-
-       /* Calculate pause thresholds.  Setting the OFF threshold to the
-        * full RX fifo size effectively disables PAUSE generation which
-        * is what we do for 10/100 only GEMs which have FIFOs too small
-        * to make real gains from PAUSE.
-        */
-       if (gp->rx_fifo_sz <= (2 * 1024)) {
-               gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
-       } else {
-               int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
-               int off = (gp->rx_fifo_sz - (max_frame * 2));
-               int on = off - max_frame;
-
-               gp->rx_pause_off = off;
-               gp->rx_pause_on = on;
-       }
-
-
-       /* Configure the chip "burst" DMA mode & enable some
-        * HW bug fixes on Apple version
-        */
-               cfg  = 0;
-               if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
-               cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
-#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
-               cfg |= GREG_CFG_IBURST;
-#endif
-               cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
-               cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
-               writel(cfg, gp->regs + GREG_CFG);
-
-       /* If Infinite Burst didn't stick, then use different
-        * thresholds (and Apple bug fixes don't exist)
-        */
-       if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
-               cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
-               cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
-               writel(cfg, gp->regs + GREG_CFG);
-       }
-}
-
/* Probe chip-variant specific parameters: PHY interface type and
 * address, TX/RX FIFO sizes and the software-reset base.
 *
 * Returns 0 on success, -1 when a sanity check fails (missing MII
 * PHY on RIO GEM, unresponsive PHY, or bogus FIFO sizes).
 */
static int gem_check_invariants(struct gem *gp)
{
	struct pci_dev *pdev = gp->pdev;
	u32 mif_cfg;

	/* On Apple's sungem, we can't rely on registers as the chip
	 * was been powered down by the firmware. The PHY is looked
	 * up later on.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
		gp->phy_type = phy_mii_mdio0;
		/* FIFO sizes are reported in 64-byte units */
		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
		gp->swrst_base = 0;

		/* Force MDIO0, non-polling MIF operation */
		mif_cfg = readl(gp->regs + MIF_CFG);
		mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
		mif_cfg |= MIF_CFG_MDI0;
		writel(mif_cfg, gp->regs + MIF_CFG);
		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);

		/* We hard-code the PHY address so we can properly bring it out of
		 * reset later on, we can't really probe it at this point, though
		 * that isn't an issue.
		 */
		if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
			gp->mii_phy_addr = 1;
		else
			gp->mii_phy_addr = 0;

		return 0;
	}

	mif_cfg = readl(gp->regs + MIF_CFG);

	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
		/* One of the MII PHYs _must_ be present
		 * as this chip has no gigabit PHY.
		 */
		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
			pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
			       mif_cfg);
			return -1;
		}
	}

	/* Determine initial PHY interface type guess.  MDIO1 is the
	 * external PHY and thus takes precedence over MDIO0.
	 */

	if (mif_cfg & MIF_CFG_MDI1) {
		gp->phy_type = phy_mii_mdio1;
		mif_cfg |= MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else if (mif_cfg & MIF_CFG_MDI0) {
		gp->phy_type = phy_mii_mdio0;
		mif_cfg &= ~MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else {
#ifdef CONFIG_SPARC
		const char *p;

		/* OF "shared-pins" property distinguishes serdes from
		 * the serial link on sparc systems.
		 */
		p = of_get_property(gp->of_node, "shared-pins", NULL);
		if (p && !strcmp(p, "serdes"))
			gp->phy_type = phy_serdes;
		else
#endif
			gp->phy_type = phy_serialink;
	}
	if (gp->phy_type == phy_mii_mdio1 ||
	    gp->phy_type == phy_mii_mdio0) {
		int i;

		/* Scan all 32 MII addresses for a responding PHY
		 * (0xffff reads back when nothing answers).
		 */
		for (i = 0; i < 32; i++) {
			gp->mii_phy_addr = i;
			if (phy_read(gp, MII_BMCR) != 0xffff)
				break;
		}
		if (i == 32) {
			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
				pr_err("RIO MII phy will not respond\n");
				return -1;
			}
			gp->phy_type = phy_serdes;
		}
	}

	/* Fetch the FIFO configurations now too. */
	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;

	if (pdev->vendor == PCI_VENDOR_ID_SUN) {
		if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
			/* GEM is expected to have 9K TX / 20K RX FIFOs */
			if (gp->tx_fifo_sz != (9 * 1024) ||
			    gp->rx_fifo_sz != (20 * 1024)) {
				pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = 0;
		} else {
			/* RIO GEM is expected to have 2K TX / 2K RX FIFOs */
			if (gp->tx_fifo_sz != (2 * 1024) ||
			    gp->rx_fifo_sz != (2 * 1024)) {
				pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
		}
	}

	return 0;
}
-
/* Reset the chip and reprogram everything up to (but not including)
 * link setup: rings, pause thresholds, DMA and MAC engines.
 * Interrupts are left disabled on return.
 */
static void gem_reinit_chip(struct gem *gp)
{
	/* Reset the chip */
	gem_reset(gp);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* Allocate & setup ring buffers */
	gem_init_rings(gp);

	/* Configure pause thresholds */
	gem_init_pause_thresholds(gp);

	/* Init DMA & MAC engines */
	gem_init_dma(gp);
	gem_init_mac(gp);
}
-
-
/* Power the link down.  With @wol set (and hardware support), arm
 * Wake-on-LAN magic-packet matching on the station address and leave
 * RX minimally alive; otherwise fully quiesce the MAC and DMA,
 * suspend the PHY and park the MDIO pins as Apple recommends.
 */
static void gem_stop_phy(struct gem *gp, int wol)
{
	u32 mifcfg;

	/* Let the chip settle down a bit, it seems that helps
	 * for sleep mode on some models
	 */
	msleep(10);

	/* Make sure we aren't polling PHY status change. We
	 * don't currently use that feature though
	 */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_POLL;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (wol && gp->has_wol) {
		unsigned char *e = &gp->dev->dev_addr[0];
		u32 csr;

		/* Setup wake-on-lan for MAGIC packet */
		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);

		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
		csr = WOL_WAKECSR_ENABLE;
		/* Non-GMII (MII) links need the extra wake CSR bit */
		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
			csr |= WOL_WAKECSR_MII;
		writel(csr, gp->regs + WOL_WAKECSR);
	} else {
		writel(0, gp->regs + MAC_RXCFG);
		(void)readl(gp->regs + MAC_RXCFG);
		/* Machine sleep will die in strange ways if we
		 * dont wait a bit here, looks like the chip takes
		 * some time to really shut down
		 */
		msleep(10);
	}

	writel(0, gp->regs + MAC_TXCFG);
	writel(0, gp->regs + MAC_XIFCFG);
	writel(0, gp->regs + TXDMA_CFG);
	writel(0, gp->regs + RXDMA_CFG);

	if (!wol) {
		gem_reset(gp);
		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);

		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
			gp->phy_mii.def->ops->suspend(&gp->phy_mii);

		/* According to Apple, we must set the MDIO pins to this benign
		 * state or we may 1) eat more current, 2) damage some PHYs
		 */
		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
		writel(0, gp->regs + MIF_BBCLK);
		writel(0, gp->regs + MIF_BBDATA);
		writel(0, gp->regs + MIF_BBOENAB);
		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
		(void) readl(gp->regs + MAC_XIFCFG);
	}
}
-
/* Common chip bring-up for open() and resume(): power up the cell,
 * enable PCI access and bus mastering, reinit the hardware, grab the
 * IRQ and kick off PHY detection / autonegotiation.
 *
 * Returns 0 on success or a negative errno; on failure the cell
 * reference taken here is dropped again.
 */
static int gem_do_start(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int rc;

	/* Enable the cell */
	gem_get_cell(gp);

	/* Make sure PCI access and bus master are enabled */
	rc = pci_enable_device(gp->pdev);
	if (rc) {
		netdev_err(dev, "Failed to enable chip on PCI bus !\n");

		/* Put cell and forget it for now, it will be considered as
		 * still asleep, a new sleep cycle may bring it back
		 */
		gem_put_cell(gp);
		return -ENXIO;
	}
	pci_set_master(gp->pdev);

	/* Init & setup chip hardware */
	gem_reinit_chip(gp);

	/* An interrupt might come in handy */
	rc = request_irq(gp->pdev->irq, gem_interrupt,
			 IRQF_SHARED, dev->name, (void *)dev);
	if (rc) {
		netdev_err(dev, "failed to request irq !\n");

		gem_reset(gp);
		gem_clean_rings(gp);
		gem_put_cell(gp);
		return rc;
	}

	/* Mark us as attached again if we come from resume(), this has
	 * no effect if we weren't detached and needs to be done now.
	 */
	netif_device_attach(dev);

	/* Restart NAPI & queues */
	gem_netif_start(gp);

	/* Detect & init PHY, start autoneg etc... this will
	 * eventually result in starting DMA operations when
	 * the link is up
	 */
	gem_init_phy(gp);

	return 0;
}
-
/* Common chip shutdown for close() and suspend().  @wol != 0 keeps
 * the chip powered (cell reference retained) and armed for
 * Wake-on-LAN.  Serialized against the reset task via rtnl_lock.
 */
static void gem_do_stop(struct net_device *dev, int wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Stop NAPI and stop tx queue */
	gem_netif_stop(gp);

	/* Make sure ints are disabled. We don't care about
	 * synchronizing as NAPI is disabled, thus a stray
	 * interrupt will do nothing bad (our irq handler
	 * just schedules NAPI)
	 */
	gem_disable_ints(gp);

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* We cannot cancel the reset task while holding the
	 * rtnl lock, we'd get an A->B / B->A deadlock situation
	 * if we did. This is not an issue however as the reset
	 * task is synchronized vs. us (rtnl_lock) and will do
	 * nothing if the device is down or suspended. We do
	 * still clear reset_task_pending to avoid a spurious
	 * reset later on in case we do resume before it gets
	 * scheduled.
	 */
	gp->reset_task_pending = 0;

	/* If we are going to sleep with WOL */
	gem_stop_dma(gp);
	msleep(10);
	if (!wol)
		gem_reset(gp);
	msleep(10);

	/* Get rid of rings */
	gem_clean_rings(gp);

	/* No irq needed anymore */
	free_irq(gp->pdev->irq, (void *) dev);

	/* Shut the PHY down eventually and setup WOL */
	gem_stop_phy(gp, wol);

	/* Make sure bus master is disabled */
	pci_disable_device(gp->pdev);

	/* Cell not needed neither if no WOL */
	if (!wol)
		gem_put_cell(gp);
}
-
/* Deferred reset handler (gp->reset_task): fully reinitializes the
 * chip after a fault, then restarts NAPI and either autonegotiation
 * (link down) or the link polling timer (link up).  Serialized
 * against open/close/ioctl/suspend via rtnl_lock.
 */
static void gem_reset_task(struct work_struct *work)
{
	struct gem *gp = container_of(work, struct gem, reset_task);

	/* Lock out the network stack (essentially shield ourselves
	 * against a racing open, close, control call, or suspend
	 */
	rtnl_lock();

	/* Skip the reset task if suspended or closed, or if it's
	 * been cancelled by gem_do_stop (see comment there)
	 */
	if (!netif_device_present(gp->dev) ||
	    !netif_running(gp->dev) ||
	    !gp->reset_task_pending) {
		rtnl_unlock();
		return;
	}

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* Stop NAPI and tx */
	gem_netif_stop(gp);

	/* Reset the chip & rings */
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);

	/* Restart NAPI and Tx */
	gem_netif_start(gp);

	/* We are back ! */
	gp->reset_task_pending = 0;

	/* If the link is not up, restart autoneg, else restart the
	 * polling timer
	 */
	if (gp->lstate != link_up)
		gem_begin_auto_negotiation(gp, NULL);
	else
		mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));

	rtnl_unlock();
}
-
/* ndo_open: bring the interface up.
 *
 * If the device is marked absent (suspended), do nothing and report
 * success; resume() performs the real hardware bring-up later.
 */
static int gem_open(struct net_device *dev)
{
	if (!netif_device_present(dev))
		return 0;

	return gem_do_start(dev);
}
-
/* ndo_stop: take the interface down.  A plain close never arms
 * Wake-on-LAN; a no-op while the device is detached (suspended).
 */
static int gem_close(struct net_device *dev)
{
	if (!netif_device_present(dev))
		return 0;

	gem_do_stop(dev, 0);
	return 0;
}
-
-#ifdef CONFIG_PM
/* PCI suspend hook: detach from the stack and shut the chip down,
 * honouring the Wake-on-LAN setting.  asleep_wol records whether the
 * cell was left powered so gem_resume() can rebalance the refcount.
 * Always returns 0.
 */
static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* Lock the network stack first to avoid racing with open/close,
	 * reset task and setting calls
	 */
	rtnl_lock();

	/* Not running, mark ourselves non-present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_detach(dev);
		rtnl_unlock();
		return 0;
	}
	netdev_info(dev, "suspending, WakeOnLan %s\n",
		    (gp->wake_on_lan && netif_running(dev)) ?
		    "enabled" : "disabled");

	/* Tell the network stack we're gone. gem_do_stop() below will
	 * synchronize with TX, stop NAPI etc...
	 */
	netif_device_detach(dev);

	/* Switch off chip, remember WOL setting */
	gp->asleep_wol = gp->wake_on_lan;
	gem_do_stop(dev, gp->asleep_wol);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}
-
/* PCI resume hook: restart the chip if the interface was running,
 * and drop the extra cell reference left over when we slept with
 * Wake-on-LAN enabled.  Always returns 0.
 */
static int gem_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* See locking comment in gem_suspend */
	rtnl_lock();

	/* Not running, mark ourselves present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_attach(dev);
		rtnl_unlock();
		return 0;
	}

	/* Restart chip. If that fails there isn't much we can do, we
	 * leave things stopped.
	 */
	gem_do_start(dev);

	/* If we had WOL enabled, the cell clock was never turned off during
	 * sleep, so we end up being unbalanced. Fix that here
	 */
	if (gp->asleep_wol)
		gem_put_cell(gp);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}
-#endif /* CONFIG_PM */
-
/* ndo_get_stats: fold the chip's error counters into dev->stats.
 * Each counter register is zeroed (written 0) after being read.
 * Skipped entirely when the device is down, detached or the cell is
 * powered off, since the registers can't be safely accessed then.
 */
static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	/* I have seen this being called while the PM was in progress,
	 * so we shield against this. Let's also not poke at registers
	 * while the reset task is going on.
	 *
	 * TODO: Move stats collection elsewhere (link timer ?) and
	 * make this a nop to avoid all those synchro issues
	 */
	if (!netif_device_present(dev) || !netif_running(dev))
		goto bail;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		goto bail;

	dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_FCSERR);

	dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_AERR);

	dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_LERR);

	dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
	dev->stats.collisions +=
		(readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
 bail:
	return &dev->stats;
}
-
-static int gem_set_mac_address(struct net_device *dev, void *addr)
-{
-       struct sockaddr *macaddr = (struct sockaddr *) addr;
-       struct gem *gp = netdev_priv(dev);
-       unsigned char *e = &dev->dev_addr[0];
-
-       if (!is_valid_ether_addr(macaddr->sa_data))
-               return -EADDRNOTAVAIL;
-
-       memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
-
-       /* We'll just catch it later when the device is up'd or resumed */
-       if (!netif_running(dev) || !netif_device_present(dev))
-               return 0;
-
-       /* Better safe than sorry... */
-       if (WARN_ON(!gp->cell_enabled))
-               return 0;
-
-       writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
-       writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
-       writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
-
-       return 0;
-}
-
/* ndo_set_multicast_list: recompute the RX filter (promisc / hash /
 * all-multi) and reprogram MAC_RXCFG.  The RX enable bit must be
 * clear while the filter bits change, so we disable RX and poll for
 * the ENAB bit to drop before writing the new config.
 */
static void gem_set_multicast(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	int limit = 10000;

	if (!netif_running(dev) || !netif_device_present(dev))
		return;

	/* Better safe than sorry... */
	if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled))
		return;

	rxcfg = readl(gp->regs + MAC_RXCFG);
	rxcfg_new = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	rxcfg_new |= MAC_RXCFG_SFCS;
#endif
	gp->mac_rx_cfg = rxcfg_new;

	/* Disable RX and wait (bounded) for the MAC to acknowledge */
	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
		if (!limit--)
			break;
		udelay(10);
	}

	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
	rxcfg |= rxcfg_new;

	writel(rxcfg, gp->regs + MAC_RXCFG);
}
-
/* Jumbo-grams don't seem to work :-( */
#define GEM_MIN_MTU    68
#if 1
#define GEM_MAX_MTU    1500
#else
#define GEM_MAX_MTU    9000
#endif

/* ndo_change_mtu: validate the requested MTU and, when the chip is
 * up and powered, reinitialize it so the RX buffer sizing matches.
 * Returns 0 or -EINVAL for an out-of-range MTU.
 */
static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gem *gp = netdev_priv(dev);

	if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;

	/* We'll just catch it later when the device is up'd or resumed */
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		return 0;

	gem_netif_stop(gp);
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);
	gem_netif_start(gp);

	return 0;
}
-
-static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
-       struct gem *gp = netdev_priv(dev);
-
-       strcpy(info->driver, DRV_NAME);
-       strcpy(info->version, DRV_VERSION);
-       strcpy(info->bus_info, pci_name(gp->pdev));
-}
-
/* ethtool ->get_settings: report supported link modes and the
 * current speed/duplex/autoneg state, depending on PHY type (MII
 * mdio0/mdio1, serialink or serdes).  Always returns 0.
 */
static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = netdev_priv(dev);

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		if (gp->phy_mii.def)
			cmd->supported = gp->phy_mii.def->features;
		else
			/* No PHY descriptor: advertise the bare minimum */
			cmd->supported = (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full);

		/* XXX hardcoded stuff for now */
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->phy_address = 0; /* XXX fixed PHYAD */

		/* Return current PHY settings */
		cmd->autoneg = gp->want_autoneg;
		ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
		cmd->duplex = gp->phy_mii.duplex;
		cmd->advertising = gp->phy_mii.advertising;

		/* If we started with a forced mode, we don't have a default
		 * advertise set, we need to return something sensible so
		 * userland can re-enable autoneg properly.
		 */
		if (cmd->advertising == 0)
			cmd->advertising = cmd->supported;
	} else { // XXX PCS ?
		cmd->supported =
			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_Autoneg);
		cmd->advertising = cmd->supported;
		ethtool_cmd_speed_set(cmd, 0);
		cmd->duplex = cmd->port = cmd->phy_address =
			cmd->transceiver = cmd->autoneg = 0;

		/* serdes means usually a Fibre connector, with most fixed */
		if (gp->phy_type == phy_serdes) {
			cmd->port = PORT_FIBRE;
			cmd->supported = (SUPPORTED_1000baseT_Half |
				SUPPORTED_1000baseT_Full |
				SUPPORTED_FIBRE | SUPPORTED_Autoneg |
				SUPPORTED_Pause | SUPPORTED_Asym_Pause);
			cmd->advertising = cmd->supported;
			cmd->transceiver = XCVR_INTERNAL;
			if (gp->lstate == link_up)
				ethtool_cmd_speed_set(cmd, SPEED_1000);
			cmd->duplex = DUPLEX_FULL;
			cmd->autoneg = 1;
		}
	}
	cmd->maxtxpkt = cmd->maxrxpkt = 0;

	return 0;
}
-
/* ethtool ->set_settings: sanity-check the requested autoneg /
 * speed / duplex combination, then restart the link state machine
 * with the new parameters.  Returns 0 or -EINVAL on a bad request.
 */
static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE &&
	    cmd->advertising == 0)
		return -EINVAL;

	/* Forced mode requires a specific valid speed and duplex */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply settings and restart link process. */
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, cmd);
	}

	return 0;
}
-
-static int gem_nway_reset(struct net_device *dev)
-{
-       struct gem *gp = netdev_priv(dev);
-
-       if (!gp->want_autoneg)
-               return -EINVAL;
-
-       /* Restart link process  */
-       if (netif_device_present(gp->dev)) {
-               del_timer_sync(&gp->link_timer);
-               gem_begin_auto_negotiation(gp, NULL);
-       }
-
-       return 0;
-}
-
-static u32 gem_get_msglevel(struct net_device *dev)
-{
-       struct gem *gp = netdev_priv(dev);
-       return gp->msg_enable;
-}
-
-static void gem_set_msglevel(struct net_device *dev, u32 value)
-{
-       struct gem *gp = netdev_priv(dev);
-       gp->msg_enable = value;
-}
-
-
-/* Add more when I understand how to program the chip */
-/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */
-
-#define WOL_SUPPORTED_MASK     (WAKE_MAGIC)
-
-static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
-       struct gem *gp = netdev_priv(dev);
-
-       /* Add more when I understand how to program the chip */
-       if (gp->has_wol) {
-               wol->supported = WOL_SUPPORTED_MASK;
-               wol->wolopts = gp->wake_on_lan;
-       } else {
-               wol->supported = 0;
-               wol->wolopts = 0;
-       }
-}
-
-static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
-       struct gem *gp = netdev_priv(dev);
-
-       if (!gp->has_wol)
-               return -EOPNOTSUPP;
-       gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
-       return 0;
-}
-
/* ethtool entry points; link state comes from the generic helper */
static const struct ethtool_ops gem_ethtool_ops = {
	.get_drvinfo		= gem_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_settings		= gem_get_settings,
	.set_settings		= gem_set_settings,
	.nway_reset		= gem_nway_reset,
	.get_msglevel		= gem_get_msglevel,
	.set_msglevel		= gem_set_msglevel,
	.get_wol		= gem_get_wol,
	.set_wol		= gem_set_wol,
};
-
/* ndo_do_ioctl: MII ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Returns 0 on success or -EOPNOTSUPP for unknown commands.
 */
static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct gem *gp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int rc = -EOPNOTSUPP;

	/* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that
	 * netif_device_present() is true and holds rtnl_lock for us
	 * so we have nothing to worry about
	 */

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = gp->mii_phy_addr;
		/* Fallthrough... */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		data->val_out = __phy_read(gp, data->phy_id & 0x1f,
					   data->reg_num & 0x1f);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		__phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
			    data->val_in);
		rc = 0;
		break;
	}
	return rc;
}
-
-#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
-/* Fetch MAC address from vital product data of PCI ROM. */
/* Scan a mapped expansion ROM for the byte sequence
 * 0x90 0x00 0x09 'N' 'A' 0x06 (which matches a VPD read-only
 * descriptor followed by an "NA" keyword of length 6 -- presumably
 * the network address; verify against the PCI VPD format) and copy
 * out the 6 MAC address bytes that follow.
 *
 * Returns 1 and fills @dev_addr when found, 0 otherwise.
 */
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
{
	int this_offset;

	for (this_offset = 0x20; this_offset < len; this_offset++) {
		void __iomem *p = rom_base + this_offset;
		int i;

		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
			continue;

		this_offset += 6;
		p += 6;

		for (i = 0; i < 6; i++)
			dev_addr[i] = readb(p + i);
		return 1;
	}
	return 0;
}
-
-static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
-{
-       size_t size;
-       void __iomem *p = pci_map_rom(pdev, &size);
-
-       if (p) {
-                       int found;
-
-               found = readb(p) == 0x55 &&
-                       readb(p + 1) == 0xaa &&
-                       find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
-               pci_unmap_rom(pdev, p);
-               if (found)
-                       return;
-       }
-
-       /* Sun MAC prefix then 3 random bytes. */
-       dev_addr[0] = 0x08;
-       dev_addr[1] = 0x00;
-       dev_addr[2] = 0x20;
-       get_random_bytes(dev_addr + 3, 3);
-}
-#endif /* not Sparc and not PPC */
-
/* Determine the interface MAC address.  On OF platforms it comes
 * from the "local-mac-address" property (falling back to the IDPROM
 * on sparc); elsewhere from the PCI ROM VPD or a random Sun-prefixed
 * address.  Returns 0 on success, -1 when no address can be found.
 */
static int __devinit gem_get_device_address(struct gem *gp)
{
#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
	struct net_device *dev = gp->dev;
	const unsigned char *addr;

	addr = of_get_property(gp->of_node, "local-mac-address", NULL);
	if (addr == NULL) {
#ifdef CONFIG_SPARC
		addr = idprom->id_ethaddr;
#else
		printk("\n");
		pr_err("%s: can't get mac-address\n", dev->name);
		return -1;
#endif
	}
	memcpy(dev->dev_addr, addr, 6);
#else
	get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
	return 0;
}
-
-static void gem_remove_one(struct pci_dev *pdev)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-
-       if (dev) {
-               struct gem *gp = netdev_priv(dev);
-
-               unregister_netdev(dev);
-
-               /* Ensure reset task is truely gone */
-               cancel_work_sync(&gp->reset_task);
-
-               /* Free resources */
-               pci_free_consistent(pdev,
-                                   sizeof(struct gem_init_block),
-                                   gp->init_block,
-                                   gp->gblock_dvma);
-               iounmap(gp->regs);
-               pci_release_regions(pdev);
-               free_netdev(dev);
-
-               pci_set_drvdata(pdev, NULL);
-       }
-}
-
-static const struct net_device_ops gem_netdev_ops = {
-       .ndo_open               = gem_open,
-       .ndo_stop               = gem_close,
-       .ndo_start_xmit         = gem_start_xmit,
-       .ndo_get_stats          = gem_get_stats,
-       .ndo_set_multicast_list = gem_set_multicast,
-       .ndo_do_ioctl           = gem_ioctl,
-       .ndo_tx_timeout         = gem_tx_timeout,
-       .ndo_change_mtu         = gem_change_mtu,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = gem_set_mac_address,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = gem_poll_controller,
-#endif
-};
-
-static int __devinit gem_init_one(struct pci_dev *pdev,
-                                 const struct pci_device_id *ent)
-{
-       unsigned long gemreg_base, gemreg_len;
-       struct net_device *dev;
-       struct gem *gp;
-       int err, pci_using_dac;
-
-       printk_once(KERN_INFO "%s", version);
-
-       /* Apple gmac note: during probe, the chip is powered up by
-        * the arch code to allow the code below to work (and to let
-        * the chip be probed on the config space. It won't stay powered
-        * up until the interface is brought up however, so we can't rely
-        * on register configuration done at this point.
-        */
-       err = pci_enable_device(pdev);
-       if (err) {
-               pr_err("Cannot enable MMIO operation, aborting\n");
-               return err;
-       }
-       pci_set_master(pdev);
-
-       /* Configure DMA attributes. */
-
-       /* All of the GEM documentation states that 64-bit DMA addressing
-        * is fully supported and should work just fine.  However the
-        * front end for RIO based GEMs is different and only supports
-        * 32-bit addressing.
-        *
-        * For now we assume the various PPC GEMs are 32-bit only as well.
-        */
-       if (pdev->vendor == PCI_VENDOR_ID_SUN &&
-           pdev->device == PCI_DEVICE_ID_SUN_GEM &&
-           !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               pci_using_dac = 1;
-       } else {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (err) {
-                       pr_err("No usable DMA configuration, aborting\n");
-                       goto err_disable_device;
-               }
-               pci_using_dac = 0;
-       }
-
-       gemreg_base = pci_resource_start(pdev, 0);
-       gemreg_len = pci_resource_len(pdev, 0);
-
-       if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
-               pr_err("Cannot find proper PCI device base address, aborting\n");
-               err = -ENODEV;
-               goto err_disable_device;
-       }
-
-       dev = alloc_etherdev(sizeof(*gp));
-       if (!dev) {
-               pr_err("Etherdev alloc failed, aborting\n");
-               err = -ENOMEM;
-               goto err_disable_device;
-       }
-       SET_NETDEV_DEV(dev, &pdev->dev);
-
-       gp = netdev_priv(dev);
-
-       err = pci_request_regions(pdev, DRV_NAME);
-       if (err) {
-               pr_err("Cannot obtain PCI resources, aborting\n");
-               goto err_out_free_netdev;
-       }
-
-       gp->pdev = pdev;
-       dev->base_addr = (long) pdev;
-       gp->dev = dev;
-
-       gp->msg_enable = DEFAULT_MSG;
-
-       init_timer(&gp->link_timer);
-       gp->link_timer.function = gem_link_timer;
-       gp->link_timer.data = (unsigned long) gp;
-
-       INIT_WORK(&gp->reset_task, gem_reset_task);
-
-       gp->lstate = link_down;
-       gp->timer_ticks = 0;
-       netif_carrier_off(dev);
-
-       gp->regs = ioremap(gemreg_base, gemreg_len);
-       if (!gp->regs) {
-               pr_err("Cannot map device registers, aborting\n");
-               err = -EIO;
-               goto err_out_free_res;
-       }
-
-       /* On Apple, we want a reference to the Open Firmware device-tree
-        * node. We use it for clock control.
-        */
-#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
-       gp->of_node = pci_device_to_OF_node(pdev);
-#endif
-
-       /* Only Apple version supports WOL afaik */
-       if (pdev->vendor == PCI_VENDOR_ID_APPLE)
-               gp->has_wol = 1;
-
-       /* Make sure cell is enabled */
-       gem_get_cell(gp);
-
-       /* Make sure everything is stopped and in init state */
-       gem_reset(gp);
-
-       /* Fill up the mii_phy structure (even if we won't use it) */
-       gp->phy_mii.dev = dev;
-       gp->phy_mii.mdio_read = _phy_read;
-       gp->phy_mii.mdio_write = _phy_write;
-#ifdef CONFIG_PPC_PMAC
-       gp->phy_mii.platform_data = gp->of_node;
-#endif
-       /* By default, we start with autoneg */
-       gp->want_autoneg = 1;
-
-       /* Check fifo sizes, PHY type, etc... */
-       if (gem_check_invariants(gp)) {
-               err = -ENODEV;
-               goto err_out_iounmap;
-       }
-
-       /* It is guaranteed that the returned buffer will be at least
-        * PAGE_SIZE aligned.
-        */
-       gp->init_block = (struct gem_init_block *)
-               pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
-                                    &gp->gblock_dvma);
-       if (!gp->init_block) {
-               pr_err("Cannot allocate init block, aborting\n");
-               err = -ENOMEM;
-               goto err_out_iounmap;
-       }
-
-       if (gem_get_device_address(gp))
-               goto err_out_free_consistent;
-
-       dev->netdev_ops = &gem_netdev_ops;
-       netif_napi_add(dev, &gp->napi, gem_poll, 64);
-       dev->ethtool_ops = &gem_ethtool_ops;
-       dev->watchdog_timeo = 5 * HZ;
-       dev->irq = pdev->irq;
-       dev->dma = 0;
-
-       /* Set that now, in case PM kicks in now */
-       pci_set_drvdata(pdev, dev);
-
-       /* We can do scatter/gather and HW checksum */
-       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
-       dev->features |= dev->hw_features | NETIF_F_RXCSUM;
-       if (pci_using_dac)
-               dev->features |= NETIF_F_HIGHDMA;
-
-       /* Register with kernel */
-       if (register_netdev(dev)) {
-               pr_err("Cannot register net device, aborting\n");
-               err = -ENOMEM;
-               goto err_out_free_consistent;
-       }
-
-       /* Undo the get_cell with appropriate locking (we could use
-        * ndo_init/uninit but that would be even more clumsy imho)
-        */
-       rtnl_lock();
-       gem_put_cell(gp);
-       rtnl_unlock();
-
-       netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
-                   dev->dev_addr);
-       return 0;
-
-err_out_free_consistent:
-       gem_remove_one(pdev);
-err_out_iounmap:
-       gem_put_cell(gp);
-       iounmap(gp->regs);
-
-err_out_free_res:
-       pci_release_regions(pdev);
-
-err_out_free_netdev:
-       free_netdev(dev);
-err_disable_device:
-       pci_disable_device(pdev);
-       return err;
-
-}
-
-
-static struct pci_driver gem_driver = {
-       .name           = GEM_MODULE_NAME,
-       .id_table       = gem_pci_tbl,
-       .probe          = gem_init_one,
-       .remove         = gem_remove_one,
-#ifdef CONFIG_PM
-       .suspend        = gem_suspend,
-       .resume         = gem_resume,
-#endif /* CONFIG_PM */
-};
-
-static int __init gem_init(void)
-{
-       return pci_register_driver(&gem_driver);
-}
-
-static void __exit gem_cleanup(void)
-{
-       pci_unregister_driver(&gem_driver);
-}
-
-module_init(gem_init);
-module_exit(gem_cleanup);
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
deleted file mode 100644 (file)
index 835ce1b..0000000
+++ /dev/null
@@ -1,1027 +0,0 @@
-/* $Id: sungem.h,v 1.10.2.4 2002/03/11 08:54:48 davem Exp $
- * sungem.h: Definitions for Sun GEM ethernet driver.
- *
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
- */
-
-#ifndef _SUNGEM_H
-#define _SUNGEM_H
-
-/* Global Registers */
-#define GREG_SEBSTATE  0x0000UL        /* SEB State Register           */
-#define GREG_CFG       0x0004UL        /* Configuration Register       */
-#define GREG_STAT      0x000CUL        /* Status Register              */
-#define GREG_IMASK     0x0010UL        /* Interrupt Mask Register      */
-#define GREG_IACK      0x0014UL        /* Interrupt ACK Register       */
-#define GREG_STAT2     0x001CUL        /* Alias of GREG_STAT           */
-#define GREG_PCIESTAT  0x1000UL        /* PCI Error Status Register    */
-#define GREG_PCIEMASK  0x1004UL        /* PCI Error Mask Register      */
-#define GREG_BIFCFG    0x1008UL        /* BIF Configuration Register   */
-#define GREG_BIFDIAG   0x100CUL        /* BIF Diagnostics Register     */
-#define GREG_SWRST     0x1010UL        /* Software Reset Register      */
-
-/* Global SEB State Register */
-#define GREG_SEBSTATE_ARB      0x00000003      /* State of Arbiter             */
-#define GREG_SEBSTATE_RXWON    0x00000004      /* RX won internal arbitration  */
-
-/* Global Configuration Register */
-#define GREG_CFG_IBURST                0x00000001      /* Infinite Burst               */
-#define GREG_CFG_TXDMALIM      0x0000003e      /* TX DMA grant limit           */
-#define GREG_CFG_RXDMALIM      0x000007c0      /* RX DMA grant limit           */
-#define GREG_CFG_RONPAULBIT    0x00000800      /* Use mem read multiple for PCI read
-                                                * after infinite burst (Apple) */
-#define GREG_CFG_ENBUG2FIX     0x00001000      /* Fix Rx hang after overflow */
-
-/* Global Interrupt Status Register.
- *
- * Reading this register automatically clears bits 0 through 6.
- * This auto-clearing does not occur when the alias at GREG_STAT2
- * is read instead.  The rest of the interrupt bits only clear when
- * the secondary interrupt status register corresponding to that
- * bit is read (ie. if GREG_STAT_PCS is set, it will be cleared by
- * reading PCS_ISTAT).
- */
-#define GREG_STAT_TXINTME      0x00000001      /* TX INTME frame transferred   */
-#define GREG_STAT_TXALL                0x00000002      /* All TX frames transferred    */
-#define GREG_STAT_TXDONE       0x00000004      /* One TX frame transferred     */
-#define GREG_STAT_RXDONE       0x00000010      /* One RX frame arrived         */
-#define GREG_STAT_RXNOBUF      0x00000020      /* No free RX buffers available */
-#define GREG_STAT_RXTAGERR     0x00000040      /* RX tag framing is corrupt    */
-#define GREG_STAT_PCS          0x00002000      /* PCS signalled interrupt      */
-#define GREG_STAT_TXMAC                0x00004000      /* TX MAC signalled interrupt   */
-#define GREG_STAT_RXMAC                0x00008000      /* RX MAC signalled interrupt   */
-#define GREG_STAT_MAC          0x00010000      /* MAC Control signalled irq    */
-#define GREG_STAT_MIF          0x00020000      /* MIF signalled interrupt      */
-#define GREG_STAT_PCIERR       0x00040000      /* PCI Error interrupt          */
-#define GREG_STAT_TXNR         0xfff80000      /* == TXDMA_TXDONE reg val      */
-#define GREG_STAT_TXNR_SHIFT   19
-
-#define GREG_STAT_ABNORMAL     (GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR | \
-                                GREG_STAT_PCS | GREG_STAT_TXMAC | GREG_STAT_RXMAC | \
-                                GREG_STAT_MAC | GREG_STAT_MIF | GREG_STAT_PCIERR)
-
-#define GREG_STAT_NAPI         (GREG_STAT_TXALL  | GREG_STAT_TXINTME | \
-                                GREG_STAT_RXDONE | GREG_STAT_ABNORMAL)
-
-/* The layout of GREG_IMASK and GREG_IACK is identical to GREG_STAT.
- * Bits set in GREG_IMASK will prevent that interrupt type from being
- * signalled to the cpu.  GREG_IACK can be used to clear specific top-level
- * interrupt conditions in GREG_STAT, ie. it only works for bits 0 through 6.
- * Setting the bit will clear that interrupt, clear bits will have no effect
- * on GREG_STAT.
- */
-
-/* Global PCI Error Status Register */
-#define GREG_PCIESTAT_BADACK   0x00000001      /* No ACK64# during ABS64 cycle */
-#define GREG_PCIESTAT_DTRTO    0x00000002      /* Delayed transaction timeout  */
-#define GREG_PCIESTAT_OTHER    0x00000004      /* Other PCI error, check cfg space */
-
-/* The layout of the GREG_PCIEMASK is identical to that of GREG_PCIESTAT.
- * Bits set in GREG_PCIEMASK will prevent that interrupt type from being
- * signalled to the cpu.
- */
-
-/* Global BIF Configuration Register */
-#define GREG_BIFCFG_SLOWCLK    0x00000001      /* Set if PCI runs < 25Mhz      */
-#define GREG_BIFCFG_B64DIS     0x00000002      /* Disable 64bit wide data cycle*/
-#define GREG_BIFCFG_M66EN      0x00000004      /* Set if on 66Mhz PCI segment  */
-
-/* Global BIF Diagnostics Register */
-#define GREG_BIFDIAG_BURSTSM   0x007f0000      /* PCI Burst state machine      */
-#define GREG_BIFDIAG_BIFSM     0xff000000      /* BIF state machine            */
-
-/* Global Software Reset Register.
- *
- * This register is used to perform a global reset of the RX and TX portions
- * of the GEM asic.  Setting the RX or TX reset bit will start the reset.
- * The driver _MUST_ poll these bits until they clear.  One may not attempt
- * to program any other part of GEM until the bits clear.
- */
-#define GREG_SWRST_TXRST       0x00000001      /* TX Software Reset            */
-#define GREG_SWRST_RXRST       0x00000002      /* RX Software Reset            */
-#define GREG_SWRST_RSTOUT      0x00000004      /* Force RST# pin active        */
-#define GREG_SWRST_CACHESIZE   0x00ff0000      /* RIO only: cache line size    */
-#define GREG_SWRST_CACHE_SHIFT 16
-
-/* TX DMA Registers */
-#define TXDMA_KICK     0x2000UL        /* TX Kick Register             */
-#define TXDMA_CFG      0x2004UL        /* TX Configuration Register    */
-#define TXDMA_DBLOW    0x2008UL        /* TX Desc. Base Low            */
-#define TXDMA_DBHI     0x200CUL        /* TX Desc. Base High           */
-#define TXDMA_FWPTR    0x2014UL        /* TX FIFO Write Pointer        */
-#define TXDMA_FSWPTR   0x2018UL        /* TX FIFO Shadow Write Pointer */
-#define TXDMA_FRPTR    0x201CUL        /* TX FIFO Read Pointer         */
-#define TXDMA_FSRPTR   0x2020UL        /* TX FIFO Shadow Read Pointer  */
-#define TXDMA_PCNT     0x2024UL        /* TX FIFO Packet Counter       */
-#define TXDMA_SMACHINE 0x2028UL        /* TX State Machine Register    */
-#define TXDMA_DPLOW    0x2030UL        /* TX Data Pointer Low          */
-#define TXDMA_DPHI     0x2034UL        /* TX Data Pointer High         */
-#define TXDMA_TXDONE   0x2100UL        /* TX Completion Register       */
-#define TXDMA_FADDR    0x2104UL        /* TX FIFO Address              */
-#define TXDMA_FTAG     0x2108UL        /* TX FIFO Tag                  */
-#define TXDMA_DLOW     0x210CUL        /* TX FIFO Data Low             */
-#define TXDMA_DHIT1    0x2110UL        /* TX FIFO Data HighT1          */
-#define TXDMA_DHIT0    0x2114UL        /* TX FIFO Data HighT0          */
-#define TXDMA_FSZ      0x2118UL        /* TX FIFO Size                 */
-
-/* TX Kick Register.
- *
- * This 13-bit register is programmed by the driver to hold the descriptor
- * entry index which follows the last valid transmit descriptor.
- */
-
-/* TX Completion Register.
- *
- * This 13-bit register is updated by GEM to hold to descriptor entry index
- * which follows the last descriptor already processed by GEM.  Note that
- * this value is mirrored in GREG_STAT which eliminates the need to even
- * access this register in the driver during interrupt processing.
- */
-
-/* TX Configuration Register.
- *
- * Note that TXDMA_CFG_FTHRESH, the TX FIFO Threshold, is an obsolete feature
- * that was meant to be used with jumbo packets.  It should be set to the
- * maximum value of 0x4ff, else one risks getting TX MAC Underrun errors.
- */
-#define TXDMA_CFG_ENABLE       0x00000001      /* Enable TX DMA channel        */
-#define TXDMA_CFG_RINGSZ       0x0000001e      /* TX descriptor ring size      */
-#define TXDMA_CFG_RINGSZ_32    0x00000000      /* 32 TX descriptors            */
-#define TXDMA_CFG_RINGSZ_64    0x00000002      /* 64 TX descriptors            */
-#define TXDMA_CFG_RINGSZ_128   0x00000004      /* 128 TX descriptors           */
-#define TXDMA_CFG_RINGSZ_256   0x00000006      /* 256 TX descriptors           */
-#define TXDMA_CFG_RINGSZ_512   0x00000008      /* 512 TX descriptors           */
-#define TXDMA_CFG_RINGSZ_1K    0x0000000a      /* 1024 TX descriptors          */
-#define TXDMA_CFG_RINGSZ_2K    0x0000000c      /* 2048 TX descriptors          */
-#define TXDMA_CFG_RINGSZ_4K    0x0000000e      /* 4096 TX descriptors          */
-#define TXDMA_CFG_RINGSZ_8K    0x00000010      /* 8192 TX descriptors          */
-#define TXDMA_CFG_PIOSEL       0x00000020      /* Enable TX FIFO PIO from cpu  */
-#define TXDMA_CFG_FTHRESH      0x001ffc00      /* TX FIFO Threshold, obsolete  */
-#define TXDMA_CFG_PMODE                0x00200000      /* TXALL irq means TX FIFO empty*/
-
-/* TX Descriptor Base Low/High.
- *
- * These two registers store the 53 most significant bits of the base address
- * of the TX descriptor table.  The 11 least significant bits are always
- * zero.  As a result, the TX descriptor table must be 2K aligned.
- */
-
-/* The rest of the TXDMA_* registers are for diagnostics and debug, I will document
- * them later. -DaveM
- */
-
-/* WakeOnLan Registers */
-#define WOL_MATCH0     0x3000UL
-#define WOL_MATCH1     0x3004UL
-#define WOL_MATCH2     0x3008UL
-#define WOL_MCOUNT     0x300CUL
-#define WOL_WAKECSR    0x3010UL
-
-/* WOL Match count register
- */
-#define WOL_MCOUNT_N           0x00000010
-#define WOL_MCOUNT_M           0x00000000 /* 0 << 8 */
-
-#define WOL_WAKECSR_ENABLE     0x00000001
-#define WOL_WAKECSR_MII                0x00000002
-#define WOL_WAKECSR_SEEN       0x00000004
-#define WOL_WAKECSR_FILT_UCAST 0x00000008
-#define WOL_WAKECSR_FILT_MCAST 0x00000010
-#define WOL_WAKECSR_FILT_BCAST 0x00000020
-#define WOL_WAKECSR_FILT_SEEN  0x00000040
-
-
-/* Receive DMA Registers */
-#define RXDMA_CFG      0x4000UL        /* RX Configuration Register    */
-#define RXDMA_DBLOW    0x4004UL        /* RX Descriptor Base Low       */
-#define RXDMA_DBHI     0x4008UL        /* RX Descriptor Base High      */
-#define RXDMA_FWPTR    0x400CUL        /* RX FIFO Write Pointer        */
-#define RXDMA_FSWPTR   0x4010UL        /* RX FIFO Shadow Write Pointer */
-#define RXDMA_FRPTR    0x4014UL        /* RX FIFO Read Pointer         */
-#define RXDMA_PCNT     0x4018UL        /* RX FIFO Packet Counter       */
-#define RXDMA_SMACHINE 0x401CUL        /* RX State Machine Register    */
-#define RXDMA_PTHRESH  0x4020UL        /* Pause Thresholds             */
-#define RXDMA_DPLOW    0x4024UL        /* RX Data Pointer Low          */
-#define RXDMA_DPHI     0x4028UL        /* RX Data Pointer High         */
-#define RXDMA_KICK     0x4100UL        /* RX Kick Register             */
-#define RXDMA_DONE     0x4104UL        /* RX Completion Register       */
-#define RXDMA_BLANK    0x4108UL        /* RX Blanking Register         */
-#define RXDMA_FADDR    0x410CUL        /* RX FIFO Address              */
-#define RXDMA_FTAG     0x4110UL        /* RX FIFO Tag                  */
-#define RXDMA_DLOW     0x4114UL        /* RX FIFO Data Low             */
-#define RXDMA_DHIT1    0x4118UL        /* RX FIFO Data HighT0          */
-#define RXDMA_DHIT0    0x411CUL        /* RX FIFO Data HighT1          */
-#define RXDMA_FSZ      0x4120UL        /* RX FIFO Size                 */
-
-/* RX Configuration Register. */
-#define RXDMA_CFG_ENABLE       0x00000001      /* Enable RX DMA channel        */
-#define RXDMA_CFG_RINGSZ       0x0000001e      /* RX descriptor ring size      */
-#define RXDMA_CFG_RINGSZ_32    0x00000000      /* - 32   entries               */
-#define RXDMA_CFG_RINGSZ_64    0x00000002      /* - 64   entries               */
-#define RXDMA_CFG_RINGSZ_128   0x00000004      /* - 128  entries               */
-#define RXDMA_CFG_RINGSZ_256   0x00000006      /* - 256  entries               */
-#define RXDMA_CFG_RINGSZ_512   0x00000008      /* - 512  entries               */
-#define RXDMA_CFG_RINGSZ_1K    0x0000000a      /* - 1024 entries               */
-#define RXDMA_CFG_RINGSZ_2K    0x0000000c      /* - 2048 entries               */
-#define RXDMA_CFG_RINGSZ_4K    0x0000000e      /* - 4096 entries               */
-#define RXDMA_CFG_RINGSZ_8K    0x00000010      /* - 8192 entries               */
-#define RXDMA_CFG_RINGSZ_BDISAB        0x00000020      /* Disable RX desc batching     */
-#define RXDMA_CFG_FBOFF                0x00001c00      /* Offset of first data byte    */
-#define RXDMA_CFG_CSUMOFF      0x000fe000      /* Skip bytes before csum calc  */
-#define RXDMA_CFG_FTHRESH      0x07000000      /* RX FIFO dma start threshold  */
-#define RXDMA_CFG_FTHRESH_64   0x00000000      /* - 64   bytes                 */
-#define RXDMA_CFG_FTHRESH_128  0x01000000      /* - 128  bytes                 */
-#define RXDMA_CFG_FTHRESH_256  0x02000000      /* - 256  bytes                 */
-#define RXDMA_CFG_FTHRESH_512  0x03000000      /* - 512  bytes                 */
-#define RXDMA_CFG_FTHRESH_1K   0x04000000      /* - 1024 bytes                 */
-#define RXDMA_CFG_FTHRESH_2K   0x05000000      /* - 2048 bytes                 */
-
-/* RX Descriptor Base Low/High.
- *
- * These two registers store the 53 most significant bits of the base address
- * of the RX descriptor table.  The 11 least significant bits are always
- * zero.  As a result, the RX descriptor table must be 2K aligned.
- */
-
-/* RX PAUSE Thresholds.
- *
- * These values determine when XOFF and XON PAUSE frames are emitted by
- * GEM.  The thresholds measure RX FIFO occupancy in units of 64 bytes.
- */
-#define RXDMA_PTHRESH_OFF      0x000001ff      /* XOFF emitted w/FIFO > this   */
-#define RXDMA_PTHRESH_ON       0x001ff000      /* XON emitted w/FIFO < this    */
-
-/* RX Kick Register.
- *
- * This 13-bit register is written by the host CPU and holds the last
- * valid RX descriptor number plus one.  This is, if 'N' is written to
- * this register, it means that all RX descriptors up to but excluding
- * 'N' are valid.
- *
- * The hardware requires that RX descriptors are posted in increments
- * of 4.  This means 'N' must be a multiple of four.  For the best
- * performance, the first new descriptor being posted should be (PCI)
- * cache line aligned.
- */
-
-/* RX Completion Register.
- *
- * This 13-bit register is updated by GEM to indicate which RX descriptors
- * have already been used for receive frames.  All descriptors up to but
- * excluding the value in this register are ready to be processed.  GEM
- * updates this register value after the RX FIFO empties completely into
- * the RX descriptor's buffer, but before the RX_DONE bit is set in the
- * interrupt status register.
- */
-
-/* RX Blanking Register. */
-#define RXDMA_BLANK_IPKTS      0x000001ff      /* RX_DONE asserted after this
-                                                * many packets received since
-                                                * previous RX_DONE.
-                                                */
-#define RXDMA_BLANK_ITIME      0x000ff000      /* RX_DONE asserted after this
-                                                * many clocks (measured in 2048
-                                                * PCI clocks) were counted since
-                                                * the previous RX_DONE.
-                                                */
-
-/* RX FIFO Size.
- *
- * This 11-bit read-only register indicates how large, in units of 64-bytes,
- * the RX FIFO is.  The driver uses this to properly configure the RX PAUSE
- * thresholds.
- */
-
-/* The rest of the RXDMA_* registers are for diagnostics and debug, I will document
- * them later. -DaveM
- */
-
-/* MAC Registers */
-#define MAC_TXRST      0x6000UL        /* TX MAC Software Reset Command*/
-#define MAC_RXRST      0x6004UL        /* RX MAC Software Reset Command*/
-#define MAC_SNDPAUSE   0x6008UL        /* Send Pause Command Register  */
-#define MAC_TXSTAT     0x6010UL        /* TX MAC Status Register       */
-#define MAC_RXSTAT     0x6014UL        /* RX MAC Status Register       */
-#define MAC_CSTAT      0x6018UL        /* MAC Control Status Register  */
-#define MAC_TXMASK     0x6020UL        /* TX MAC Mask Register         */
-#define MAC_RXMASK     0x6024UL        /* RX MAC Mask Register         */
-#define MAC_MCMASK     0x6028UL        /* MAC Control Mask Register    */
-#define MAC_TXCFG      0x6030UL        /* TX MAC Configuration Register*/
-#define MAC_RXCFG      0x6034UL        /* RX MAC Configuration Register*/
-#define MAC_MCCFG      0x6038UL        /* MAC Control Config Register  */
-#define MAC_XIFCFG     0x603CUL        /* XIF Configuration Register   */
-#define MAC_IPG0       0x6040UL        /* InterPacketGap0 Register     */
-#define MAC_IPG1       0x6044UL        /* InterPacketGap1 Register     */
-#define MAC_IPG2       0x6048UL        /* InterPacketGap2 Register     */
-#define MAC_STIME      0x604CUL        /* SlotTime Register            */
-#define MAC_MINFSZ     0x6050UL        /* MinFrameSize Register        */
-#define MAC_MAXFSZ     0x6054UL        /* MaxFrameSize Register        */
-#define MAC_PASIZE     0x6058UL        /* PA Size Register             */
-#define MAC_JAMSIZE    0x605CUL        /* JamSize Register             */
-#define MAC_ATTLIM     0x6060UL        /* Attempt Limit Register       */
-#define MAC_MCTYPE     0x6064UL        /* MAC Control Type Register    */
-#define MAC_ADDR0      0x6080UL        /* MAC Address 0 Register       */
-#define MAC_ADDR1      0x6084UL        /* MAC Address 1 Register       */
-#define MAC_ADDR2      0x6088UL        /* MAC Address 2 Register       */
-#define MAC_ADDR3      0x608CUL        /* MAC Address 3 Register       */
-#define MAC_ADDR4      0x6090UL        /* MAC Address 4 Register       */
-#define MAC_ADDR5      0x6094UL        /* MAC Address 5 Register       */
-#define MAC_ADDR6      0x6098UL        /* MAC Address 6 Register       */
-#define MAC_ADDR7      0x609CUL        /* MAC Address 7 Register       */
-#define MAC_ADDR8      0x60A0UL        /* MAC Address 8 Register       */
-#define MAC_AFILT0     0x60A4UL        /* Address Filter 0 Register    */
-#define MAC_AFILT1     0x60A8UL        /* Address Filter 1 Register    */
-#define MAC_AFILT2     0x60ACUL        /* Address Filter 2 Register    */
-#define MAC_AF21MSK    0x60B0UL        /* Address Filter 2&1 Mask Reg  */
-#define MAC_AF0MSK     0x60B4UL        /* Address Filter 0 Mask Reg    */
-#define MAC_HASH0      0x60C0UL        /* Hash Table 0 Register        */
-#define MAC_HASH1      0x60C4UL        /* Hash Table 1 Register        */
-#define MAC_HASH2      0x60C8UL        /* Hash Table 2 Register        */
-#define MAC_HASH3      0x60CCUL        /* Hash Table 3 Register        */
-#define MAC_HASH4      0x60D0UL        /* Hash Table 4 Register        */
-#define MAC_HASH5      0x60D4UL        /* Hash Table 5 Register        */
-#define MAC_HASH6      0x60D8UL        /* Hash Table 6 Register        */
-#define MAC_HASH7      0x60DCUL        /* Hash Table 7 Register        */
-#define MAC_HASH8      0x60E0UL        /* Hash Table 8 Register        */
-#define MAC_HASH9      0x60E4UL        /* Hash Table 9 Register        */
-#define MAC_HASH10     0x60E8UL        /* Hash Table 10 Register       */
-#define MAC_HASH11     0x60ECUL        /* Hash Table 11 Register       */
-#define MAC_HASH12     0x60F0UL        /* Hash Table 12 Register       */
-#define MAC_HASH13     0x60F4UL        /* Hash Table 13 Register       */
-#define MAC_HASH14     0x60F8UL        /* Hash Table 14 Register       */
-#define MAC_HASH15     0x60FCUL        /* Hash Table 15 Register       */
-#define MAC_NCOLL      0x6100UL        /* Normal Collision Counter     */
-#define MAC_FASUCC     0x6104UL        /* First Attmpt. Succ Coll Ctr. */
-#define MAC_ECOLL      0x6108UL        /* Excessive Collision Counter  */
-#define MAC_LCOLL      0x610CUL        /* Late Collision Counter       */
-#define MAC_DTIMER     0x6110UL        /* Defer Timer                  */
-#define MAC_PATMPS     0x6114UL        /* Peak Attempts Register       */
-#define MAC_RFCTR      0x6118UL        /* Receive Frame Counter        */
-#define MAC_LERR       0x611CUL        /* Length Error Counter         */
-#define MAC_AERR       0x6120UL        /* Alignment Error Counter      */
-#define MAC_FCSERR     0x6124UL        /* FCS Error Counter            */
-#define MAC_RXCVERR    0x6128UL        /* RX code Violation Error Ctr  */
-#define MAC_RANDSEED   0x6130UL        /* Random Number Seed Register  */
-#define MAC_SMACHINE   0x6134UL        /* State Machine Register       */
-
-/* TX MAC Software Reset Command. */
-#define MAC_TXRST_CMD  0x00000001      /* Start sw reset, self-clears  */
-
-/* RX MAC Software Reset Command. */
-#define MAC_RXRST_CMD  0x00000001      /* Start sw reset, self-clears  */
-
-/* Send Pause Command. */
-#define MAC_SNDPAUSE_TS        0x0000ffff      /* The pause_time operand used in
-                                        * Send_Pause and flow-control
-                                        * handshakes.
-                                        */
-#define MAC_SNDPAUSE_SP        0x00010000      /* Setting this bit instructs the MAC
-                                        * to send a Pause Flow Control
-                                        * frame onto the network.
-                                        */
-
-/* TX MAC Status Register. */
-#define MAC_TXSTAT_XMIT        0x00000001      /* Frame Transmitted            */
-#define MAC_TXSTAT_URUN        0x00000002      /* TX Underrun                  */
-#define MAC_TXSTAT_MPE 0x00000004      /* Max Packet Size Error        */
-#define MAC_TXSTAT_NCE 0x00000008      /* Normal Collision Cntr Expire */
-#define MAC_TXSTAT_ECE 0x00000010      /* Excess Collision Cntr Expire */
-#define MAC_TXSTAT_LCE 0x00000020      /* Late Collision Cntr Expire   */
-#define MAC_TXSTAT_FCE 0x00000040      /* First Collision Cntr Expire  */
-#define MAC_TXSTAT_DTE 0x00000080      /* Defer Timer Expire           */
-#define MAC_TXSTAT_PCE 0x00000100      /* Peak Attempts Cntr Expire    */
-
-/* RX MAC Status Register. */
-#define MAC_RXSTAT_RCV 0x00000001      /* Frame Received               */
-#define MAC_RXSTAT_OFLW        0x00000002      /* Receive Overflow             */
-#define MAC_RXSTAT_FCE 0x00000004      /* Frame Cntr Expire            */
-#define MAC_RXSTAT_ACE 0x00000008      /* Align Error Cntr Expire      */
-#define MAC_RXSTAT_CCE 0x00000010      /* CRC Error Cntr Expire        */
-#define MAC_RXSTAT_LCE 0x00000020      /* Length Error Cntr Expire     */
-#define MAC_RXSTAT_VCE 0x00000040      /* Code Violation Cntr Expire   */
-
-/* MAC Control Status Register. */
-#define MAC_CSTAT_PRCV 0x00000001      /* Pause Received               */
-#define MAC_CSTAT_PS   0x00000002      /* Paused State                 */
-#define MAC_CSTAT_NPS  0x00000004      /* Not Paused State             */
-#define MAC_CSTAT_PTR  0xffff0000      /* Pause Time Received          */
-
-/* The layout of the MAC_{TX,RX,C}MASK registers is identical to that
- * of MAC_{TX,RX,C}STAT.  Bits set in MAC_{TX,RX,C}MASK will prevent
- * that interrupt type from being signalled to front end of GEM.  For
- * the interrupt to actually get sent to the cpu, it is necessary to
- * properly set the appropriate GREG_IMASK_{TX,RX,}MAC bits as well.
- */
-
-/* TX MAC Configuration Register.
- *
- * NOTE: The TX MAC Enable bit must be cleared and polled until
- *      zero before any other bits in this register are changed.
- *
- *      Also, enabling the Carrier Extension feature of GEM is
- *      a 3 step process 1) Set TX Carrier Extension 2) Set
- *      RX Carrier Extension 3) Set Slot Time to 0x200.  This
- *      mode must be enabled when in half-duplex at 1Gbps, else
- *      it must be disabled.
- */
-#define MAC_TXCFG_ENAB 0x00000001      /* TX MAC Enable                */
-#define MAC_TXCFG_ICS  0x00000002      /* Ignore Carrier Sense         */
-#define MAC_TXCFG_ICOLL        0x00000004      /* Ignore Collisions            */
-#define MAC_TXCFG_EIPG0        0x00000008      /* Enable IPG0                  */
-#define MAC_TXCFG_NGU  0x00000010      /* Never Give Up                */
-#define MAC_TXCFG_NGUL 0x00000020      /* Never Give Up Limit          */
-#define MAC_TXCFG_NBO  0x00000040      /* No Backoff                   */
-#define MAC_TXCFG_SD   0x00000080      /* Slow Down                    */
-#define MAC_TXCFG_NFCS 0x00000100      /* No FCS                       */
-#define MAC_TXCFG_TCE  0x00000200      /* TX Carrier Extension         */
-
-/* RX MAC Configuration Register.
- *
- * NOTE: The RX MAC Enable bit must be cleared and polled until
- *      zero before any other bits in this register are changed.
- *
- *      Similar rules apply to the Hash Filter Enable bit when
- *      programming the hash table registers, and the Address Filter
- *      Enable bit when programming the address filter registers.
- */
-#define MAC_RXCFG_ENAB 0x00000001      /* RX MAC Enable                */
-#define MAC_RXCFG_SPAD 0x00000002      /* Strip Pad                    */
-#define MAC_RXCFG_SFCS 0x00000004      /* Strip FCS                    */
-#define MAC_RXCFG_PROM 0x00000008      /* Promiscuous Mode             */
-#define MAC_RXCFG_PGRP 0x00000010      /* Promiscuous Group            */
-#define MAC_RXCFG_HFE  0x00000020      /* Hash Filter Enable           */
-#define MAC_RXCFG_AFE  0x00000040      /* Address Filter Enable        */
-#define MAC_RXCFG_DDE  0x00000080      /* Disable Discard on Error     */
-#define MAC_RXCFG_RCE  0x00000100      /* RX Carrier Extension         */
-
-/* MAC Control Config Register. */
-#define MAC_MCCFG_SPE  0x00000001      /* Send Pause Enable            */
-#define MAC_MCCFG_RPE  0x00000002      /* Receive Pause Enable         */
-#define MAC_MCCFG_PMC  0x00000004      /* Pass MAC Control             */
-
-/* XIF Configuration Register.
- *
- * NOTE: When leaving or entering loopback mode, a global hardware
- *       init of GEM should be performed.
- */
-#define MAC_XIFCFG_OE  0x00000001      /* MII TX Output Driver Enable  */
-#define MAC_XIFCFG_LBCK        0x00000002      /* Loopback TX to RX            */
-#define MAC_XIFCFG_DISE        0x00000004      /* Disable RX path during TX    */
-#define MAC_XIFCFG_GMII        0x00000008      /* Use GMII clocks + datapath   */
-#define MAC_XIFCFG_MBOE        0x00000010      /* Controls MII_BUF_EN pin      */
-#define MAC_XIFCFG_LLED        0x00000020      /* Force LINKLED# active (low)  */
-#define MAC_XIFCFG_FLED        0x00000040      /* Force FDPLXLED# active (low) */
-
-/* InterPacketGap0 Register.  This 8-bit value is used as an extension
- * to the InterPacketGap1 Register.  Specifically it contributes to the
- * timing of the RX-to-TX IPG.  This value is ignored and presumed to
- * be zero for TX-to-TX IPG calculations and/or when the Enable IPG0 bit
- * is cleared in the TX MAC Configuration Register.
- *
- * This value in this register in terms of media byte time.
- *
- * Recommended value: 0x00
- */
-
-/* InterPacketGap1 Register.  This 8-bit value defines the first 2/3
- * portion of the Inter Packet Gap.
- *
- * This value in this register in terms of media byte time.
- *
- * Recommended value: 0x08
- */
-
-/* InterPacketGap2 Register.  This 8-bit value defines the second 1/3
- * portion of the Inter Packet Gap.
- *
- * This value in this register in terms of media byte time.
- *
- * Recommended value: 0x04
- */
-
-/* Slot Time Register.  This 10-bit value specifies the slot time
- * parameter in units of media byte time.  It determines the physical
- * span of the network.
- *
- * Recommended value: 0x40
- */
-
-/* Minimum Frame Size Register.  This 10-bit register specifies the
- * smallest sized frame the TXMAC will send onto the medium, and the
- * RXMAC will receive from the medium.
- *
- * Recommended value: 0x40
- */
-
-/* Maximum Frame and Burst Size Register.
- *
- * This register specifies two things.  First it specifies the maximum
- * sized frame the TXMAC will send and the RXMAC will recognize as
- * valid.  Second, it specifies the maximum run length of a burst of
- * packets sent in half-duplex gigabit modes.
- *
- * Recommended value: 0x200005ee
- */
-#define MAC_MAXFSZ_MFS 0x00007fff      /* Max Frame Size               */
-#define MAC_MAXFSZ_MBS 0x7fff0000      /* Max Burst Size               */
-
-/* PA Size Register.  This 10-bit register specifies the number of preamble
- * bytes which will be transmitted at the beginning of each frame.  A
- * value of two or greater should be programmed here.
- *
- * Recommended value: 0x07
- */
-
-/* Jam Size Register.  This 4-bit register specifies the duration of
- * the jam in units of media byte time.
- *
- * Recommended value: 0x04
- */
-
-/* Attempts Limit Register.  This 8-bit register specifies the number
- * of attempts that the TXMAC will make to transmit a frame, before it
- * resets its Attempts Counter.  After reaching the Attempts Limit the
- * TXMAC may or may not drop the frame, as determined by the NGU
- * (Never Give Up) and NGUL (Never Give Up Limit) bits in the TXMAC
- * Configuration Register.
- *
- * Recommended value: 0x10
- */
-
-/* MAX Control Type Register.  This 16-bit register specifies the
- * "type" field of a MAC Control frame.  The TXMAC uses this field to
- * encapsulate the MAC Control frame for transmission, and the RXMAC
- * uses it for decoding valid MAC Control frames received from the
- * network.
- *
- * Recommended value: 0x8808
- */
-
-/* MAC Address Registers.  Each of these registers specify the
- * ethernet MAC of the interface, 16-bits at a time.  Register
- * 0 specifies bits [47:32], register 1 bits [31:16], and register
- * 2 bits [15:0].
- *
- * Registers 3 through and including 5 specify an alternate
- * MAC address for the interface.
- *
- * Registers 6 through and including 8 specify the MAC Control
- * Address, which must be the reserved multicast address for MAC
- * Control frames.
- *
- * Example: To program primary station address a:b:c:d:e:f into
- *         the chip.
- *             MAC_Address_2 = (a << 8) | b
- *             MAC_Address_1 = (c << 8) | d
- *             MAC_Address_0 = (e << 8) | f
- */
-
-/* Address Filter Registers.  Registers 0 through 2 specify bit
- * fields [47:32] through [15:0], respectively, of the address
- * filter.  The Address Filter 2&1 Mask Register denotes the 8-bit
- * nibble mask for Address Filter Registers 2 and 1.  The Address
- * Filter 0 Mask Register denotes the 16-bit mask for the Address
- * Filter Register 0.
- */
-
-/* Hash Table Registers.  Registers 0 through 15 specify bit fields
- * [255:240] through [15:0], respectively, of the hash table.
- */
-
-/* Statistics Registers.  All of these registers are 16-bits and
- * track occurrences of a specific event.  GEM can be configured
- * to interrupt the host cpu when any of these counters overflow.
- * They should all be explicitly initialized to zero when the interface
- * is brought up.
- */
-
-/* Random Number Seed Register.  This 10-bit value is used as the
- * RNG seed inside GEM for the CSMA/CD backoff algorithm.  It is
- * recommended to program this register to the 10 LSB of the
- * interfaces MAC address.
- */
-
-/* Pause Timer, read-only.  This 16-bit timer is used to time the pause
- * interval as indicated by a received pause flow control frame.
- * A non-zero value in this timer indicates that the MAC is currently in
- * the paused state.
- */
-
-/* MIF Registers */
-#define MIF_BBCLK      0x6200UL        /* MIF Bit-Bang Clock           */
-#define MIF_BBDATA     0x6204UL        /* MIF Bit-Band Data            */
-#define MIF_BBOENAB    0x6208UL        /* MIF Bit-Bang Output Enable   */
-#define MIF_FRAME      0x620CUL        /* MIF Frame/Output Register    */
-#define MIF_CFG                0x6210UL        /* MIF Configuration Register   */
-#define MIF_MASK       0x6214UL        /* MIF Mask Register            */
-#define MIF_STATUS     0x6218UL        /* MIF Status Register          */
-#define MIF_SMACHINE   0x621CUL        /* MIF State Machine Register   */
-
-/* MIF Bit-Bang Clock.  This 1-bit register is used to generate the
- * MDC clock waveform on the MII Management Interface when the MIF is
- * programmed in the "Bit-Bang" mode.  Writing a '1' after a '0' into
- * this register will create a rising edge on the MDC, while writing
- * a '0' after a '1' will create a falling edge.  For every bit that
- * is transferred on the management interface, both edges have to be
- * generated.
- */
-
-/* MIF Bit-Bang Data.  This 1-bit register is used to generate the
- * outgoing data (MDO) on the MII Management Interface when the MIF
- * is programmed in the "Bit-Bang" mode.  The daa will be steered to the
- * appropriate MDIO based on the state of the PHY_Select bit in the MIF
- * Configuration Register.
- */
-
-/* MIF Big-Band Output Enable.  THis 1-bit register is used to enable
- * ('1') or disable ('0') the I-directional driver on the MII when the
- * MIF is programmed in the "Bit-Bang" mode.  The MDIO should be enabled
- * when data bits are transferred from the MIF to the transceiver, and it
- * should be disabled when the interface is idle or when data bits are
- * transferred from the transceiver to the MIF (data portion of a read
- * instruction).  Only one MDIO will be enabled at a given time, depending
- * on the state of the PHY_Select bit in the MIF Configuration Register.
- */
-
-/* MIF Configuration Register.  This 15-bit register controls the operation
- * of the MIF.
- */
-#define MIF_CFG_PSELECT        0x00000001      /* Xcvr slct: 0=mdio0 1=mdio1   */
-#define MIF_CFG_POLL   0x00000002      /* Enable polling mechanism     */
-#define MIF_CFG_BBMODE 0x00000004      /* 1=bit-bang 0=frame mode      */
-#define MIF_CFG_PRADDR 0x000000f8      /* Xcvr poll register address   */
-#define MIF_CFG_MDI0   0x00000100      /* MDIO_0 present or read-bit   */
-#define MIF_CFG_MDI1   0x00000200      /* MDIO_1 present or read-bit   */
-#define MIF_CFG_PPADDR 0x00007c00      /* Xcvr poll PHY address        */
-
-/* MIF Frame/Output Register.  This 32-bit register allows the host to
- * communicate with a transceiver in frame mode (as opposed to big-bang
- * mode).  Writes by the host specify an instrution.  After being issued
- * the host must poll this register for completion.  Also, after
- * completion this register holds the data returned by the transceiver
- * if applicable.
- */
-#define MIF_FRAME_ST   0xc0000000      /* STart of frame               */
-#define MIF_FRAME_OP   0x30000000      /* OPcode                       */
-#define MIF_FRAME_PHYAD        0x0f800000      /* PHY ADdress                  */
-#define MIF_FRAME_REGAD        0x007c0000      /* REGister ADdress             */
-#define MIF_FRAME_TAMSB        0x00020000      /* Turn Around MSB              */
-#define MIF_FRAME_TALSB        0x00010000      /* Turn Around LSB              */
-#define MIF_FRAME_DATA 0x0000ffff      /* Instruction Payload          */
-
-/* MIF Status Register.  This register reports status when the MIF is
- * operating in the poll mode.  The poll status field is auto-clearing
- * on read.
- */
-#define MIF_STATUS_DATA        0xffff0000      /* Live image of XCVR reg       */
-#define MIF_STATUS_STAT        0x0000ffff      /* Which bits have changed      */
-
-/* MIF Mask Register.  This 16-bit register is used when in poll mode
- * to say which bits of the polled register will cause an interrupt
- * when changed.
- */
-
-/* PCS/Serialink Registers */
-#define PCS_MIICTRL    0x9000UL        /* PCS MII Control Register     */
-#define PCS_MIISTAT    0x9004UL        /* PCS MII Status Register      */
-#define PCS_MIIADV     0x9008UL        /* PCS MII Advertisement Reg    */
-#define PCS_MIILP      0x900CUL        /* PCS MII Link Partner Ability */
-#define PCS_CFG                0x9010UL        /* PCS Configuration Register   */
-#define PCS_SMACHINE   0x9014UL        /* PCS State Machine Register   */
-#define PCS_ISTAT      0x9018UL        /* PCS Interrupt Status Reg     */
-#define PCS_DMODE      0x9050UL        /* Datapath Mode Register       */
-#define PCS_SCTRL      0x9054UL        /* Serialink Control Register   */
-#define PCS_SOS                0x9058UL        /* Shared Output Select Reg     */
-#define PCS_SSTATE     0x905CUL        /* Serialink State Register     */
-
-/* PCD MII Control Register. */
-#define PCS_MIICTRL_SPD        0x00000040      /* Read as one, writes ignored  */
-#define PCS_MIICTRL_CT 0x00000080      /* Force COL signal active      */
-#define PCS_MIICTRL_DM 0x00000100      /* Duplex mode, forced low      */
-#define PCS_MIICTRL_RAN        0x00000200      /* Restart auto-neg, self clear */
-#define PCS_MIICTRL_ISO        0x00000400      /* Read as zero, writes ignored */
-#define PCS_MIICTRL_PD 0x00000800      /* Read as zero, writes ignored */
-#define PCS_MIICTRL_ANE        0x00001000      /* Auto-neg enable              */
-#define PCS_MIICTRL_SS 0x00002000      /* Read as zero, writes ignored */
-#define PCS_MIICTRL_WB 0x00004000      /* Wrapback, loopback at 10-bit
-                                        * input side of Serialink
-                                        */
-#define PCS_MIICTRL_RST        0x00008000      /* Resets PCS, self clearing    */
-
-/* PCS MII Status Register. */
-#define PCS_MIISTAT_EC 0x00000001      /* Ext Capability: Read as zero */
-#define PCS_MIISTAT_JD 0x00000002      /* Jabber Detect: Read as zero  */
-#define PCS_MIISTAT_LS 0x00000004      /* Link Status: 1=up 0=down     */
-#define PCS_MIISTAT_ANA        0x00000008      /* Auto-neg Ability, always 1   */
-#define PCS_MIISTAT_RF 0x00000010      /* Remote Fault                 */
-#define PCS_MIISTAT_ANC        0x00000020      /* Auto-neg complete            */
-#define PCS_MIISTAT_ES 0x00000100      /* Extended Status, always 1    */
-
-/* PCS MII Advertisement Register. */
-#define PCS_MIIADV_FD  0x00000020      /* Advertise Full Duplex        */
-#define PCS_MIIADV_HD  0x00000040      /* Advertise Half Duplex        */
-#define PCS_MIIADV_SP  0x00000080      /* Advertise Symmetric Pause    */
-#define PCS_MIIADV_AP  0x00000100      /* Advertise Asymmetric Pause   */
-#define PCS_MIIADV_RF  0x00003000      /* Remote Fault                 */
-#define PCS_MIIADV_ACK 0x00004000      /* Read-only                    */
-#define PCS_MIIADV_NP  0x00008000      /* Next-page, forced low        */
-
-/* PCS MII Link Partner Ability Register.   This register is equivalent
- * to the Link Partnet Ability Register of the standard MII register set.
- * It's layout corresponds to the PCS MII Advertisement Register.
- */
-
-/* PCS Configuration Register. */
-#define PCS_CFG_ENABLE 0x00000001      /* Must be zero while changing
-                                        * PCS MII advertisement reg.
-                                        */
-#define PCS_CFG_SDO    0x00000002      /* Signal detect override       */
-#define PCS_CFG_SDL    0x00000004      /* Signal detect active low     */
-#define PCS_CFG_JS     0x00000018      /* Jitter-study:
-                                        * 0 = normal operation
-                                        * 1 = high-frequency test pattern
-                                        * 2 = low-frequency test pattern
-                                        * 3 = reserved
-                                        */
-#define PCS_CFG_TO     0x00000020      /* 10ms auto-neg timer override */
-
-/* PCS Interrupt Status Register.  This register is self-clearing
- * when read.
- */
-#define PCS_ISTAT_LSC  0x00000004      /* Link Status Change           */
-
-/* Datapath Mode Register. */
-#define PCS_DMODE_SM   0x00000001      /* 1 = use internal Serialink   */
-#define PCS_DMODE_ESM  0x00000002      /* External SERDES mode         */
-#define PCS_DMODE_MGM  0x00000004      /* MII/GMII mode                */
-#define PCS_DMODE_GMOE 0x00000008      /* GMII Output Enable           */
-
-/* Serialink Control Register.
- *
- * NOTE: When in SERDES mode, the loopback bit has inverse logic.
- */
-#define PCS_SCTRL_LOOP 0x00000001      /* Loopback enable              */
-#define PCS_SCTRL_ESCD 0x00000002      /* Enable sync char detection   */
-#define PCS_SCTRL_LOCK 0x00000004      /* Lock to reference clock      */
-#define PCS_SCTRL_EMP  0x00000018      /* Output driver emphasis       */
-#define PCS_SCTRL_STEST        0x000001c0      /* Self test patterns           */
-#define PCS_SCTRL_PDWN 0x00000200      /* Software power-down          */
-#define PCS_SCTRL_RXZ  0x00000c00      /* PLL input to Serialink       */
-#define PCS_SCTRL_RXP  0x00003000      /* PLL input to Serialink       */
-#define PCS_SCTRL_TXZ  0x0000c000      /* PLL input to Serialink       */
-#define PCS_SCTRL_TXP  0x00030000      /* PLL input to Serialink       */
-
-/* Shared Output Select Register.  For test and debug, allows multiplexing
- * test outputs into the PROM address pins.  Set to zero for normal
- * operation.
- */
-#define PCS_SOS_PADDR  0x00000003      /* PROM Address                 */
-
-/* PROM Image Space */
-#define PROM_START     0x100000UL      /* Expansion ROM run time access*/
-#define PROM_SIZE      0x0fffffUL      /* Size of ROM                  */
-#define PROM_END       0x200000UL      /* End of ROM                   */
-
-/* MII definitions missing from mii.h */
-
-#define BMCR_SPD2      0x0040          /* Gigabit enable? (bcm5411)    */
-#define LPA_PAUSE      0x0400
-
-/* More PHY registers (specific to Broadcom models) */
-
-/* MII BCM5201 MULTIPHY interrupt register */
-#define MII_BCM5201_INTERRUPT                  0x1A
-#define MII_BCM5201_INTERRUPT_INTENABLE                0x4000
-
-#define MII_BCM5201_AUXMODE2                   0x1B
-#define MII_BCM5201_AUXMODE2_LOWPOWER          0x0008
-
-#define MII_BCM5201_MULTIPHY                    0x1E
-
-/* MII BCM5201 MULTIPHY register bits */
-#define MII_BCM5201_MULTIPHY_SERIALMODE         0x0002
-#define MII_BCM5201_MULTIPHY_SUPERISOLATE       0x0008
-
-/* MII BCM5400 1000-BASET Control register */
-#define MII_BCM5400_GB_CONTROL                 0x09
-#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP   0x0200
-
-/* MII BCM5400 AUXCONTROL register */
-#define MII_BCM5400_AUXCONTROL                  0x18
-#define MII_BCM5400_AUXCONTROL_PWR10BASET       0x0004
-
-/* MII BCM5400 AUXSTATUS register */
-#define MII_BCM5400_AUXSTATUS                   0x19
-#define MII_BCM5400_AUXSTATUS_LINKMODE_MASK     0x0700
-#define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT    8
-
-/* When it can, GEM internally caches 4 aligned TX descriptors
- * at a time, so that it can use full cacheline DMA reads.
- *
- * Note that unlike HME, there is no ownership bit in the descriptor
- * control word.  The same functionality is obtained via the TX-Kick
- * and TX-Complete registers.  As a result, GEM need not write back
- * updated values to the TX descriptor ring, it only performs reads.
- *
- * Since TX descriptors are never modified by GEM, the driver can
- * use the buffer DMA address as a place to keep track of allocated
- * DMA mappings for a transmitted packet.
- */
-struct gem_txd {
-       __le64  control_word;
-       __le64  buffer;
-};
-
-#define TXDCTRL_BUFSZ  0x0000000000007fffULL   /* Buffer Size          */
-#define TXDCTRL_CSTART 0x00000000001f8000ULL   /* CSUM Start Offset    */
-#define TXDCTRL_COFF   0x000000001fe00000ULL   /* CSUM Stuff Offset    */
-#define TXDCTRL_CENAB  0x0000000020000000ULL   /* CSUM Enable          */
-#define TXDCTRL_EOF    0x0000000040000000ULL   /* End of Frame         */
-#define TXDCTRL_SOF    0x0000000080000000ULL   /* Start of Frame       */
-#define TXDCTRL_INTME  0x0000000100000000ULL   /* "Interrupt Me"       */
-#define TXDCTRL_NOCRC  0x0000000200000000ULL   /* No CRC Present       */
-
-/* GEM requires that RX descriptors are provided four at a time,
- * aligned.  Also, the RX ring may not wrap around.  This means that
- * there will be at least 4 unused descriptor entries in the middle
- * of the RX ring at all times.
- *
- * Similar to HME, GEM assumes that it can write garbage bytes before
- * the beginning of the buffer and right after the end in order to DMA
- * whole cachelines.
- *
- * Unlike for TX, GEM does update the status word in the RX descriptors
- * when packets arrive.  Therefore an ownership bit does exist in the
- * RX descriptors.  It is advisory, GEM clears it but does not check
- * it in any way.  So when buffers are posted to the RX ring (via the
- * RX Kick register) by the driver it must make sure the buffers are
- * truly ready and that the ownership bits are set properly.
- *
- * Even though GEM modifies the RX descriptors, it guarantees that the
- * buffer DMA address field will stay the same when it performs these
- * updates.  Therefore it can be used to keep track of DMA mappings
- * by the host driver just as in the TX descriptor case above.
- */
-struct gem_rxd {
-       __le64  status_word;
-       __le64  buffer;
-};
-
-#define RXDCTRL_TCPCSUM        0x000000000000ffffULL   /* TCP Pseudo-CSUM      */
-#define RXDCTRL_BUFSZ  0x000000007fff0000ULL   /* Buffer Size          */
-#define RXDCTRL_OWN    0x0000000080000000ULL   /* GEM owns this entry  */
-#define RXDCTRL_HASHVAL        0x0ffff00000000000ULL   /* Hash Value           */
-#define RXDCTRL_HPASS  0x1000000000000000ULL   /* Passed Hash Filter   */
-#define RXDCTRL_ALTMAC 0x2000000000000000ULL   /* Matched ALT MAC      */
-#define RXDCTRL_BAD    0x4000000000000000ULL   /* Frame has bad CRC    */
-
-#define RXDCTRL_FRESH(gp)      \
-       ((((RX_BUF_ALLOC_SIZE(gp) - RX_OFFSET) << 16) & RXDCTRL_BUFSZ) | \
-        RXDCTRL_OWN)
-
-#define TX_RING_SIZE 128
-#define RX_RING_SIZE 128
-
-#if TX_RING_SIZE == 32
-#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_32
-#elif TX_RING_SIZE == 64
-#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_64
-#elif TX_RING_SIZE == 128
-#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_128
-#elif TX_RING_SIZE == 256
-#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_256
-#elif TX_RING_SIZE == 512
-#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_512
-#elif TX_RING_SIZE == 1024
-#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_1K
-#elif TX_RING_SIZE == 2048
-#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_2K
-#elif TX_RING_SIZE == 4096
-#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_4K
-#elif TX_RING_SIZE == 8192
-#define TXDMA_CFG_BASE TXDMA_CFG_RINGSZ_8K
-#else
-#error TX_RING_SIZE value is illegal...
-#endif
-
-#if RX_RING_SIZE == 32
-#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_32
-#elif RX_RING_SIZE == 64
-#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_64
-#elif RX_RING_SIZE == 128
-#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_128
-#elif RX_RING_SIZE == 256
-#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_256
-#elif RX_RING_SIZE == 512
-#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_512
-#elif RX_RING_SIZE == 1024
-#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_1K
-#elif RX_RING_SIZE == 2048
-#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_2K
-#elif RX_RING_SIZE == 4096
-#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_4K
-#elif RX_RING_SIZE == 8192
-#define RXDMA_CFG_BASE RXDMA_CFG_RINGSZ_8K
-#else
-#error RX_RING_SIZE is illegal...
-#endif
-
-#define NEXT_TX(N)     (((N) + 1) & (TX_RING_SIZE - 1))
-#define NEXT_RX(N)     (((N) + 1) & (RX_RING_SIZE - 1))
-
-#define TX_BUFFS_AVAIL(GP)                                     \
-       (((GP)->tx_old <= (GP)->tx_new) ?                       \
-         (GP)->tx_old + (TX_RING_SIZE - 1) - (GP)->tx_new :    \
-         (GP)->tx_old - (GP)->tx_new - 1)
-
-#define RX_OFFSET          2
-#define RX_BUF_ALLOC_SIZE(gp)  ((gp)->rx_buf_sz + 28 + RX_OFFSET + 64)
-
-#define RX_COPY_THRESHOLD  256
-
-#if TX_RING_SIZE < 128
-#define INIT_BLOCK_TX_RING_SIZE                128
-#else
-#define INIT_BLOCK_TX_RING_SIZE                TX_RING_SIZE
-#endif
-
-#if RX_RING_SIZE < 128
-#define INIT_BLOCK_RX_RING_SIZE                128
-#else
-#define INIT_BLOCK_RX_RING_SIZE                RX_RING_SIZE
-#endif
-
-struct gem_init_block {
-       struct gem_txd  txd[INIT_BLOCK_TX_RING_SIZE];
-       struct gem_rxd  rxd[INIT_BLOCK_RX_RING_SIZE];
-};
-
-enum gem_phy_type {
-       phy_mii_mdio0,
-       phy_mii_mdio1,
-       phy_serialink,
-       phy_serdes,
-};
-
-enum link_state {
-       link_down = 0,  /* No link, will retry */
-       link_aneg,      /* Autoneg in progress */
-       link_force_try, /* Try Forced link speed */
-       link_force_ret, /* Forced mode worked, retrying autoneg */
-       link_force_ok,  /* Stay in forced mode */
-       link_up         /* Link is up */
-};
-
-struct gem {
-       void __iomem            *regs;
-       int                     rx_new, rx_old;
-       int                     tx_new, tx_old;
-
-       unsigned int has_wol : 1;       /* chip supports wake-on-lan */
-       unsigned int asleep_wol : 1;    /* was asleep with WOL enabled */
-
-       int                     cell_enabled;
-       u32                     msg_enable;
-       u32                     status;
-
-       struct napi_struct      napi;
-
-       int                     tx_fifo_sz;
-       int                     rx_fifo_sz;
-       int                     rx_pause_off;
-       int                     rx_pause_on;
-       int                     rx_buf_sz;
-       u64                     pause_entered;
-       u16                     pause_last_time_recvd;
-       u32                     mac_rx_cfg;
-       u32                     swrst_base;
-
-       int                     want_autoneg;
-       int                     last_forced_speed;
-       enum link_state         lstate;
-       struct timer_list       link_timer;
-       int                     timer_ticks;
-       int                     wake_on_lan;
-       struct work_struct      reset_task;
-       volatile int            reset_task_pending;
-
-       enum gem_phy_type       phy_type;
-       struct mii_phy          phy_mii;
-       int                     mii_phy_addr;
-
-       struct gem_init_block   *init_block;
-       struct sk_buff          *rx_skbs[RX_RING_SIZE];
-       struct sk_buff          *tx_skbs[TX_RING_SIZE];
-       dma_addr_t              gblock_dvma;
-
-       struct pci_dev          *pdev;
-       struct net_device       *dev;
-#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
-       struct device_node      *of_node;
-#endif
-};
-
-#define found_mii_phy(gp) ((gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) && \
-                          gp->phy_mii.def && gp->phy_mii.def->ops)
-
-#endif /* _SUNGEM_H */
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
deleted file mode 100644 (file)
index d16880d..0000000
+++ /dev/null
@@ -1,1200 +0,0 @@
-/*
- * PHY drivers for the sungem ethernet driver.
- *
- * This file could be shared with other drivers.
- *
- * (c) 2002-2007, Benjamin Herrenscmidt (benh@kernel.crashing.org)
- *
- * TODO:
- *  - Add support for PHYs that provide an IRQ line
- *  - Eventually moved the entire polling state machine in
- *    there (out of the eth driver), so that it can easily be
- *    skipped on PHYs that implement it in hardware.
- *  - On LXT971 & BCM5201, Apple uses some chip specific regs
- *    to read the link status. Figure out why and if it makes
- *    sense to do the same (magic aneg ?)
- *  - Apple has some additional power management code for some
- *    Broadcom PHYs that they "hide" from the OpenSource version
- *    of darwin, still need to reverse engineer that
- */
-
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-
-#ifdef CONFIG_PPC_PMAC
-#include <asm/prom.h>
-#endif
-
-#include "sungem_phy.h"
-
-/* Link modes of the BCM5400 PHY */
-static const int phy_BCM5400_link_table[8][3] = {
-       { 0, 0, 0 },    /* No link */
-       { 0, 0, 0 },    /* 10BT Half Duplex */
-       { 1, 0, 0 },    /* 10BT Full Duplex */
-       { 0, 1, 0 },    /* 100BT Half Duplex */
-       { 0, 1, 0 },    /* 100BT Half Duplex */
-       { 1, 1, 0 },    /* 100BT Full Duplex*/
-       { 1, 0, 1 },    /* 1000BT */
-       { 1, 0, 1 },    /* 1000BT */
-};
-
-static inline int __phy_read(struct mii_phy* phy, int id, int reg)
-{
-       return phy->mdio_read(phy->dev, id, reg);
-}
-
-static inline void __phy_write(struct mii_phy* phy, int id, int reg, int val)
-{
-       phy->mdio_write(phy->dev, id, reg, val);
-}
-
-static inline int phy_read(struct mii_phy* phy, int reg)
-{
-       return phy->mdio_read(phy->dev, phy->mii_id, reg);
-}
-
-static inline void phy_write(struct mii_phy* phy, int reg, int val)
-{
-       phy->mdio_write(phy->dev, phy->mii_id, reg, val);
-}
-
-static int reset_one_mii_phy(struct mii_phy* phy, int phy_id)
-{
-       u16 val;
-       int limit = 10000;
-
-       val = __phy_read(phy, phy_id, MII_BMCR);
-       val &= ~(BMCR_ISOLATE | BMCR_PDOWN);
-       val |= BMCR_RESET;
-       __phy_write(phy, phy_id, MII_BMCR, val);
-
-       udelay(100);
-
-       while (--limit) {
-               val = __phy_read(phy, phy_id, MII_BMCR);
-               if ((val & BMCR_RESET) == 0)
-                       break;
-               udelay(10);
-       }
-       if ((val & BMCR_ISOLATE) && limit > 0)
-               __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE);
-
-       return limit <= 0;
-}
-
-static int bcm5201_init(struct mii_phy* phy)
-{
-       u16 data;
-
-       data = phy_read(phy, MII_BCM5201_MULTIPHY);
-       data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE;
-       phy_write(phy, MII_BCM5201_MULTIPHY, data);
-
-       phy_write(phy, MII_BCM5201_INTERRUPT, 0);
-
-       return 0;
-}
-
-static int bcm5201_suspend(struct mii_phy* phy)
-{
-       phy_write(phy, MII_BCM5201_INTERRUPT, 0);
-       phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE);
-
-       return 0;
-}
-
-static int bcm5221_init(struct mii_phy* phy)
-{
-       u16 data;
-
-       data = phy_read(phy, MII_BCM5221_TEST);
-       phy_write(phy, MII_BCM5221_TEST,
-               data | MII_BCM5221_TEST_ENABLE_SHADOWS);
-
-       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
-       phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
-               data | MII_BCM5221_SHDOW_AUX_STAT2_APD);
-
-       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
-       phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
-               data | MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR);
-
-       data = phy_read(phy, MII_BCM5221_TEST);
-       phy_write(phy, MII_BCM5221_TEST,
-               data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);
-
-       return 0;
-}
-
-static int bcm5221_suspend(struct mii_phy* phy)
-{
-       u16 data;
-
-       data = phy_read(phy, MII_BCM5221_TEST);
-       phy_write(phy, MII_BCM5221_TEST,
-               data | MII_BCM5221_TEST_ENABLE_SHADOWS);
-
-       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
-       phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
-                 data | MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE);
-
-       return 0;
-}
-
-static int bcm5241_init(struct mii_phy* phy)
-{
-       u16 data;
-
-       data = phy_read(phy, MII_BCM5221_TEST);
-       phy_write(phy, MII_BCM5221_TEST,
-               data | MII_BCM5221_TEST_ENABLE_SHADOWS);
-
-       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
-       phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
-               data | MII_BCM5221_SHDOW_AUX_STAT2_APD);
-
-       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
-       phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
-               data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);
-
-       data = phy_read(phy, MII_BCM5221_TEST);
-       phy_write(phy, MII_BCM5221_TEST,
-               data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);
-
-       return 0;
-}
-
-static int bcm5241_suspend(struct mii_phy* phy)
-{
-       u16 data;
-
-       data = phy_read(phy, MII_BCM5221_TEST);
-       phy_write(phy, MII_BCM5221_TEST,
-               data | MII_BCM5221_TEST_ENABLE_SHADOWS);
-
-       data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
-       phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
-                 data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);
-
-       return 0;
-}
-
-static int bcm5400_init(struct mii_phy* phy)
-{
-       u16 data;
-
-       /* Configure for gigabit full duplex */
-       data = phy_read(phy, MII_BCM5400_AUXCONTROL);
-       data |= MII_BCM5400_AUXCONTROL_PWR10BASET;
-       phy_write(phy, MII_BCM5400_AUXCONTROL, data);
-
-       data = phy_read(phy, MII_BCM5400_GB_CONTROL);
-       data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
-       phy_write(phy, MII_BCM5400_GB_CONTROL, data);
-
-       udelay(100);
-
-       /* Reset and configure cascaded 10/100 PHY */
-       (void)reset_one_mii_phy(phy, 0x1f);
-
-       data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY);
-       data |= MII_BCM5201_MULTIPHY_SERIALMODE;
-       __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data);
-
-       data = phy_read(phy, MII_BCM5400_AUXCONTROL);
-       data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET;
-       phy_write(phy, MII_BCM5400_AUXCONTROL, data);
-
-       return 0;
-}
-
-static int bcm5400_suspend(struct mii_phy* phy)
-{
-#if 0 /* Commented out in Darwin... someone has those dawn docs ? */
-       phy_write(phy, MII_BMCR, BMCR_PDOWN);
-#endif
-       return 0;
-}
-
-static int bcm5401_init(struct mii_phy* phy)
-{
-       u16 data;
-       int rev;
-
-       rev = phy_read(phy, MII_PHYSID2) & 0x000f;
-       if (rev == 0 || rev == 3) {
-               /* Some revisions of 5401 appear to need this
-                * initialisation sequence to disable, according
-                * to OF, "tap power management"
-                *
-                * WARNING ! OF and Darwin don't agree on the
-                * register addresses. OF seem to interpret the
-                * register numbers below as decimal
-                *
-                * Note: This should (and does) match tg3_init_5401phy_dsp
-                *       in the tg3.c driver. -DaveM
-                */
-               phy_write(phy, 0x18, 0x0c20);
-               phy_write(phy, 0x17, 0x0012);
-               phy_write(phy, 0x15, 0x1804);
-               phy_write(phy, 0x17, 0x0013);
-               phy_write(phy, 0x15, 0x1204);
-               phy_write(phy, 0x17, 0x8006);
-               phy_write(phy, 0x15, 0x0132);
-               phy_write(phy, 0x17, 0x8006);
-               phy_write(phy, 0x15, 0x0232);
-               phy_write(phy, 0x17, 0x201f);
-               phy_write(phy, 0x15, 0x0a20);
-       }
-
-       /* Configure for gigabit full duplex */
-       data = phy_read(phy, MII_BCM5400_GB_CONTROL);
-       data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
-       phy_write(phy, MII_BCM5400_GB_CONTROL, data);
-
-       udelay(10);
-
-       /* Reset and configure cascaded 10/100 PHY */
-       (void)reset_one_mii_phy(phy, 0x1f);
-
-       data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY);
-       data |= MII_BCM5201_MULTIPHY_SERIALMODE;
-       __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data);
-
-       return 0;
-}
-
-static int bcm5401_suspend(struct mii_phy* phy)
-{
-#if 0 /* Commented out in Darwin... someone has those dawn docs ? */
-       phy_write(phy, MII_BMCR, BMCR_PDOWN);
-#endif
-       return 0;
-}
-
-static int bcm5411_init(struct mii_phy* phy)
-{
-       u16 data;
-
-       /* Here's some more Apple black magic to setup
-        * some voltage stuffs.
-        */
-       phy_write(phy, 0x1c, 0x8c23);
-       phy_write(phy, 0x1c, 0x8ca3);
-       phy_write(phy, 0x1c, 0x8c23);
-
-       /* Here, Apple seems to want to reset it, do
-        * it as well
-        */
-       phy_write(phy, MII_BMCR, BMCR_RESET);
-       phy_write(phy, MII_BMCR, 0x1340);
-
-       data = phy_read(phy, MII_BCM5400_GB_CONTROL);
-       data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP;
-       phy_write(phy, MII_BCM5400_GB_CONTROL, data);
-
-       udelay(10);
-
-       /* Reset and configure cascaded 10/100 PHY */
-       (void)reset_one_mii_phy(phy, 0x1f);
-
-       return 0;
-}
-
-static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
-{
-       u16 ctl, adv;
-
-       phy->autoneg = 1;
-       phy->speed = SPEED_10;
-       phy->duplex = DUPLEX_HALF;
-       phy->pause = 0;
-       phy->advertising = advertise;
-
-       /* Setup standard advertise */
-       adv = phy_read(phy, MII_ADVERTISE);
-       adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
-       if (advertise & ADVERTISED_10baseT_Half)
-               adv |= ADVERTISE_10HALF;
-       if (advertise & ADVERTISED_10baseT_Full)
-               adv |= ADVERTISE_10FULL;
-       if (advertise & ADVERTISED_100baseT_Half)
-               adv |= ADVERTISE_100HALF;
-       if (advertise & ADVERTISED_100baseT_Full)
-               adv |= ADVERTISE_100FULL;
-       phy_write(phy, MII_ADVERTISE, adv);
-
-       /* Start/Restart aneg */
-       ctl = phy_read(phy, MII_BMCR);
-       ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
-       phy_write(phy, MII_BMCR, ctl);
-
-       return 0;
-}
-
-static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
-{
-       u16 ctl;
-
-       phy->autoneg = 0;
-       phy->speed = speed;
-       phy->duplex = fd;
-       phy->pause = 0;
-
-       ctl = phy_read(phy, MII_BMCR);
-       ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE);
-
-       /* First reset the PHY */
-       phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
-
-       /* Select speed & duplex */
-       switch(speed) {
-       case SPEED_10:
-               break;
-       case SPEED_100:
-               ctl |= BMCR_SPEED100;
-               break;
-       case SPEED_1000:
-       default:
-               return -EINVAL;
-       }
-       if (fd == DUPLEX_FULL)
-               ctl |= BMCR_FULLDPLX;
-       phy_write(phy, MII_BMCR, ctl);
-
-       return 0;
-}
-
-static int genmii_poll_link(struct mii_phy *phy)
-{
-       u16 status;
-
-       (void)phy_read(phy, MII_BMSR);
-       status = phy_read(phy, MII_BMSR);
-       if ((status & BMSR_LSTATUS) == 0)
-               return 0;
-       if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
-               return 0;
-       return 1;
-}
-
-static int genmii_read_link(struct mii_phy *phy)
-{
-       u16 lpa;
-
-       if (phy->autoneg) {
-               lpa = phy_read(phy, MII_LPA);
-
-               if (lpa & (LPA_10FULL | LPA_100FULL))
-                       phy->duplex = DUPLEX_FULL;
-               else
-                       phy->duplex = DUPLEX_HALF;
-               if (lpa & (LPA_100FULL | LPA_100HALF))
-                       phy->speed = SPEED_100;
-               else
-                       phy->speed = SPEED_10;
-               phy->pause = 0;
-       }
-       /* On non-aneg, we assume what we put in BMCR is the speed,
-        * though magic-aneg shouldn't prevent this case from occurring
-        */
-
-        return 0;
-}
-
-static int generic_suspend(struct mii_phy* phy)
-{
-       phy_write(phy, MII_BMCR, BMCR_PDOWN);
-
-       return 0;
-}
-
-static int bcm5421_init(struct mii_phy* phy)
-{
-       u16 data;
-       unsigned int id;
-
-       id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2));
-
-       /* Revision 0 of 5421 needs some fixups */
-       if (id == 0x002060e0) {
-               /* This is borrowed from MacOS
-                */
-               phy_write(phy, 0x18, 0x1007);
-               data = phy_read(phy, 0x18);
-               phy_write(phy, 0x18, data | 0x0400);
-               phy_write(phy, 0x18, 0x0007);
-               data = phy_read(phy, 0x18);
-               phy_write(phy, 0x18, data | 0x0800);
-               phy_write(phy, 0x17, 0x000a);
-               data = phy_read(phy, 0x15);
-               phy_write(phy, 0x15, data | 0x0200);
-       }
-
-       /* Pick up some init code from OF for K2 version */
-       if ((id & 0xfffffff0) == 0x002062e0) {
-               phy_write(phy, 4, 0x01e1);
-               phy_write(phy, 9, 0x0300);
-       }
-
-       /* Check if we can enable automatic low power */
-#ifdef CONFIG_PPC_PMAC
-       if (phy->platform_data) {
-               struct device_node *np = of_get_parent(phy->platform_data);
-               int can_low_power = 1;
-               if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
-                       can_low_power = 0;
-               if (can_low_power) {
-                       /* Enable automatic low-power */
-                       phy_write(phy, 0x1c, 0x9002);
-                       phy_write(phy, 0x1c, 0xa821);
-                       phy_write(phy, 0x1c, 0x941d);
-               }
-       }
-#endif /* CONFIG_PPC_PMAC */
-
-       return 0;
-}
-
-static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
-{
-       u16 ctl, adv;
-
-       phy->autoneg = 1;
-       phy->speed = SPEED_10;
-       phy->duplex = DUPLEX_HALF;
-       phy->pause = 0;
-       phy->advertising = advertise;
-
-       /* Setup standard advertise */
-       adv = phy_read(phy, MII_ADVERTISE);
-       adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
-       if (advertise & ADVERTISED_10baseT_Half)
-               adv |= ADVERTISE_10HALF;
-       if (advertise & ADVERTISED_10baseT_Full)
-               adv |= ADVERTISE_10FULL;
-       if (advertise & ADVERTISED_100baseT_Half)
-               adv |= ADVERTISE_100HALF;
-       if (advertise & ADVERTISED_100baseT_Full)
-               adv |= ADVERTISE_100FULL;
-       if (advertise & ADVERTISED_Pause)
-               adv |= ADVERTISE_PAUSE_CAP;
-       if (advertise & ADVERTISED_Asym_Pause)
-               adv |= ADVERTISE_PAUSE_ASYM;
-       phy_write(phy, MII_ADVERTISE, adv);
-
-       /* Setup 1000BT advertise */
-       adv = phy_read(phy, MII_1000BASETCONTROL);
-       adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP|MII_1000BASETCONTROL_HALFDUPLEXCAP);
-       if (advertise & SUPPORTED_1000baseT_Half)
-               adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
-       if (advertise & SUPPORTED_1000baseT_Full)
-               adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
-       phy_write(phy, MII_1000BASETCONTROL, adv);
-
-       /* Start/Restart aneg */
-       ctl = phy_read(phy, MII_BMCR);
-       ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
-       phy_write(phy, MII_BMCR, ctl);
-
-       return 0;
-}
-
-static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd)
-{
-       u16 ctl;
-
-       phy->autoneg = 0;
-       phy->speed = speed;
-       phy->duplex = fd;
-       phy->pause = 0;
-
-       ctl = phy_read(phy, MII_BMCR);
-       ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE);
-
-       /* First reset the PHY */
-       phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
-
-       /* Select speed & duplex */
-       switch(speed) {
-       case SPEED_10:
-               break;
-       case SPEED_100:
-               ctl |= BMCR_SPEED100;
-               break;
-       case SPEED_1000:
-               ctl |= BMCR_SPD2;
-       }
-       if (fd == DUPLEX_FULL)
-               ctl |= BMCR_FULLDPLX;
-
-       // XXX Should we set the sungem to GII now on 1000BT ?
-
-       phy_write(phy, MII_BMCR, ctl);
-
-       return 0;
-}
-
-static int bcm54xx_read_link(struct mii_phy *phy)
-{
-       int link_mode;
-       u16 val;
-
-       if (phy->autoneg) {
-               val = phy_read(phy, MII_BCM5400_AUXSTATUS);
-               link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >>
-                            MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT);
-               phy->duplex = phy_BCM5400_link_table[link_mode][0] ?
-                       DUPLEX_FULL : DUPLEX_HALF;
-               phy->speed = phy_BCM5400_link_table[link_mode][2] ?
-                               SPEED_1000 :
-                               (phy_BCM5400_link_table[link_mode][1] ?
-                                SPEED_100 : SPEED_10);
-               val = phy_read(phy, MII_LPA);
-               phy->pause = (phy->duplex == DUPLEX_FULL) &&
-                       ((val & LPA_PAUSE) != 0);
-       }
-       /* On non-aneg, we assume what we put in BMCR is the speed,
-        * though magic-aneg shouldn't prevent this case from occurring
-        */
-
-       return 0;
-}
-
-static int marvell88e1111_init(struct mii_phy* phy)
-{
-       u16 rev;
-
-       /* magic init sequence for rev 0 */
-       rev = phy_read(phy, MII_PHYSID2) & 0x000f;
-       if (rev == 0) {
-               phy_write(phy, 0x1d, 0x000a);
-               phy_write(phy, 0x1e, 0x0821);
-
-               phy_write(phy, 0x1d, 0x0006);
-               phy_write(phy, 0x1e, 0x8600);
-
-               phy_write(phy, 0x1d, 0x000b);
-               phy_write(phy, 0x1e, 0x0100);
-
-               phy_write(phy, 0x1d, 0x0004);
-               phy_write(phy, 0x1e, 0x4850);
-       }
-       return 0;
-}
-
-#define BCM5421_MODE_MASK      (1 << 5)
-
-static int bcm5421_poll_link(struct mii_phy* phy)
-{
-       u32 phy_reg;
-       int mode;
-
-       /* find out in what mode we are */
-       phy_write(phy, MII_NCONFIG, 0x1000);
-       phy_reg = phy_read(phy, MII_NCONFIG);
-
-       mode = (phy_reg & BCM5421_MODE_MASK) >> 5;
-
-       if ( mode == BCM54XX_COPPER)
-               return genmii_poll_link(phy);
-
-       /* try to find out wether we have a link */
-       phy_write(phy, MII_NCONFIG, 0x2000);
-       phy_reg = phy_read(phy, MII_NCONFIG);
-
-       if (phy_reg & 0x0020)
-               return 0;
-       else
-               return 1;
-}
-
-static int bcm5421_read_link(struct mii_phy* phy)
-{
-       u32 phy_reg;
-       int mode;
-
-       /* find out in what mode we are */
-       phy_write(phy, MII_NCONFIG, 0x1000);
-       phy_reg = phy_read(phy, MII_NCONFIG);
-
-       mode = (phy_reg & BCM5421_MODE_MASK ) >> 5;
-
-       if ( mode == BCM54XX_COPPER)
-               return bcm54xx_read_link(phy);
-
-       phy->speed = SPEED_1000;
-
-       /* find out wether we are running half- or full duplex */
-       phy_write(phy, MII_NCONFIG, 0x2000);
-       phy_reg = phy_read(phy, MII_NCONFIG);
-
-       if ( (phy_reg & 0x0080) >> 7)
-               phy->duplex |=  DUPLEX_HALF;
-       else
-               phy->duplex |=  DUPLEX_FULL;
-
-       return 0;
-}
-
-static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg)
-{
-       /* enable fiber mode */
-       phy_write(phy, MII_NCONFIG, 0x9020);
-       /* LEDs active in both modes, autosense prio = fiber */
-       phy_write(phy, MII_NCONFIG, 0x945f);
-
-       if (!autoneg) {
-               /* switch off fibre autoneg */
-               phy_write(phy, MII_NCONFIG, 0xfc01);
-               phy_write(phy, 0x0b, 0x0004);
-       }
-
-       phy->autoneg = autoneg;
-
-       return 0;
-}
-
-#define BCM5461_FIBER_LINK     (1 << 2)
-#define BCM5461_MODE_MASK      (3 << 1)
-
-static int bcm5461_poll_link(struct mii_phy* phy)
-{
-       u32 phy_reg;
-       int mode;
-
-       /* find out in what mode we are */
-       phy_write(phy, MII_NCONFIG, 0x7c00);
-       phy_reg = phy_read(phy, MII_NCONFIG);
-
-       mode = (phy_reg & BCM5461_MODE_MASK ) >> 1;
-
-       if ( mode == BCM54XX_COPPER)
-               return genmii_poll_link(phy);
-
-       /* find out wether we have a link */
-       phy_write(phy, MII_NCONFIG, 0x7000);
-       phy_reg = phy_read(phy, MII_NCONFIG);
-
-       if (phy_reg & BCM5461_FIBER_LINK)
-               return 1;
-       else
-               return 0;
-}
-
-#define BCM5461_FIBER_DUPLEX   (1 << 3)
-
-static int bcm5461_read_link(struct mii_phy* phy)
-{
-       u32 phy_reg;
-       int mode;
-
-       /* find out in what mode we are */
-       phy_write(phy, MII_NCONFIG, 0x7c00);
-       phy_reg = phy_read(phy, MII_NCONFIG);
-
-       mode = (phy_reg & BCM5461_MODE_MASK ) >> 1;
-
-       if ( mode == BCM54XX_COPPER) {
-               return bcm54xx_read_link(phy);
-       }
-
-       phy->speed = SPEED_1000;
-
-       /* find out wether we are running half- or full duplex */
-       phy_write(phy, MII_NCONFIG, 0x7000);
-       phy_reg = phy_read(phy, MII_NCONFIG);
-
-       if (phy_reg & BCM5461_FIBER_DUPLEX)
-               phy->duplex |=  DUPLEX_FULL;
-       else
-               phy->duplex |=  DUPLEX_HALF;
-
-       return 0;
-}
-
-static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg)
-{
-       /* select fiber mode, enable 1000 base-X registers */
-       phy_write(phy, MII_NCONFIG, 0xfc0b);
-
-       if (autoneg) {
-               /* enable fiber with no autonegotiation */
-               phy_write(phy, MII_ADVERTISE, 0x01e0);
-               phy_write(phy, MII_BMCR, 0x1140);
-       } else {
-               /* enable fiber with autonegotiation */
-               phy_write(phy, MII_BMCR, 0x0140);
-       }
-
-       phy->autoneg = autoneg;
-
-       return 0;
-}
-
-static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
-{
-       u16 ctl, adv;
-
-       phy->autoneg = 1;
-       phy->speed = SPEED_10;
-       phy->duplex = DUPLEX_HALF;
-       phy->pause = 0;
-       phy->advertising = advertise;
-
-       /* Setup standard advertise */
-       adv = phy_read(phy, MII_ADVERTISE);
-       adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
-       if (advertise & ADVERTISED_10baseT_Half)
-               adv |= ADVERTISE_10HALF;
-       if (advertise & ADVERTISED_10baseT_Full)
-               adv |= ADVERTISE_10FULL;
-       if (advertise & ADVERTISED_100baseT_Half)
-               adv |= ADVERTISE_100HALF;
-       if (advertise & ADVERTISED_100baseT_Full)
-               adv |= ADVERTISE_100FULL;
-       if (advertise & ADVERTISED_Pause)
-               adv |= ADVERTISE_PAUSE_CAP;
-       if (advertise & ADVERTISED_Asym_Pause)
-               adv |= ADVERTISE_PAUSE_ASYM;
-       phy_write(phy, MII_ADVERTISE, adv);
-
-       /* Setup 1000BT advertise & enable crossover detect
-        * XXX How do we advertise 1000BT ? Darwin source is
-        * confusing here, they read from specific control and
-        * write to control... Someone has specs for those
-        * beasts ?
-        */
-       adv = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL);
-       adv |= MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX;
-       adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
-                       MII_1000BASETCONTROL_HALFDUPLEXCAP);
-       if (advertise & SUPPORTED_1000baseT_Half)
-               adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
-       if (advertise & SUPPORTED_1000baseT_Full)
-               adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
-       phy_write(phy, MII_1000BASETCONTROL, adv);
-
-       /* Start/Restart aneg */
-       ctl = phy_read(phy, MII_BMCR);
-       ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
-       phy_write(phy, MII_BMCR, ctl);
-
-       return 0;
-}
-
-static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd)
-{
-       u16 ctl, ctl2;
-
-       phy->autoneg = 0;
-       phy->speed = speed;
-       phy->duplex = fd;
-       phy->pause = 0;
-
-       ctl = phy_read(phy, MII_BMCR);
-       ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE);
-       ctl |= BMCR_RESET;
-
-       /* Select speed & duplex */
-       switch(speed) {
-       case SPEED_10:
-               break;
-       case SPEED_100:
-               ctl |= BMCR_SPEED100;
-               break;
-       /* I'm not sure about the one below, again, Darwin source is
-        * quite confusing and I lack chip specs
-        */
-       case SPEED_1000:
-               ctl |= BMCR_SPD2;
-       }
-       if (fd == DUPLEX_FULL)
-               ctl |= BMCR_FULLDPLX;
-
-       /* Disable crossover. Again, the way Apple does it is strange,
-        * though I don't assume they are wrong ;)
-        */
-       ctl2 = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL);
-       ctl2 &= ~(MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX |
-               MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX |
-               MII_1000BASETCONTROL_FULLDUPLEXCAP |
-               MII_1000BASETCONTROL_HALFDUPLEXCAP);
-       if (speed == SPEED_1000)
-               ctl2 |= (fd == DUPLEX_FULL) ?
-                       MII_1000BASETCONTROL_FULLDUPLEXCAP :
-                       MII_1000BASETCONTROL_HALFDUPLEXCAP;
-       phy_write(phy, MII_1000BASETCONTROL, ctl2);
-
-       // XXX Should we set the sungem to GII now on 1000BT ?
-
-       phy_write(phy, MII_BMCR, ctl);
-
-       return 0;
-}
-
-static int marvell_read_link(struct mii_phy *phy)
-{
-       u16 status, pmask;
-
-       if (phy->autoneg) {
-               status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS);
-               if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0)
-                       return -EAGAIN;
-               if (status & MII_M1011_PHY_SPEC_STATUS_1000)
-                       phy->speed = SPEED_1000;
-               else if (status & MII_M1011_PHY_SPEC_STATUS_100)
-                       phy->speed = SPEED_100;
-               else
-                       phy->speed = SPEED_10;
-               if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
-                       phy->duplex = DUPLEX_FULL;
-               else
-                       phy->duplex = DUPLEX_HALF;
-               pmask = MII_M1011_PHY_SPEC_STATUS_TX_PAUSE |
-                       MII_M1011_PHY_SPEC_STATUS_RX_PAUSE;
-               phy->pause = (status & pmask) == pmask;
-       }
-       /* On non-aneg, we assume what we put in BMCR is the speed,
-        * though magic-aneg shouldn't prevent this case from occurring
-        */
-
-       return 0;
-}
-
-#define MII_BASIC_FEATURES \
-       (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |      \
-        SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |    \
-        SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |     \
-        SUPPORTED_Pause)
-
-/* On gigabit capable PHYs, we advertise Pause support but not asym pause
- * support for now as I'm not sure it's supported and Darwin doesn't do
- * it neither. --BenH.
- */
-#define MII_GBIT_FEATURES \
-       (MII_BASIC_FEATURES |   \
-        SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
-
-/* Broadcom BCM 5201 */
-static struct mii_phy_ops bcm5201_phy_ops = {
-       .init           = bcm5201_init,
-       .suspend        = bcm5201_suspend,
-       .setup_aneg     = genmii_setup_aneg,
-       .setup_forced   = genmii_setup_forced,
-       .poll_link      = genmii_poll_link,
-       .read_link      = genmii_read_link,
-};
-
-static struct mii_phy_def bcm5201_phy_def = {
-       .phy_id         = 0x00406210,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "BCM5201",
-       .features       = MII_BASIC_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &bcm5201_phy_ops
-};
-
-/* Broadcom BCM 5221 */
-static struct mii_phy_ops bcm5221_phy_ops = {
-       .suspend        = bcm5221_suspend,
-       .init           = bcm5221_init,
-       .setup_aneg     = genmii_setup_aneg,
-       .setup_forced   = genmii_setup_forced,
-       .poll_link      = genmii_poll_link,
-       .read_link      = genmii_read_link,
-};
-
-static struct mii_phy_def bcm5221_phy_def = {
-       .phy_id         = 0x004061e0,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "BCM5221",
-       .features       = MII_BASIC_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &bcm5221_phy_ops
-};
-
-/* Broadcom BCM 5241 */
-static struct mii_phy_ops bcm5241_phy_ops = {
-       .suspend        = bcm5241_suspend,
-       .init           = bcm5241_init,
-       .setup_aneg     = genmii_setup_aneg,
-       .setup_forced   = genmii_setup_forced,
-       .poll_link      = genmii_poll_link,
-       .read_link      = genmii_read_link,
-};
-static struct mii_phy_def bcm5241_phy_def = {
-       .phy_id         = 0x0143bc30,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "BCM5241",
-       .features       = MII_BASIC_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &bcm5241_phy_ops
-};
-
-/* Broadcom BCM 5400 */
-static struct mii_phy_ops bcm5400_phy_ops = {
-       .init           = bcm5400_init,
-       .suspend        = bcm5400_suspend,
-       .setup_aneg     = bcm54xx_setup_aneg,
-       .setup_forced   = bcm54xx_setup_forced,
-       .poll_link      = genmii_poll_link,
-       .read_link      = bcm54xx_read_link,
-};
-
-static struct mii_phy_def bcm5400_phy_def = {
-       .phy_id         = 0x00206040,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "BCM5400",
-       .features       = MII_GBIT_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &bcm5400_phy_ops
-};
-
-/* Broadcom BCM 5401 */
-static struct mii_phy_ops bcm5401_phy_ops = {
-       .init           = bcm5401_init,
-       .suspend        = bcm5401_suspend,
-       .setup_aneg     = bcm54xx_setup_aneg,
-       .setup_forced   = bcm54xx_setup_forced,
-       .poll_link      = genmii_poll_link,
-       .read_link      = bcm54xx_read_link,
-};
-
-static struct mii_phy_def bcm5401_phy_def = {
-       .phy_id         = 0x00206050,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "BCM5401",
-       .features       = MII_GBIT_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &bcm5401_phy_ops
-};
-
-/* Broadcom BCM 5411 */
-static struct mii_phy_ops bcm5411_phy_ops = {
-       .init           = bcm5411_init,
-       .suspend        = generic_suspend,
-       .setup_aneg     = bcm54xx_setup_aneg,
-       .setup_forced   = bcm54xx_setup_forced,
-       .poll_link      = genmii_poll_link,
-       .read_link      = bcm54xx_read_link,
-};
-
-static struct mii_phy_def bcm5411_phy_def = {
-       .phy_id         = 0x00206070,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "BCM5411",
-       .features       = MII_GBIT_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &bcm5411_phy_ops
-};
-
-/* Broadcom BCM 5421 */
-static struct mii_phy_ops bcm5421_phy_ops = {
-       .init           = bcm5421_init,
-       .suspend        = generic_suspend,
-       .setup_aneg     = bcm54xx_setup_aneg,
-       .setup_forced   = bcm54xx_setup_forced,
-       .poll_link      = bcm5421_poll_link,
-       .read_link      = bcm5421_read_link,
-       .enable_fiber   = bcm5421_enable_fiber,
-};
-
-static struct mii_phy_def bcm5421_phy_def = {
-       .phy_id         = 0x002060e0,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "BCM5421",
-       .features       = MII_GBIT_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &bcm5421_phy_ops
-};
-
-/* Broadcom BCM 5421 built-in K2 */
-static struct mii_phy_ops bcm5421k2_phy_ops = {
-       .init           = bcm5421_init,
-       .suspend        = generic_suspend,
-       .setup_aneg     = bcm54xx_setup_aneg,
-       .setup_forced   = bcm54xx_setup_forced,
-       .poll_link      = genmii_poll_link,
-       .read_link      = bcm54xx_read_link,
-};
-
-static struct mii_phy_def bcm5421k2_phy_def = {
-       .phy_id         = 0x002062e0,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "BCM5421-K2",
-       .features       = MII_GBIT_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &bcm5421k2_phy_ops
-};
-
-static struct mii_phy_ops bcm5461_phy_ops = {
-       .init           = bcm5421_init,
-       .suspend        = generic_suspend,
-       .setup_aneg     = bcm54xx_setup_aneg,
-       .setup_forced   = bcm54xx_setup_forced,
-       .poll_link      = bcm5461_poll_link,
-       .read_link      = bcm5461_read_link,
-       .enable_fiber   = bcm5461_enable_fiber,
-};
-
-static struct mii_phy_def bcm5461_phy_def = {
-       .phy_id         = 0x002060c0,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "BCM5461",
-       .features       = MII_GBIT_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &bcm5461_phy_ops
-};
-
-/* Broadcom BCM 5462 built-in Vesta */
-static struct mii_phy_ops bcm5462V_phy_ops = {
-       .init           = bcm5421_init,
-       .suspend        = generic_suspend,
-       .setup_aneg     = bcm54xx_setup_aneg,
-       .setup_forced   = bcm54xx_setup_forced,
-       .poll_link      = genmii_poll_link,
-       .read_link      = bcm54xx_read_link,
-};
-
-static struct mii_phy_def bcm5462V_phy_def = {
-       .phy_id         = 0x002060d0,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "BCM5462-Vesta",
-       .features       = MII_GBIT_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &bcm5462V_phy_ops
-};
-
-/* Marvell 88E1101 amd 88E1111 */
-static struct mii_phy_ops marvell88e1101_phy_ops = {
-       .suspend        = generic_suspend,
-       .setup_aneg     = marvell_setup_aneg,
-       .setup_forced   = marvell_setup_forced,
-       .poll_link      = genmii_poll_link,
-       .read_link      = marvell_read_link
-};
-
-static struct mii_phy_ops marvell88e1111_phy_ops = {
-       .init           = marvell88e1111_init,
-       .suspend        = generic_suspend,
-       .setup_aneg     = marvell_setup_aneg,
-       .setup_forced   = marvell_setup_forced,
-       .poll_link      = genmii_poll_link,
-       .read_link      = marvell_read_link
-};
-
-/* two revs in darwin for the 88e1101 ... I could use a datasheet
- * to get the proper names...
- */
-static struct mii_phy_def marvell88e1101v1_phy_def = {
-       .phy_id         = 0x01410c20,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "Marvell 88E1101v1",
-       .features       = MII_GBIT_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &marvell88e1101_phy_ops
-};
-static struct mii_phy_def marvell88e1101v2_phy_def = {
-       .phy_id         = 0x01410c60,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "Marvell 88E1101v2",
-       .features       = MII_GBIT_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &marvell88e1101_phy_ops
-};
-static struct mii_phy_def marvell88e1111_phy_def = {
-       .phy_id         = 0x01410cc0,
-       .phy_id_mask    = 0xfffffff0,
-       .name           = "Marvell 88E1111",
-       .features       = MII_GBIT_FEATURES,
-       .magic_aneg     = 1,
-       .ops            = &marvell88e1111_phy_ops
-};
-
-/* Generic implementation for most 10/100 PHYs */
-static struct mii_phy_ops generic_phy_ops = {
-       .setup_aneg     = genmii_setup_aneg,
-       .setup_forced   = genmii_setup_forced,
-       .poll_link      = genmii_poll_link,
-       .read_link      = genmii_read_link
-};
-
-static struct mii_phy_def genmii_phy_def = {
-       .phy_id         = 0x00000000,
-       .phy_id_mask    = 0x00000000,
-       .name           = "Generic MII",
-       .features       = MII_BASIC_FEATURES,
-       .magic_aneg     = 0,
-       .ops            = &generic_phy_ops
-};
-
-static struct mii_phy_def* mii_phy_table[] = {
-       &bcm5201_phy_def,
-       &bcm5221_phy_def,
-       &bcm5241_phy_def,
-       &bcm5400_phy_def,
-       &bcm5401_phy_def,
-       &bcm5411_phy_def,
-       &bcm5421_phy_def,
-       &bcm5421k2_phy_def,
-       &bcm5461_phy_def,
-       &bcm5462V_phy_def,
-       &marvell88e1101v1_phy_def,
-       &marvell88e1101v2_phy_def,
-       &marvell88e1111_phy_def,
-       &genmii_phy_def,
-       NULL
-};
-
-int mii_phy_probe(struct mii_phy *phy, int mii_id)
-{
-       int rc;
-       u32 id;
-       struct mii_phy_def* def;
-       int i;
-
-       /* We do not reset the mii_phy structure as the driver
-        * may re-probe the PHY regulary
-        */
-       phy->mii_id = mii_id;
-
-       /* Take PHY out of isloate mode and reset it. */
-       rc = reset_one_mii_phy(phy, mii_id);
-       if (rc)
-               goto fail;
-
-       /* Read ID and find matching entry */
-       id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2));
-       printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n",
-              id, mii_id);
-       for (i=0; (def = mii_phy_table[i]) != NULL; i++)
-               if ((id & def->phy_id_mask) == def->phy_id)
-                       break;
-       /* Should never be NULL (we have a generic entry), but... */
-       if (def == NULL)
-               goto fail;
-
-       phy->def = def;
-
-       return 0;
-fail:
-       phy->speed = 0;
-       phy->duplex = 0;
-       phy->pause = 0;
-       phy->advertising = 0;
-       return -ENODEV;
-}
-
-EXPORT_SYMBOL(mii_phy_probe);
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h
deleted file mode 100644 (file)
index af02f94..0000000
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifndef __SUNGEM_PHY_H__
-#define __SUNGEM_PHY_H__
-
-struct mii_phy;
-
-/* Operations supported by any kind of PHY */
-struct mii_phy_ops
-{
-       int             (*init)(struct mii_phy *phy);
-       int             (*suspend)(struct mii_phy *phy);
-       int             (*setup_aneg)(struct mii_phy *phy, u32 advertise);
-       int             (*setup_forced)(struct mii_phy *phy, int speed, int fd);
-       int             (*poll_link)(struct mii_phy *phy);
-       int             (*read_link)(struct mii_phy *phy);
-       int             (*enable_fiber)(struct mii_phy *phy, int autoneg);
-};
-
-/* Structure used to statically define an mii/gii based PHY */
-struct mii_phy_def
-{
-       u32                             phy_id;         /* Concatenated ID1 << 16 | ID2 */
-       u32                             phy_id_mask;    /* Significant bits */
-       u32                             features;       /* Ethtool SUPPORTED_* defines */
-       int                             magic_aneg;     /* Autoneg does all speed test for us */
-       const char*                     name;
-       const struct mii_phy_ops*       ops;
-};
-
-enum {
-       BCM54XX_COPPER,
-       BCM54XX_FIBER,
-       BCM54XX_GBIC,
-       BCM54XX_SGMII,
-       BCM54XX_UNKNOWN,
-};
-
-/* An instance of a PHY, partially borrowed from mii_if_info */
-struct mii_phy
-{
-       struct mii_phy_def*     def;
-       u32                     advertising;
-       int                     mii_id;
-
-       /* 1: autoneg enabled, 0: disabled */
-       int                     autoneg;
-
-       /* forced speed & duplex (no autoneg)
-        * partner speed & duplex & pause (autoneg)
-        */
-       int                     speed;
-       int                     duplex;
-       int                     pause;
-
-       /* Provided by host chip */
-       struct net_device       *dev;
-       int (*mdio_read) (struct net_device *dev, int mii_id, int reg);
-       void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val);
-       void                    *platform_data;
-};
-
-/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
- * filled, the remaining fields will be filled on return
- */
-extern int mii_phy_probe(struct mii_phy *phy, int mii_id);
-
-
-/* MII definitions missing from mii.h */
-
-#define BMCR_SPD2      0x0040          /* Gigabit enable (bcm54xx)     */
-#define LPA_PAUSE      0x0400
-
-/* More PHY registers (model specific) */
-
-/* MII BCM5201 MULTIPHY interrupt register */
-#define MII_BCM5201_INTERRUPT                  0x1A
-#define MII_BCM5201_INTERRUPT_INTENABLE                0x4000
-
-#define MII_BCM5201_AUXMODE2                   0x1B
-#define MII_BCM5201_AUXMODE2_LOWPOWER          0x0008
-
-#define MII_BCM5201_MULTIPHY                    0x1E
-
-/* MII BCM5201 MULTIPHY register bits */
-#define MII_BCM5201_MULTIPHY_SERIALMODE         0x0002
-#define MII_BCM5201_MULTIPHY_SUPERISOLATE       0x0008
-
-/* MII BCM5221 Additional registers */
-#define MII_BCM5221_TEST                       0x1f
-#define MII_BCM5221_TEST_ENABLE_SHADOWS                0x0080
-#define MII_BCM5221_SHDOW_AUX_STAT2            0x1b
-#define MII_BCM5221_SHDOW_AUX_STAT2_APD                0x0020
-#define MII_BCM5221_SHDOW_AUX_MODE4            0x1a
-#define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE   0x0001
-#define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR   0x0004
-
-/* MII BCM5241 Additional registers */
-#define MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR 0x0008
-
-/* MII BCM5400 1000-BASET Control register */
-#define MII_BCM5400_GB_CONTROL                 0x09
-#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP   0x0200
-
-/* MII BCM5400 AUXCONTROL register */
-#define MII_BCM5400_AUXCONTROL                  0x18
-#define MII_BCM5400_AUXCONTROL_PWR10BASET       0x0004
-
-/* MII BCM5400 AUXSTATUS register */
-#define MII_BCM5400_AUXSTATUS                   0x19
-#define MII_BCM5400_AUXSTATUS_LINKMODE_MASK     0x0700
-#define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT    8
-
-/* 1000BT control (Marvell & BCM54xx at least) */
-#define MII_1000BASETCONTROL                   0x09
-#define MII_1000BASETCONTROL_FULLDUPLEXCAP     0x0200
-#define MII_1000BASETCONTROL_HALFDUPLEXCAP     0x0100
-
-/* Marvell 88E1011 PHY control */
-#define MII_M1011_PHY_SPEC_CONTROL             0x10
-#define MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX 0x20
-#define MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX   0x40
-
-/* Marvell 88E1011 PHY status */
-#define MII_M1011_PHY_SPEC_STATUS              0x11
-#define MII_M1011_PHY_SPEC_STATUS_1000         0x8000
-#define MII_M1011_PHY_SPEC_STATUS_100          0x4000
-#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK     0xc000
-#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX   0x2000
-#define MII_M1011_PHY_SPEC_STATUS_RESOLVED     0x0800
-#define MII_M1011_PHY_SPEC_STATUS_TX_PAUSE     0x0008
-#define MII_M1011_PHY_SPEC_STATUS_RX_PAUSE     0x0004
-
-#endif /* __SUNGEM_PHY_H__ */
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
deleted file mode 100644 (file)
index 856e05b..0000000
+++ /dev/null
@@ -1,3360 +0,0 @@
-/* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
- *           auto carrier detecting ethernet driver.  Also known as the
- *           "Happy Meal Ethernet" found on SunSwift SBUS cards.
- *
- * Copyright (C) 1996, 1998, 1999, 2002, 2003,
- *             2006, 2008 David S. Miller (davem@davemloft.net)
- *
- * Changes :
- * 2000/11/11 Willy Tarreau <willy AT meta-x.org>
- *   - port to non-sparc architectures. Tested only on x86 and
- *     only currently works with QFE PCI cards.
- *   - ability to specify the MAC address at module load time by passing this
- *     argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/crc32.h>
-#include <linux/random.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/mm.h>
-#include <linux/bitops.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-
-#ifdef CONFIG_SPARC
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <asm/idprom.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-#include <asm/prom.h>
-#include <asm/auxio.h>
-#endif
-#include <asm/uaccess.h>
-
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-#endif
-
-#include "sunhme.h"
-
-#define DRV_NAME       "sunhme"
-#define DRV_VERSION    "3.10"
-#define DRV_RELDATE    "August 26, 2008"
-#define DRV_AUTHOR     "David S. Miller (davem@davemloft.net)"
-
-static char version[] =
-       DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
-
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_AUTHOR);
-MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
-MODULE_LICENSE("GPL");
-
-static int macaddr[6];
-
-/* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
-module_param_array(macaddr, int, NULL, 0);
-MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
-
-#ifdef CONFIG_SBUS
-static struct quattro *qfe_sbus_list;
-#endif
-
-#ifdef CONFIG_PCI
-static struct quattro *qfe_pci_list;
-#endif
-
-#undef HMEDEBUG
-#undef SXDEBUG
-#undef RXDEBUG
-#undef TXDEBUG
-#undef TXLOGGING
-
-#ifdef TXLOGGING
-struct hme_tx_logent {
-       unsigned int tstamp;
-       int tx_new, tx_old;
-       unsigned int action;
-#define TXLOG_ACTION_IRQ       0x01
-#define TXLOG_ACTION_TXMIT     0x02
-#define TXLOG_ACTION_TBUSY     0x04
-#define TXLOG_ACTION_NBUFS     0x08
-       unsigned int status;
-};
-#define TX_LOG_LEN     128
-static struct hme_tx_logent tx_log[TX_LOG_LEN];
-static int txlog_cur_entry;
-static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
-{
-       struct hme_tx_logent *tlp;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       tlp = &tx_log[txlog_cur_entry];
-       tlp->tstamp = (unsigned int)jiffies;
-       tlp->tx_new = hp->tx_new;
-       tlp->tx_old = hp->tx_old;
-       tlp->action = a;
-       tlp->status = s;
-       txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
-       local_irq_restore(flags);
-}
-static __inline__ void tx_dump_log(void)
-{
-       int i, this;
-
-       this = txlog_cur_entry;
-       for (i = 0; i < TX_LOG_LEN; i++) {
-               printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
-                      tx_log[this].tstamp,
-                      tx_log[this].tx_new, tx_log[this].tx_old,
-                      tx_log[this].action, tx_log[this].status);
-               this = (this + 1) & (TX_LOG_LEN - 1);
-       }
-}
-static __inline__ void tx_dump_ring(struct happy_meal *hp)
-{
-       struct hmeal_init_block *hb = hp->happy_block;
-       struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
-       int i;
-
-       for (i = 0; i < TX_RING_SIZE; i+=4) {
-               printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
-                      i, i + 4,
-                      le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
-                      le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
-                      le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
-                      le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
-       }
-}
-#else
-#define tx_add_log(hp, a, s)           do { } while(0)
-#define tx_dump_log()                  do { } while(0)
-#define tx_dump_ring(hp)               do { } while(0)
-#endif
-
-#ifdef HMEDEBUG
-#define HMD(x)  printk x
-#else
-#define HMD(x)
-#endif
-
-/* #define AUTO_SWITCH_DEBUG */
-
-#ifdef AUTO_SWITCH_DEBUG
-#define ASD(x)  printk x
-#else
-#define ASD(x)
-#endif
-
-#define DEFAULT_IPG0      16 /* For lance-mode only */
-#define DEFAULT_IPG1       8 /* For all modes */
-#define DEFAULT_IPG2       4 /* For all modes */
-#define DEFAULT_JAMSIZE    4 /* Toe jam */
-
-/* NOTE: In the descriptor writes one _must_ write the address
- *      member _first_.  The card must not be allowed to see
- *      the updated descriptor flags until the address is
- *      correct.  I've added a write memory barrier between
- *      the two stores so that I can sleep well at night... -DaveM
- */
-
-#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
-static void sbus_hme_write32(void __iomem *reg, u32 val)
-{
-       sbus_writel(val, reg);
-}
-
-static u32 sbus_hme_read32(void __iomem *reg)
-{
-       return sbus_readl(reg);
-}
-
-static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
-{
-       rxd->rx_addr = (__force hme32)addr;
-       wmb();
-       rxd->rx_flags = (__force hme32)flags;
-}
-
-static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
-{
-       txd->tx_addr = (__force hme32)addr;
-       wmb();
-       txd->tx_flags = (__force hme32)flags;
-}
-
-static u32 sbus_hme_read_desc32(hme32 *p)
-{
-       return (__force u32)*p;
-}
-
-static void pci_hme_write32(void __iomem *reg, u32 val)
-{
-       writel(val, reg);
-}
-
-static u32 pci_hme_read32(void __iomem *reg)
-{
-       return readl(reg);
-}
-
-static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
-{
-       rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
-       wmb();
-       rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
-}
-
-static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
-{
-       txd->tx_addr = (__force hme32)cpu_to_le32(addr);
-       wmb();
-       txd->tx_flags = (__force hme32)cpu_to_le32(flags);
-}
-
-static u32 pci_hme_read_desc32(hme32 *p)
-{
-       return le32_to_cpup((__le32 *)p);
-}
-
-#define hme_write32(__hp, __reg, __val) \
-       ((__hp)->write32((__reg), (__val)))
-#define hme_read32(__hp, __reg) \
-       ((__hp)->read32(__reg))
-#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
-       ((__hp)->write_rxd((__rxd), (__flags), (__addr)))
-#define hme_write_txd(__hp, __txd, __flags, __addr) \
-       ((__hp)->write_txd((__txd), (__flags), (__addr)))
-#define hme_read_desc32(__hp, __p) \
-       ((__hp)->read_desc32(__p))
-#define hme_dma_map(__hp, __ptr, __size, __dir) \
-       ((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir)))
-#define hme_dma_unmap(__hp, __addr, __size, __dir) \
-       ((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir)))
-#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
-       ((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)))
-#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
-       ((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)))
-#else
-#ifdef CONFIG_SBUS
-/* SBUS only compilation */
-#define hme_write32(__hp, __reg, __val) \
-       sbus_writel((__val), (__reg))
-#define hme_read32(__hp, __reg) \
-       sbus_readl(__reg)
-#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
-do {   (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
-       wmb(); \
-       (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
-} while(0)
-#define hme_write_txd(__hp, __txd, __flags, __addr) \
-do {   (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
-       wmb(); \
-       (__txd)->tx_flags = (__force hme32)(u32)(__flags); \
-} while(0)
-#define hme_read_desc32(__hp, __p)     ((__force u32)(hme32)*(__p))
-#define hme_dma_map(__hp, __ptr, __size, __dir) \
-       dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
-#define hme_dma_unmap(__hp, __addr, __size, __dir) \
-       dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
-#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
-       dma_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
-#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
-       dma_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
-#else
-/* PCI only compilation */
-#define hme_write32(__hp, __reg, __val) \
-       writel((__val), (__reg))
-#define hme_read32(__hp, __reg) \
-       readl(__reg)
-#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
-do {   (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
-       wmb(); \
-       (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
-} while(0)
-#define hme_write_txd(__hp, __txd, __flags, __addr) \
-do {   (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
-       wmb(); \
-       (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
-} while(0)
-static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
-{
-       return le32_to_cpup((__le32 *)p);
-}
-#define hme_dma_map(__hp, __ptr, __size, __dir) \
-       pci_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
-#define hme_dma_unmap(__hp, __addr, __size, __dir) \
-       pci_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
-#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
-       pci_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
-#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
-       pci_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
-#endif
-#endif
-
-
-/* Oh yes, the MIF BitBang is mighty fun to program.  BitBucket is more like it. */
-static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
-{
-       hme_write32(hp, tregs + TCVR_BBDATA, bit);
-       hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
-       hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
-}
-
-#if 0
-static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
-{
-       u32 ret;
-
-       hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
-       hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
-       ret = hme_read32(hp, tregs + TCVR_CFG);
-       if (internal)
-               ret &= TCV_CFG_MDIO0;
-       else
-               ret &= TCV_CFG_MDIO1;
-
-       return ret;
-}
-#endif
-
-static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
-{
-       u32 retval;
-
-       hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
-       udelay(1);
-       retval = hme_read32(hp, tregs + TCVR_CFG);
-       if (internal)
-               retval &= TCV_CFG_MDIO0;
-       else
-               retval &= TCV_CFG_MDIO1;
-       hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
-
-       return retval;
-}
-
-#define TCVR_FAILURE      0x80000000     /* Impossible MIF read value */
-
-static int happy_meal_bb_read(struct happy_meal *hp,
-                             void __iomem *tregs, int reg)
-{
-       u32 tmp;
-       int retval = 0;
-       int i;
-
-       ASD(("happy_meal_bb_read: reg=%d ", reg));
-
-       /* Enable the MIF BitBang outputs. */
-       hme_write32(hp, tregs + TCVR_BBOENAB, 1);
-
-       /* Force BitBang into the idle state. */
-       for (i = 0; i < 32; i++)
-               BB_PUT_BIT(hp, tregs, 1);
-
-       /* Give it the read sequence. */
-       BB_PUT_BIT(hp, tregs, 0);
-       BB_PUT_BIT(hp, tregs, 1);
-       BB_PUT_BIT(hp, tregs, 1);
-       BB_PUT_BIT(hp, tregs, 0);
-
-       /* Give it the PHY address. */
-       tmp = hp->paddr & 0xff;
-       for (i = 4; i >= 0; i--)
-               BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
-
-       /* Tell it what register we want to read. */
-       tmp = (reg & 0xff);
-       for (i = 4; i >= 0; i--)
-               BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
-
-       /* Close down the MIF BitBang outputs. */
-       hme_write32(hp, tregs + TCVR_BBOENAB, 0);
-
-       /* Now read in the value. */
-       (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
-       for (i = 15; i >= 0; i--)
-               retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
-       (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
-       (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
-       (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
-       ASD(("value=%x\n", retval));
-       return retval;
-}
-
-static void happy_meal_bb_write(struct happy_meal *hp,
-                               void __iomem *tregs, int reg,
-                               unsigned short value)
-{
-       u32 tmp;
-       int i;
-
-       ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));
-
-       /* Enable the MIF BitBang outputs. */
-       hme_write32(hp, tregs + TCVR_BBOENAB, 1);
-
-       /* Force BitBang into the idle state. */
-       for (i = 0; i < 32; i++)
-               BB_PUT_BIT(hp, tregs, 1);
-
-       /* Give it write sequence. */
-       BB_PUT_BIT(hp, tregs, 0);
-       BB_PUT_BIT(hp, tregs, 1);
-       BB_PUT_BIT(hp, tregs, 0);
-       BB_PUT_BIT(hp, tregs, 1);
-
-       /* Give it the PHY address. */
-       tmp = (hp->paddr & 0xff);
-       for (i = 4; i >= 0; i--)
-               BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
-
-       /* Tell it what register we will be writing. */
-       tmp = (reg & 0xff);
-       for (i = 4; i >= 0; i--)
-               BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));
-
-       /* Tell it to become ready for the bits. */
-       BB_PUT_BIT(hp, tregs, 1);
-       BB_PUT_BIT(hp, tregs, 0);
-
-       for (i = 15; i >= 0; i--)
-               BB_PUT_BIT(hp, tregs, ((value >> i) & 1));
-
-       /* Close down the MIF BitBang outputs. */
-       hme_write32(hp, tregs + TCVR_BBOENAB, 0);
-}
-
-#define TCVR_READ_TRIES   16
-
-static int happy_meal_tcvr_read(struct happy_meal *hp,
-                               void __iomem *tregs, int reg)
-{
-       int tries = TCVR_READ_TRIES;
-       int retval;
-
-       ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
-       if (hp->tcvr_type == none) {
-               ASD(("no transceiver, value=TCVR_FAILURE\n"));
-               return TCVR_FAILURE;
-       }
-
-       if (!(hp->happy_flags & HFLAG_FENABLE)) {
-               ASD(("doing bit bang\n"));
-               return happy_meal_bb_read(hp, tregs, reg);
-       }
-
-       hme_write32(hp, tregs + TCVR_FRAME,
-                   (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
-       while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
-               udelay(20);
-       if (!tries) {
-               printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
-               return TCVR_FAILURE;
-       }
-       retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
-       ASD(("value=%04x\n", retval));
-       return retval;
-}
-
-#define TCVR_WRITE_TRIES  16
-
-static void happy_meal_tcvr_write(struct happy_meal *hp,
-                                 void __iomem *tregs, int reg,
-                                 unsigned short value)
-{
-       int tries = TCVR_WRITE_TRIES;
-
-       ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));
-
-       /* Welcome to Sun Microsystems, can I take your order please? */
-       if (!(hp->happy_flags & HFLAG_FENABLE)) {
-               happy_meal_bb_write(hp, tregs, reg, value);
-               return;
-       }
-
-       /* Would you like fries with that? */
-       hme_write32(hp, tregs + TCVR_FRAME,
-                   (FRAME_WRITE | (hp->paddr << 23) |
-                    ((reg & 0xff) << 18) | (value & 0xffff)));
-       while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
-               udelay(20);
-
-       /* Anything else? */
-       if (!tries)
-               printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");
-
-       /* Fifty-two cents is your change, have a nice day. */
-}
-
-/* Auto negotiation.  The scheme is very simple.  We have a timer routine
- * that keeps watching the auto negotiation process as it progresses.
- * The DP83840 is first told to start doing it's thing, we set up the time
- * and place the timer state machine in it's initial state.
- *
- * Here the timer peeks at the DP83840 status registers at each click to see
- * if the auto negotiation has completed, we assume here that the DP83840 PHY
- * will time out at some point and just tell us what (didn't) happen.  For
- * complete coverage we only allow so many of the ticks at this level to run,
- * when this has expired we print a warning message and try another strategy.
- * This "other" strategy is to force the interface into various speed/duplex
- * configurations and we stop when we see a link-up condition before the
- * maximum number of "peek" ticks have occurred.
- *
- * Once a valid link status has been detected we configure the BigMAC and
- * the rest of the Happy Meal to speak the most efficient protocol we could
- * get a clean link for.  The priority for link configurations, highest first
- * is:
- *                 100 Base-T Full Duplex
- *                 100 Base-T Half Duplex
- *                 10 Base-T Full Duplex
- *                 10 Base-T Half Duplex
- *
- * We start a new timer now, after a successful auto negotiation status has
- * been detected.  This timer just waits for the link-up bit to get set in
- * the BMCR of the DP83840.  When this occurs we print a kernel log message
- * describing the link type in use and the fact that it is up.
- *
- * If a fatal error of some sort is signalled and detected in the interrupt
- * service routine, and the chip is reset, or the link is ifconfig'd down
- * and then back up, this entire process repeats itself all over again.
- */
-static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
-{
-       hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
-
-       /* Downgrade from full to half duplex.  Only possible
-        * via ethtool.
-        */
-       if (hp->sw_bmcr & BMCR_FULLDPLX) {
-               hp->sw_bmcr &= ~(BMCR_FULLDPLX);
-               happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
-               return 0;
-       }
-
-       /* Downgrade from 100 to 10. */
-       if (hp->sw_bmcr & BMCR_SPEED100) {
-               hp->sw_bmcr &= ~(BMCR_SPEED100);
-               happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
-               return 0;
-       }
-
-       /* We've tried everything. */
-       return -1;
-}
-
-static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
-{
-       printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
-       if (hp->tcvr_type == external)
-               printk("external ");
-       else
-               printk("internal ");
-       printk("transceiver at ");
-       hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
-       if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
-               if (hp->sw_lpa & LPA_100FULL)
-                       printk("100Mb/s, Full Duplex.\n");
-               else
-                       printk("100Mb/s, Half Duplex.\n");
-       } else {
-               if (hp->sw_lpa & LPA_10FULL)
-                       printk("10Mb/s, Full Duplex.\n");
-               else
-                       printk("10Mb/s, Half Duplex.\n");
-       }
-}
-
-static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
-{
-       printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
-       if (hp->tcvr_type == external)
-               printk("external ");
-       else
-               printk("internal ");
-       printk("transceiver at ");
-       hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
-       if (hp->sw_bmcr & BMCR_SPEED100)
-               printk("100Mb/s, ");
-       else
-               printk("10Mb/s, ");
-       if (hp->sw_bmcr & BMCR_FULLDPLX)
-               printk("Full Duplex.\n");
-       else
-               printk("Half Duplex.\n");
-}
-
-static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
-{
-       int full;
-
-       /* All we care about is making sure the bigmac tx_cfg has a
-        * proper duplex setting.
-        */
-       if (hp->timer_state == arbwait) {
-               hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
-               if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
-                       goto no_response;
-               if (hp->sw_lpa & LPA_100FULL)
-                       full = 1;
-               else if (hp->sw_lpa & LPA_100HALF)
-                       full = 0;
-               else if (hp->sw_lpa & LPA_10FULL)
-                       full = 1;
-               else
-                       full = 0;
-       } else {
-               /* Forcing a link mode. */
-               hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
-               if (hp->sw_bmcr & BMCR_FULLDPLX)
-                       full = 1;
-               else
-                       full = 0;
-       }
-
-       /* Before changing other bits in the tx_cfg register, and in
-        * general any of other the TX config registers too, you
-        * must:
-        * 1) Clear Enable
-        * 2) Poll with reads until that bit reads back as zero
-        * 3) Make TX configuration changes
-        * 4) Set Enable once more
-        */
-       hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
-                   hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
-                   ~(BIGMAC_TXCFG_ENABLE));
-       while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
-               barrier();
-       if (full) {
-               hp->happy_flags |= HFLAG_FULL;
-               hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
-                           hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
-                           BIGMAC_TXCFG_FULLDPLX);
-       } else {
-               hp->happy_flags &= ~(HFLAG_FULL);
-               hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
-                           hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
-                           ~(BIGMAC_TXCFG_FULLDPLX));
-       }
-       hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
-                   hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
-                   BIGMAC_TXCFG_ENABLE);
-       return 0;
-no_response:
-       return 1;
-}
-
-static int happy_meal_init(struct happy_meal *hp);
-
-static int is_lucent_phy(struct happy_meal *hp)
-{
-       void __iomem *tregs = hp->tcvregs;
-       unsigned short mr2, mr3;
-       int ret = 0;
-
-       mr2 = happy_meal_tcvr_read(hp, tregs, 2);
-       mr3 = happy_meal_tcvr_read(hp, tregs, 3);
-       if ((mr2 & 0xffff) == 0x0180 &&
-           ((mr3 & 0xffff) >> 10) == 0x1d)
-               ret = 1;
-
-       return ret;
-}
-
-static void happy_meal_timer(unsigned long data)
-{
-       struct happy_meal *hp = (struct happy_meal *) data;
-       void __iomem *tregs = hp->tcvregs;
-       int restart_timer = 0;
-
-       spin_lock_irq(&hp->happy_lock);
-
-       hp->timer_ticks++;
-       switch(hp->timer_state) {
-       case arbwait:
-               /* Only allow for 5 ticks, thats 10 seconds and much too
-                * long to wait for arbitration to complete.
-                */
-               if (hp->timer_ticks >= 10) {
-                       /* Enter force mode. */
-       do_force_mode:
-                       hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
-                       printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
-                              hp->dev->name);
-                       hp->sw_bmcr = BMCR_SPEED100;
-                       happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
-
-                       if (!is_lucent_phy(hp)) {
-                               /* OK, seems we need do disable the transceiver for the first
-                                * tick to make sure we get an accurate link state at the
-                                * second tick.
-                                */
-                               hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
-                               hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
-                               happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
-                       }
-                       hp->timer_state = ltrywait;
-                       hp->timer_ticks = 0;
-                       restart_timer = 1;
-               } else {
-                       /* Anything interesting happen? */
-                       hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
-                       if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
-                               int ret;
-
-                               /* Just what we've been waiting for... */
-                               ret = set_happy_link_modes(hp, tregs);
-                               if (ret) {
-                                       /* Ooops, something bad happened, go to force
-                                        * mode.
-                                        *
-                                        * XXX Broken hubs which don't support 802.3u
-                                        * XXX auto-negotiation make this happen as well.
-                                        */
-                                       goto do_force_mode;
-                               }
-
-                               /* Success, at least so far, advance our state engine. */
-                               hp->timer_state = lupwait;
-                               restart_timer = 1;
-                       } else {
-                               restart_timer = 1;
-                       }
-               }
-               break;
-
-       case lupwait:
-               /* Auto negotiation was successful and we are awaiting a
-                * link up status.  I have decided to let this timer run
-                * forever until some sort of error is signalled, reporting
-                * a message to the user at 10 second intervals.
-                */
-               hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
-               if (hp->sw_bmsr & BMSR_LSTATUS) {
-                       /* Wheee, it's up, display the link mode in use and put
-                        * the timer to sleep.
-                        */
-                       display_link_mode(hp, tregs);
-                       hp->timer_state = asleep;
-                       restart_timer = 0;
-               } else {
-                       if (hp->timer_ticks >= 10) {
-                               printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
-                                      "not completely up.\n", hp->dev->name);
-                               hp->timer_ticks = 0;
-                               restart_timer = 1;
-                       } else {
-                               restart_timer = 1;
-                       }
-               }
-               break;
-
-       case ltrywait:
-               /* Making the timeout here too long can make it take
-                * annoyingly long to attempt all of the link mode
-                * permutations, but then again this is essentially
-                * error recovery code for the most part.
-                */
-               hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
-               hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
-               if (hp->timer_ticks == 1) {
-                       if (!is_lucent_phy(hp)) {
-                               /* Re-enable transceiver, we'll re-enable the transceiver next
-                                * tick, then check link state on the following tick.
-                                */
-                               hp->sw_csconfig |= CSCONFIG_TCVDISAB;
-                               happy_meal_tcvr_write(hp, tregs,
-                                                     DP83840_CSCONFIG, hp->sw_csconfig);
-                       }
-                       restart_timer = 1;
-                       break;
-               }
-               if (hp->timer_ticks == 2) {
-                       if (!is_lucent_phy(hp)) {
-                               hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
-                               happy_meal_tcvr_write(hp, tregs,
-                                                     DP83840_CSCONFIG, hp->sw_csconfig);
-                       }
-                       restart_timer = 1;
-                       break;
-               }
-               if (hp->sw_bmsr & BMSR_LSTATUS) {
-                       /* Force mode selection success. */
-                       display_forced_link_mode(hp, tregs);
-                       set_happy_link_modes(hp, tregs); /* XXX error? then what? */
-                       hp->timer_state = asleep;
-                       restart_timer = 0;
-               } else {
-                       if (hp->timer_ticks >= 4) { /* 6 seconds or so... */
-                               int ret;
-
-                               ret = try_next_permutation(hp, tregs);
-                               if (ret == -1) {
-                                       /* Aieee, tried them all, reset the
-                                        * chip and try all over again.
-                                        */
-
-                                       /* Let the user know... */
-                                       printk(KERN_NOTICE "%s: Link down, cable problem?\n",
-                                              hp->dev->name);
-
-                                       ret = happy_meal_init(hp);
-                                       if (ret) {
-                                               /* ho hum... */
-                                               printk(KERN_ERR "%s: Error, cannot re-init the "
-                                                      "Happy Meal.\n", hp->dev->name);
-                                       }
-                                       goto out;
-                               }
-                               if (!is_lucent_phy(hp)) {
-                                       hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
-                                                                              DP83840_CSCONFIG);
-                                       hp->sw_csconfig |= CSCONFIG_TCVDISAB;
-                                       happy_meal_tcvr_write(hp, tregs,
-                                                             DP83840_CSCONFIG, hp->sw_csconfig);
-                               }
-                               hp->timer_ticks = 0;
-                               restart_timer = 1;
-                       } else {
-                               restart_timer = 1;
-                       }
-               }
-               break;
-
-       case asleep:
-       default:
-               /* Can't happens.... */
-               printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
-                      hp->dev->name);
-               restart_timer = 0;
-               hp->timer_ticks = 0;
-               hp->timer_state = asleep; /* foo on you */
-               break;
-       }
-
-       if (restart_timer) {
-               hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
-               add_timer(&hp->happy_timer);
-       }
-
-out:
-       spin_unlock_irq(&hp->happy_lock);
-}
-
-#define TX_RESET_TRIES     32
-#define RX_RESET_TRIES     32
-
-/* hp->happy_lock must be held */
-static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
-{
-       int tries = TX_RESET_TRIES;
-
-       HMD(("happy_meal_tx_reset: reset, "));
-
-       /* Would you like to try our SMCC Delux? */
-       hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
-       while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
-               udelay(20);
-
-       /* Lettuce, tomato, buggy hardware (no extra charge)? */
-       if (!tries)
-               printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");
-
-       /* Take care. */
-       HMD(("done\n"));
-}
-
-/* hp->happy_lock must be held */
-static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
-{
-       int tries = RX_RESET_TRIES;
-
-       HMD(("happy_meal_rx_reset: reset, "));
-
-       /* We have a special on GNU/Viking hardware bugs today. */
-       hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
-       while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
-               udelay(20);
-
-       /* Will that be all? */
-       if (!tries)
-               printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");
-
-       /* Don't forget your vik_1137125_wa.  Have a nice day. */
-       HMD(("done\n"));
-}
-
-#define STOP_TRIES         16
-
-/* hp->happy_lock must be held */
-static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
-{
-       int tries = STOP_TRIES;
-
-       HMD(("happy_meal_stop: reset, "));
-
-       /* We're consolidating our STB products, it's your lucky day. */
-       hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
-       while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
-               udelay(20);
-
-       /* Come back next week when we are "Sun Microelectronics". */
-       if (!tries)
-               printk(KERN_ERR "happy meal: Fry guys.");
-
-       /* Remember: "Different name, same old buggy as shit hardware." */
-       HMD(("done\n"));
-}
-
-/* hp->happy_lock must be held */
-static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
-{
-       struct net_device_stats *stats = &hp->net_stats;
-
-       stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
-       hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
-
-       stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
-       hme_write32(hp, bregs + BMAC_UNALECTR, 0);
-
-       stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
-       hme_write32(hp, bregs + BMAC_GLECTR, 0);
-
-       stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);
-
-       stats->collisions +=
-               (hme_read32(hp, bregs + BMAC_EXCTR) +
-                hme_read32(hp, bregs + BMAC_LTCTR));
-       hme_write32(hp, bregs + BMAC_EXCTR, 0);
-       hme_write32(hp, bregs + BMAC_LTCTR, 0);
-}
-
-/* hp->happy_lock must be held */
-static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
-{
-       ASD(("happy_meal_poll_stop: "));
-
-       /* If polling disabled or not polling already, nothing to do. */
-       if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
-          (HFLAG_POLLENABLE | HFLAG_POLL)) {
-               HMD(("not polling, return\n"));
-               return;
-       }
-
-       /* Shut up the MIF. */
-       ASD(("were polling, mif ints off, "));
-       hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
-
-       /* Turn off polling. */
-       ASD(("polling off, "));
-       hme_write32(hp, tregs + TCVR_CFG,
-                   hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));
-
-       /* We are no longer polling. */
-       hp->happy_flags &= ~(HFLAG_POLL);
-
-       /* Let the bits set. */
-       udelay(200);
-       ASD(("done\n"));
-}
-
-/* Only Sun can take such nice parts and fuck up the programming interface
- * like this.  Good job guys...
- */
-#define TCVR_RESET_TRIES       16 /* It should reset quickly        */
-#define TCVR_UNISOLATE_TRIES   32 /* Dis-isolation can take longer. */
-
-/* hp->happy_lock must be held */
-static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
-{
-       u32 tconfig;
-       int result, tries = TCVR_RESET_TRIES;
-
-       tconfig = hme_read32(hp, tregs + TCVR_CFG);
-       ASD(("happy_meal_tcvr_reset: tcfg<%08lx> ", tconfig));
-       if (hp->tcvr_type == external) {
-               ASD(("external<"));
-               hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
-               hp->tcvr_type = internal;
-               hp->paddr = TCV_PADDR_ITX;
-               ASD(("ISOLATE,"));
-               happy_meal_tcvr_write(hp, tregs, MII_BMCR,
-                                     (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
-               result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
-               if (result == TCVR_FAILURE) {
-                       ASD(("phyread_fail>\n"));
-                       return -1;
-               }
-               ASD(("phyread_ok,PSELECT>"));
-               hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
-               hp->tcvr_type = external;
-               hp->paddr = TCV_PADDR_ETX;
-       } else {
-               if (tconfig & TCV_CFG_MDIO1) {
-                       ASD(("internal<PSELECT,"));
-                       hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
-                       ASD(("ISOLATE,"));
-                       happy_meal_tcvr_write(hp, tregs, MII_BMCR,
-                                             (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
-                       result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
-                       if (result == TCVR_FAILURE) {
-                               ASD(("phyread_fail>\n"));
-                               return -1;
-                       }
-                       ASD(("phyread_ok,~PSELECT>"));
-                       hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
-                       hp->tcvr_type = internal;
-                       hp->paddr = TCV_PADDR_ITX;
-               }
-       }
-
-       ASD(("BMCR_RESET "));
-       happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);
-
-       while (--tries) {
-               result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
-               if (result == TCVR_FAILURE)
-                       return -1;
-               hp->sw_bmcr = result;
-               if (!(result & BMCR_RESET))
-                       break;
-               udelay(20);
-       }
-       if (!tries) {
-               ASD(("BMCR RESET FAILED!\n"));
-               return -1;
-       }
-       ASD(("RESET_OK\n"));
-
-       /* Get fresh copies of the PHY registers. */
-       hp->sw_bmsr      = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
-       hp->sw_physid1   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
-       hp->sw_physid2   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
-       hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
-
-       ASD(("UNISOLATE"));
-       hp->sw_bmcr &= ~(BMCR_ISOLATE);
-       happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
-
-       tries = TCVR_UNISOLATE_TRIES;
-       while (--tries) {
-               result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
-               if (result == TCVR_FAILURE)
-                       return -1;
-               if (!(result & BMCR_ISOLATE))
-                       break;
-               udelay(20);
-       }
-       if (!tries) {
-               ASD((" FAILED!\n"));
-               return -1;
-       }
-       ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
-       if (!is_lucent_phy(hp)) {
-               result = happy_meal_tcvr_read(hp, tregs,
-                                             DP83840_CSCONFIG);
-               happy_meal_tcvr_write(hp, tregs,
-                                     DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
-       }
-       return 0;
-}
-
-/* Figure out whether we have an internal or external transceiver.
- *
- * hp->happy_lock must be held
- */
-static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
-{
-       unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);
-
-       ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
-       if (hp->happy_flags & HFLAG_POLL) {
-               /* If we are polling, we must stop to get the transceiver type. */
-               ASD(("<polling> "));
-               if (hp->tcvr_type == internal) {
-                       if (tconfig & TCV_CFG_MDIO1) {
-                               ASD(("<internal> <poll stop> "));
-                               happy_meal_poll_stop(hp, tregs);
-                               hp->paddr = TCV_PADDR_ETX;
-                               hp->tcvr_type = external;
-                               ASD(("<external>\n"));
-                               tconfig &= ~(TCV_CFG_PENABLE);
-                               tconfig |= TCV_CFG_PSELECT;
-                               hme_write32(hp, tregs + TCVR_CFG, tconfig);
-                       }
-               } else {
-                       if (hp->tcvr_type == external) {
-                               ASD(("<external> "));
-                               if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
-                                       ASD(("<poll stop> "));
-                                       happy_meal_poll_stop(hp, tregs);
-                                       hp->paddr = TCV_PADDR_ITX;
-                                       hp->tcvr_type = internal;
-                                       ASD(("<internal>\n"));
-                                       hme_write32(hp, tregs + TCVR_CFG,
-                                                   hme_read32(hp, tregs + TCVR_CFG) &
-                                                   ~(TCV_CFG_PSELECT));
-                               }
-                               ASD(("\n"));
-                       } else {
-                               ASD(("<none>\n"));
-                       }
-               }
-       } else {
-               u32 reread = hme_read32(hp, tregs + TCVR_CFG);
-
-               /* Else we can just work off of the MDIO bits. */
-               ASD(("<not polling> "));
-               if (reread & TCV_CFG_MDIO1) {
-                       hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
-                       hp->paddr = TCV_PADDR_ETX;
-                       hp->tcvr_type = external;
-                       ASD(("<external>\n"));
-               } else {
-                       if (reread & TCV_CFG_MDIO0) {
-                               hme_write32(hp, tregs + TCVR_CFG,
-                                           tconfig & ~(TCV_CFG_PSELECT));
-                               hp->paddr = TCV_PADDR_ITX;
-                               hp->tcvr_type = internal;
-                               ASD(("<internal>\n"));
-                       } else {
-                               printk(KERN_ERR "happy meal: Transceiver and a coke please.");
-                               hp->tcvr_type = none; /* Grrr... */
-                               ASD(("<none>\n"));
-                       }
-               }
-       }
-}
-
-/* The receive ring buffers are a bit tricky to get right.  Here goes...
- *
- * The buffers we dma into must be 64 byte aligned.  So we use a special
- * alloc_skb() routine for the happy meal to allocate 64 bytes more than
- * we really need.
- *
- * We use skb_reserve() to align the data block we get in the skb.  We
- * also program the etxregs->cfg register to use an offset of 2.  This
- * imperical constant plus the ethernet header size will always leave
- * us with a nicely aligned ip header once we pass things up to the
- * protocol layers.
- *
- * The numbers work out to:
- *
- *         Max ethernet frame size         1518
- *         Ethernet header size              14
- *         Happy Meal base offset             2
- *
- * Say a skb data area is at 0xf001b010, and its size alloced is
- * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
- *
- * First our alloc_skb() routine aligns the data base to a 64 byte
- * boundary.  We now have 0xf001b040 as our skb data address.  We
- * plug this into the receive descriptor address.
- *
- * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
- * So now the data we will end up looking at starts at 0xf001b042.  When
- * the packet arrives, we will check out the size received and subtract
- * this from the skb->length.  Then we just pass the packet up to the
- * protocols as is, and allocate a new skb to replace this slot we have
- * just received from.
- *
- * The ethernet layer will strip the ether header from the front of the
- * skb we just sent to it, this leaves us with the ip header sitting
- * nicely aligned at 0xf001b050.  Also, for tcp and udp packets the
- * Happy Meal has even checksummed the tcp/udp data for us.  The 16
- * bit checksum is obtained from the low bits of the receive descriptor
- * flags, thus:
- *
- *     skb->csum = rxd->rx_flags & 0xffff;
- *     skb->ip_summed = CHECKSUM_COMPLETE;
- *
- * before sending off the skb to the protocols, and we are good as gold.
- */
-static void happy_meal_clean_rings(struct happy_meal *hp)
-{
-       int i;
-
-       for (i = 0; i < RX_RING_SIZE; i++) {
-               if (hp->rx_skbs[i] != NULL) {
-                       struct sk_buff *skb = hp->rx_skbs[i];
-                       struct happy_meal_rxd *rxd;
-                       u32 dma_addr;
-
-                       rxd = &hp->happy_block->happy_meal_rxd[i];
-                       dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
-                       dma_unmap_single(hp->dma_dev, dma_addr,
-                                        RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
-                       dev_kfree_skb_any(skb);
-                       hp->rx_skbs[i] = NULL;
-               }
-       }
-
-       for (i = 0; i < TX_RING_SIZE; i++) {
-               if (hp->tx_skbs[i] != NULL) {
-                       struct sk_buff *skb = hp->tx_skbs[i];
-                       struct happy_meal_txd *txd;
-                       u32 dma_addr;
-                       int frag;
-
-                       hp->tx_skbs[i] = NULL;
-
-                       for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
-                               txd = &hp->happy_block->happy_meal_txd[i];
-                               dma_addr = hme_read_desc32(hp, &txd->tx_addr);
-                               if (!frag)
-                                       dma_unmap_single(hp->dma_dev, dma_addr,
-                                                        (hme_read_desc32(hp, &txd->tx_flags)
-                                                         & TXFLAG_SIZE),
-                                                        DMA_TO_DEVICE);
-                               else
-                                       dma_unmap_page(hp->dma_dev, dma_addr,
-                                                        (hme_read_desc32(hp, &txd->tx_flags)
-                                                         & TXFLAG_SIZE),
-                                                        DMA_TO_DEVICE);
-
-                               if (frag != skb_shinfo(skb)->nr_frags)
-                                       i++;
-                       }
-
-                       dev_kfree_skb_any(skb);
-               }
-       }
-}
-
-/* hp->happy_lock must be held */
-static void happy_meal_init_rings(struct happy_meal *hp)
-{
-       struct hmeal_init_block *hb = hp->happy_block;
-       struct net_device *dev = hp->dev;
-       int i;
-
-       HMD(("happy_meal_init_rings: counters to zero, "));
-       hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;
-
-       /* Free any skippy bufs left around in the rings. */
-       HMD(("clean, "));
-       happy_meal_clean_rings(hp);
-
-       /* Now get new skippy bufs for the receive ring. */
-       HMD(("init rxring, "));
-       for (i = 0; i < RX_RING_SIZE; i++) {
-               struct sk_buff *skb;
-
-               skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
-               if (!skb) {
-                       hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
-                       continue;
-               }
-               hp->rx_skbs[i] = skb;
-               skb->dev = dev;
-
-               /* Because we reserve afterwards. */
-               skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
-               hme_write_rxd(hp, &hb->happy_meal_rxd[i],
-                             (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
-                             dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
-                                            DMA_FROM_DEVICE));
-               skb_reserve(skb, RX_OFFSET);
-       }
-
-       HMD(("init txring, "));
-       for (i = 0; i < TX_RING_SIZE; i++)
-               hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
-
-       HMD(("done\n"));
-}
-
-/* hp->happy_lock must be held */
-static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
-                                             void __iomem *tregs,
-                                             struct ethtool_cmd *ep)
-{
-       int timeout;
-
-       /* Read all of the registers we are interested in now. */
-       hp->sw_bmsr      = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
-       hp->sw_bmcr      = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
-       hp->sw_physid1   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
-       hp->sw_physid2   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
-
-       /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
-
-       hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
-       if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
-               /* Advertise everything we can support. */
-               if (hp->sw_bmsr & BMSR_10HALF)
-                       hp->sw_advertise |= (ADVERTISE_10HALF);
-               else
-                       hp->sw_advertise &= ~(ADVERTISE_10HALF);
-
-               if (hp->sw_bmsr & BMSR_10FULL)
-                       hp->sw_advertise |= (ADVERTISE_10FULL);
-               else
-                       hp->sw_advertise &= ~(ADVERTISE_10FULL);
-               if (hp->sw_bmsr & BMSR_100HALF)
-                       hp->sw_advertise |= (ADVERTISE_100HALF);
-               else
-                       hp->sw_advertise &= ~(ADVERTISE_100HALF);
-               if (hp->sw_bmsr & BMSR_100FULL)
-                       hp->sw_advertise |= (ADVERTISE_100FULL);
-               else
-                       hp->sw_advertise &= ~(ADVERTISE_100FULL);
-               happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
-
-               /* XXX Currently no Happy Meal cards I know off support 100BaseT4,
-                * XXX and this is because the DP83840 does not support it, changes
-                * XXX would need to be made to the tx/rx logic in the driver as well
-                * XXX so I completely skip checking for it in the BMSR for now.
-                */
-
-#ifdef AUTO_SWITCH_DEBUG
-               ASD(("%s: Advertising [ ", hp->dev->name));
-               if (hp->sw_advertise & ADVERTISE_10HALF)
-                       ASD(("10H "));
-               if (hp->sw_advertise & ADVERTISE_10FULL)
-                       ASD(("10F "));
-               if (hp->sw_advertise & ADVERTISE_100HALF)
-                       ASD(("100H "));
-               if (hp->sw_advertise & ADVERTISE_100FULL)
-                       ASD(("100F "));
-#endif
-
-               /* Enable Auto-Negotiation, this is usually on already... */
-               hp->sw_bmcr |= BMCR_ANENABLE;
-               happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
-
-               /* Restart it to make sure it is going. */
-               hp->sw_bmcr |= BMCR_ANRESTART;
-               happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
-
-               /* BMCR_ANRESTART self clears when the process has begun. */
-
-               timeout = 64;  /* More than enough. */
-               while (--timeout) {
-                       hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
-                       if (!(hp->sw_bmcr & BMCR_ANRESTART))
-                               break; /* got it. */
-                       udelay(10);
-               }
-               if (!timeout) {
-                       printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
-                              "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
-                       printk(KERN_NOTICE "%s: Performing force link detection.\n",
-                              hp->dev->name);
-                       goto force_link;
-               } else {
-                       hp->timer_state = arbwait;
-               }
-       } else {
-force_link:
-               /* Force the link up, trying first a particular mode.
-                * Either we are here at the request of ethtool or
-                * because the Happy Meal would not start to autoneg.
-                */
-
-               /* Disable auto-negotiation in BMCR, enable the duplex and
-                * speed setting, init the timer state machine, and fire it off.
-                */
-               if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
-                       hp->sw_bmcr = BMCR_SPEED100;
-               } else {
-                       if (ethtool_cmd_speed(ep) == SPEED_100)
-                               hp->sw_bmcr = BMCR_SPEED100;
-                       else
-                               hp->sw_bmcr = 0;
-                       if (ep->duplex == DUPLEX_FULL)
-                               hp->sw_bmcr |= BMCR_FULLDPLX;
-               }
-               happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
-
-               if (!is_lucent_phy(hp)) {
-                       /* OK, seems we need do disable the transceiver for the first
-                        * tick to make sure we get an accurate link state at the
-                        * second tick.
-                        */
-                       hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
-                                                              DP83840_CSCONFIG);
-                       hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
-                       happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
-                                             hp->sw_csconfig);
-               }
-               hp->timer_state = ltrywait;
-       }
-
-       hp->timer_ticks = 0;
-       hp->happy_timer.expires = jiffies + (12 * HZ)/10;  /* 1.2 sec. */
-       hp->happy_timer.data = (unsigned long) hp;
-       hp->happy_timer.function = happy_meal_timer;
-       add_timer(&hp->happy_timer);
-}
-
-/* Bring the HME chip to a fully operational state: stop DMA, rebuild
- * the rx/tx descriptor rings, probe and reset the transceiver, program
- * the BigMAC (station address, multicast hash filter, DMA burst sizes,
- * interrupt mask) and finally kick off autonegotiation.
- *
- * Returns 0 on success, -EAGAIN if no usable transceiver is found or
- * the transceiver reset fails.
- *
- * hp->happy_lock must be held
- */
-static int happy_meal_init(struct happy_meal *hp)
-{
-       void __iomem *gregs        = hp->gregs;
-       void __iomem *etxregs      = hp->etxregs;
-       void __iomem *erxregs      = hp->erxregs;
-       void __iomem *bregs        = hp->bigmacregs;
-       void __iomem *tregs        = hp->tcvregs;
-       u32 regtmp, rxcfg;
-       unsigned char *e = &hp->dev->dev_addr[0];
-
-       /* If auto-negotiation timer is running, kill it. */
-       del_timer(&hp->happy_timer);
-
-       HMD(("happy_meal_init: happy_flags[%08x] ",
-            hp->happy_flags));
-       if (!(hp->happy_flags & HFLAG_INIT)) {
-               /* First init: snapshot the hardware counters once so later
-                * reads accumulate deltas correctly.
-                */
-               HMD(("set HFLAG_INIT, "));
-               hp->happy_flags |= HFLAG_INIT;
-               happy_meal_get_counters(hp, bregs);
-       }
-
-       /* Stop polling. */
-       HMD(("to happy_meal_poll_stop\n"));
-       happy_meal_poll_stop(hp, tregs);
-
-       /* Stop transmitter and receiver. */
-       HMD(("happy_meal_init: to happy_meal_stop\n"));
-       happy_meal_stop(hp, gregs);
-
-       /* Alloc and reset the tx/rx descriptor chains. */
-       HMD(("happy_meal_init: to happy_meal_init_rings\n"));
-       happy_meal_init_rings(hp);
-
-       /* Shut up the MIF. */
-       HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
-            hme_read32(hp, tregs + TCVR_IMASK)));
-       hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
-
-       /* See if we can enable the MIF frame on this card to speak to the DP83840. */
-       if (hp->happy_flags & HFLAG_FENABLE) {
-               /* MIF frame mode: clearing BENABLE selects frame-based MII
-                * access instead of bit-banging.
-                */
-               HMD(("use frame old[%08x], ",
-                    hme_read32(hp, tregs + TCVR_CFG)));
-               hme_write32(hp, tregs + TCVR_CFG,
-                           hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
-       } else {
-               HMD(("use bitbang old[%08x], ",
-                    hme_read32(hp, tregs + TCVR_CFG)));
-               hme_write32(hp, tregs + TCVR_CFG,
-                           hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
-       }
-
-       /* Check the state of the transceiver. */
-       HMD(("to happy_meal_transceiver_check\n"));
-       happy_meal_transceiver_check(hp, tregs);
-
-       /* Put the Big Mac into a sane state. */
-       HMD(("happy_meal_init: "));
-       switch(hp->tcvr_type) {
-       case none:
-               /* Cannot operate if we don't know the transceiver type! */
-               HMD(("AAIEEE no transceiver type, EAGAIN"));
-               return -EAGAIN;
-
-       case internal:
-               /* Using the MII buffers. */
-               HMD(("internal, using MII, "));
-               hme_write32(hp, bregs + BMAC_XIFCFG, 0);
-               break;
-
-       case external:
-               /* Not using the MII, disable it. */
-               HMD(("external, disable MII, "));
-               hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
-               break;
-       }
-
-       if (happy_meal_tcvr_reset(hp, tregs))
-               return -EAGAIN;
-
-       /* Reset the Happy Meal Big Mac transceiver and the receiver. */
-       HMD(("tx/rx reset, "));
-       happy_meal_tx_reset(hp, bregs);
-       happy_meal_rx_reset(hp, bregs);
-
-       /* Set jam size and inter-packet gaps to reasonable defaults. */
-       HMD(("jsize/ipg1/ipg2, "));
-       hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
-       hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
-       hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);
-
-       /* Load up the MAC address and random seed. */
-       HMD(("rseed/macaddr, "));
-
-       /* The docs recommend to use the 10LSB of our MAC here. */
-       hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
-
-       /* Station address is programmed as three 16-bit halves, low
-        * register first.
-        */
-       hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
-       hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
-       hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));
-
-       /* Multicast filter: all-ones hash for ALLMULTI or >64 groups,
-        * a CRC-based 64-bit hash otherwise, all-zero when promiscuous
-        * (promisc mode is handled via BIGMAC_RXCFG_PMISC below).
-        */
-       HMD(("htable, "));
-       if ((hp->dev->flags & IFF_ALLMULTI) ||
-           (netdev_mc_count(hp->dev) > 64)) {
-               hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
-               hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
-               hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
-               hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
-       } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
-               u16 hash_table[4];
-               struct netdev_hw_addr *ha;
-               u32 crc;
-
-               memset(hash_table, 0, sizeof(hash_table));
-               netdev_for_each_mc_addr(ha, hp->dev) {
-                       /* Top 6 bits of the little-endian CRC index one of
-                        * 64 filter bits spread over four 16-bit registers.
-                        */
-                       crc = ether_crc_le(6, ha->addr);
-                       crc >>= 26;
-                       hash_table[crc >> 4] |= 1 << (crc & 0xf);
-               }
-               hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
-               hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
-               hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
-               hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
-       } else {
-               hme_write32(hp, bregs + BMAC_HTABLE3, 0);
-               hme_write32(hp, bregs + BMAC_HTABLE2, 0);
-               hme_write32(hp, bregs + BMAC_HTABLE1, 0);
-               hme_write32(hp, bregs + BMAC_HTABLE0, 0);
-       }
-
-       /* Set the RX and TX ring ptrs. */
-       HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
-            ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
-            ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
-       hme_write32(hp, erxregs + ERX_RING,
-                   ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
-       hme_write32(hp, etxregs + ETX_RING,
-                   ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));
-
-       /* Parity issues in the ERX unit of some HME revisions can cause some
-        * registers to not be written unless their parity is even.  Detect such
-        * lost writes and simply rewrite with a low bit set (which will be ignored
-        * since the rxring needs to be 2K aligned).
-        */
-       if (hme_read32(hp, erxregs + ERX_RING) !=
-           ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
-               hme_write32(hp, erxregs + ERX_RING,
-                           ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
-                           | 0x4);
-
-       /* Set the supported burst sizes. */
-       HMD(("happy_meal_init: old[%08x] bursts<",
-            hme_read32(hp, gregs + GREG_CFG)));
-
-#ifndef CONFIG_SPARC
-       /* It is always PCI and can handle 64byte bursts. */
-       hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
-#else
-       /* On SPARC pick the largest burst the bus supports, preferring
-        * 64 > 32 > 16; SBUS cards may additionally enable 64-bit DMA.
-        */
-       if ((hp->happy_bursts & DMA_BURST64) &&
-           ((hp->happy_flags & HFLAG_PCI) != 0
-#ifdef CONFIG_SBUS
-            || sbus_can_burst64()
-#endif
-            || 0)) {
-               u32 gcfg = GREG_CFG_BURST64;
-
-               /* I have no idea if I should set the extended
-                * transfer mode bit for Cheerio, so for now I
-                * do not.  -DaveM
-                */
-#ifdef CONFIG_SBUS
-               if ((hp->happy_flags & HFLAG_PCI) == 0) {
-                       struct platform_device *op = hp->happy_dev;
-                       if (sbus_can_dma_64bit()) {
-                               sbus_set_sbus64(&op->dev,
-                                               hp->happy_bursts);
-                               gcfg |= GREG_CFG_64BIT;
-                       }
-               }
-#endif
-
-               HMD(("64>"));
-               hme_write32(hp, gregs + GREG_CFG, gcfg);
-       } else if (hp->happy_bursts & DMA_BURST32) {
-               HMD(("32>"));
-               hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
-       } else if (hp->happy_bursts & DMA_BURST16) {
-               HMD(("16>"));
-               hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
-       } else {
-               HMD(("XXX>"));
-               hme_write32(hp, gregs + GREG_CFG, 0);
-       }
-#endif /* CONFIG_SPARC */
-
-       /* Turn off interrupts we do not want to hear. */
-       HMD((", enable global interrupts, "));
-       hme_write32(hp, gregs + GREG_IMASK,
-                   (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
-                    GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));
-
-       /* Set the transmit ring buffer size. */
-       HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
-            hme_read32(hp, etxregs + ETX_RSIZE)));
-       hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);
-
-       /* Enable transmitter DVMA. */
-       HMD(("tx dma enable old[%08x], ",
-            hme_read32(hp, etxregs + ETX_CFG)));
-       hme_write32(hp, etxregs + ETX_CFG,
-                   hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);
-
-       /* This chip really rots, for the receiver sometimes when you
-        * write to its control registers not all the bits get there
-        * properly.  I cannot think of a sane way to provide complete
-        * coverage for this hardware bug yet.
-        */
-       HMD(("erx regs bug old[%08x]\n",
-            hme_read32(hp, erxregs + ERX_CFG)));
-       hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
-       regtmp = hme_read32(hp, erxregs + ERX_CFG);
-       hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
-       if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
-               printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
-               printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
-                      ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
-               /* XXX Should return failure here... */
-       }
-
-       /* Enable Big Mac hash table filter. */
-       HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
-            hme_read32(hp, bregs + BMAC_RXCFG)));
-       rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
-       if (hp->dev->flags & IFF_PROMISC)
-               rxcfg |= BIGMAC_RXCFG_PMISC;
-       hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);
-
-       /* Let the bits settle in the chip. */
-       udelay(10);
-
-       /* Ok, configure the Big Mac transmitter. */
-       HMD(("BIGMAC init, "));
-       regtmp = 0;
-       if (hp->happy_flags & HFLAG_FULL)
-               regtmp |= BIGMAC_TXCFG_FULLDPLX;
-
-       /* Don't turn on the "don't give up" bit for now.  It could cause hme
-        * to deadlock with the PHY if a Jabber occurs.
-        */
-       hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);
-
-       /* Give up after 16 TX attempts. */
-       hme_write32(hp, bregs + BMAC_ALIMIT, 16);
-
-       /* Enable the output drivers no matter what. */
-       regtmp = BIGMAC_XCFG_ODENABLE;
-
-       /* If card can do lance mode, enable it. */
-       if (hp->happy_flags & HFLAG_LANCE)
-               regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;
-
-       /* Disable the MII buffers if using external transceiver. */
-       if (hp->tcvr_type == external)
-               regtmp |= BIGMAC_XCFG_MIIDISAB;
-
-       HMD(("XIF config old[%08x], ",
-            hme_read32(hp, bregs + BMAC_XIFCFG)));
-       hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);
-
-       /* Start things up. */
-       HMD(("tx old[%08x] and rx [%08x] ON!\n",
-            hme_read32(hp, bregs + BMAC_TXCFG),
-            hme_read32(hp, bregs + BMAC_RXCFG)));
-
-       /* Set larger TX/RX size to allow for 802.1q */
-       hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
-       hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
-
-       hme_write32(hp, bregs + BMAC_TXCFG,
-                   hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
-       hme_write32(hp, bregs + BMAC_RXCFG,
-                   hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);
-
-       /* Get the autonegotiation started, and the watch timer ticking. */
-       happy_meal_begin_auto_negotiation(hp, tregs, NULL);
-
-       /* Success. */
-       return 0;
-}
-
-/* Quiet the chip, reset the transceiver and program the PHY's
- * advertisement register (MII_ADVERTISE) with every 10/100
- * half/full-duplex mode the BMSR says the PHY supports.  Bails out
- * silently if no transceiver is present or its reset fails.
- *
- * hp->happy_lock must be held
- */
-static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
-{
-       void __iomem *tregs     = hp->tcvregs;
-       void __iomem *bregs     = hp->bigmacregs;
-       void __iomem *gregs     = hp->gregs;
-
-       /* Stop the chip and mask MIF interrupts before touching the PHY. */
-       happy_meal_stop(hp, gregs);
-       hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
-       /* Select MII frame mode vs. bit-bang, same as happy_meal_init(). */
-       if (hp->happy_flags & HFLAG_FENABLE)
-               hme_write32(hp, tregs + TCVR_CFG,
-                           hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
-       else
-               hme_write32(hp, tregs + TCVR_CFG,
-                           hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
-       happy_meal_transceiver_check(hp, tregs);
-       switch(hp->tcvr_type) {
-       case none:
-               return;
-       case internal:
-               hme_write32(hp, bregs + BMAC_XIFCFG, 0);
-               break;
-       case external:
-               hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
-               break;
-       }
-       if (happy_meal_tcvr_reset(hp, tregs))
-               return;
-
-       /* Latch PHY registers as of now. */
-       hp->sw_bmsr      = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
-       hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
-
-       /* Advertise everything we can support. */
-       if (hp->sw_bmsr & BMSR_10HALF)
-               hp->sw_advertise |= (ADVERTISE_10HALF);
-       else
-               hp->sw_advertise &= ~(ADVERTISE_10HALF);
-
-       if (hp->sw_bmsr & BMSR_10FULL)
-               hp->sw_advertise |= (ADVERTISE_10FULL);
-       else
-               hp->sw_advertise &= ~(ADVERTISE_10FULL);
-       if (hp->sw_bmsr & BMSR_100HALF)
-               hp->sw_advertise |= (ADVERTISE_100HALF);
-       else
-               hp->sw_advertise &= ~(ADVERTISE_100HALF);
-       if (hp->sw_bmsr & BMSR_100FULL)
-               hp->sw_advertise |= (ADVERTISE_100FULL);
-       else
-               hp->sw_advertise &= ~(ADVERTISE_100FULL);
-
-       /* Update the PHY advertisement register. */
-       happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
-}
-
-/* Decode and report the error bits of a latched GREG_STAT value, and
- * reinitialize the chip (happy_meal_init) for the error classes that
- * require a reset.  Returns 1 if a reset was performed (caller should
- * stop processing this interrupt), 0 otherwise.
- *
- * Once status is latched (by happy_meal_interrupt) it is cleared by
- * the hardware, so we cannot re-read it and get a correct value.
- *
- * hp->happy_lock must be held
- */
-static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
-{
-       int reset = 0;
-
-       /* Only print messages for non-counter related interrupts. */
-       if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
-                     GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
-                     GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
-                     GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
-                     GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
-                     GREG_STAT_SLVPERR))
-               printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
-                      hp->dev->name, status);
-
-       if (status & GREG_STAT_RFIFOVF) {
-               /* Receive FIFO overflow is harmless and the hardware will take
-                  care of it, just some packets are lost. Who cares. */
-               printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
-       }
-
-       if (status & GREG_STAT_STSTERR) {
-               /* BigMAC SQE link test failed. */
-               printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
-               reset = 1;
-       }
-
-       if (status & GREG_STAT_TFIFO_UND) {
-               /* Transmit FIFO underrun, again DMA error likely. */
-               printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
-                      hp->dev->name);
-               reset = 1;
-       }
-
-       if (status & GREG_STAT_MAXPKTERR) {
-               /* Driver error, tried to transmit something larger
-                * than ethernet max mtu.
-                */
-               printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
-               reset = 1;
-       }
-
-       if (status & GREG_STAT_NORXD) {
-               /* This is harmless, it just means the system is
-                * quite loaded and the incoming packet rate was
-                * faster than the interrupt handler could keep up
-                * with.
-                */
-               printk(KERN_INFO "%s: Happy Meal out of receive "
-                      "descriptors, packet dropped.\n",
-                      hp->dev->name);
-       }
-
-       if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
-               /* All sorts of DMA receive errors. */
-               printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
-               if (status & GREG_STAT_RXERR)
-                       printk("GenericError ");
-               if (status & GREG_STAT_RXPERR)
-                       printk("ParityError ");
-               if (status & GREG_STAT_RXTERR)
-                       printk("RxTagBotch ");
-               printk("]\n");
-               reset = 1;
-       }
-
-       if (status & GREG_STAT_EOPERR) {
-               /* Driver bug, didn't set EOP bit in tx descriptor given
-                * to the happy meal.
-                */
-               printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
-                      hp->dev->name);
-               reset = 1;
-       }
-
-       if (status & GREG_STAT_MIFIRQ) {
-               /* MIF signalled an interrupt, were we polling it? */
-               printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
-       }
-
-       if (status &
-           (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
-               /* All sorts of transmit DMA errors. */
-               printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
-               if (status & GREG_STAT_TXEACK)
-                       printk("GenericError ");
-               if (status & GREG_STAT_TXLERR)
-                       printk("LateError ");
-               if (status & GREG_STAT_TXPERR)
-                       printk("ParityErro ");
-               if (status & GREG_STAT_TXTERR)
-                       printk("TagBotch ");
-               printk("]\n");
-               reset = 1;
-       }
-
-       if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
-               /* Bus or parity error when cpu accessed happy meal registers
-                * or it's internal FIFO's.  Should never see this.
-                */
-               printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
-                      hp->dev->name,
-                      (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
-               reset = 1;
-       }
-
-       if (reset) {
-               printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
-               happy_meal_init(hp);
-               return 1;
-       }
-       return 0;
-}
-
-/* Handle a MIF (link status change) interrupt: read the link partner's
- * abilities (MII_LPA) and force the BMCR to the fastest mode both ends
- * support, preferring 100/full > 100/half > 10/full > 10/half, then
- * stop MIF polling.
- *
- * hp->happy_lock must be held
- */
-static void happy_meal_mif_interrupt(struct happy_meal *hp)
-{
-       void __iomem *tregs = hp->tcvregs;
-
-       printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
-       hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
-       hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
-
-       /* Use the fastest transmission protocol possible. */
-       if (hp->sw_lpa & LPA_100FULL) {
-               printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name);
-               hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
-       } else if (hp->sw_lpa & LPA_100HALF) {
-               printk(KERN_INFO "%s: Switching to 100MBps at half duplex.", hp->dev->name);
-               hp->sw_bmcr |= BMCR_SPEED100;
-       } else if (hp->sw_lpa & LPA_10FULL) {
-               printk(KERN_INFO "%s: Switching to 10MBps at full duplex.", hp->dev->name);
-               hp->sw_bmcr |= BMCR_FULLDPLX;
-       } else {
-               printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name);
-       }
-       happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
-
-       /* Finally stop polling and shut up the MIF. */
-       happy_meal_poll_stop(hp, tregs);
-}
-
-/* TXD(): transmit-path debug print, compiled out unless TXDEBUG is defined. */
-#ifdef TXDEBUG
-#define TXD(x) printk x
-#else
-#define TXD(x)
-#endif
-
-/* Reclaim completed transmit descriptors: walk the ring from tx_old to
- * tx_new, skip entries still owned by the chip (TXFLAG_OWN), unmap each
- * fragment's DMA buffer, free the skb, bump tx stats, and wake the
- * queue if enough descriptors are now free.
- *
- * hp->happy_lock must be held
- */
-static void happy_meal_tx(struct happy_meal *hp)
-{
-       struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
-       struct happy_meal_txd *this;
-       struct net_device *dev = hp->dev;
-       int elem;
-
-       elem = hp->tx_old;
-       TXD(("TX<"));
-       while (elem != hp->tx_new) {
-               struct sk_buff *skb;
-               u32 flags, dma_addr, dma_len;
-               int frag;
-
-               TXD(("[%d]", elem));
-               this = &txbase[elem];
-               flags = hme_read_desc32(hp, &this->tx_flags);
-               if (flags & TXFLAG_OWN)
-                       break;
-               skb = hp->tx_skbs[elem];
-               if (skb_shinfo(skb)->nr_frags) {
-                       int last;
-
-                       /* Multi-fragment skb: only reclaim once the LAST
-                        * fragment's descriptor has been released by the chip.
-                        */
-                       last = elem + skb_shinfo(skb)->nr_frags;
-                       last &= (TX_RING_SIZE - 1);
-                       flags = hme_read_desc32(hp, &txbase[last].tx_flags);
-                       if (flags & TXFLAG_OWN)
-                               break;
-               }
-               hp->tx_skbs[elem] = NULL;
-               hp->net_stats.tx_bytes += skb->len;
-
-               /* Unmap the head (single mapping) then each page fragment. */
-               for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
-                       dma_addr = hme_read_desc32(hp, &this->tx_addr);
-                       dma_len = hme_read_desc32(hp, &this->tx_flags);
-
-                       dma_len &= TXFLAG_SIZE;
-                       if (!frag)
-                               dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
-                       else
-                               dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
-
-                       elem = NEXT_TX(elem);
-                       this = &txbase[elem];
-               }
-
-               dev_kfree_skb_irq(skb);
-               hp->net_stats.tx_packets++;
-       }
-       hp->tx_old = elem;
-       TXD((">"));
-
-       /* Wake the queue only when a full worst-case packet fits. */
-       if (netif_queue_stopped(dev) &&
-           TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
-               netif_wake_queue(dev);
-}
-
-/* RXD(): receive-path debug print, compiled out unless RXDEBUG is defined. */
-#ifdef RXDEBUG
-#define RXD(x) printk x
-#else
-#define RXD(x)
-#endif
-
-/* Receive path: walk the rx ring from rx_new, passing each completed
- * frame up the stack.  Large frames (> RX_COPY_THRESHOLD) are handed up
- * directly and the ring slot is refilled with a freshly-allocated skb;
- * small frames are copied into a new skb so the original ring buffer
- * can be reused in place.
- *
- * Originally I used to handle the allocation failure by just giving back just
- * that one ring buffer to the happy meal.  Problem is that usually when that
- * condition is triggered, the happy meal expects you to do something reasonable
- * with all of the packets it has DMA'd in.  So now I just drop the entire
- * ring when we cannot get a new skb and give them all back to the happy meal,
- * maybe things will be "happier" now.
- *
- * hp->happy_lock must be held
- */
-static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
-{
-       struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
-       struct happy_meal_rxd *this;
-       int elem = hp->rx_new, drops = 0;
-       u32 flags;
-
-       RXD(("RX<"));
-       this = &rxbase[elem];
-       /* Stop at the first descriptor still owned by the chip. */
-       while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
-               struct sk_buff *skb;
-               int len = flags >> 16;
-               u16 csum = flags & RXFLAG_CSUM;
-               u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);
-
-               RXD(("[%d ", elem));
-
-               /* Check for errors. */
-               if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
-                       RXD(("ERR(%08x)]", flags));
-                       hp->net_stats.rx_errors++;
-                       if (len < ETH_ZLEN)
-                               hp->net_stats.rx_length_errors++;
-                       if (len & (RXFLAG_OVERFLOW >> 16)) {
-                               hp->net_stats.rx_over_errors++;
-                               hp->net_stats.rx_fifo_errors++;
-                       }
-
-                       /* Return it to the Happy meal. */
-       drop_it:
-                       hp->net_stats.rx_dropped++;
-                       hme_write_rxd(hp, this,
-                                     (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
-                                     dma_addr);
-                       goto next;
-               }
-               skb = hp->rx_skbs[elem];
-               if (len > RX_COPY_THRESHOLD) {
-                       struct sk_buff *new_skb;
-
-                       /* Now refill the entry, if we can. */
-                       new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
-                       if (new_skb == NULL) {
-                               drops++;
-                               goto drop_it;
-                       }
-                       dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
-                       hp->rx_skbs[elem] = new_skb;
-                       new_skb->dev = dev;
-                       skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
-                       hme_write_rxd(hp, this,
-                                     (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
-                                     dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
-                                                    DMA_FROM_DEVICE));
-                       skb_reserve(new_skb, RX_OFFSET);
-
-                       /* Trim the original skb for the netif. */
-                       skb_trim(skb, len);
-               } else {
-                       struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
-
-                       if (copy_skb == NULL) {
-                               drops++;
-                               goto drop_it;
-                       }
-
-                       /* Reserve 2 bytes so the IP header is word-aligned. */
-                       skb_reserve(copy_skb, 2);
-                       skb_put(copy_skb, len);
-                       dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
-                       skb_copy_from_linear_data(skb, copy_skb->data, len);
-                       dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
-                       /* Reuse original ring buffer. */
-                       hme_write_rxd(hp, this,
-                                     (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
-                                     dma_addr);
-
-                       skb = copy_skb;
-               }
-
-               /* This card is _fucking_ hot... */
-               skb->csum = csum_unfold(~(__force __sum16)htons(csum));
-               skb->ip_summed = CHECKSUM_COMPLETE;
-
-               RXD(("len=%d csum=%4x]", len, csum));
-               skb->protocol = eth_type_trans(skb, dev);
-               netif_rx(skb);
-
-               hp->net_stats.rx_packets++;
-               hp->net_stats.rx_bytes += len;
-       next:
-               elem = NEXT_RX(elem);
-               this = &rxbase[elem];
-       }
-       hp->rx_new = elem;
-       if (drops)
-               printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
-       RXD((">"));
-}
-
-/* Top-level interrupt handler for a single (non-Quattro-SBUS) HME.
- * Reads the latched GREG_STAT (the read clears it in hardware) and
- * dispatches to the error, MIF, tx-reclaim and rx handlers in turn.
- * Always returns IRQ_HANDLED since the line may be shared.
- */
-static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
-{
-       struct net_device *dev = dev_id;
-       struct happy_meal *hp  = netdev_priv(dev);
-       u32 happy_status       = hme_read32(hp, hp->gregs + GREG_STAT);
-
-       HMD(("happy_meal_interrupt: status=%08x ", happy_status));
-
-       spin_lock(&hp->happy_lock);
-
-       if (happy_status & GREG_STAT_ERRORS) {
-               HMD(("ERRORS "));
-               /* If the error handler reset the chip, skip the rest. */
-               if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
-                       goto out;
-       }
-
-       if (happy_status & GREG_STAT_MIFIRQ) {
-               HMD(("MIFIRQ "));
-               happy_meal_mif_interrupt(hp);
-       }
-
-       if (happy_status & GREG_STAT_TXALL) {
-               HMD(("TXALL "));
-               happy_meal_tx(hp);
-       }
-
-       if (happy_status & GREG_STAT_RXTOHOST) {
-               HMD(("RXTOHOST "));
-               happy_meal_rx(hp, dev);
-       }
-
-       HMD(("done\n"));
-out:
-       spin_unlock(&hp->happy_lock);
-
-       return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_SBUS
-/* Shared interrupt handler for SBUS Quattro (QFE) cards: all four HME
- * ports raise the same IRQ, so poll each port's GREG_STAT and run the
- * same error/MIF/tx/rx dispatch as happy_meal_interrupt() for any port
- * with pending work.
- */
-static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
-{
-       struct quattro *qp = (struct quattro *) cookie;
-       int i;
-
-       for (i = 0; i < 4; i++) {
-               struct net_device *dev = qp->happy_meals[i];
-               struct happy_meal *hp  = netdev_priv(dev);
-               u32 happy_status       = hme_read32(hp, hp->gregs + GREG_STAT);
-
-               HMD(("quattro_interrupt: status=%08x ", happy_status));
-
-               /* Nothing pending on this port, try the next one. */
-               if (!(happy_status & (GREG_STAT_ERRORS |
-                                     GREG_STAT_MIFIRQ |
-                                     GREG_STAT_TXALL |
-                                     GREG_STAT_RXTOHOST)))
-                       continue;
-
-               spin_lock(&hp->happy_lock);
-
-               if (happy_status & GREG_STAT_ERRORS) {
-                       HMD(("ERRORS "));
-                       if (happy_meal_is_not_so_happy(hp, happy_status))
-                               goto next;
-               }
-
-               if (happy_status & GREG_STAT_MIFIRQ) {
-                       HMD(("MIFIRQ "));
-                       happy_meal_mif_interrupt(hp);
-               }
-
-               if (happy_status & GREG_STAT_TXALL) {
-                       HMD(("TXALL "));
-                       happy_meal_tx(hp);
-               }
-
-               if (happy_status & GREG_STAT_RXTOHOST) {
-                       HMD(("RXTOHOST "));
-                       happy_meal_rx(hp, dev);
-               }
-
-       next:
-               spin_unlock(&hp->happy_lock);
-       }
-       HMD(("done\n"));
-
-       return IRQ_HANDLED;
-}
-#endif
-
/* ndo_open: grab the device IRQ (except for SBUS Quattro slots, whose
 * single shared interrupt was registered at probe time) and bring the
 * chip up via happy_meal_init().  Returns 0 or a negative errno.
 */
static int happy_meal_open(struct net_device *dev)
{
	struct happy_meal *hp = netdev_priv(dev);
	int res;

	HMD(("happy_meal_open: "));

	/* On SBUS Quattro QFE cards, all hme interrupts are concentrated
	 * into a single source which we register handling at probe time.
	 */
	if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
		if (request_irq(dev->irq, happy_meal_interrupt,
				IRQF_SHARED, dev->name, (void *)dev)) {
			HMD(("EAGAIN\n"));
			printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
			       dev->irq);

			return -EAGAIN;
		}
	}

	HMD(("to happy_meal_init\n"));

	spin_lock_irq(&hp->happy_lock);
	res = happy_meal_init(hp);
	spin_unlock_irq(&hp->happy_lock);

	/* Undo the request_irq() above if chip initialization failed. */
	if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
		free_irq(dev->irq, dev);
	return res;
}
-
/* ndo_stop: quiesce the chip, release ring buffers and, for
 * non-Quattro configurations, give back the interrupt line.
 */
static int happy_meal_close(struct net_device *dev)
{
	struct happy_meal *hp = netdev_priv(dev);

	spin_lock_irq(&hp->happy_lock);
	happy_meal_stop(hp, hp->gregs);
	happy_meal_clean_rings(hp);

	/* If auto-negotiation timer is running, kill it. */
	del_timer(&hp->happy_timer);

	spin_unlock_irq(&hp->happy_lock);

	/* On Quattro QFE cards, all hme interrupts are concentrated
	 * into a single source which we register handling at probe
	 * time and never unregister.
	 */
	if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
		free_irq(dev->irq, dev);

	return 0;
}
-
/* Transmit-path debug printk()s; compiled away unless SXDEBUG is set. */
#ifdef SXDEBUG
#define SXD(x) printk x
#else
#define SXD(x)
#endif
-
/* ndo_tx_timeout: the watchdog fired; dump diagnostic state, fully
 * re-initialize the chip, and restart the transmit queue.
 */
static void happy_meal_tx_timeout(struct net_device *dev)
{
	struct happy_meal *hp = netdev_priv(dev);

	printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	tx_dump_log();
	printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
		hme_read32(hp, hp->gregs + GREG_STAT),
		hme_read32(hp, hp->etxregs + ETX_CFG),
		hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));

	/* Full re-init rather than a targeted unwedge. */
	spin_lock_irq(&hp->happy_lock);
	happy_meal_init(hp);
	spin_unlock_irq(&hp->happy_lock);

	netif_wake_queue(dev);
}
-
/* ndo_start_xmit: DMA-map the skb, build one descriptor per fragment
 * (with hardware checksum offload fields when requested) and kick the
 * transmitter.
 *
 * NOTE(review): the dma_map_single()/dma_map_page() results are never
 * checked with dma_mapping_error(); a failed mapping would be handed
 * to the chip as-is — confirm whether this platform can fail mappings.
 */
static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct happy_meal *hp = netdev_priv(dev);
	int entry;
	u32 tx_flags;

	tx_flags = TXFLAG_OWN;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Encode where the checksum computation starts and where
		 * the chip must stuff the result into the descriptor flags.
		 */
		const u32 csum_start_off = skb_checksum_start_offset(skb);
		const u32 csum_stuff_off = csum_start_off + skb->csum_offset;

		tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
			    ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
			    ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
	}

	spin_lock_irq(&hp->happy_lock);

	/* Should never trigger: the queue is stopped below whenever the
	 * ring cannot hold another maximally-fragmented packet.
	 */
	if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&hp->happy_lock);
		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	entry = hp->tx_new;
	/* NOTE(review): 'len' is not in scope at this point; harmless
	 * only because SXD() expands to nothing unless SXDEBUG is set.
	 */
	SXD(("SX<l[%d]e[%d]>", len, entry));
	hp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		/* Linear skb: one SOP+EOP descriptor covers it all. */
		u32 mapping, len;

		len = skb->len;
		mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
		tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
		hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
			      (tx_flags | (len & TXFLAG_SIZE)),
			      mapping);
		entry = NEXT_TX(entry);
	} else {
		u32 first_len, first_mapping;
		int frag, first_entry = entry;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
					       DMA_TO_DEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len, mapping, this_txflags;

			len = this_frag->size;
			mapping = dma_map_page(hp->dma_dev, this_frag->page,
					       this_frag->page_offset, len,
					       DMA_TO_DEVICE);
			this_txflags = tx_flags;
			/* Last fragment closes the packet. */
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_txflags |= TXFLAG_EOP;
			hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
				      (this_txflags | (len & TXFLAG_SIZE)),
				      mapping);
			entry = NEXT_TX(entry);
		}
		/* Only now hand the SOP descriptor (OWN bit set) to the chip. */
		hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
			      (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
			      first_mapping);
	}

	hp->tx_new = entry;

	/* Stop the queue once a worst-case fragmented skb no longer fits. */
	if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	/* Get it going. */
	hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);

	spin_unlock_irq(&hp->happy_lock);

	tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
	return NETDEV_TX_OK;
}
-
-static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
-{
-       struct happy_meal *hp = netdev_priv(dev);
-
-       spin_lock_irq(&hp->happy_lock);
-       happy_meal_get_counters(hp, hp->bigmacregs);
-       spin_unlock_irq(&hp->happy_lock);
-
-       return &hp->net_stats;
-}
-
/* ndo_set_multicast_list: program the BigMAC receive filter to match
 * the interface's multicast configuration.
 */
static void happy_meal_set_multicast(struct net_device *dev)
{
	struct happy_meal *hp = netdev_priv(dev);
	void __iomem *bregs = hp->bigmacregs;
	struct netdev_hw_addr *ha;
	u32 crc;

	spin_lock_irq(&hp->happy_lock);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		/* Accept all multicast: fill the 4 x 16-bit hash filter. */
		hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
		hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
		hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
		hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
	} else if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode is a config-register bit, not a hash. */
		hme_write32(hp, bregs + BMAC_RXCFG,
			    hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
	} else {
		u16 hash_table[4];

		/* Build the 64-bit hash filter from the top 6 CRC bits
		 * of each multicast address.
		 */
		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
		hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
		hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
		hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
	}

	spin_unlock_irq(&hp->happy_lock);
}
-
-/* Ethtool support... */
/* Ethtool support... */
/* Report the current PHY configuration: autoneg results are decoded
 * from the link-partner ability register (LPA), forced settings from
 * the control register (BMCR).  Always returns 0.
 */
static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct happy_meal *hp = netdev_priv(dev);
	u32 speed;

	cmd->supported =
		(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);

	/* XXX hardcoded stuff for now */
	cmd->port = PORT_TP; /* XXX no MII support */
	cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */
	cmd->phy_address = 0; /* XXX fixed PHYAD */

	/* Record PHY settings. */
	spin_lock_irq(&hp->happy_lock);
	hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
	hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
	spin_unlock_irq(&hp->happy_lock);

	if (hp->sw_bmcr & BMCR_ANENABLE) {
		/* Autoneg: speed/duplex come from what the partner advertised. */
		cmd->autoneg = AUTONEG_ENABLE;
		speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
			 SPEED_100 : SPEED_10);
		if (speed == SPEED_100)
			cmd->duplex =
				(hp->sw_lpa & (LPA_100FULL)) ?
				DUPLEX_FULL : DUPLEX_HALF;
		else
			cmd->duplex =
				(hp->sw_lpa & (LPA_10FULL)) ?
				DUPLEX_FULL : DUPLEX_HALF;
	} else {
		/* Forced mode: speed/duplex come straight from the BMCR. */
		cmd->autoneg = AUTONEG_DISABLE;
		speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
		cmd->duplex =
			(hp->sw_bmcr & BMCR_FULLDPLX) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}
	ethtool_cmd_speed_set(cmd, speed);
	return 0;
}
-
/* Apply a new PHY configuration from ethtool: validate the request,
 * then restart (auto-)negotiation with the new parameters.  The
 * negotiation timer is stopped first so it cannot race the restart.
 */
static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct happy_meal *hp = netdev_priv(dev);

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	/* Forced mode must name a speed/duplex combination we support. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((ethtool_cmd_speed(cmd) != SPEED_100 &&
	      ethtool_cmd_speed(cmd) != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Ok, do it to it. */
	spin_lock_irq(&hp->happy_lock);
	del_timer(&hp->happy_timer);
	happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
	spin_unlock_irq(&hp->happy_lock);

	return 0;
}
-
-static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
-       struct happy_meal *hp = netdev_priv(dev);
-
-       strcpy(info->driver, "sunhme");
-       strcpy(info->version, "2.02");
-       if (hp->happy_flags & HFLAG_PCI) {
-               struct pci_dev *pdev = hp->happy_dev;
-               strcpy(info->bus_info, pci_name(pdev));
-       }
-#ifdef CONFIG_SBUS
-       else {
-               const struct linux_prom_registers *regs;
-               struct platform_device *op = hp->happy_dev;
-               regs = of_get_property(op->dev.of_node, "regs", NULL);
-               if (regs)
-                       sprintf(info->bus_info, "SBUS:%d",
-                               regs->which_io);
-       }
-#endif
-}
-
-static u32 hme_get_link(struct net_device *dev)
-{
-       struct happy_meal *hp = netdev_priv(dev);
-
-       spin_lock_irq(&hp->happy_lock);
-       hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
-       spin_unlock_irq(&hp->happy_lock);
-
-       return hp->sw_bmsr & BMSR_LSTATUS;
-}
-
/* Ethtool operations supported by this driver (legacy settings API). */
static const struct ethtool_ops hme_ethtool_ops = {
	.get_settings		= hme_get_settings,
	.set_settings		= hme_set_settings,
	.get_drvinfo		= hme_get_drvinfo,
	.get_link		= hme_get_link,
};
-
/* Ensures the version banner is printed only once across all probes. */
static int hme_version_printed;
-
-#ifdef CONFIG_SBUS
-/* Given a happy meal sbus device, find it's quattro parent.
- * If none exist, allocate and return a new one.
- *
- * Return NULL on failure.
- */
-static struct quattro * __devinit quattro_sbus_find(struct platform_device *child)
-{
-       struct device *parent = child->dev.parent;
-       struct platform_device *op;
-       struct quattro *qp;
-
-       op = to_platform_device(parent);
-       qp = dev_get_drvdata(&op->dev);
-       if (qp)
-               return qp;
-
-       qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
-       if (qp != NULL) {
-               int i;
-
-               for (i = 0; i < 4; i++)
-                       qp->happy_meals[i] = NULL;
-
-               qp->quattro_dev = child;
-               qp->next = qfe_sbus_list;
-               qfe_sbus_list = qp;
-
-               dev_set_drvdata(&op->dev, qp);
-       }
-       return qp;
-}
-
-/* After all quattro cards have been probed, we call these functions
- * to register the IRQ handlers for the cards that have been
- * successfully probed and skip the cards that failed to initialize
- */
-static int __init quattro_sbus_register_irqs(void)
-{
-       struct quattro *qp;
-
-       for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
-               struct platform_device *op = qp->quattro_dev;
-               int err, qfe_slot, skip = 0;
-
-               for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
-                       if (!qp->happy_meals[qfe_slot])
-                               skip = 1;
-               }
-               if (skip)
-                       continue;
-
-               err = request_irq(op->archdata.irqs[0],
-                                 quattro_sbus_interrupt,
-                                 IRQF_SHARED, "Quattro",
-                                 qp);
-               if (err != 0) {
-                       printk(KERN_ERR "Quattro HME: IRQ registration "
-                              "error %d.\n", err);
-                       return err;
-               }
-       }
-
-       return 0;
-}
-
-static void quattro_sbus_free_irqs(void)
-{
-       struct quattro *qp;
-
-       for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
-               struct platform_device *op = qp->quattro_dev;
-               int qfe_slot, skip = 0;
-
-               for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
-                       if (!qp->happy_meals[qfe_slot])
-                               skip = 1;
-               }
-               if (skip)
-                       continue;
-
-               free_irq(op->archdata.irqs[0], qp);
-       }
-}
-#endif /* CONFIG_SBUS */
-
-#ifdef CONFIG_PCI
-static struct quattro * __devinit quattro_pci_find(struct pci_dev *pdev)
-{
-       struct pci_dev *bdev = pdev->bus->self;
-       struct quattro *qp;
-
-       if (!bdev) return NULL;
-       for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
-               struct pci_dev *qpdev = qp->quattro_dev;
-
-               if (qpdev == bdev)
-                       return qp;
-       }
-       qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
-       if (qp != NULL) {
-               int i;
-
-               for (i = 0; i < 4; i++)
-                       qp->happy_meals[i] = NULL;
-
-               qp->quattro_dev = bdev;
-               qp->next = qfe_pci_list;
-               qfe_pci_list = qp;
-
-               /* No range tricks necessary on PCI. */
-               qp->nranges = 0;
-       }
-       return qp;
-}
-#endif /* CONFIG_PCI */
-
/* net_device_ops shared by the SBUS and PCI flavours of the chip. */
static const struct net_device_ops hme_netdev_ops = {
	.ndo_open		= happy_meal_open,
	.ndo_stop		= happy_meal_close,
	.ndo_start_xmit		= happy_meal_start_xmit,
	.ndo_tx_timeout		= happy_meal_tx_timeout,
	.ndo_get_stats		= happy_meal_get_stats,
	.ndo_set_multicast_list	= happy_meal_set_multicast,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
-
-#ifdef CONFIG_SBUS
/* Probe one SBUS Happy Meal instance (or one slot of a Quattro card):
 * resolve the MAC address, map the five register ranges, allocate the
 * descriptor block and register the net device.  Returns 0 or a
 * negative errno, unwinding everything on failure.
 */
static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
{
	struct device_node *dp = op->dev.of_node, *sbus_dp;
	struct quattro *qp = NULL;
	struct happy_meal *hp;
	struct net_device *dev;
	int i, qfe_slot = -1;
	int err = -ENODEV;

	sbus_dp = op->dev.parent->of_node;

	/* We can match PCI devices too, do not accept those here. */
	if (strcmp(sbus_dp->name, "sbus"))
		return err;

	if (is_qfe) {
		/* Claim the first free slot on the Quattro card. */
		qp = quattro_sbus_find(op);
		if (qp == NULL)
			goto err_out;
		for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
			if (qp->happy_meals[qfe_slot] == NULL)
				break;
		if (qfe_slot == 4)
			goto err_out;
	}

	err = -ENOMEM;
	dev = alloc_etherdev(sizeof(struct happy_meal));
	if (!dev)
		goto err_out;
	SET_NETDEV_DEV(dev, &op->dev);

	if (hme_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* If user did not specify a MAC address specifically, use
	 * the Quattro local-mac-address property...
	 */
	for (i = 0; i < 6; i++) {
		if (macaddr[i] != 0)
			break;
	}
	if (i < 6) { /* a mac address was given */
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = macaddr[i];
		/* Bump the module parameter so the next port probed
		 * gets the following address.
		 */
		macaddr[5]++;
	} else {
		const unsigned char *addr;
		int len;

		addr = of_get_property(dp, "local-mac-address", &len);

		if (qfe_slot != -1 && addr && len == 6)
			memcpy(dev->dev_addr, addr, 6);
		else
			memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	}

	hp = netdev_priv(dev);

	hp->happy_dev = op;
	hp->dma_dev = &op->dev;

	spin_lock_init(&hp->happy_lock);

	err = -ENODEV;
	if (qp != NULL) {
		hp->qfe_parent = qp;
		hp->qfe_ent = qfe_slot;
		qp->happy_meals[qfe_slot] = dev;
	}

	/* Map the five register ranges described by the OF resources. */
	hp->gregs = of_ioremap(&op->resource[0], 0,
			       GREG_REG_SIZE, "HME Global Regs");
	if (!hp->gregs) {
		printk(KERN_ERR "happymeal: Cannot map global registers.\n");
		goto err_out_free_netdev;
	}

	hp->etxregs = of_ioremap(&op->resource[1], 0,
				 ETX_REG_SIZE, "HME TX Regs");
	if (!hp->etxregs) {
		printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n");
		goto err_out_iounmap;
	}

	hp->erxregs = of_ioremap(&op->resource[2], 0,
				 ERX_REG_SIZE, "HME RX Regs");
	if (!hp->erxregs) {
		printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n");
		goto err_out_iounmap;
	}

	hp->bigmacregs = of_ioremap(&op->resource[3], 0,
				    BMAC_REG_SIZE, "HME BIGMAC Regs");
	if (!hp->bigmacregs) {
		printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n");
		goto err_out_iounmap;
	}

	hp->tcvregs = of_ioremap(&op->resource[4], 0,
				 TCVR_REG_SIZE, "HME Tranceiver Regs");
	if (!hp->tcvregs) {
		printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n");
		goto err_out_iounmap;
	}

	/* 0xff means the property was absent; treat it as revision a0. */
	hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
	if (hp->hm_revision == 0xff)
		hp->hm_revision = 0xa0;

	/* Now enable the feature flags we can. */
	if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
		hp->happy_flags = HFLAG_20_21;
	else if (hp->hm_revision != 0xa0)
		hp->happy_flags = HFLAG_NOT_A0;

	if (qp != NULL)
		hp->happy_flags |= HFLAG_QUATTRO;

	/* Get the supported DVMA burst sizes from our Happy SBUS. */
	hp->happy_bursts = of_getintprop_default(sbus_dp,
						 "burst-sizes", 0x00);

	hp->happy_block = dma_alloc_coherent(hp->dma_dev,
					     PAGE_SIZE,
					     &hp->hblock_dvma,
					     GFP_ATOMIC);
	err = -ENOMEM;
	if (!hp->happy_block) {
		printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
		goto err_out_iounmap;
	}

	/* Force check of the link first time we are brought up. */
	hp->linkcheck = 0;

	/* Force timer state to 'asleep' with count of zero. */
	hp->timer_state = asleep;
	hp->timer_ticks = 0;

	init_timer(&hp->happy_timer);

	hp->dev = dev;
	dev->netdev_ops = &hme_netdev_ops;
	dev->watchdog_timeo = 5*HZ;
	dev->ethtool_ops = &hme_ethtool_ops;

	/* Happy Meal can do it all... */
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;

	dev->irq = op->archdata.irqs[0];

#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
	/* Hook up SBUS register/descriptor accessors. */
	hp->read_desc32 = sbus_hme_read_desc32;
	hp->write_txd = sbus_hme_write_txd;
	hp->write_rxd = sbus_hme_write_rxd;
	hp->read32 = sbus_hme_read32;
	hp->write32 = sbus_hme_write32;
#endif

	/* Grrr, Happy Meal comes up by default not advertising
	 * full duplex 100baseT capabilities, fix this.
	 */
	spin_lock_irq(&hp->happy_lock);
	happy_meal_set_initial_advertisement(hp);
	spin_unlock_irq(&hp->happy_lock);

	err = register_netdev(hp->dev);
	if (err) {
		printk(KERN_ERR "happymeal: Cannot register net device, "
		       "aborting.\n");
		goto err_out_free_coherent;
	}

	dev_set_drvdata(&op->dev, hp);

	if (qfe_slot != -1)
		printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
		       dev->name, qfe_slot);
	else
		printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
		       dev->name);

	printk("%pM\n", dev->dev_addr);

	return 0;

	/* Error unwind: each label releases everything acquired after
	 * the next one down; the iounmap label checks each mapping
	 * individually because failures can happen mid-sequence.
	 */
err_out_free_coherent:
	dma_free_coherent(hp->dma_dev,
			  PAGE_SIZE,
			  hp->happy_block,
			  hp->hblock_dvma);

err_out_iounmap:
	if (hp->gregs)
		of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
	if (hp->etxregs)
		of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
	if (hp->erxregs)
		of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
	if (hp->bigmacregs)
		of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
	if (hp->tcvregs)
		of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);

	if (qp)
		qp->happy_meals[qfe_slot] = NULL;

err_out_free_netdev:
	free_netdev(dev);

err_out:
	return err;
}
-#endif
-
-#ifdef CONFIG_PCI
-#ifndef CONFIG_SPARC
-static int is_quattro_p(struct pci_dev *pdev)
-{
-       struct pci_dev *busdev = pdev->bus->self;
-       struct list_head *tmp;
-       int n_hmes;
-
-       if (busdev == NULL ||
-           busdev->vendor != PCI_VENDOR_ID_DEC ||
-           busdev->device != PCI_DEVICE_ID_DEC_21153)
-               return 0;
-
-       n_hmes = 0;
-       tmp = pdev->bus->devices.next;
-       while (tmp != &pdev->bus->devices) {
-               struct pci_dev *this_pdev = pci_dev_b(tmp);
-
-               if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
-                   this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
-                       n_hmes++;
-
-               tmp = tmp->next;
-       }
-
-       if (n_hmes != 4)
-               return 0;
-
-       return 1;
-}
-
/* Fetch MAC address from vital product data of PCI ROM. */
/* Scan the ROM image for the 6-byte descriptor header that precedes a
 * network-address record (bytes 0x4e 0x41 are ASCII "NA", followed by
 * length 0x06 — presumably the Sun VPD layout; confirm against the
 * card's PROM documentation).  The index'th match is copied out to
 * support multi-port (Quattro) cards.  Returns 1 on success, 0 if no
 * matching record was found.
 */
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
{
	int this_offset;

	for (this_offset = 0x20; this_offset < len; this_offset++) {
		void __iomem *p = rom_base + this_offset;

		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
			continue;

		/* Skip the header; when this is not the wanted index the
		 * loop increment resumes the scan just past it.
		 */
		this_offset += 6;
		p += 6;

		if (index == 0) {
			int i;

			for (i = 0; i < 6; i++)
				dev_addr[i] = readb(p + i);
			return 1;
		}
		index--;
	}
	return 0;
}
-
/* Derive a MAC address on non-sparc hosts: prefer the address stored
 * in the card's expansion ROM VPD, falling back to a random address
 * with the Sun OUI (08:00:20).
 */
static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
{
	size_t size;
	void __iomem *p = pci_map_rom(pdev, &size);

	if (p) {
		int index = 0;
		int found;

		/* On a Quattro the PCI slot number selects which of the
		 * four VPD addresses belongs to this function.
		 */
		if (is_quattro_p(pdev))
			index = PCI_SLOT(pdev->devfn);

		/* 0x55 0xaa is the PCI expansion ROM signature. */
		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr);
		pci_unmap_rom(pdev, p);
		if (found)
			return;
	}

	/* Sun MAC prefix then 3 random bytes. */
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(&dev_addr[3], 3);
}
-#endif /* !(CONFIG_SPARC) */
-
-static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
-                                         const struct pci_device_id *ent)
-{
-       struct quattro *qp = NULL;
-#ifdef CONFIG_SPARC
-       struct device_node *dp;
-#endif
-       struct happy_meal *hp;
-       struct net_device *dev;
-       void __iomem *hpreg_base;
-       unsigned long hpreg_res;
-       int i, qfe_slot = -1;
-       char prom_name[64];
-       int err;
-
-       /* Now make sure pci_dev cookie is there. */
-#ifdef CONFIG_SPARC
-       dp = pci_device_to_OF_node(pdev);
-       strcpy(prom_name, dp->name);
-#else
-       if (is_quattro_p(pdev))
-               strcpy(prom_name, "SUNW,qfe");
-       else
-               strcpy(prom_name, "SUNW,hme");
-#endif
-
-       err = -ENODEV;
-
-       if (pci_enable_device(pdev))
-               goto err_out;
-       pci_set_master(pdev);
-
-       if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
-               qp = quattro_pci_find(pdev);
-               if (qp == NULL)
-                       goto err_out;
-               for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
-                       if (qp->happy_meals[qfe_slot] == NULL)
-                               break;
-               if (qfe_slot == 4)
-                       goto err_out;
-       }
-
-       dev = alloc_etherdev(sizeof(struct happy_meal));
-       err = -ENOMEM;
-       if (!dev)
-               goto err_out;
-       SET_NETDEV_DEV(dev, &pdev->dev);
-
-       if (hme_version_printed++ == 0)
-               printk(KERN_INFO "%s", version);
-
-       dev->base_addr = (long) pdev;
-
-       hp = netdev_priv(dev);
-
-       hp->happy_dev = pdev;
-       hp->dma_dev = &pdev->dev;
-
-       spin_lock_init(&hp->happy_lock);
-
-       if (qp != NULL) {
-               hp->qfe_parent = qp;
-               hp->qfe_ent = qfe_slot;
-               qp->happy_meals[qfe_slot] = dev;
-       }
-
-       hpreg_res = pci_resource_start(pdev, 0);
-       err = -ENODEV;
-       if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
-               printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
-               goto err_out_clear_quattro;
-       }
-       if (pci_request_regions(pdev, DRV_NAME)) {
-               printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
-                      "aborting.\n");
-               goto err_out_clear_quattro;
-       }
-
-       if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == NULL) {
-               printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
-               goto err_out_free_res;
-       }
-
-       for (i = 0; i < 6; i++) {
-               if (macaddr[i] != 0)
-                       break;
-       }
-       if (i < 6) { /* a mac address was given */
-               for (i = 0; i < 6; i++)
-                       dev->dev_addr[i] = macaddr[i];
-               macaddr[5]++;
-       } else {
-#ifdef CONFIG_SPARC
-               const unsigned char *addr;
-               int len;
-
-               if (qfe_slot != -1 &&
-                   (addr = of_get_property(dp, "local-mac-address", &len))
-                       != NULL &&
-                   len == 6) {
-                       memcpy(dev->dev_addr, addr, 6);
-               } else {
-                       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
-               }
-#else
-               get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
-#endif
-       }
-
-       /* Layout registers. */
-       hp->gregs      = (hpreg_base + 0x0000UL);
-       hp->etxregs    = (hpreg_base + 0x2000UL);
-       hp->erxregs    = (hpreg_base + 0x4000UL);
-       hp->bigmacregs = (hpreg_base + 0x6000UL);
-       hp->tcvregs    = (hpreg_base + 0x7000UL);
-
-#ifdef CONFIG_SPARC
-       hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
-       if (hp->hm_revision == 0xff)
-               hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
-#else
-       /* works with this on non-sparc hosts */
-       hp->hm_revision = 0x20;
-#endif
-
-       /* Now enable the feature flags we can. */
-       if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
-               hp->happy_flags = HFLAG_20_21;
-       else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
-               hp->happy_flags = HFLAG_NOT_A0;
-
-       if (qp != NULL)
-               hp->happy_flags |= HFLAG_QUATTRO;
-
-       /* And of course, indicate this is PCI. */
-       hp->happy_flags |= HFLAG_PCI;
-
-#ifdef CONFIG_SPARC
-       /* Assume PCI happy meals can handle all burst sizes. */
-       hp->happy_bursts = DMA_BURSTBITS;
-#endif
-
-       hp->happy_block = (struct hmeal_init_block *)
-               dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL);
-
-       err = -ENODEV;
-       if (!hp->happy_block) {
-               printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
-               goto err_out_iounmap;
-       }
-
-       hp->linkcheck = 0;
-       hp->timer_state = asleep;
-       hp->timer_ticks = 0;
-
-       init_timer(&hp->happy_timer);
-
-       hp->dev = dev;
-       dev->netdev_ops = &hme_netdev_ops;
-       dev->watchdog_timeo = 5*HZ;
-       dev->ethtool_ops = &hme_ethtool_ops;
-       dev->irq = pdev->irq;
-       dev->dma = 0;
-
-       /* Happy Meal can do it all... */
-       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
-       dev->features |= dev->hw_features | NETIF_F_RXCSUM;
-
-#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
-       /* Hook up PCI register/descriptor accessors. */
-       hp->read_desc32 = pci_hme_read_desc32;
-       hp->write_txd = pci_hme_write_txd;
-       hp->write_rxd = pci_hme_write_rxd;
-       hp->read32 = pci_hme_read32;
-       hp->write32 = pci_hme_write32;
-#endif
-
-       /* Grrr, Happy Meal comes up by default not advertising
-        * full duplex 100baseT capabilities, fix this.
-        */
-       spin_lock_irq(&hp->happy_lock);
-       happy_meal_set_initial_advertisement(hp);
-       spin_unlock_irq(&hp->happy_lock);
-
-       err = register_netdev(hp->dev);
-       if (err) {
-               printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
-                      "aborting.\n");
-               goto err_out_iounmap;
-       }
-
-       dev_set_drvdata(&pdev->dev, hp);
-
-       if (!qfe_slot) {
-               struct pci_dev *qpdev = qp->quattro_dev;
-
-               prom_name[0] = 0;
-               if (!strncmp(dev->name, "eth", 3)) {
-                       int i = simple_strtoul(dev->name + 3, NULL, 10);
-                       sprintf(prom_name, "-%d", i + 3);
-               }
-               printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
-               if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
-                   qpdev->device == PCI_DEVICE_ID_DEC_21153)
-                       printk("DEC 21153 PCI Bridge\n");
-               else
-                       printk("unknown bridge %04x.%04x\n",
-                               qpdev->vendor, qpdev->device);
-       }
-
-       if (qfe_slot != -1)
-               printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
-                      dev->name, qfe_slot);
-       else
-               printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
-                      dev->name);
-
-       printk("%pM\n", dev->dev_addr);
-
-       return 0;
-
-err_out_iounmap:
-       iounmap(hp->gregs);
-
-err_out_free_res:
-       pci_release_regions(pdev);
-
-err_out_clear_quattro:
-       if (qp != NULL)
-               qp->happy_meals[qfe_slot] = NULL;
-
-       free_netdev(dev);
-
-err_out:
-       return err;
-}
-
-static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
-{
-       struct happy_meal *hp = dev_get_drvdata(&pdev->dev);
-       struct net_device *net_dev = hp->dev;
-
-       unregister_netdev(net_dev);
-
-       dma_free_coherent(hp->dma_dev, PAGE_SIZE,
-                         hp->happy_block, hp->hblock_dvma);
-       iounmap(hp->gregs);
-       pci_release_regions(hp->happy_dev);
-
-       free_netdev(net_dev);
-
-       dev_set_drvdata(&pdev->dev, NULL);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
-       { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
-       { }                     /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);
-
-static struct pci_driver hme_pci_driver = {
-       .name           = "hme",
-       .id_table       = happymeal_pci_ids,
-       .probe          = happy_meal_pci_probe,
-       .remove         = __devexit_p(happy_meal_pci_remove),
-};
-
-static int __init happy_meal_pci_init(void)
-{
-       return pci_register_driver(&hme_pci_driver);
-}
-
-static void happy_meal_pci_exit(void)
-{
-       pci_unregister_driver(&hme_pci_driver);
-
-       while (qfe_pci_list) {
-               struct quattro *qfe = qfe_pci_list;
-               struct quattro *next = qfe->next;
-
-               kfree(qfe);
-
-               qfe_pci_list = next;
-       }
-}
-
-#endif
-
-#ifdef CONFIG_SBUS
-static const struct of_device_id hme_sbus_match[];
-static int __devinit hme_sbus_probe(struct platform_device *op)
-{
-       const struct of_device_id *match;
-       struct device_node *dp = op->dev.of_node;
-       const char *model = of_get_property(dp, "model", NULL);
-       int is_qfe;
-
-       match = of_match_device(hme_sbus_match, &op->dev);
-       if (!match)
-               return -EINVAL;
-       is_qfe = (match->data != NULL);
-
-       if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
-               is_qfe = 1;
-
-       return happy_meal_sbus_probe_one(op, is_qfe);
-}
-
-static int __devexit hme_sbus_remove(struct platform_device *op)
-{
-       struct happy_meal *hp = dev_get_drvdata(&op->dev);
-       struct net_device *net_dev = hp->dev;
-
-       unregister_netdev(net_dev);
-
-       /* XXX qfe parent interrupt... */
-
-       of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
-       of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
-       of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
-       of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
-       of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
-       dma_free_coherent(hp->dma_dev,
-                         PAGE_SIZE,
-                         hp->happy_block,
-                         hp->hblock_dvma);
-
-       free_netdev(net_dev);
-
-       dev_set_drvdata(&op->dev, NULL);
-
-       return 0;
-}
-
-static const struct of_device_id hme_sbus_match[] = {
-       {
-               .name = "SUNW,hme",
-       },
-       {
-               .name = "SUNW,qfe",
-               .data = (void *) 1,
-       },
-       {
-               .name = "qfe",
-               .data = (void *) 1,
-       },
-       {},
-};
-
-MODULE_DEVICE_TABLE(of, hme_sbus_match);
-
-static struct platform_driver hme_sbus_driver = {
-       .driver = {
-               .name = "hme",
-               .owner = THIS_MODULE,
-               .of_match_table = hme_sbus_match,
-       },
-       .probe          = hme_sbus_probe,
-       .remove         = __devexit_p(hme_sbus_remove),
-};
-
-static int __init happy_meal_sbus_init(void)
-{
-       int err;
-
-       err = platform_driver_register(&hme_sbus_driver);
-       if (!err)
-               err = quattro_sbus_register_irqs();
-
-       return err;
-}
-
-static void happy_meal_sbus_exit(void)
-{
-       platform_driver_unregister(&hme_sbus_driver);
-       quattro_sbus_free_irqs();
-
-       while (qfe_sbus_list) {
-               struct quattro *qfe = qfe_sbus_list;
-               struct quattro *next = qfe->next;
-
-               kfree(qfe);
-
-               qfe_sbus_list = next;
-       }
-}
-#endif
-
-static int __init happy_meal_probe(void)
-{
-       int err = 0;
-
-#ifdef CONFIG_SBUS
-       err = happy_meal_sbus_init();
-#endif
-#ifdef CONFIG_PCI
-       if (!err) {
-               err = happy_meal_pci_init();
-#ifdef CONFIG_SBUS
-               if (err)
-                       happy_meal_sbus_exit();
-#endif
-       }
-#endif
-
-       return err;
-}
-
-
-static void __exit happy_meal_exit(void)
-{
-#ifdef CONFIG_SBUS
-       happy_meal_sbus_exit();
-#endif
-#ifdef CONFIG_PCI
-       happy_meal_pci_exit();
-#endif
-}
-
-module_init(happy_meal_probe);
-module_exit(happy_meal_exit);
diff --git a/drivers/net/sunhme.h b/drivers/net/sunhme.h
deleted file mode 100644 (file)
index 64f2783..0000000
+++ /dev/null
@@ -1,512 +0,0 @@
-/* $Id: sunhme.h,v 1.33 2001/08/03 06:23:04 davem Exp $
- * sunhme.h: Definitions for Sparc HME/BigMac 10/100baseT ethernet driver.
- *           Also known as the "Happy Meal".
- *
- * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com)
- */
-
-#ifndef _SUNHME_H
-#define _SUNHME_H
-
-#include <linux/pci.h>
-
-/* Happy Meal global registers. */
-#define GREG_SWRESET   0x000UL /* Software Reset  */
-#define GREG_CFG       0x004UL /* Config Register */
-#define GREG_STAT      0x108UL /* Status          */
-#define GREG_IMASK     0x10cUL /* Interrupt Mask  */
-#define GREG_REG_SIZE  0x110UL
-
-/* Global reset register. */
-#define GREG_RESET_ETX         0x01
-#define GREG_RESET_ERX         0x02
-#define GREG_RESET_ALL         0x03
-
-/* Global config register. */
-#define GREG_CFG_BURSTMSK      0x03
-#define GREG_CFG_BURST16       0x00
-#define GREG_CFG_BURST32       0x01
-#define GREG_CFG_BURST64       0x02
-#define GREG_CFG_64BIT         0x04
-#define GREG_CFG_PARITY        0x08
-#define GREG_CFG_RESV          0x10
-
-/* Global status register. */
-#define GREG_STAT_GOTFRAME     0x00000001 /* Received a frame                         */
-#define GREG_STAT_RCNTEXP      0x00000002 /* Receive frame counter expired            */
-#define GREG_STAT_ACNTEXP      0x00000004 /* Align-error counter expired              */
-#define GREG_STAT_CCNTEXP      0x00000008 /* CRC-error counter expired                */
-#define GREG_STAT_LCNTEXP      0x00000010 /* Length-error counter expired             */
-#define GREG_STAT_RFIFOVF      0x00000020 /* Receive FIFO overflow                    */
-#define GREG_STAT_CVCNTEXP     0x00000040 /* Code-violation counter expired           */
-#define GREG_STAT_STSTERR      0x00000080 /* Test error in XIF for SQE                */
-#define GREG_STAT_SENTFRAME    0x00000100 /* Transmitted a frame                      */
-#define GREG_STAT_TFIFO_UND    0x00000200 /* Transmit FIFO underrun                   */
-#define GREG_STAT_MAXPKTERR    0x00000400 /* Max-packet size error                    */
-#define GREG_STAT_NCNTEXP      0x00000800 /* Normal-collision counter expired         */
-#define GREG_STAT_ECNTEXP      0x00001000 /* Excess-collision counter expired         */
-#define GREG_STAT_LCCNTEXP     0x00002000 /* Late-collision counter expired           */
-#define GREG_STAT_FCNTEXP      0x00004000 /* First-collision counter expired          */
-#define GREG_STAT_DTIMEXP      0x00008000 /* Defer-timer expired                      */
-#define GREG_STAT_RXTOHOST     0x00010000 /* Moved from receive-FIFO to host memory   */
-#define GREG_STAT_NORXD        0x00020000 /* No more receive descriptors              */
-#define GREG_STAT_RXERR        0x00040000 /* Error during receive dma                 */
-#define GREG_STAT_RXLATERR     0x00080000 /* Late error during receive dma            */
-#define GREG_STAT_RXPERR       0x00100000 /* Parity error during receive dma          */
-#define GREG_STAT_RXTERR       0x00200000 /* Tag error during receive dma             */
-#define GREG_STAT_EOPERR       0x00400000 /* Transmit descriptor did not have EOP set */
-#define GREG_STAT_MIFIRQ       0x00800000 /* MIF is signaling an interrupt condition  */
-#define GREG_STAT_HOSTTOTX     0x01000000 /* Moved from host memory to transmit-FIFO  */
-#define GREG_STAT_TXALL        0x02000000 /* Transmitted all packets in the tx-fifo   */
-#define GREG_STAT_TXEACK       0x04000000 /* Error during transmit dma                */
-#define GREG_STAT_TXLERR       0x08000000 /* Late error during transmit dma           */
-#define GREG_STAT_TXPERR       0x10000000 /* Parity error during transmit dma         */
-#define GREG_STAT_TXTERR       0x20000000 /* Tag error during transmit dma            */
-#define GREG_STAT_SLVERR       0x40000000 /* PIO access got an error                  */
-#define GREG_STAT_SLVPERR      0x80000000 /* PIO access got a parity error            */
-
-/* All interesting error conditions. */
-#define GREG_STAT_ERRORS       0xfc7efefc
-
-/* Global interrupt mask register. */
-#define GREG_IMASK_GOTFRAME    0x00000001 /* Received a frame                         */
-#define GREG_IMASK_RCNTEXP     0x00000002 /* Receive frame counter expired            */
-#define GREG_IMASK_ACNTEXP     0x00000004 /* Align-error counter expired              */
-#define GREG_IMASK_CCNTEXP     0x00000008 /* CRC-error counter expired                */
-#define GREG_IMASK_LCNTEXP     0x00000010 /* Length-error counter expired             */
-#define GREG_IMASK_RFIFOVF     0x00000020 /* Receive FIFO overflow                    */
-#define GREG_IMASK_CVCNTEXP    0x00000040 /* Code-violation counter expired           */
-#define GREG_IMASK_STSTERR     0x00000080 /* Test error in XIF for SQE                */
-#define GREG_IMASK_SENTFRAME   0x00000100 /* Transmitted a frame                      */
-#define GREG_IMASK_TFIFO_UND   0x00000200 /* Transmit FIFO underrun                   */
-#define GREG_IMASK_MAXPKTERR   0x00000400 /* Max-packet size error                    */
-#define GREG_IMASK_NCNTEXP     0x00000800 /* Normal-collision counter expired         */
-#define GREG_IMASK_ECNTEXP     0x00001000 /* Excess-collision counter expired         */
-#define GREG_IMASK_LCCNTEXP    0x00002000 /* Late-collision counter expired           */
-#define GREG_IMASK_FCNTEXP     0x00004000 /* First-collision counter expired          */
-#define GREG_IMASK_DTIMEXP     0x00008000 /* Defer-timer expired                      */
-#define GREG_IMASK_RXTOHOST    0x00010000 /* Moved from receive-FIFO to host memory   */
-#define GREG_IMASK_NORXD       0x00020000 /* No more receive descriptors              */
-#define GREG_IMASK_RXERR       0x00040000 /* Error during receive dma                 */
-#define GREG_IMASK_RXLATERR    0x00080000 /* Late error during receive dma            */
-#define GREG_IMASK_RXPERR      0x00100000 /* Parity error during receive dma          */
-#define GREG_IMASK_RXTERR      0x00200000 /* Tag error during receive dma             */
-#define GREG_IMASK_EOPERR      0x00400000 /* Transmit descriptor did not have EOP set */
-#define GREG_IMASK_MIFIRQ      0x00800000 /* MIF is signaling an interrupt condition  */
-#define GREG_IMASK_HOSTTOTX    0x01000000 /* Moved from host memory to transmit-FIFO  */
-#define GREG_IMASK_TXALL       0x02000000 /* Transmitted all packets in the tx-fifo   */
-#define GREG_IMASK_TXEACK      0x04000000 /* Error during transmit dma                */
-#define GREG_IMASK_TXLERR      0x08000000 /* Late error during transmit dma           */
-#define GREG_IMASK_TXPERR      0x10000000 /* Parity error during transmit dma         */
-#define GREG_IMASK_TXTERR      0x20000000 /* Tag error during transmit dma            */
-#define GREG_IMASK_SLVERR      0x40000000 /* PIO access got an error                  */
-#define GREG_IMASK_SLVPERR     0x80000000 /* PIO access got a parity error            */
-
-/* Happy Meal external transmitter registers. */
-#define ETX_PENDING    0x00UL  /* Transmit pending/wakeup register */
-#define ETX_CFG                0x04UL  /* Transmit config register         */
-#define ETX_RING       0x08UL  /* Transmit ring pointer            */
-#define ETX_BBASE      0x0cUL  /* Transmit buffer base             */
-#define ETX_BDISP      0x10UL  /* Transmit buffer displacement     */
-#define ETX_FIFOWPTR   0x14UL  /* FIFO write ptr                   */
-#define ETX_FIFOSWPTR  0x18UL  /* FIFO write ptr (shadow register) */
-#define ETX_FIFORPTR   0x1cUL  /* FIFO read ptr                    */
-#define ETX_FIFOSRPTR  0x20UL  /* FIFO read ptr (shadow register)  */
-#define ETX_FIFOPCNT   0x24UL  /* FIFO packet counter              */
-#define ETX_SMACHINE   0x28UL  /* Transmitter state machine        */
-#define ETX_RSIZE      0x2cUL  /* Ring descriptor size             */
-#define ETX_BPTR       0x30UL  /* Transmit data buffer ptr         */
-#define ETX_REG_SIZE   0x34UL
-
-/* ETX transmit pending register. */
-#define ETX_TP_DMAWAKEUP         0x00000001 /* Restart transmit dma             */
-
-/* ETX config register. */
-#define ETX_CFG_DMAENABLE        0x00000001 /* Enable transmit dma              */
-#define ETX_CFG_FIFOTHRESH       0x000003fe /* Transmit FIFO threshold          */
-#define ETX_CFG_IRQDAFTER        0x00000400 /* Interrupt after TX-FIFO drained  */
-#define ETX_CFG_IRQDBEFORE       0x00000000 /* Interrupt before TX-FIFO drained */
-
-#define ETX_RSIZE_SHIFT          4
-
-/* Happy Meal external receiver registers. */
-#define ERX_CFG                0x00UL  /* Receiver config register         */
-#define ERX_RING       0x04UL  /* Receiver ring ptr                */
-#define ERX_BPTR       0x08UL  /* Receiver buffer ptr              */
-#define ERX_FIFOWPTR   0x0cUL  /* FIFO write ptr                   */
-#define ERX_FIFOSWPTR  0x10UL  /* FIFO write ptr (shadow register) */
-#define ERX_FIFORPTR   0x14UL  /* FIFO read ptr                    */
-#define ERX_FIFOSRPTR  0x18UL  /* FIFO read ptr (shadow register)  */
-#define ERX_SMACHINE   0x1cUL  /* Receiver state machine           */
-#define ERX_REG_SIZE   0x20UL
-
-/* ERX config register. */
-#define ERX_CFG_DMAENABLE    0x00000001 /* Enable receive DMA        */
-#define ERX_CFG_RESV1        0x00000006 /* Unused...                 */
-#define ERX_CFG_BYTEOFFSET   0x00000038 /* Receive first byte offset */
-#define ERX_CFG_RESV2        0x000001c0 /* Unused...                 */
-#define ERX_CFG_SIZE32       0x00000000 /* Receive ring size == 32   */
-#define ERX_CFG_SIZE64       0x00000200 /* Receive ring size == 64   */
-#define ERX_CFG_SIZE128      0x00000400 /* Receive ring size == 128  */
-#define ERX_CFG_SIZE256      0x00000600 /* Receive ring size == 256  */
-#define ERX_CFG_RESV3        0x0000f800 /* Unused...                 */
-#define ERX_CFG_CSUMSTART    0x007f0000 /* Offset of checksum start,
-                                        * in halfwords. */
-
-/* I'd like a Big Mac, small fries, small coke, and SparcLinux please. */
-#define BMAC_XIFCFG    0x0000UL        /* XIF config register                */
-       /* 0x4-->0x204, reserved */
-#define BMAC_TXSWRESET 0x208UL /* Transmitter software reset         */
-#define BMAC_TXCFG     0x20cUL /* Transmitter config register        */
-#define BMAC_IGAP1     0x210UL /* Inter-packet gap 1                 */
-#define BMAC_IGAP2     0x214UL /* Inter-packet gap 2                 */
-#define BMAC_ALIMIT    0x218UL /* Transmit attempt limit             */
-#define BMAC_STIME     0x21cUL /* Transmit slot time                 */
-#define BMAC_PLEN      0x220UL /* Size of transmit preamble          */
-#define BMAC_PPAT      0x224UL /* Pattern for transmit preamble      */
-#define BMAC_TXSDELIM  0x228UL /* Transmit delimiter                 */
-#define BMAC_JSIZE     0x22cUL /* Jam size                           */
-#define BMAC_TXMAX     0x230UL /* Transmit max pkt size              */
-#define BMAC_TXMIN     0x234UL /* Transmit min pkt size              */
-#define BMAC_PATTEMPT  0x238UL /* Count of transmit peak attempts    */
-#define BMAC_DTCTR     0x23cUL /* Transmit defer timer               */
-#define BMAC_NCCTR     0x240UL /* Transmit normal-collision counter  */
-#define BMAC_FCCTR     0x244UL /* Transmit first-collision counter   */
-#define BMAC_EXCTR     0x248UL /* Transmit excess-collision counter  */
-#define BMAC_LTCTR     0x24cUL /* Transmit late-collision counter    */
-#define BMAC_RSEED     0x250UL /* Transmit random number seed        */
-#define BMAC_TXSMACHINE        0x254UL /* Transmit state machine             */
-       /* 0x258-->0x304, reserved */
-#define BMAC_RXSWRESET 0x308UL /* Receiver software reset            */
-#define BMAC_RXCFG     0x30cUL /* Receiver config register           */
-#define BMAC_RXMAX     0x310UL /* Receive max pkt size               */
-#define BMAC_RXMIN     0x314UL /* Receive min pkt size               */
-#define BMAC_MACADDR2  0x318UL /* Ether address register 2           */
-#define BMAC_MACADDR1  0x31cUL /* Ether address register 1           */
-#define BMAC_MACADDR0  0x320UL /* Ether address register 0           */
-#define BMAC_FRCTR     0x324UL /* Receive frame receive counter      */
-#define BMAC_GLECTR    0x328UL /* Receive giant-length error counter */
-#define BMAC_UNALECTR  0x32cUL /* Receive unaligned error counter    */
-#define BMAC_RCRCECTR  0x330UL /* Receive CRC error counter          */
-#define BMAC_RXSMACHINE        0x334UL /* Receiver state machine             */
-#define BMAC_RXCVALID  0x338UL /* Receiver code violation            */
-       /* 0x33c, reserved */
-#define BMAC_HTABLE3   0x340UL /* Hash table 3                       */
-#define BMAC_HTABLE2   0x344UL /* Hash table 2                       */
-#define BMAC_HTABLE1   0x348UL /* Hash table 1                       */
-#define BMAC_HTABLE0   0x34cUL /* Hash table 0                       */
-#define BMAC_AFILTER2  0x350UL /* Address filter 2                   */
-#define BMAC_AFILTER1  0x354UL /* Address filter 1                   */
-#define BMAC_AFILTER0  0x358UL /* Address filter 0                   */
-#define BMAC_AFMASK    0x35cUL /* Address filter mask                */
-#define BMAC_REG_SIZE  0x360UL
-
-/* BigMac XIF config register. */
-#define BIGMAC_XCFG_ODENABLE  0x00000001 /* Output driver enable         */
-#define BIGMAC_XCFG_XLBACK    0x00000002 /* Loopback-mode XIF enable     */
-#define BIGMAC_XCFG_MLBACK    0x00000004 /* Loopback-mode MII enable     */
-#define BIGMAC_XCFG_MIIDISAB  0x00000008 /* MII receive buffer disable   */
-#define BIGMAC_XCFG_SQENABLE  0x00000010 /* SQE test enable              */
-#define BIGMAC_XCFG_SQETWIN   0x000003e0 /* SQE time window              */
-#define BIGMAC_XCFG_LANCE     0x00000010 /* Lance mode enable            */
-#define BIGMAC_XCFG_LIPG0     0x000003e0 /* Lance mode IPG0              */
-
-/* BigMac transmit config register. */
-#define BIGMAC_TXCFG_ENABLE   0x00000001 /* Enable the transmitter       */
-#define BIGMAC_TXCFG_SMODE    0x00000020 /* Enable slow transmit mode    */
-#define BIGMAC_TXCFG_CIGN     0x00000040 /* Ignore transmit collisions   */
-#define BIGMAC_TXCFG_FCSOFF   0x00000080 /* Do not emit FCS              */
-#define BIGMAC_TXCFG_DBACKOFF 0x00000100 /* Disable backoff              */
-#define BIGMAC_TXCFG_FULLDPLX 0x00000200 /* Enable full-duplex           */
-#define BIGMAC_TXCFG_DGIVEUP  0x00000400 /* Don't give up on transmits   */
-
-/* BigMac receive config register. */
-#define BIGMAC_RXCFG_ENABLE   0x00000001 /* Enable the receiver             */
-#define BIGMAC_RXCFG_PSTRIP   0x00000020 /* Pad byte strip enable           */
-#define BIGMAC_RXCFG_PMISC    0x00000040 /* Enable promiscuous mode          */
-#define BIGMAC_RXCFG_DERR     0x00000080 /* Disable error checking          */
-#define BIGMAC_RXCFG_DCRCS    0x00000100 /* Disable CRC stripping           */
-#define BIGMAC_RXCFG_REJME    0x00000200 /* Reject packets addressed to me  */
-#define BIGMAC_RXCFG_PGRP     0x00000400 /* Enable promisc group mode       */
-#define BIGMAC_RXCFG_HENABLE  0x00000800 /* Enable the hash filter          */
-#define BIGMAC_RXCFG_AENABLE  0x00001000 /* Enable the address filter       */
-
-/* These are the "Management Interface" (ie. MIF) registers of the transceiver. */
-#define TCVR_BBCLOCK   0x00UL  /* Bit bang clock register          */
-#define TCVR_BBDATA    0x04UL  /* Bit bang data register           */
-#define TCVR_BBOENAB   0x08UL  /* Bit bang output enable           */
-#define TCVR_FRAME     0x0cUL  /* Frame control/data register      */
-#define TCVR_CFG       0x10UL  /* MIF config register              */
-#define TCVR_IMASK     0x14UL  /* MIF interrupt mask               */
-#define TCVR_STATUS    0x18UL  /* MIF status                       */
-#define TCVR_SMACHINE  0x1cUL  /* MIF state machine                */
-#define TCVR_REG_SIZE  0x20UL
-
-/* Frame commands. */
-#define FRAME_WRITE           0x50020000
-#define FRAME_READ            0x60020000
-
-/* Transceiver config register */
-#define TCV_CFG_PSELECT       0x00000001 /* Select PHY                      */
-#define TCV_CFG_PENABLE       0x00000002 /* Enable MIF polling              */
-#define TCV_CFG_BENABLE       0x00000004 /* Enable the "bit banger" oh baby */
-#define TCV_CFG_PREGADDR      0x000000f8 /* Address of poll register        */
-#define TCV_CFG_MDIO0         0x00000100 /* MDIO zero, data/attached        */
-#define TCV_CFG_MDIO1         0x00000200 /* MDIO one,  data/attached        */
-#define TCV_CFG_PDADDR        0x00007c00 /* Device PHY address polling      */
-
-/* Here are some PHY addresses. */
-#define TCV_PADDR_ETX         0          /* Internal transceiver            */
-#define TCV_PADDR_ITX         1          /* External transceiver            */
-
-/* Transceiver status register */
-#define TCV_STAT_BASIC        0xffff0000 /* The "basic" part                */
-#define TCV_STAT_NORMAL       0x0000ffff /* The "non-basic" part            */
-
-/* Inside the Happy Meal transceiver is the physical layer, they use an
- * implementations for National Semiconductor, part number DP83840VCE.
- * You can retrieve the data sheets and programming docs for this beast
- * from http://www.national.com/
- *
- * The DP83840 is capable of both 10 and 100Mbps ethernet, in both
- * half and full duplex mode.  It also supports auto negotiation.
- *
- * But.... THIS THING IS A PAIN IN THE ASS TO PROGRAM!
- * Debugging eeprom burnt code is more fun than programming this chip!
- */
-
-/* Generic MII registers defined in linux/mii.h, these below
- * are DP83840 specific.
- */
-#define DP83840_CSCONFIG        0x17        /* CS configuration            */
-
-/* The Carrier Sense config register. */
-#define CSCONFIG_RESV1          0x0001  /* Unused...                   */
-#define CSCONFIG_LED4           0x0002  /* Pin for full-dplx LED4      */
-#define CSCONFIG_LED1           0x0004  /* Pin for conn-status LED1    */
-#define CSCONFIG_RESV2          0x0008  /* Unused...                   */
-#define CSCONFIG_TCVDISAB       0x0010  /* Turns off the transceiver   */
-#define CSCONFIG_DFBYPASS       0x0020  /* Bypass disconnect function  */
-#define CSCONFIG_GLFORCE        0x0040  /* Good link force for 100mbps */
-#define CSCONFIG_CLKTRISTATE    0x0080  /* Tristate 25m clock          */
-#define CSCONFIG_RESV3          0x0700  /* Unused...                   */
-#define CSCONFIG_ENCODE         0x0800  /* 1=MLT-3, 0=binary           */
-#define CSCONFIG_RENABLE        0x1000  /* Repeater mode enable        */
-#define CSCONFIG_TCDISABLE      0x2000  /* Disable timeout counter     */
-#define CSCONFIG_RESV4          0x4000  /* Unused...                   */
-#define CSCONFIG_NDISABLE       0x8000  /* Disable NRZI                */
-
-/* Happy Meal descriptor rings and such.
- * All descriptor rings must be aligned on a 2K boundary.
- * All receive buffers must be 64 byte aligned.
- * Always write the address first before setting the ownership
- * bits to avoid races with the hardware scanning the ring.
- */
-typedef u32 __bitwise__ hme32;
-
-struct happy_meal_rxd {
-       hme32 rx_flags;
-       hme32 rx_addr;
-};
-
-#define RXFLAG_OWN         0x80000000 /* 1 = hardware, 0 = software */
-#define RXFLAG_OVERFLOW    0x40000000 /* 1 = buffer overflow        */
-#define RXFLAG_SIZE        0x3fff0000 /* Size of the buffer         */
-#define RXFLAG_CSUM        0x0000ffff /* HW computed checksum       */
-
-struct happy_meal_txd {
-       hme32 tx_flags;
-       hme32 tx_addr;
-};
-
-#define TXFLAG_OWN         0x80000000 /* 1 = hardware, 0 = software */
-#define TXFLAG_SOP         0x40000000 /* 1 = start of packet        */
-#define TXFLAG_EOP         0x20000000 /* 1 = end of packet          */
-#define TXFLAG_CSENABLE    0x10000000 /* 1 = enable hw-checksums    */
-#define TXFLAG_CSLOCATION  0x0ff00000 /* Where to stick the csum    */
-#define TXFLAG_CSBUFBEGIN  0x000fc000 /* Where to begin checksum    */
-#define TXFLAG_SIZE        0x00003fff /* Size of the packet         */
-
-#define TX_RING_SIZE       32         /* Must be >16 and <255, multiple of 16  */
-#define RX_RING_SIZE       32         /* see ERX_CFG_SIZE* for possible values */
-
-#if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0)
-#error TX_RING_SIZE holds illegal value
-#endif
-
-#define TX_RING_MAXSIZE    256
-#define RX_RING_MAXSIZE    256
-
-/* We use a 14 byte offset for checksum computation. */
-#if (RX_RING_SIZE == 32)
-#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE32|((14/2)<<16))
-#else
-#if (RX_RING_SIZE == 64)
-#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE64|((14/2)<<16))
-#else
-#if (RX_RING_SIZE == 128)
-#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE128|((14/2)<<16))
-#else
-#if (RX_RING_SIZE == 256)
-#define ERX_CFG_DEFAULT(off) (ERX_CFG_DMAENABLE|((off)<<3)|ERX_CFG_SIZE256|((14/2)<<16))
-#else
-#error RX_RING_SIZE holds illegal value
-#endif
-#endif
-#endif
-#endif
-
-#define NEXT_RX(num)       (((num) + 1) & (RX_RING_SIZE - 1))
-#define NEXT_TX(num)       (((num) + 1) & (TX_RING_SIZE - 1))
-#define PREV_RX(num)       (((num) - 1) & (RX_RING_SIZE - 1))
-#define PREV_TX(num)       (((num) - 1) & (TX_RING_SIZE - 1))
-
-#define TX_BUFFS_AVAIL(hp)                                    \
-        (((hp)->tx_old <= (hp)->tx_new) ?                     \
-         (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new :  \
-                           (hp)->tx_old - (hp)->tx_new - 1)
-
-#define RX_OFFSET          2
-#define RX_BUF_ALLOC_SIZE  (1546 + RX_OFFSET + 64)
-
-#define RX_COPY_THRESHOLD  256
-
-struct hmeal_init_block {
-       struct happy_meal_rxd happy_meal_rxd[RX_RING_MAXSIZE];
-       struct happy_meal_txd happy_meal_txd[TX_RING_MAXSIZE];
-};
-
-#define hblock_offset(mem, elem) \
-((__u32)((unsigned long)(&(((struct hmeal_init_block *)0)->mem[elem]))))
-
-/* Now software state stuff. */
-enum happy_transceiver {
-       external = 0,
-       internal = 1,
-       none     = 2,
-};
-
-/* Timer state engine. */
-enum happy_timer_state {
-       arbwait  = 0,  /* Waiting for auto negotiation to complete.          */
-       lupwait  = 1,  /* Auto-neg complete, awaiting link-up status.        */
-       ltrywait = 2,  /* Forcing try of all modes, from fastest to slowest. */
-       asleep   = 3,  /* Time inactive.                                     */
-};
-
-struct quattro;
-
-/* Happy happy, joy joy! */
-struct happy_meal {
-       void __iomem    *gregs;                 /* Happy meal global registers       */
-       struct hmeal_init_block  *happy_block;  /* RX and TX descriptors (CPU addr)  */
-
-#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
-       u32 (*read_desc32)(hme32 *);
-       void (*write_txd)(struct happy_meal_txd *, u32, u32);
-       void (*write_rxd)(struct happy_meal_rxd *, u32, u32);
-#endif
-
-       /* This is either an platform_device or a pci_dev. */
-       void                      *happy_dev;
-       struct device             *dma_dev;
-
-       spinlock_t                happy_lock;
-
-       struct sk_buff           *rx_skbs[RX_RING_SIZE];
-       struct sk_buff           *tx_skbs[TX_RING_SIZE];
-
-       int rx_new, tx_new, rx_old, tx_old;
-
-       struct net_device_stats   net_stats;      /* Statistical counters              */
-
-#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
-       u32 (*read32)(void __iomem *);
-       void (*write32)(void __iomem *, u32);
-#endif
-
-       void __iomem    *etxregs;        /* External transmitter regs        */
-       void __iomem    *erxregs;        /* External receiver regs           */
-       void __iomem    *bigmacregs;     /* BIGMAC core regs                 */
-       void __iomem    *tcvregs;        /* MIF transceiver regs             */
-
-       dma_addr_t                hblock_dvma;    /* DVMA visible address happy block  */
-       unsigned int              happy_flags;    /* Driver state flags                */
-       enum happy_transceiver    tcvr_type;      /* Kind of transceiver in use        */
-       unsigned int              happy_bursts;   /* Get your mind out of the gutter   */
-       unsigned int              paddr;          /* PHY address for transceiver       */
-       unsigned short            hm_revision;    /* Happy meal revision               */
-       unsigned short            sw_bmcr;        /* SW copy of BMCR                   */
-       unsigned short            sw_bmsr;        /* SW copy of BMSR                   */
-       unsigned short            sw_physid1;     /* SW copy of PHYSID1                */
-       unsigned short            sw_physid2;     /* SW copy of PHYSID2                */
-       unsigned short            sw_advertise;   /* SW copy of ADVERTISE              */
-       unsigned short            sw_lpa;         /* SW copy of LPA                    */
-       unsigned short            sw_expansion;   /* SW copy of EXPANSION              */
-       unsigned short            sw_csconfig;    /* SW copy of CSCONFIG               */
-       unsigned int              auto_speed;     /* Auto-nego link speed              */
-        unsigned int              forced_speed;   /* Force mode link speed             */
-       unsigned int              poll_data;      /* MIF poll data                     */
-       unsigned int              poll_flag;      /* MIF poll flag                     */
-       unsigned int              linkcheck;      /* Have we checked the link yet?     */
-       unsigned int              lnkup;          /* Is the link up as far as we know? */
-       unsigned int              lnkdown;        /* Trying to force the link down?    */
-       unsigned int              lnkcnt;         /* Counter for link-up attempts.     */
-       struct timer_list         happy_timer;    /* To watch the link when coming up. */
-       enum happy_timer_state    timer_state;    /* State of the auto-neg timer.      */
-       unsigned int              timer_ticks;    /* Number of clicks at each state.   */
-
-       struct net_device        *dev;          /* Backpointer                       */
-       struct quattro           *qfe_parent;   /* For Quattro cards                 */
-       int                       qfe_ent;      /* Which instance on quattro         */
-};
-
-/* Here are the happy flags. */
-#define HFLAG_POLL                0x00000001      /* We are doing MIF polling          */
-#define HFLAG_FENABLE             0x00000002      /* The MII frame is enabled          */
-#define HFLAG_LANCE               0x00000004      /* We are using lance-mode           */
-#define HFLAG_RXENABLE            0x00000008      /* Receiver is enabled               */
-#define HFLAG_AUTO                0x00000010      /* Using auto-negotiation, 0 = force */
-#define HFLAG_FULL                0x00000020      /* Full duplex enable                */
-#define HFLAG_MACFULL             0x00000040      /* Using full duplex in the MAC      */
-#define HFLAG_POLLENABLE          0x00000080      /* Actually try MIF polling          */
-#define HFLAG_RXCV                0x00000100      /* XXX RXCV ENABLE                   */
-#define HFLAG_INIT                0x00000200      /* Init called at least once         */
-#define HFLAG_LINKUP              0x00000400      /* 1 = Link is up                    */
-#define HFLAG_PCI                 0x00000800      /* PCI based Happy Meal              */
-#define HFLAG_QUATTRO            0x00001000      /* On QFE/Quattro card               */
-
-#define HFLAG_20_21  (HFLAG_POLLENABLE | HFLAG_FENABLE)
-#define HFLAG_NOT_A0 (HFLAG_POLLENABLE | HFLAG_FENABLE | HFLAG_LANCE | HFLAG_RXCV)
-
-/* Support for QFE/Quattro cards. */
-struct quattro {
-       struct net_device       *happy_meals[4];
-
-       /* This is either a sbus_dev or a pci_dev. */
-       void                    *quattro_dev;
-
-       struct quattro          *next;
-
-       /* PROM ranges, if any. */
-#ifdef CONFIG_SBUS
-       struct linux_prom_ranges  ranges[8];
-#endif
-       int                       nranges;
-};
-
-/* We use this to acquire receive skb's that we can DMA directly into. */
-#define ALIGNED_RX_SKB_ADDR(addr) \
-        ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
-#define happy_meal_alloc_skb(__length, __gfp_flags) \
-({     struct sk_buff *__skb; \
-       __skb = alloc_skb((__length) + 64, (__gfp_flags)); \
-       if(__skb) { \
-               int __offset = (int) ALIGNED_RX_SKB_ADDR(__skb->data); \
-               if(__offset) \
-                       skb_reserve(__skb, __offset); \
-       } \
-       __skb; \
-})
-
-#endif /* !(_SUNHME_H) */
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
deleted file mode 100644 (file)
index 209c7f8..0000000
+++ /dev/null
@@ -1,1007 +0,0 @@
-/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
- *          Once again I am out to prove that every ethernet
- *          controller out there can be most efficiently programmed
- *          if you make it look like a LANCE.
- *
- * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/ethtool.h>
-#include <linux/bitops.h>
-#include <linux/dma-mapping.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include <asm/idprom.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-#include <asm/auxio.h>
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-
-#include "sunqe.h"
-
-#define DRV_NAME       "sunqe"
-#define DRV_VERSION    "4.1"
-#define DRV_RELDATE    "August 27, 2008"
-#define DRV_AUTHOR     "David S. Miller (davem@davemloft.net)"
-
-static char version[] =
-       DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
-
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_AUTHOR);
-MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
-MODULE_LICENSE("GPL");
-
-static struct sunqec *root_qec_dev;
-
-static void qe_set_multicast(struct net_device *dev);
-
-#define QEC_RESET_TRIES 200
-
-static inline int qec_global_reset(void __iomem *gregs)
-{
-       int tries = QEC_RESET_TRIES;
-
-       sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
-       while (--tries) {
-               u32 tmp = sbus_readl(gregs + GLOB_CTRL);
-               if (tmp & GLOB_CTRL_RESET) {
-                       udelay(20);
-                       continue;
-               }
-               break;
-       }
-       if (tries)
-               return 0;
-       printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
-       return -1;
-}
-
-#define MACE_RESET_RETRIES 200
-#define QE_RESET_RETRIES   200
-
-static inline int qe_stop(struct sunqe *qep)
-{
-       void __iomem *cregs = qep->qcregs;
-       void __iomem *mregs = qep->mregs;
-       int tries;
-
-       /* Reset the MACE, then the QEC channel. */
-       sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
-       tries = MACE_RESET_RETRIES;
-       while (--tries) {
-               u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
-               if (tmp & MREGS_BCONFIG_RESET) {
-                       udelay(20);
-                       continue;
-               }
-               break;
-       }
-       if (!tries) {
-               printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
-               return -1;
-       }
-
-       sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
-       tries = QE_RESET_RETRIES;
-       while (--tries) {
-               u32 tmp = sbus_readl(cregs + CREG_CTRL);
-               if (tmp & CREG_CTRL_RESET) {
-                       udelay(20);
-                       continue;
-               }
-               break;
-       }
-       if (!tries) {
-               printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
-               return -1;
-       }
-       return 0;
-}
-
-static void qe_init_rings(struct sunqe *qep)
-{
-       struct qe_init_block *qb = qep->qe_block;
-       struct sunqe_buffers *qbufs = qep->buffers;
-       __u32 qbufs_dvma = qep->buffers_dvma;
-       int i;
-
-       qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
-       memset(qb, 0, sizeof(struct qe_init_block));
-       memset(qbufs, 0, sizeof(struct sunqe_buffers));
-       for (i = 0; i < RX_RING_SIZE; i++) {
-               qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
-               qb->qe_rxd[i].rx_flags =
-                       (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
-       }
-}
-
-static int qe_init(struct sunqe *qep, int from_irq)
-{
-       struct sunqec *qecp = qep->parent;
-       void __iomem *cregs = qep->qcregs;
-       void __iomem *mregs = qep->mregs;
-       void __iomem *gregs = qecp->gregs;
-       unsigned char *e = &qep->dev->dev_addr[0];
-       u32 tmp;
-       int i;
-
-       /* Shut it up. */
-       if (qe_stop(qep))
-               return -EAGAIN;
-
-       /* Setup initial rx/tx init block pointers. */
-       sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
-       sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
-
-       /* Enable/mask the various irq's. */
-       sbus_writel(0, cregs + CREG_RIMASK);
-       sbus_writel(1, cregs + CREG_TIMASK);
-
-       sbus_writel(0, cregs + CREG_QMASK);
-       sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);
-
-       /* Setup the FIFO pointers into QEC local memory. */
-       tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
-       sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
-       sbus_writel(tmp, cregs + CREG_RXWBUFPTR);
-
-       tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
-               sbus_readl(gregs + GLOB_RSIZE);
-       sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
-       sbus_writel(tmp, cregs + CREG_TXWBUFPTR);
-
-       /* Clear the channel collision counter. */
-       sbus_writel(0, cregs + CREG_CCNT);
-
-       /* For 10baseT, inter frame space nor throttle seems to be necessary. */
-       sbus_writel(0, cregs + CREG_PIPG);
-
-       /* Now dork with the AMD MACE. */
-       sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
-       sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
-       sbus_writeb(0, mregs + MREGS_RXFCNTL);
-
-       /* The QEC dma's the rx'd packets from local memory out to main memory,
-        * and therefore it interrupts when the packet reception is "complete".
-        * So don't listen for the MACE talking about it.
-        */
-       sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
-       sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
-       sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
-                    MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
-                   mregs + MREGS_FCONFIG);
-
-       /* Only usable interface on QuadEther is twisted pair. */
-       sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);
-
-       /* Tell MACE we are changing the ether address. */
-       sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
-                   mregs + MREGS_IACONFIG);
-       while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
-               barrier();
-       sbus_writeb(e[0], mregs + MREGS_ETHADDR);
-       sbus_writeb(e[1], mregs + MREGS_ETHADDR);
-       sbus_writeb(e[2], mregs + MREGS_ETHADDR);
-       sbus_writeb(e[3], mregs + MREGS_ETHADDR);
-       sbus_writeb(e[4], mregs + MREGS_ETHADDR);
-       sbus_writeb(e[5], mregs + MREGS_ETHADDR);
-
-       /* Clear out the address filter. */
-       sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
-                   mregs + MREGS_IACONFIG);
-       while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
-               barrier();
-       for (i = 0; i < 8; i++)
-               sbus_writeb(0, mregs + MREGS_FILTER);
-
-       /* Address changes are now complete. */
-       sbus_writeb(0, mregs + MREGS_IACONFIG);
-
-       qe_init_rings(qep);
-
-       /* Wait a little bit for the link to come up... */
-       mdelay(5);
-       if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
-               int tries = 50;
-
-               while (--tries) {
-                       u8 tmp;
-
-                       mdelay(5);
-                       barrier();
-                       tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
-                       if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
-                               break;
-               }
-               if (tries == 0)
-                       printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
-       }
-
-       /* Missed packet counter is cleared on a read. */
-       sbus_readb(mregs + MREGS_MPCNT);
-
-       /* Reload multicast information, this will enable the receiver
-        * and transmitter.
-        */
-       qe_set_multicast(qep->dev);
-
-       /* QEC should now start to show interrupts. */
-       return 0;
-}
-
-/* Grrr, certain error conditions completely lock up the AMD MACE,
- * so when we get these we _must_ reset the chip.
- */
-static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
-{
-       struct net_device *dev = qep->dev;
-       int mace_hwbug_workaround = 0;
-
-       if (qe_status & CREG_STAT_EDEFER) {
-               printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
-               dev->stats.tx_errors++;
-       }
-
-       if (qe_status & CREG_STAT_CLOSS) {
-               printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
-               dev->stats.tx_errors++;
-               dev->stats.tx_carrier_errors++;
-       }
-
-       if (qe_status & CREG_STAT_ERETRIES) {
-               printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
-               dev->stats.tx_errors++;
-               mace_hwbug_workaround = 1;
-       }
-
-       if (qe_status & CREG_STAT_LCOLL) {
-               printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
-               dev->stats.tx_errors++;
-               dev->stats.collisions++;
-               mace_hwbug_workaround = 1;
-       }
-
-       if (qe_status & CREG_STAT_FUFLOW) {
-               printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
-               dev->stats.tx_errors++;
-               mace_hwbug_workaround = 1;
-       }
-
-       if (qe_status & CREG_STAT_JERROR) {
-               printk(KERN_ERR "%s: Jabber error.\n", dev->name);
-       }
-
-       if (qe_status & CREG_STAT_BERROR) {
-               printk(KERN_ERR "%s: Babble error.\n", dev->name);
-       }
-
-       if (qe_status & CREG_STAT_CCOFLOW) {
-               dev->stats.tx_errors += 256;
-               dev->stats.collisions += 256;
-       }
-
-       if (qe_status & CREG_STAT_TXDERROR) {
-               printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
-               dev->stats.tx_errors++;
-               dev->stats.tx_aborted_errors++;
-               mace_hwbug_workaround = 1;
-       }
-
-       if (qe_status & CREG_STAT_TXLERR) {
-               printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
-               dev->stats.tx_errors++;
-               mace_hwbug_workaround = 1;
-       }
-
-       if (qe_status & CREG_STAT_TXPERR) {
-               printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
-               dev->stats.tx_errors++;
-               dev->stats.tx_aborted_errors++;
-               mace_hwbug_workaround = 1;
-       }
-
-       if (qe_status & CREG_STAT_TXSERR) {
-               printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
-               dev->stats.tx_errors++;
-               dev->stats.tx_aborted_errors++;
-               mace_hwbug_workaround = 1;
-       }
-
-       if (qe_status & CREG_STAT_RCCOFLOW) {
-               dev->stats.rx_errors += 256;
-               dev->stats.collisions += 256;
-       }
-
-       if (qe_status & CREG_STAT_RUOFLOW) {
-               dev->stats.rx_errors += 256;
-               dev->stats.rx_over_errors += 256;
-       }
-
-       if (qe_status & CREG_STAT_MCOFLOW) {
-               dev->stats.rx_errors += 256;
-               dev->stats.rx_missed_errors += 256;
-       }
-
-       if (qe_status & CREG_STAT_RXFOFLOW) {
-               printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
-               dev->stats.rx_errors++;
-               dev->stats.rx_over_errors++;
-       }
-
-       if (qe_status & CREG_STAT_RLCOLL) {
-               printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
-               dev->stats.rx_errors++;
-               dev->stats.collisions++;
-       }
-
-       if (qe_status & CREG_STAT_FCOFLOW) {
-               dev->stats.rx_errors += 256;
-               dev->stats.rx_frame_errors += 256;
-       }
-
-       if (qe_status & CREG_STAT_CECOFLOW) {
-               dev->stats.rx_errors += 256;
-               dev->stats.rx_crc_errors += 256;
-       }
-
-       if (qe_status & CREG_STAT_RXDROP) {
-               printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
-               dev->stats.rx_errors++;
-               dev->stats.rx_dropped++;
-               dev->stats.rx_missed_errors++;
-       }
-
-       if (qe_status & CREG_STAT_RXSMALL) {
-               printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
-               dev->stats.rx_errors++;
-               dev->stats.rx_length_errors++;
-       }
-
-       if (qe_status & CREG_STAT_RXLERR) {
-               printk(KERN_ERR "%s: Receive late error.\n", dev->name);
-               dev->stats.rx_errors++;
-               mace_hwbug_workaround = 1;
-       }
-
-       if (qe_status & CREG_STAT_RXPERR) {
-               printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
-               dev->stats.rx_errors++;
-               dev->stats.rx_missed_errors++;
-               mace_hwbug_workaround = 1;
-       }
-
-       if (qe_status & CREG_STAT_RXSERR) {
-               printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
-               dev->stats.rx_errors++;
-               dev->stats.rx_missed_errors++;
-               mace_hwbug_workaround = 1;
-       }
-
-       if (mace_hwbug_workaround)
-               qe_init(qep, 1);
-       return mace_hwbug_workaround;
-}
-
-/* Per-QE receive interrupt service routine.  Just like on the happy meal
- * we receive directly into skb's with a small packet copy water mark.
- */
-static void qe_rx(struct sunqe *qep)
-{
-       struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
-       struct net_device *dev = qep->dev;
-       struct qe_rxd *this;
-       struct sunqe_buffers *qbufs = qep->buffers;
-       __u32 qbufs_dvma = qep->buffers_dvma;
-       int elem = qep->rx_new, drops = 0;
-       u32 flags;
-
-       this = &rxbase[elem];
-       while (!((flags = this->rx_flags) & RXD_OWN)) {
-               struct sk_buff *skb;
-               unsigned char *this_qbuf =
-                       &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
-               __u32 this_qbuf_dvma = qbufs_dvma +
-                       qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
-               struct qe_rxd *end_rxd =
-                       &rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
-               int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */
-
-               /* Check for errors. */
-               if (len < ETH_ZLEN) {
-                       dev->stats.rx_errors++;
-                       dev->stats.rx_length_errors++;
-                       dev->stats.rx_dropped++;
-               } else {
-                       skb = dev_alloc_skb(len + 2);
-                       if (skb == NULL) {
-                               drops++;
-                               dev->stats.rx_dropped++;
-                       } else {
-                               skb_reserve(skb, 2);
-                               skb_put(skb, len);
-                               skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
-                                                len);
-                               skb->protocol = eth_type_trans(skb, qep->dev);
-                               netif_rx(skb);
-                               dev->stats.rx_packets++;
-                               dev->stats.rx_bytes += len;
-                       }
-               }
-               end_rxd->rx_addr = this_qbuf_dvma;
-               end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
-
-               elem = NEXT_RX(elem);
-               this = &rxbase[elem];
-       }
-       qep->rx_new = elem;
-       if (drops)
-               printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
-}
-
-static void qe_tx_reclaim(struct sunqe *qep);
-
-/* Interrupts for all QE's get filtered out via the QEC master controller,
- * so we just run through each qe and check to see who is signaling
- * and thus needs to be serviced.
- */
-static irqreturn_t qec_interrupt(int irq, void *dev_id)
-{
-       struct sunqec *qecp = dev_id;
-       u32 qec_status;
-       int channel = 0;
-
-       /* Latch the status now. */
-       qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
-       while (channel < 4) {
-               if (qec_status & 0xf) {
-                       struct sunqe *qep = qecp->qes[channel];
-                       u32 qe_status;
-
-                       qe_status = sbus_readl(qep->qcregs + CREG_STAT);
-                       if (qe_status & CREG_STAT_ERRORS) {
-                               if (qe_is_bolixed(qep, qe_status))
-                                       goto next;
-                       }
-                       if (qe_status & CREG_STAT_RXIRQ)
-                               qe_rx(qep);
-                       if (netif_queue_stopped(qep->dev) &&
-                           (qe_status & CREG_STAT_TXIRQ)) {
-                               spin_lock(&qep->lock);
-                               qe_tx_reclaim(qep);
-                               if (TX_BUFFS_AVAIL(qep) > 0) {
-                                       /* Wake net queue and return to
-                                        * lazy tx reclaim.
-                                        */
-                                       netif_wake_queue(qep->dev);
-                                       sbus_writel(1, qep->qcregs + CREG_TIMASK);
-                               }
-                               spin_unlock(&qep->lock);
-                       }
-       next:
-                       ;
-               }
-               qec_status >>= 4;
-               channel++;
-       }
-
-       return IRQ_HANDLED;
-}
-
-static int qe_open(struct net_device *dev)
-{
-       struct sunqe *qep = netdev_priv(dev);
-
-       qep->mconfig = (MREGS_MCONFIG_TXENAB |
-                       MREGS_MCONFIG_RXENAB |
-                       MREGS_MCONFIG_MBAENAB);
-       return qe_init(qep, 0);
-}
-
-static int qe_close(struct net_device *dev)
-{
-       struct sunqe *qep = netdev_priv(dev);
-
-       qe_stop(qep);
-       return 0;
-}
-
-/* Reclaim TX'd frames from the ring.  This must always run under
- * the IRQ protected qep->lock.
- */
-static void qe_tx_reclaim(struct sunqe *qep)
-{
-       struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
-       int elem = qep->tx_old;
-
-       while (elem != qep->tx_new) {
-               u32 flags = txbase[elem].tx_flags;
-
-               if (flags & TXD_OWN)
-                       break;
-               elem = NEXT_TX(elem);
-       }
-       qep->tx_old = elem;
-}
-
-static void qe_tx_timeout(struct net_device *dev)
-{
-       struct sunqe *qep = netdev_priv(dev);
-       int tx_full;
-
-       spin_lock_irq(&qep->lock);
-
-       /* Try to reclaim, if that frees up some tx
-        * entries, we're fine.
-        */
-       qe_tx_reclaim(qep);
-       tx_full = TX_BUFFS_AVAIL(qep) <= 0;
-
-       spin_unlock_irq(&qep->lock);
-
-       if (! tx_full)
-               goto out;
-
-       printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
-       qe_init(qep, 1);
-
-out:
-       netif_wake_queue(dev);
-}
-
-/* Get a packet queued to go onto the wire. */
-static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct sunqe *qep = netdev_priv(dev);
-       struct sunqe_buffers *qbufs = qep->buffers;
-       __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
-       unsigned char *txbuf;
-       int len, entry;
-
-       spin_lock_irq(&qep->lock);
-
-       qe_tx_reclaim(qep);
-
-       len = skb->len;
-       entry = qep->tx_new;
-
-       txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
-       txbuf_dvma = qbufs_dvma +
-               qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));
-
-       /* Avoid a race... */
-       qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
-
-       skb_copy_from_linear_data(skb, txbuf, len);
-
-       qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
-       qep->qe_block->qe_txd[entry].tx_flags =
-               (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
-       qep->tx_new = NEXT_TX(entry);
-
-       /* Get it going. */
-       sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
-
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += len;
-
-       if (TX_BUFFS_AVAIL(qep) <= 0) {
-               /* Halt the net queue and enable tx interrupts.
-                * When the tx queue empties the tx irq handler
-                * will wake up the queue and return us back to
-                * the lazy tx reclaim scheme.
-                */
-               netif_stop_queue(dev);
-               sbus_writel(0, qep->qcregs + CREG_TIMASK);
-       }
-       spin_unlock_irq(&qep->lock);
-
-       dev_kfree_skb(skb);
-
-       return NETDEV_TX_OK;
-}
-
-static void qe_set_multicast(struct net_device *dev)
-{
-       struct sunqe *qep = netdev_priv(dev);
-       struct netdev_hw_addr *ha;
-       u8 new_mconfig = qep->mconfig;
-       int i;
-       u32 crc;
-
-       /* Lock out others. */
-       netif_stop_queue(dev);
-
-       if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
-               sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
-                           qep->mregs + MREGS_IACONFIG);
-               while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
-                       barrier();
-               for (i = 0; i < 8; i++)
-                       sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
-               sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
-       } else if (dev->flags & IFF_PROMISC) {
-               new_mconfig |= MREGS_MCONFIG_PROMISC;
-       } else {
-               u16 hash_table[4];
-               u8 *hbytes = (unsigned char *) &hash_table[0];
-
-               memset(hash_table, 0, sizeof(hash_table));
-               netdev_for_each_mc_addr(ha, dev) {
-                       crc = ether_crc_le(6, ha->addr);
-                       crc >>= 26;
-                       hash_table[crc >> 4] |= 1 << (crc & 0xf);
-               }
-               /* Program the qe with the new filter value. */
-               sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
-                           qep->mregs + MREGS_IACONFIG);
-               while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
-                       barrier();
-               for (i = 0; i < 8; i++) {
-                       u8 tmp = *hbytes++;
-                       sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
-               }
-               sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
-       }
-
-       /* Any change of the logical address filter, the physical address,
-        * or enabling/disabling promiscuous mode causes the MACE to disable
-        * the receiver.  So we must re-enable them here or else the MACE
-        * refuses to listen to anything on the network.  Sheesh, took
-        * me a day or two to find this bug.
-        */
-       qep->mconfig = new_mconfig;
-       sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);
-
-       /* Let us get going again. */
-       netif_wake_queue(dev);
-}
-
-/* Ethtool support... */
-static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
-       const struct linux_prom_registers *regs;
-       struct sunqe *qep = netdev_priv(dev);
-       struct platform_device *op;
-
-       strcpy(info->driver, "sunqe");
-       strcpy(info->version, "3.0");
-
-       op = qep->op;
-       regs = of_get_property(op->dev.of_node, "reg", NULL);
-       if (regs)
-               sprintf(info->bus_info, "SBUS:%d", regs->which_io);
-
-}
-
-static u32 qe_get_link(struct net_device *dev)
-{
-       struct sunqe *qep = netdev_priv(dev);
-       void __iomem *mregs = qep->mregs;
-       u8 phyconfig;
-
-       spin_lock_irq(&qep->lock);
-       phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
-       spin_unlock_irq(&qep->lock);
-
-       return phyconfig & MREGS_PHYCONFIG_LSTAT;
-}
-
-static const struct ethtool_ops qe_ethtool_ops = {
-       .get_drvinfo            = qe_get_drvinfo,
-       .get_link               = qe_get_link,
-};
-
-/* This is only called once at boot time for each card probed. */
-static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
-{
-       u8 bsizes = qecp->qec_bursts;
-
-       if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
-               sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
-       } else if (bsizes & DMA_BURST32) {
-               sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
-       } else {
-               sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
-       }
-
-       /* Packetsize only used in 100baseT BigMAC configurations,
-        * set it to zero just to be on the safe side.
-        */
-       sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);
-
-       /* Set the local memsize register, divided up to one piece per QE channel. */
-       sbus_writel((resource_size(&op->resource[1]) >> 2),
-                   qecp->gregs + GLOB_MSIZE);
-
-       /* Divide up the local QEC memory amongst the 4 QE receiver and
-        * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
-        */
-       sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
-                   qecp->gregs + GLOB_TSIZE);
-       sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
-                   qecp->gregs + GLOB_RSIZE);
-}
-
-static u8 __devinit qec_get_burst(struct device_node *dp)
-{
-       u8 bsizes, bsizes_more;
-
-       /* Find and set the burst sizes for the QEC, since it
-        * does the actual dma for all 4 channels.
-        */
-       bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
-       bsizes &= 0xff;
-       bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);
-
-       if (bsizes_more != 0xff)
-               bsizes &= bsizes_more;
-       if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
-           (bsizes & DMA_BURST32)==0)
-               bsizes = (DMA_BURST32 - 1);
-
-       return bsizes;
-}
-
-static struct sunqec * __devinit get_qec(struct platform_device *child)
-{
-       struct platform_device *op = to_platform_device(child->dev.parent);
-       struct sunqec *qecp;
-
-       qecp = dev_get_drvdata(&op->dev);
-       if (!qecp) {
-               qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
-               if (qecp) {
-                       u32 ctrl;
-
-                       qecp->op = op;
-                       qecp->gregs = of_ioremap(&op->resource[0], 0,
-                                                GLOB_REG_SIZE,
-                                                "QEC Global Registers");
-                       if (!qecp->gregs)
-                               goto fail;
-
-                       /* Make sure the QEC is in MACE mode. */
-                       ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
-                       ctrl &= 0xf0000000;
-                       if (ctrl != GLOB_CTRL_MMODE) {
-                               printk(KERN_ERR "qec: Not in MACE mode!\n");
-                               goto fail;
-                       }
-
-                       if (qec_global_reset(qecp->gregs))
-                               goto fail;
-
-                       qecp->qec_bursts = qec_get_burst(op->dev.of_node);
-
-                       qec_init_once(qecp, op);
-
-                       if (request_irq(op->archdata.irqs[0], qec_interrupt,
-                                       IRQF_SHARED, "qec", (void *) qecp)) {
-                               printk(KERN_ERR "qec: Can't register irq.\n");
-                               goto fail;
-                       }
-
-                       dev_set_drvdata(&op->dev, qecp);
-
-                       qecp->next_module = root_qec_dev;
-                       root_qec_dev = qecp;
-               }
-       }
-
-       return qecp;
-
-fail:
-       if (qecp->gregs)
-               of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
-       kfree(qecp);
-       return NULL;
-}
-
-static const struct net_device_ops qec_ops = {
-       .ndo_open               = qe_open,
-       .ndo_stop               = qe_close,
-       .ndo_start_xmit         = qe_start_xmit,
-       .ndo_set_multicast_list = qe_set_multicast,
-       .ndo_tx_timeout         = qe_tx_timeout,
-       .ndo_change_mtu         = eth_change_mtu,
-       .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_validate_addr      = eth_validate_addr,
-};
-
-static int __devinit qec_ether_init(struct platform_device *op)
-{
-       static unsigned version_printed;
-       struct net_device *dev;
-       struct sunqec *qecp;
-       struct sunqe *qe;
-       int i, res;
-
-       if (version_printed++ == 0)
-               printk(KERN_INFO "%s", version);
-
-       dev = alloc_etherdev(sizeof(struct sunqe));
-       if (!dev)
-               return -ENOMEM;
-
-       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
-
-       qe = netdev_priv(dev);
-
-       res = -ENODEV;
-
-       i = of_getintprop_default(op->dev.of_node, "channel#", -1);
-       if (i == -1)
-               goto fail;
-       qe->channel = i;
-       spin_lock_init(&qe->lock);
-
-       qecp = get_qec(op);
-       if (!qecp)
-               goto fail;
-
-       qecp->qes[qe->channel] = qe;
-       qe->dev = dev;
-       qe->parent = qecp;
-       qe->op = op;
-
-       res = -ENOMEM;
-       qe->qcregs = of_ioremap(&op->resource[0], 0,
-                               CREG_REG_SIZE, "QEC Channel Registers");
-       if (!qe->qcregs) {
-               printk(KERN_ERR "qe: Cannot map channel registers.\n");
-               goto fail;
-       }
-
-       qe->mregs = of_ioremap(&op->resource[1], 0,
-                              MREGS_REG_SIZE, "QE MACE Registers");
-       if (!qe->mregs) {
-               printk(KERN_ERR "qe: Cannot map MACE registers.\n");
-               goto fail;
-       }
-
-       qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
-                                         &qe->qblock_dvma, GFP_ATOMIC);
-       qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
-                                        &qe->buffers_dvma, GFP_ATOMIC);
-       if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
-           qe->buffers == NULL || qe->buffers_dvma == 0)
-               goto fail;
-
-       /* Stop this QE. */
-       qe_stop(qe);
-
-       SET_NETDEV_DEV(dev, &op->dev);
-
-       dev->watchdog_timeo = 5*HZ;
-       dev->irq = op->archdata.irqs[0];
-       dev->dma = 0;
-       dev->ethtool_ops = &qe_ethtool_ops;
-       dev->netdev_ops = &qec_ops;
-
-       res = register_netdev(dev);
-       if (res)
-               goto fail;
-
-       dev_set_drvdata(&op->dev, qe);
-
-       printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel);
-       for (i = 0; i < 6; i++)
-               printk ("%2.2x%c",
-                       dev->dev_addr[i],
-                       i == 5 ? ' ': ':');
-       printk("\n");
-
-
-       return 0;
-
-fail:
-       if (qe->qcregs)
-               of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
-       if (qe->mregs)
-               of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
-       if (qe->qe_block)
-               dma_free_coherent(&op->dev, PAGE_SIZE,
-                                 qe->qe_block, qe->qblock_dvma);
-       if (qe->buffers)
-               dma_free_coherent(&op->dev,
-                                 sizeof(struct sunqe_buffers),
-                                 qe->buffers,
-                                 qe->buffers_dvma);
-
-       free_netdev(dev);
-
-       return res;
-}
-
-static int __devinit qec_sbus_probe(struct platform_device *op)
-{
-       return qec_ether_init(op);
-}
-
-static int __devexit qec_sbus_remove(struct platform_device *op)
-{
-       struct sunqe *qp = dev_get_drvdata(&op->dev);
-       struct net_device *net_dev = qp->dev;
-
-       unregister_netdev(net_dev);
-
-       of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
-       of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
-       dma_free_coherent(&op->dev, PAGE_SIZE,
-                         qp->qe_block, qp->qblock_dvma);
-       dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
-                         qp->buffers, qp->buffers_dvma);
-
-       free_netdev(net_dev);
-
-       dev_set_drvdata(&op->dev, NULL);
-
-       return 0;
-}
-
-static const struct of_device_id qec_sbus_match[] = {
-       {
-               .name = "qe",
-       },
-       {},
-};
-
-MODULE_DEVICE_TABLE(of, qec_sbus_match);
-
-static struct platform_driver qec_sbus_driver = {
-       .driver = {
-               .name = "qec",
-               .owner = THIS_MODULE,
-               .of_match_table = qec_sbus_match,
-       },
-       .probe          = qec_sbus_probe,
-       .remove         = __devexit_p(qec_sbus_remove),
-};
-
-static int __init qec_init(void)
-{
-       return platform_driver_register(&qec_sbus_driver);
-}
-
-static void __exit qec_exit(void)
-{
-       platform_driver_unregister(&qec_sbus_driver);
-
-       while (root_qec_dev) {
-               struct sunqec *next = root_qec_dev->next_module;
-               struct platform_device *op = root_qec_dev->op;
-
-               free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
-               of_iounmap(&op->resource[0], root_qec_dev->gregs,
-                          GLOB_REG_SIZE);
-               kfree(root_qec_dev);
-
-               root_qec_dev = next;
-       }
-}
-
-module_init(qec_init);
-module_exit(qec_exit);
diff --git a/drivers/net/sunqe.h b/drivers/net/sunqe.h
deleted file mode 100644 (file)
index 581781b..0000000
+++ /dev/null
@@ -1,350 +0,0 @@
-/* $Id: sunqe.h,v 1.13 2000/02/09 11:15:42 davem Exp $
- * sunqe.h: Definitions for the Sun QuadEthernet driver.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#ifndef _SUNQE_H
-#define _SUNQE_H
-
-/* QEC global registers. */
-#define GLOB_CTRL      0x00UL          /* Control                      */
-#define GLOB_STAT      0x04UL          /* Status                       */
-#define GLOB_PSIZE     0x08UL          /* Packet Size                  */
-#define GLOB_MSIZE     0x0cUL          /* Local-memory Size            */
-#define GLOB_RSIZE     0x10UL          /* Receive partition size       */
-#define GLOB_TSIZE     0x14UL          /* Transmit partition size      */
-#define GLOB_REG_SIZE  0x18UL
-
-#define GLOB_CTRL_MMODE       0x40000000 /* MACE qec mode            */
-#define GLOB_CTRL_BMODE       0x10000000 /* BigMAC qec mode          */
-#define GLOB_CTRL_EPAR        0x00000020 /* Enable parity            */
-#define GLOB_CTRL_ACNTRL      0x00000018 /* SBUS arbitration control */
-#define GLOB_CTRL_B64         0x00000004 /* 64 byte dvma bursts      */
-#define GLOB_CTRL_B32         0x00000002 /* 32 byte dvma bursts      */
-#define GLOB_CTRL_B16         0x00000000 /* 16 byte dvma bursts      */
-#define GLOB_CTRL_RESET       0x00000001 /* Reset the QEC            */
-
-#define GLOB_STAT_TX          0x00000008 /* BigMAC Transmit IRQ      */
-#define GLOB_STAT_RX          0x00000004 /* BigMAC Receive IRQ       */
-#define GLOB_STAT_BM          0x00000002 /* BigMAC Global IRQ        */
-#define GLOB_STAT_ER          0x00000001 /* BigMAC Error IRQ         */
-
-#define GLOB_PSIZE_2048       0x00       /* 2k packet size           */
-#define GLOB_PSIZE_4096       0x01       /* 4k packet size           */
-#define GLOB_PSIZE_6144       0x10       /* 6k packet size           */
-#define GLOB_PSIZE_8192       0x11       /* 8k packet size           */
-
-/* In MACE mode, there are four qe channels.  Each channel has it's own
- * status bits in the QEC status register.  This macro picks out the
- * ones you want.
- */
-#define GLOB_STAT_PER_QE(status, channel) (((status) >> ((channel) * 4)) & 0xf)
-
-/* The following registers are for per-qe channel information/status. */
-#define CREG_CTRL      0x00UL  /* Control                   */
-#define CREG_STAT      0x04UL  /* Status                    */
-#define CREG_RXDS      0x08UL  /* RX descriptor ring ptr    */
-#define CREG_TXDS      0x0cUL  /* TX descriptor ring ptr    */
-#define CREG_RIMASK    0x10UL  /* RX Interrupt Mask         */
-#define CREG_TIMASK    0x14UL  /* TX Interrupt Mask         */
-#define CREG_QMASK     0x18UL  /* QEC Error Interrupt Mask  */
-#define CREG_MMASK     0x1cUL  /* MACE Error Interrupt Mask */
-#define CREG_RXWBUFPTR 0x20UL  /* Local memory rx write ptr */
-#define CREG_RXRBUFPTR 0x24UL  /* Local memory rx read ptr  */
-#define CREG_TXWBUFPTR 0x28UL  /* Local memory tx write ptr */
-#define CREG_TXRBUFPTR 0x2cUL  /* Local memory tx read ptr  */
-#define CREG_CCNT      0x30UL  /* Collision Counter         */
-#define CREG_PIPG      0x34UL  /* Inter-Frame Gap           */
-#define CREG_REG_SIZE  0x38UL
-
-#define CREG_CTRL_RXOFF       0x00000004  /* Disable this qe's receiver*/
-#define CREG_CTRL_RESET       0x00000002  /* Reset this qe channel     */
-#define CREG_CTRL_TWAKEUP     0x00000001  /* Transmitter Wakeup, 'go'. */
-
-#define CREG_STAT_EDEFER      0x10000000  /* Excessive Defers          */
-#define CREG_STAT_CLOSS       0x08000000  /* Carrier Loss              */
-#define CREG_STAT_ERETRIES    0x04000000  /* More than 16 retries      */
-#define CREG_STAT_LCOLL       0x02000000  /* Late TX Collision         */
-#define CREG_STAT_FUFLOW      0x01000000  /* FIFO Underflow            */
-#define CREG_STAT_JERROR      0x00800000  /* Jabber Error              */
-#define CREG_STAT_BERROR      0x00400000  /* Babble Error              */
-#define CREG_STAT_TXIRQ       0x00200000  /* Transmit Interrupt        */
-#define CREG_STAT_CCOFLOW     0x00100000  /* TX Coll-counter Overflow  */
-#define CREG_STAT_TXDERROR    0x00080000  /* TX Descriptor is bogus    */
-#define CREG_STAT_TXLERR      0x00040000  /* Late Transmit Error       */
-#define CREG_STAT_TXPERR      0x00020000  /* Transmit Parity Error     */
-#define CREG_STAT_TXSERR      0x00010000  /* Transmit SBUS error ack   */
-#define CREG_STAT_RCCOFLOW    0x00001000  /* RX Coll-counter Overflow  */
-#define CREG_STAT_RUOFLOW     0x00000800  /* Runt Counter Overflow     */
-#define CREG_STAT_MCOFLOW     0x00000400  /* Missed Counter Overflow   */
-#define CREG_STAT_RXFOFLOW    0x00000200  /* RX FIFO Overflow          */
-#define CREG_STAT_RLCOLL      0x00000100  /* RX Late Collision         */
-#define CREG_STAT_FCOFLOW     0x00000080  /* Frame Counter Overflow    */
-#define CREG_STAT_CECOFLOW    0x00000040  /* CRC Error-counter Overflow*/
-#define CREG_STAT_RXIRQ       0x00000020  /* Receive Interrupt         */
-#define CREG_STAT_RXDROP      0x00000010  /* Dropped a RX'd packet     */
-#define CREG_STAT_RXSMALL     0x00000008  /* Receive buffer too small  */
-#define CREG_STAT_RXLERR      0x00000004  /* Receive Late Error        */
-#define CREG_STAT_RXPERR      0x00000002  /* Receive Parity Error      */
-#define CREG_STAT_RXSERR      0x00000001  /* Receive SBUS Error ACK    */
-
-#define CREG_STAT_ERRORS      (CREG_STAT_EDEFER|CREG_STAT_CLOSS|CREG_STAT_ERETRIES|     \
-                              CREG_STAT_LCOLL|CREG_STAT_FUFLOW|CREG_STAT_JERROR|       \
-                              CREG_STAT_BERROR|CREG_STAT_CCOFLOW|CREG_STAT_TXDERROR|   \
-                              CREG_STAT_TXLERR|CREG_STAT_TXPERR|CREG_STAT_TXSERR|      \
-                              CREG_STAT_RCCOFLOW|CREG_STAT_RUOFLOW|CREG_STAT_MCOFLOW| \
-                              CREG_STAT_RXFOFLOW|CREG_STAT_RLCOLL|CREG_STAT_FCOFLOW|   \
-                              CREG_STAT_CECOFLOW|CREG_STAT_RXDROP|CREG_STAT_RXSMALL|   \
-                              CREG_STAT_RXLERR|CREG_STAT_RXPERR|CREG_STAT_RXSERR)
-
-#define CREG_QMASK_COFLOW     0x00100000  /* CollCntr overflow         */
-#define CREG_QMASK_TXDERROR   0x00080000  /* TXD error                 */
-#define CREG_QMASK_TXLERR     0x00040000  /* TX late error             */
-#define CREG_QMASK_TXPERR     0x00020000  /* TX parity error           */
-#define CREG_QMASK_TXSERR     0x00010000  /* TX sbus error ack         */
-#define CREG_QMASK_RXDROP     0x00000010  /* RX drop                   */
-#define CREG_QMASK_RXBERROR   0x00000008  /* RX buffer error           */
-#define CREG_QMASK_RXLEERR    0x00000004  /* RX late error             */
-#define CREG_QMASK_RXPERR     0x00000002  /* RX parity error           */
-#define CREG_QMASK_RXSERR     0x00000001  /* RX sbus error ack         */
-
-#define CREG_MMASK_EDEFER     0x10000000  /* Excess defer              */
-#define CREG_MMASK_CLOSS      0x08000000  /* Carrier loss              */
-#define CREG_MMASK_ERETRY     0x04000000  /* Excess retry              */
-#define CREG_MMASK_LCOLL      0x02000000  /* Late collision error      */
-#define CREG_MMASK_UFLOW      0x01000000  /* Underflow                 */
-#define CREG_MMASK_JABBER     0x00800000  /* Jabber error              */
-#define CREG_MMASK_BABBLE     0x00400000  /* Babble error              */
-#define CREG_MMASK_OFLOW      0x00000800  /* Overflow                  */
-#define CREG_MMASK_RXCOLL     0x00000400  /* RX Coll-Cntr overflow     */
-#define CREG_MMASK_RPKT       0x00000200  /* Runt pkt overflow         */
-#define CREG_MMASK_MPKT       0x00000100  /* Missed pkt overflow       */
-
-#define CREG_PIPG_TENAB       0x00000020  /* Enable Throttle           */
-#define CREG_PIPG_MMODE       0x00000010  /* Manual Mode               */
-#define CREG_PIPG_WMASK       0x0000000f  /* SBUS Wait Mask            */
-
-/* Per-channel AMD 79C940 MACE registers. */
-#define MREGS_RXFIFO   0x00UL  /* Receive FIFO                   */
-#define MREGS_TXFIFO   0x01UL  /* Transmit FIFO                  */
-#define MREGS_TXFCNTL  0x02UL  /* Transmit Frame Control         */
-#define MREGS_TXFSTAT  0x03UL  /* Transmit Frame Status          */
-#define MREGS_TXRCNT   0x04UL  /* Transmit Retry Count           */
-#define MREGS_RXFCNTL  0x05UL  /* Receive Frame Control          */
-#define MREGS_RXFSTAT  0x06UL  /* Receive Frame Status           */
-#define MREGS_FFCNT    0x07UL  /* FIFO Frame Count               */
-#define MREGS_IREG     0x08UL  /* Interrupt Register             */
-#define MREGS_IMASK    0x09UL  /* Interrupt Mask                 */
-#define MREGS_POLL     0x0aUL  /* POLL Register                  */
-#define MREGS_BCONFIG  0x0bUL  /* BIU Config                     */
-#define MREGS_FCONFIG  0x0cUL  /* FIFO Config                    */
-#define MREGS_MCONFIG  0x0dUL  /* MAC Config                     */
-#define MREGS_PLSCONFIG        0x0eUL  /* PLS Config                     */
-#define MREGS_PHYCONFIG        0x0fUL  /* PHY Config                     */
-#define MREGS_CHIPID1  0x10UL  /* Chip-ID, low bits              */
-#define MREGS_CHIPID2  0x11UL  /* Chip-ID, high bits             */
-#define MREGS_IACONFIG 0x12UL  /* Internal Address Config        */
-       /* 0x13UL, reserved */
-#define MREGS_FILTER   0x14UL  /* Logical Address Filter         */
-#define MREGS_ETHADDR  0x15UL  /* Our Ethernet Address           */
-       /* 0x16UL, reserved */
-       /* 0x17UL, reserved */
-#define MREGS_MPCNT    0x18UL  /* Missed Packet Count            */
-       /* 0x19UL, reserved */
-#define MREGS_RPCNT    0x1aUL  /* Runt Packet Count              */
-#define MREGS_RCCNT    0x1bUL  /* RX Collision Count             */
-       /* 0x1cUL, reserved */
-#define MREGS_UTEST    0x1dUL  /* User Test                      */
-#define MREGS_RTEST1   0x1eUL  /* Reserved Test 1                */
-#define MREGS_RTEST2   0x1fUL  /* Reserved Test 2                */
-#define MREGS_REG_SIZE 0x20UL
-
-#define MREGS_TXFCNTL_DRETRY        0x80 /* Retry disable                  */
-#define MREGS_TXFCNTL_DFCS          0x08 /* Disable TX FCS                 */
-#define MREGS_TXFCNTL_AUTOPAD       0x01 /* TX auto pad                    */
-
-#define MREGS_TXFSTAT_VALID         0x80 /* TX valid                       */
-#define MREGS_TXFSTAT_UNDERFLOW     0x40 /* TX underflow                   */
-#define MREGS_TXFSTAT_LCOLL         0x20 /* TX late collision              */
-#define MREGS_TXFSTAT_MRETRY        0x10 /* TX > 1 retries                 */
-#define MREGS_TXFSTAT_ORETRY        0x08 /* TX 1 retry                     */
-#define MREGS_TXFSTAT_PDEFER        0x04 /* TX pkt deferred                */
-#define MREGS_TXFSTAT_CLOSS         0x02 /* TX carrier lost                */
-#define MREGS_TXFSTAT_RERROR        0x01 /* TX retry error                 */
-
-#define MREGS_TXRCNT_EDEFER         0x80 /* TX Excess defers               */
-#define MREGS_TXRCNT_CMASK          0x0f /* TX retry count                 */
-
-#define MREGS_RXFCNTL_LOWLAT        0x08 /* RX low latency                 */
-#define MREGS_RXFCNTL_AREJECT       0x04 /* RX addr match rej              */
-#define MREGS_RXFCNTL_AUTOSTRIP     0x01 /* RX auto strip                  */
-
-#define MREGS_RXFSTAT_OVERFLOW      0x80 /* RX overflow                    */
-#define MREGS_RXFSTAT_LCOLL         0x40 /* RX late collision              */
-#define MREGS_RXFSTAT_FERROR        0x20 /* RX framing error               */
-#define MREGS_RXFSTAT_FCSERROR      0x10 /* RX FCS error                   */
-#define MREGS_RXFSTAT_RBCNT         0x0f /* RX msg byte count              */
-
-#define MREGS_FFCNT_RX              0xf0 /* RX FIFO frame cnt              */
-#define MREGS_FFCNT_TX              0x0f /* TX FIFO frame cnt              */
-
-#define MREGS_IREG_JABBER           0x80 /* IRQ Jabber error               */
-#define MREGS_IREG_BABBLE           0x40 /* IRQ Babble error               */
-#define MREGS_IREG_COLL             0x20 /* IRQ Collision error            */
-#define MREGS_IREG_RCCO             0x10 /* IRQ Collision cnt overflow     */
-#define MREGS_IREG_RPKTCO           0x08 /* IRQ Runt packet count overflow */
-#define MREGS_IREG_MPKTCO           0x04 /* IRQ missed packet cnt overflow */
-#define MREGS_IREG_RXIRQ            0x02 /* IRQ RX'd a packet              */
-#define MREGS_IREG_TXIRQ            0x01 /* IRQ TX'd a packet              */
-
-#define MREGS_IMASK_BABBLE          0x40 /* IMASK Babble errors            */
-#define MREGS_IMASK_COLL            0x20 /* IMASK Collision errors         */
-#define MREGS_IMASK_MPKTCO          0x04 /* IMASK Missed pkt cnt overflow  */
-#define MREGS_IMASK_RXIRQ           0x02 /* IMASK RX interrupts            */
-#define MREGS_IMASK_TXIRQ           0x01 /* IMASK TX interrupts            */
-
-#define MREGS_POLL_TXVALID          0x80 /* TX is valid                    */
-#define MREGS_POLL_TDTR             0x40 /* TX data transfer request       */
-#define MREGS_POLL_RDTR             0x20 /* RX data transfer request       */
-
-#define MREGS_BCONFIG_BSWAP         0x40 /* Byte Swap                      */
-#define MREGS_BCONFIG_4TS           0x00 /* 4byte transmit start point     */
-#define MREGS_BCONFIG_16TS          0x10 /* 16byte transmit start point    */
-#define MREGS_BCONFIG_64TS          0x20 /* 64byte transmit start point    */
-#define MREGS_BCONFIG_112TS         0x30 /* 112byte transmit start point   */
-#define MREGS_BCONFIG_RESET         0x01 /* SW-Reset the MACE              */
-
-#define MREGS_FCONFIG_TXF8          0x00 /* TX fifo 8 write cycles         */
-#define MREGS_FCONFIG_TXF32         0x80 /* TX fifo 32 write cycles        */
-#define MREGS_FCONFIG_TXF16         0x40 /* TX fifo 16 write cycles        */
-#define MREGS_FCONFIG_RXF64         0x20 /* RX fifo 64 write cycles        */
-#define MREGS_FCONFIG_RXF32         0x10 /* RX fifo 32 write cycles        */
-#define MREGS_FCONFIG_RXF16         0x00 /* RX fifo 16 write cycles        */
-#define MREGS_FCONFIG_TFWU          0x08 /* TX fifo watermark update       */
-#define MREGS_FCONFIG_RFWU          0x04 /* RX fifo watermark update       */
-#define MREGS_FCONFIG_TBENAB        0x02 /* TX burst enable                */
-#define MREGS_FCONFIG_RBENAB        0x01 /* RX burst enable                */
-
-#define MREGS_MCONFIG_PROMISC       0x80 /* Promiscuous mode enable        */
-#define MREGS_MCONFIG_TPDDISAB      0x40 /* TX 2part deferral enable       */
-#define MREGS_MCONFIG_MBAENAB       0x20 /* Modified backoff enable        */
-#define MREGS_MCONFIG_RPADISAB      0x08 /* RX physical addr disable       */
-#define MREGS_MCONFIG_RBDISAB       0x04 /* RX broadcast disable           */
-#define MREGS_MCONFIG_TXENAB        0x02 /* Enable transmitter             */
-#define MREGS_MCONFIG_RXENAB        0x01 /* Enable receiver                */
-
-#define MREGS_PLSCONFIG_TXMS        0x08 /* TX mode select                 */
-#define MREGS_PLSCONFIG_GPSI        0x06 /* Use GPSI connector             */
-#define MREGS_PLSCONFIG_DAI         0x04 /* Use DAI connector              */
-#define MREGS_PLSCONFIG_TP          0x02 /* Use TwistedPair connector      */
-#define MREGS_PLSCONFIG_AUI         0x00 /* Use AUI connector              */
-#define MREGS_PLSCONFIG_IOENAB      0x01 /* PLS I/O enable                 */
-
-#define MREGS_PHYCONFIG_LSTAT       0x80 /* Link status                    */
-#define MREGS_PHYCONFIG_LTESTDIS    0x40 /* Disable link test logic        */
-#define MREGS_PHYCONFIG_RXPOLARITY  0x20 /* RX polarity                    */
-#define MREGS_PHYCONFIG_APCDISAB    0x10 /* AutoPolarityCorrect disab      */
-#define MREGS_PHYCONFIG_LTENAB      0x08 /* Select low threshold           */
-#define MREGS_PHYCONFIG_AUTO        0x04 /* Connector port auto-sel        */
-#define MREGS_PHYCONFIG_RWU         0x02 /* Remote WakeUp                  */
-#define MREGS_PHYCONFIG_AW          0x01 /* Auto Wakeup                    */
-
-#define MREGS_IACONFIG_ACHNGE       0x80 /* Do address change              */
-#define MREGS_IACONFIG_PARESET      0x04 /* Physical address reset         */
-#define MREGS_IACONFIG_LARESET      0x02 /* Logical address reset          */
-
-#define MREGS_UTEST_RTRENAB         0x80 /* Enable resv test register      */
-#define MREGS_UTEST_RTRDISAB        0x40 /* Disab resv test register       */
-#define MREGS_UTEST_RPACCEPT        0x20 /* Accept runt packets            */
-#define MREGS_UTEST_FCOLL           0x10 /* Force collision status         */
-#define MREGS_UTEST_FCSENAB         0x08 /* Enable FCS on RX               */
-#define MREGS_UTEST_INTLOOPM        0x06 /* Intern lpback w/MENDEC         */
-#define MREGS_UTEST_INTLOOP         0x04 /* Intern lpback                  */
-#define MREGS_UTEST_EXTLOOP         0x02 /* Extern lpback                  */
-#define MREGS_UTEST_NOLOOP          0x00 /* No loopback                    */
-
-struct qe_rxd {
-       u32 rx_flags;
-       u32 rx_addr;
-};
-
-#define RXD_OWN      0x80000000 /* Ownership.      */
-#define RXD_UPDATE   0x10000000 /* Being Updated?  */
-#define RXD_LENGTH   0x000007ff /* Packet Length.  */
-
-struct qe_txd {
-       u32 tx_flags;
-       u32 tx_addr;
-};
-
-#define TXD_OWN      0x80000000 /* Ownership.      */
-#define TXD_SOP      0x40000000 /* Start Of Packet */
-#define TXD_EOP      0x20000000 /* End Of Packet   */
-#define TXD_UPDATE   0x10000000 /* Being Updated?  */
-#define TXD_LENGTH   0x000007ff /* Packet Length.  */
-
-#define TX_RING_MAXSIZE   256
-#define RX_RING_MAXSIZE   256
-
-#define TX_RING_SIZE      16
-#define RX_RING_SIZE      16
-
-#define NEXT_RX(num)       (((num) + 1) & (RX_RING_MAXSIZE - 1))
-#define NEXT_TX(num)       (((num) + 1) & (TX_RING_MAXSIZE - 1))
-#define PREV_RX(num)       (((num) - 1) & (RX_RING_MAXSIZE - 1))
-#define PREV_TX(num)       (((num) - 1) & (TX_RING_MAXSIZE - 1))
-
-#define TX_BUFFS_AVAIL(qp)                                    \
-        (((qp)->tx_old <= (qp)->tx_new) ?                     \
-         (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new :  \
-                           (qp)->tx_old - (qp)->tx_new - 1)
-
-struct qe_init_block {
-       struct qe_rxd qe_rxd[RX_RING_MAXSIZE];
-       struct qe_txd qe_txd[TX_RING_MAXSIZE];
-};
-
-#define qib_offset(mem, elem) \
-((__u32)((unsigned long)(&(((struct qe_init_block *)0)->mem[elem]))))
-
-struct sunqe;
-
-struct sunqec {
-       void __iomem            *gregs;         /* QEC Global Registers         */
-       struct sunqe            *qes[4];        /* Each child MACE              */
-       unsigned int            qec_bursts;     /* Support burst sizes          */
-       struct platform_device  *op;            /* QEC's OF device              */
-       struct sunqec           *next_module;   /* List of all QECs in system   */
-};
-
-#define PKT_BUF_SZ     1664
-#define RXD_PKT_SZ     1664
-
-struct sunqe_buffers {
-       u8      tx_buf[TX_RING_SIZE][PKT_BUF_SZ];
-       u8      __pad[2];
-       u8      rx_buf[RX_RING_SIZE][PKT_BUF_SZ];
-};
-
-#define qebuf_offset(mem, elem) \
-((__u32)((unsigned long)(&(((struct sunqe_buffers *)0)->mem[elem][0]))))
-
-struct sunqe {
-       void __iomem                    *qcregs;                /* QEC per-channel Registers   */
-       void __iomem                    *mregs;         /* Per-channel MACE Registers  */
-       struct qe_init_block            *qe_block;      /* RX and TX descriptors       */
-       __u32                           qblock_dvma;    /* RX and TX descriptors       */
-       spinlock_t                      lock;           /* Protects txfull state       */
-       int                             rx_new, rx_old; /* RX ring extents             */
-       int                             tx_new, tx_old; /* TX ring extents             */
-       struct sunqe_buffers            *buffers;       /* CPU visible address.        */
-       __u32                           buffers_dvma;   /* DVMA visible address.       */
-       struct sunqec                   *parent;
-       u8                              mconfig;        /* Base MACE mconfig value     */
-       struct platform_device          *op;            /* QE's OF device struct       */
-       struct net_device               *dev;           /* QE's netdevice struct       */
-       int                             channel;        /* Who am I?                   */
-};
-
-#endif /* !(_SUNQE_H) */
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
deleted file mode 100644 (file)
index bf3c762..0000000
+++ /dev/null
@@ -1,1284 +0,0 @@
-/* sunvnet.c: Sun LDOM Virtual Network Driver.
- *
- * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/etherdevice.h>
-#include <linux/mutex.h>
-
-#include <asm/vio.h>
-#include <asm/ldc.h>
-
-#include "sunvnet.h"
-
-#define DRV_MODULE_NAME                "sunvnet"
-#define DRV_MODULE_VERSION     "1.0"
-#define DRV_MODULE_RELDATE     "June 25, 2007"
-
-static char version[] __devinitdata =
-       DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
-MODULE_DESCRIPTION("Sun LDOM virtual network driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
-
-/* Ordered from largest major to lowest */
-static struct vio_version vnet_versions[] = {
-       { .major = 1, .minor = 0 },
-};
-
-static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
-{
-       return vio_dring_avail(dr, VNET_TX_RING_SIZE);
-}
-
-static int vnet_handle_unknown(struct vnet_port *port, void *arg)
-{
-       struct vio_msg_tag *pkt = arg;
-
-       pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
-              pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
-       pr_err("Resetting connection\n");
-
-       ldc_disconnect(port->vio.lp);
-
-       return -ECONNRESET;
-}
-
-static int vnet_send_attr(struct vio_driver_state *vio)
-{
-       struct vnet_port *port = to_vnet_port(vio);
-       struct net_device *dev = port->vp->dev;
-       struct vio_net_attr_info pkt;
-       int i;
-
-       memset(&pkt, 0, sizeof(pkt));
-       pkt.tag.type = VIO_TYPE_CTRL;
-       pkt.tag.stype = VIO_SUBTYPE_INFO;
-       pkt.tag.stype_env = VIO_ATTR_INFO;
-       pkt.tag.sid = vio_send_sid(vio);
-       pkt.xfer_mode = VIO_DRING_MODE;
-       pkt.addr_type = VNET_ADDR_ETHERMAC;
-       pkt.ack_freq = 0;
-       for (i = 0; i < 6; i++)
-               pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
-       pkt.mtu = ETH_FRAME_LEN;
-
-       viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
-              "ackfreq[%u] mtu[%llu]\n",
-              pkt.xfer_mode, pkt.addr_type,
-              (unsigned long long) pkt.addr,
-              pkt.ack_freq,
-              (unsigned long long) pkt.mtu);
-
-       return vio_ldc_send(vio, &pkt, sizeof(pkt));
-}
-
-static int handle_attr_info(struct vio_driver_state *vio,
-                           struct vio_net_attr_info *pkt)
-{
-       viodbg(HS, "GOT NET ATTR INFO xmode[0x%x] atype[0x%x] addr[%llx] "
-              "ackfreq[%u] mtu[%llu]\n",
-              pkt->xfer_mode, pkt->addr_type,
-              (unsigned long long) pkt->addr,
-              pkt->ack_freq,
-              (unsigned long long) pkt->mtu);
-
-       pkt->tag.sid = vio_send_sid(vio);
-
-       if (pkt->xfer_mode != VIO_DRING_MODE ||
-           pkt->addr_type != VNET_ADDR_ETHERMAC ||
-           pkt->mtu != ETH_FRAME_LEN) {
-               viodbg(HS, "SEND NET ATTR NACK\n");
-
-               pkt->tag.stype = VIO_SUBTYPE_NACK;
-
-               (void) vio_ldc_send(vio, pkt, sizeof(*pkt));
-
-               return -ECONNRESET;
-       } else {
-               viodbg(HS, "SEND NET ATTR ACK\n");
-
-               pkt->tag.stype = VIO_SUBTYPE_ACK;
-
-               return vio_ldc_send(vio, pkt, sizeof(*pkt));
-       }
-
-}
-
-static int handle_attr_ack(struct vio_driver_state *vio,
-                          struct vio_net_attr_info *pkt)
-{
-       viodbg(HS, "GOT NET ATTR ACK\n");
-
-       return 0;
-}
-
-static int handle_attr_nack(struct vio_driver_state *vio,
-                           struct vio_net_attr_info *pkt)
-{
-       viodbg(HS, "GOT NET ATTR NACK\n");
-
-       return -ECONNRESET;
-}
-
-static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
-{
-       struct vio_net_attr_info *pkt = arg;
-
-       switch (pkt->tag.stype) {
-       case VIO_SUBTYPE_INFO:
-               return handle_attr_info(vio, pkt);
-
-       case VIO_SUBTYPE_ACK:
-               return handle_attr_ack(vio, pkt);
-
-       case VIO_SUBTYPE_NACK:
-               return handle_attr_nack(vio, pkt);
-
-       default:
-               return -ECONNRESET;
-       }
-}
-
-static void vnet_handshake_complete(struct vio_driver_state *vio)
-{
-       struct vio_dring_state *dr;
-
-       dr = &vio->drings[VIO_DRIVER_RX_RING];
-       dr->snd_nxt = dr->rcv_nxt = 1;
-
-       dr = &vio->drings[VIO_DRIVER_TX_RING];
-       dr->snd_nxt = dr->rcv_nxt = 1;
-}
-
-/* The hypervisor interface that implements copying to/from imported
- * memory from another domain requires that copies are done to 8-byte
- * aligned buffers, and that the lengths of such copies are also 8-byte
- * multiples.
- *
- * So we align skb->data to an 8-byte multiple and pad-out the data
- * area so we can round the copy length up to the next multiple of
- * 8 for the copy.
- *
- * The transmitter puts the actual start of the packet 6 bytes into
- * the buffer it sends over, so that the IP headers after the ethernet
- * header are aligned properly.  These 6 bytes are not in the descriptor
- * length, they are simply implied.  This offset is represented using
- * the VNET_PACKET_SKIP macro.
- */
-static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
-                                          unsigned int len)
-{
-       struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
-       unsigned long addr, off;
-
-       if (unlikely(!skb))
-               return NULL;
-
-       addr = (unsigned long) skb->data;
-       off = ((addr + 7UL) & ~7UL) - addr;
-       if (off)
-               skb_reserve(skb, off);
-
-       return skb;
-}
-
-static int vnet_rx_one(struct vnet_port *port, unsigned int len,
-                      struct ldc_trans_cookie *cookies, int ncookies)
-{
-       struct net_device *dev = port->vp->dev;
-       unsigned int copy_len;
-       struct sk_buff *skb;
-       int err;
-
-       err = -EMSGSIZE;
-       if (unlikely(len < ETH_ZLEN || len > ETH_FRAME_LEN)) {
-               dev->stats.rx_length_errors++;
-               goto out_dropped;
-       }
-
-       skb = alloc_and_align_skb(dev, len);
-       err = -ENOMEM;
-       if (unlikely(!skb)) {
-               dev->stats.rx_missed_errors++;
-               goto out_dropped;
-       }
-
-       copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
-       skb_put(skb, copy_len);
-       err = ldc_copy(port->vio.lp, LDC_COPY_IN,
-                      skb->data, copy_len, 0,
-                      cookies, ncookies);
-       if (unlikely(err < 0)) {
-               dev->stats.rx_frame_errors++;
-               goto out_free_skb;
-       }
-
-       skb_pull(skb, VNET_PACKET_SKIP);
-       skb_trim(skb, len);
-       skb->protocol = eth_type_trans(skb, dev);
-
-       dev->stats.rx_packets++;
-       dev->stats.rx_bytes += len;
-
-       netif_rx(skb);
-
-       return 0;
-
-out_free_skb:
-       kfree_skb(skb);
-
-out_dropped:
-       dev->stats.rx_dropped++;
-       return err;
-}
-
-static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
-                        u32 start, u32 end, u8 vio_dring_state)
-{
-       struct vio_dring_data hdr = {
-               .tag = {
-                       .type           = VIO_TYPE_DATA,
-                       .stype          = VIO_SUBTYPE_ACK,
-                       .stype_env      = VIO_DRING_DATA,
-                       .sid            = vio_send_sid(&port->vio),
-               },
-               .dring_ident            = dr->ident,
-               .start_idx              = start,
-               .end_idx                = end,
-               .state                  = vio_dring_state,
-       };
-       int err, delay;
-
-       hdr.seq = dr->snd_nxt;
-       delay = 1;
-       do {
-               err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
-               if (err > 0) {
-                       dr->snd_nxt++;
-                       break;
-               }
-               udelay(delay);
-               if ((delay <<= 1) > 128)
-                       delay = 128;
-       } while (err == -EAGAIN);
-
-       return err;
-}
-
-static u32 next_idx(u32 idx, struct vio_dring_state *dr)
-{
-       if (++idx == dr->num_entries)
-               idx = 0;
-       return idx;
-}
-
-static u32 prev_idx(u32 idx, struct vio_dring_state *dr)
-{
-       if (idx == 0)
-               idx = dr->num_entries - 1;
-       else
-               idx--;
-
-       return idx;
-}
-
-static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
-                                       struct vio_dring_state *dr,
-                                       u32 index)
-{
-       struct vio_net_desc *desc = port->vio.desc_buf;
-       int err;
-
-       err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
-                                 (index * dr->entry_size),
-                                 dr->cookies, dr->ncookies);
-       if (err < 0)
-               return ERR_PTR(err);
-
-       return desc;
-}
-
-static int put_rx_desc(struct vnet_port *port,
-                      struct vio_dring_state *dr,
-                      struct vio_net_desc *desc,
-                      u32 index)
-{
-       int err;
-
-       err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
-                                 (index * dr->entry_size),
-                                 dr->cookies, dr->ncookies);
-       if (err < 0)
-               return err;
-
-       return 0;
-}
-
-static int vnet_walk_rx_one(struct vnet_port *port,
-                           struct vio_dring_state *dr,
-                           u32 index, int *needs_ack)
-{
-       struct vio_net_desc *desc = get_rx_desc(port, dr, index);
-       struct vio_driver_state *vio = &port->vio;
-       int err;
-
-       if (IS_ERR(desc))
-               return PTR_ERR(desc);
-
-       viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
-              desc->hdr.state, desc->hdr.ack,
-              desc->size, desc->ncookies,
-              desc->cookies[0].cookie_addr,
-              desc->cookies[0].cookie_size);
-
-       if (desc->hdr.state != VIO_DESC_READY)
-               return 1;
-       err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
-       if (err == -ECONNRESET)
-               return err;
-       desc->hdr.state = VIO_DESC_DONE;
-       err = put_rx_desc(port, dr, desc, index);
-       if (err < 0)
-               return err;
-       *needs_ack = desc->hdr.ack;
-       return 0;
-}
-
-static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
-                       u32 start, u32 end)
-{
-       struct vio_driver_state *vio = &port->vio;
-       int ack_start = -1, ack_end = -1;
-
-       end = (end == (u32) -1) ? prev_idx(start, dr) : next_idx(end, dr);
-
-       viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
-
-       while (start != end) {
-               int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
-               if (err == -ECONNRESET)
-                       return err;
-               if (err != 0)
-                       break;
-               if (ack_start == -1)
-                       ack_start = start;
-               ack_end = start;
-               start = next_idx(start, dr);
-               if (ack && start != end) {
-                       err = vnet_send_ack(port, dr, ack_start, ack_end,
-                                           VIO_DRING_ACTIVE);
-                       if (err == -ECONNRESET)
-                               return err;
-                       ack_start = -1;
-               }
-       }
-       if (unlikely(ack_start == -1))
-               ack_start = ack_end = prev_idx(start, dr);
-       return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED);
-}
-
-static int vnet_rx(struct vnet_port *port, void *msgbuf)
-{
-       struct vio_dring_data *pkt = msgbuf;
-       struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
-       struct vio_driver_state *vio = &port->vio;
-
-       viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
-              pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
-
-       if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
-               return 0;
-       if (unlikely(pkt->seq != dr->rcv_nxt)) {
-               pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
-                      pkt->seq, dr->rcv_nxt);
-               return 0;
-       }
-
-       dr->rcv_nxt++;
-
-       /* XXX Validate pkt->start_idx and pkt->end_idx XXX */
-
-       return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx);
-}
-
-static int idx_is_pending(struct vio_dring_state *dr, u32 end)
-{
-       u32 idx = dr->cons;
-       int found = 0;
-
-       while (idx != dr->prod) {
-               if (idx == end) {
-                       found = 1;
-                       break;
-               }
-               idx = next_idx(idx, dr);
-       }
-       return found;
-}
-
-static int vnet_ack(struct vnet_port *port, void *msgbuf)
-{
-       struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-       struct vio_dring_data *pkt = msgbuf;
-       struct net_device *dev;
-       struct vnet *vp;
-       u32 end;
-
-       if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
-               return 0;
-
-       end = pkt->end_idx;
-       if (unlikely(!idx_is_pending(dr, end)))
-               return 0;
-
-       dr->cons = next_idx(end, dr);
-
-       vp = port->vp;
-       dev = vp->dev;
-       if (unlikely(netif_queue_stopped(dev) &&
-                    vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
-               return 1;
-
-       return 0;
-}
-
-static int vnet_nack(struct vnet_port *port, void *msgbuf)
-{
-       /* XXX just reset or similar XXX */
-       return 0;
-}
-
-static int handle_mcast(struct vnet_port *port, void *msgbuf)
-{
-       struct vio_net_mcast_info *pkt = msgbuf;
-
-       if (pkt->tag.stype != VIO_SUBTYPE_ACK)
-               pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
-                      port->vp->dev->name,
-                      pkt->tag.type,
-                      pkt->tag.stype,
-                      pkt->tag.stype_env,
-                      pkt->tag.sid);
-
-       return 0;
-}
-
-static void maybe_tx_wakeup(struct vnet *vp)
-{
-       struct net_device *dev = vp->dev;
-
-       netif_tx_lock(dev);
-       if (likely(netif_queue_stopped(dev))) {
-               struct vnet_port *port;
-               int wake = 1;
-
-               list_for_each_entry(port, &vp->port_list, list) {
-                       struct vio_dring_state *dr;
-
-                       dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-                       if (vnet_tx_dring_avail(dr) <
-                           VNET_TX_WAKEUP_THRESH(dr)) {
-                               wake = 0;
-                               break;
-                       }
-               }
-               if (wake)
-                       netif_wake_queue(dev);
-       }
-       netif_tx_unlock(dev);
-}
-
-static void vnet_event(void *arg, int event)
-{
-       struct vnet_port *port = arg;
-       struct vio_driver_state *vio = &port->vio;
-       unsigned long flags;
-       int tx_wakeup, err;
-
-       spin_lock_irqsave(&vio->lock, flags);
-
-       if (unlikely(event == LDC_EVENT_RESET ||
-                    event == LDC_EVENT_UP)) {
-               vio_link_state_change(vio, event);
-               spin_unlock_irqrestore(&vio->lock, flags);
-
-               if (event == LDC_EVENT_RESET)
-                       vio_port_up(vio);
-               return;
-       }
-
-       if (unlikely(event != LDC_EVENT_DATA_READY)) {
-               pr_warning("Unexpected LDC event %d\n", event);
-               spin_unlock_irqrestore(&vio->lock, flags);
-               return;
-       }
-
-       tx_wakeup = err = 0;
-       while (1) {
-               union {
-                       struct vio_msg_tag tag;
-                       u64 raw[8];
-               } msgbuf;
-
-               err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
-               if (unlikely(err < 0)) {
-                       if (err == -ECONNRESET)
-                               vio_conn_reset(vio);
-                       break;
-               }
-               if (err == 0)
-                       break;
-               viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
-                      msgbuf.tag.type,
-                      msgbuf.tag.stype,
-                      msgbuf.tag.stype_env,
-                      msgbuf.tag.sid);
-               err = vio_validate_sid(vio, &msgbuf.tag);
-               if (err < 0)
-                       break;
-
-               if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
-                       if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
-                               err = vnet_rx(port, &msgbuf);
-                       } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
-                               err = vnet_ack(port, &msgbuf);
-                               if (err > 0)
-                                       tx_wakeup |= err;
-                       } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
-                               err = vnet_nack(port, &msgbuf);
-                       }
-               } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
-                       if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
-                               err = handle_mcast(port, &msgbuf);
-                       else
-                               err = vio_control_pkt_engine(vio, &msgbuf);
-                       if (err)
-                               break;
-               } else {
-                       err = vnet_handle_unknown(port, &msgbuf);
-               }
-               if (err == -ECONNRESET)
-                       break;
-       }
-       spin_unlock(&vio->lock);
-       if (unlikely(tx_wakeup && err != -ECONNRESET))
-               maybe_tx_wakeup(port->vp);
-       local_irq_restore(flags);
-}
-
-static int __vnet_tx_trigger(struct vnet_port *port)
-{
-       struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-       struct vio_dring_data hdr = {
-               .tag = {
-                       .type           = VIO_TYPE_DATA,
-                       .stype          = VIO_SUBTYPE_INFO,
-                       .stype_env      = VIO_DRING_DATA,
-                       .sid            = vio_send_sid(&port->vio),
-               },
-               .dring_ident            = dr->ident,
-               .start_idx              = dr->prod,
-               .end_idx                = (u32) -1,
-       };
-       int err, delay;
-
-       hdr.seq = dr->snd_nxt;
-       delay = 1;
-       do {
-               err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
-               if (err > 0) {
-                       dr->snd_nxt++;
-                       break;
-               }
-               udelay(delay);
-               if ((delay <<= 1) > 128)
-                       delay = 128;
-       } while (err == -EAGAIN);
-
-       return err;
-}
-
-struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
-{
-       unsigned int hash = vnet_hashfn(skb->data);
-       struct hlist_head *hp = &vp->port_hash[hash];
-       struct hlist_node *n;
-       struct vnet_port *port;
-
-       hlist_for_each_entry(port, n, hp, hash) {
-               if (!compare_ether_addr(port->raddr, skb->data))
-                       return port;
-       }
-       port = NULL;
-       if (!list_empty(&vp->port_list))
-               port = list_entry(vp->port_list.next, struct vnet_port, list);
-
-       return port;
-}
-
-struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
-{
-       struct vnet_port *ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&vp->lock, flags);
-       ret = __tx_port_find(vp, skb);
-       spin_unlock_irqrestore(&vp->lock, flags);
-
-       return ret;
-}
-
-static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct vnet *vp = netdev_priv(dev);
-       struct vnet_port *port = tx_port_find(vp, skb);
-       struct vio_dring_state *dr;
-       struct vio_net_desc *d;
-       unsigned long flags;
-       unsigned int len;
-       void *tx_buf;
-       int i, err;
-
-       if (unlikely(!port))
-               goto out_dropped;
-
-       spin_lock_irqsave(&port->vio.lock, flags);
-
-       dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-       if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
-               if (!netif_queue_stopped(dev)) {
-                       netif_stop_queue(dev);
-
-                       /* This is a hard error, log it. */
-                       netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
-                       dev->stats.tx_errors++;
-               }
-               spin_unlock_irqrestore(&port->vio.lock, flags);
-               return NETDEV_TX_BUSY;
-       }
-
-       d = vio_dring_cur(dr);
-
-       tx_buf = port->tx_bufs[dr->prod].buf;
-       skb_copy_from_linear_data(skb, tx_buf + VNET_PACKET_SKIP, skb->len);
-
-       len = skb->len;
-       if (len < ETH_ZLEN) {
-               len = ETH_ZLEN;
-               memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
-       }
-
-       d->hdr.ack = VIO_ACK_ENABLE;
-       d->size = len;
-       d->ncookies = port->tx_bufs[dr->prod].ncookies;
-       for (i = 0; i < d->ncookies; i++)
-               d->cookies[i] = port->tx_bufs[dr->prod].cookies[i];
-
-       /* This has to be a non-SMP write barrier because we are writing
-        * to memory which is shared with the peer LDOM.
-        */
-       wmb();
-
-       d->hdr.state = VIO_DESC_READY;
-
-       err = __vnet_tx_trigger(port);
-       if (unlikely(err < 0)) {
-               netdev_info(dev, "TX trigger error %d\n", err);
-               d->hdr.state = VIO_DESC_FREE;
-               dev->stats.tx_carrier_errors++;
-               goto out_dropped_unlock;
-       }
-
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
-       dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
-       if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
-               netif_stop_queue(dev);
-               if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
-                       netif_wake_queue(dev);
-       }
-
-       spin_unlock_irqrestore(&port->vio.lock, flags);
-
-       dev_kfree_skb(skb);
-
-       return NETDEV_TX_OK;
-
-out_dropped_unlock:
-       spin_unlock_irqrestore(&port->vio.lock, flags);
-
-out_dropped:
-       dev_kfree_skb(skb);
-       dev->stats.tx_dropped++;
-       return NETDEV_TX_OK;
-}
-
-static void vnet_tx_timeout(struct net_device *dev)
-{
-       /* XXX Implement me XXX */
-}
-
-static int vnet_open(struct net_device *dev)
-{
-       netif_carrier_on(dev);
-       netif_start_queue(dev);
-
-       return 0;
-}
-
-static int vnet_close(struct net_device *dev)
-{
-       netif_stop_queue(dev);
-       netif_carrier_off(dev);
-
-       return 0;
-}
-
-static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
-{
-       struct vnet_mcast_entry *m;
-
-       for (m = vp->mcast_list; m; m = m->next) {
-               if (!memcmp(m->addr, addr, ETH_ALEN))
-                       return m;
-       }
-       return NULL;
-}
-
-static void __update_mc_list(struct vnet *vp, struct net_device *dev)
-{
-       struct netdev_hw_addr *ha;
-
-       netdev_for_each_mc_addr(ha, dev) {
-               struct vnet_mcast_entry *m;
-
-               m = __vnet_mc_find(vp, ha->addr);
-               if (m) {
-                       m->hit = 1;
-                       continue;
-               }
-
-               if (!m) {
-                       m = kzalloc(sizeof(*m), GFP_ATOMIC);
-                       if (!m)
-                               continue;
-                       memcpy(m->addr, ha->addr, ETH_ALEN);
-                       m->hit = 1;
-
-                       m->next = vp->mcast_list;
-                       vp->mcast_list = m;
-               }
-       }
-}
-
-static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
-{
-       struct vio_net_mcast_info info;
-       struct vnet_mcast_entry *m, **pp;
-       int n_addrs;
-
-       memset(&info, 0, sizeof(info));
-
-       info.tag.type = VIO_TYPE_CTRL;
-       info.tag.stype = VIO_SUBTYPE_INFO;
-       info.tag.stype_env = VNET_MCAST_INFO;
-       info.tag.sid = vio_send_sid(&port->vio);
-       info.set = 1;
-
-       n_addrs = 0;
-       for (m = vp->mcast_list; m; m = m->next) {
-               if (m->sent)
-                       continue;
-               m->sent = 1;
-               memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
-                      m->addr, ETH_ALEN);
-               if (++n_addrs == VNET_NUM_MCAST) {
-                       info.count = n_addrs;
-
-                       (void) vio_ldc_send(&port->vio, &info,
-                                           sizeof(info));
-                       n_addrs = 0;
-               }
-       }
-       if (n_addrs) {
-               info.count = n_addrs;
-               (void) vio_ldc_send(&port->vio, &info, sizeof(info));
-       }
-
-       info.set = 0;
-
-       n_addrs = 0;
-       pp = &vp->mcast_list;
-       while ((m = *pp) != NULL) {
-               if (m->hit) {
-                       m->hit = 0;
-                       pp = &m->next;
-                       continue;
-               }
-
-               memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
-                      m->addr, ETH_ALEN);
-               if (++n_addrs == VNET_NUM_MCAST) {
-                       info.count = n_addrs;
-                       (void) vio_ldc_send(&port->vio, &info,
-                                           sizeof(info));
-                       n_addrs = 0;
-               }
-
-               *pp = m->next;
-               kfree(m);
-       }
-       if (n_addrs) {
-               info.count = n_addrs;
-               (void) vio_ldc_send(&port->vio, &info, sizeof(info));
-       }
-}
-
-static void vnet_set_rx_mode(struct net_device *dev)
-{
-       struct vnet *vp = netdev_priv(dev);
-       struct vnet_port *port;
-       unsigned long flags;
-
-       spin_lock_irqsave(&vp->lock, flags);
-       if (!list_empty(&vp->port_list)) {
-               port = list_entry(vp->port_list.next, struct vnet_port, list);
-
-               if (port->switch_port) {
-                       __update_mc_list(vp, dev);
-                       __send_mc_list(vp, port);
-               }
-       }
-       spin_unlock_irqrestore(&vp->lock, flags);
-}
-
-static int vnet_change_mtu(struct net_device *dev, int new_mtu)
-{
-       if (new_mtu != ETH_DATA_LEN)
-               return -EINVAL;
-
-       dev->mtu = new_mtu;
-       return 0;
-}
-
-static int vnet_set_mac_addr(struct net_device *dev, void *p)
-{
-       return -EINVAL;
-}
-
-static void vnet_get_drvinfo(struct net_device *dev,
-                            struct ethtool_drvinfo *info)
-{
-       strcpy(info->driver, DRV_MODULE_NAME);
-       strcpy(info->version, DRV_MODULE_VERSION);
-}
-
-static u32 vnet_get_msglevel(struct net_device *dev)
-{
-       struct vnet *vp = netdev_priv(dev);
-       return vp->msg_enable;
-}
-
-static void vnet_set_msglevel(struct net_device *dev, u32 value)
-{
-       struct vnet *vp = netdev_priv(dev);
-       vp->msg_enable = value;
-}
-
-static const struct ethtool_ops vnet_ethtool_ops = {
-       .get_drvinfo            = vnet_get_drvinfo,
-       .get_msglevel           = vnet_get_msglevel,
-       .set_msglevel           = vnet_set_msglevel,
-       .get_link               = ethtool_op_get_link,
-};
-
-static void vnet_port_free_tx_bufs(struct vnet_port *port)
-{
-       struct vio_dring_state *dr;
-       int i;
-
-       dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-       if (dr->base) {
-               ldc_free_exp_dring(port->vio.lp, dr->base,
-                                  (dr->entry_size * dr->num_entries),
-                                  dr->cookies, dr->ncookies);
-               dr->base = NULL;
-               dr->entry_size = 0;
-               dr->num_entries = 0;
-               dr->pending = 0;
-               dr->ncookies = 0;
-       }
-
-       for (i = 0; i < VNET_TX_RING_SIZE; i++) {
-               void *buf = port->tx_bufs[i].buf;
-
-               if (!buf)
-                       continue;
-
-               ldc_unmap(port->vio.lp,
-                         port->tx_bufs[i].cookies,
-                         port->tx_bufs[i].ncookies);
-
-               kfree(buf);
-               port->tx_bufs[i].buf = NULL;
-       }
-}
-
-static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
-{
-       struct vio_dring_state *dr;
-       unsigned long len;
-       int i, err, ncookies;
-       void *dring;
-
-       for (i = 0; i < VNET_TX_RING_SIZE; i++) {
-               void *buf = kzalloc(ETH_FRAME_LEN + 8, GFP_KERNEL);
-               int map_len = (ETH_FRAME_LEN + 7) & ~7;
-
-               err = -ENOMEM;
-               if (!buf) {
-                       pr_err("TX buffer allocation failure\n");
-                       goto err_out;
-               }
-               err = -EFAULT;
-               if ((unsigned long)buf & (8UL - 1)) {
-                       pr_err("TX buffer misaligned\n");
-                       kfree(buf);
-                       goto err_out;
-               }
-
-               err = ldc_map_single(port->vio.lp, buf, map_len,
-                                    port->tx_bufs[i].cookies, 2,
-                                    (LDC_MAP_SHADOW |
-                                     LDC_MAP_DIRECT |
-                                     LDC_MAP_RW));
-               if (err < 0) {
-                       kfree(buf);
-                       goto err_out;
-               }
-               port->tx_bufs[i].buf = buf;
-               port->tx_bufs[i].ncookies = err;
-       }
-
-       dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-
-       len = (VNET_TX_RING_SIZE *
-              (sizeof(struct vio_net_desc) +
-               (sizeof(struct ldc_trans_cookie) * 2)));
-
-       ncookies = VIO_MAX_RING_COOKIES;
-       dring = ldc_alloc_exp_dring(port->vio.lp, len,
-                                   dr->cookies, &ncookies,
-                                   (LDC_MAP_SHADOW |
-                                    LDC_MAP_DIRECT |
-                                    LDC_MAP_RW));
-       if (IS_ERR(dring)) {
-               err = PTR_ERR(dring);
-               goto err_out;
-       }
-
-       dr->base = dring;
-       dr->entry_size = (sizeof(struct vio_net_desc) +
-                         (sizeof(struct ldc_trans_cookie) * 2));
-       dr->num_entries = VNET_TX_RING_SIZE;
-       dr->prod = dr->cons = 0;
-       dr->pending = VNET_TX_RING_SIZE;
-       dr->ncookies = ncookies;
-
-       return 0;
-
-err_out:
-       vnet_port_free_tx_bufs(port);
-
-       return err;
-}
-
-static LIST_HEAD(vnet_list);
-static DEFINE_MUTEX(vnet_list_mutex);
-
-static const struct net_device_ops vnet_ops = {
-       .ndo_open               = vnet_open,
-       .ndo_stop               = vnet_close,
-       .ndo_set_multicast_list = vnet_set_rx_mode,
-       .ndo_set_mac_address    = vnet_set_mac_addr,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_tx_timeout         = vnet_tx_timeout,
-       .ndo_change_mtu         = vnet_change_mtu,
-       .ndo_start_xmit         = vnet_start_xmit,
-};
-
-static struct vnet * __devinit vnet_new(const u64 *local_mac)
-{
-       struct net_device *dev;
-       struct vnet *vp;
-       int err, i;
-
-       dev = alloc_etherdev(sizeof(*vp));
-       if (!dev) {
-               pr_err("Etherdev alloc failed, aborting\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       for (i = 0; i < ETH_ALEN; i++)
-               dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
-
-       memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
-       vp = netdev_priv(dev);
-
-       spin_lock_init(&vp->lock);
-       vp->dev = dev;
-
-       INIT_LIST_HEAD(&vp->port_list);
-       for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
-               INIT_HLIST_HEAD(&vp->port_hash[i]);
-       INIT_LIST_HEAD(&vp->list);
-       vp->local_mac = *local_mac;
-
-       dev->netdev_ops = &vnet_ops;
-       dev->ethtool_ops = &vnet_ethtool_ops;
-       dev->watchdog_timeo = VNET_TX_TIMEOUT;
-
-       err = register_netdev(dev);
-       if (err) {
-               pr_err("Cannot register net device, aborting\n");
-               goto err_out_free_dev;
-       }
-
-       netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr);
-
-       list_add(&vp->list, &vnet_list);
-
-       return vp;
-
-err_out_free_dev:
-       free_netdev(dev);
-
-       return ERR_PTR(err);
-}
-
-static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
-{
-       struct vnet *iter, *vp;
-
-       mutex_lock(&vnet_list_mutex);
-       vp = NULL;
-       list_for_each_entry(iter, &vnet_list, list) {
-               if (iter->local_mac == *local_mac) {
-                       vp = iter;
-                       break;
-               }
-       }
-       if (!vp)
-               vp = vnet_new(local_mac);
-       mutex_unlock(&vnet_list_mutex);
-
-       return vp;
-}
-
-static const char *local_mac_prop = "local-mac-address";
-
-static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp,
-                                               u64 port_node)
-{
-       const u64 *local_mac = NULL;
-       u64 a;
-
-       mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
-               u64 target = mdesc_arc_target(hp, a);
-               const char *name;
-
-               name = mdesc_get_property(hp, target, "name", NULL);
-               if (!name || strcmp(name, "network"))
-                       continue;
-
-               local_mac = mdesc_get_property(hp, target,
-                                              local_mac_prop, NULL);
-               if (local_mac)
-                       break;
-       }
-       if (!local_mac)
-               return ERR_PTR(-ENODEV);
-
-       return vnet_find_or_create(local_mac);
-}
-
-static struct ldc_channel_config vnet_ldc_cfg = {
-       .event          = vnet_event,
-       .mtu            = 64,
-       .mode           = LDC_MODE_UNRELIABLE,
-};
-
-static struct vio_driver_ops vnet_vio_ops = {
-       .send_attr              = vnet_send_attr,
-       .handle_attr            = vnet_handle_attr,
-       .handshake_complete     = vnet_handshake_complete,
-};
-
-static void __devinit print_version(void)
-{
-       printk_once(KERN_INFO "%s", version);
-}
-
-const char *remote_macaddr_prop = "remote-mac-address";
-
-static int __devinit vnet_port_probe(struct vio_dev *vdev,
-                                    const struct vio_device_id *id)
-{
-       struct mdesc_handle *hp;
-       struct vnet_port *port;
-       unsigned long flags;
-       struct vnet *vp;
-       const u64 *rmac;
-       int len, i, err, switch_port;
-
-       print_version();
-
-       hp = mdesc_grab();
-
-       vp = vnet_find_parent(hp, vdev->mp);
-       if (IS_ERR(vp)) {
-               pr_err("Cannot find port parent vnet\n");
-               err = PTR_ERR(vp);
-               goto err_out_put_mdesc;
-       }
-
-       rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
-       err = -ENODEV;
-       if (!rmac) {
-               pr_err("Port lacks %s property\n", remote_macaddr_prop);
-               goto err_out_put_mdesc;
-       }
-
-       port = kzalloc(sizeof(*port), GFP_KERNEL);
-       err = -ENOMEM;
-       if (!port) {
-               pr_err("Cannot allocate vnet_port\n");
-               goto err_out_put_mdesc;
-       }
-
-       for (i = 0; i < ETH_ALEN; i++)
-               port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;
-
-       port->vp = vp;
-
-       err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
-                             vnet_versions, ARRAY_SIZE(vnet_versions),
-                             &vnet_vio_ops, vp->dev->name);
-       if (err)
-               goto err_out_free_port;
-
-       err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
-       if (err)
-               goto err_out_free_port;
-
-       err = vnet_port_alloc_tx_bufs(port);
-       if (err)
-               goto err_out_free_ldc;
-
-       INIT_HLIST_NODE(&port->hash);
-       INIT_LIST_HEAD(&port->list);
-
-       switch_port = 0;
-       if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
-               switch_port = 1;
-       port->switch_port = switch_port;
-
-       spin_lock_irqsave(&vp->lock, flags);
-       if (switch_port)
-               list_add(&port->list, &vp->port_list);
-       else
-               list_add_tail(&port->list, &vp->port_list);
-       hlist_add_head(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]);
-       spin_unlock_irqrestore(&vp->lock, flags);
-
-       dev_set_drvdata(&vdev->dev, port);
-
-       pr_info("%s: PORT ( remote-mac %pM%s )\n",
-               vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
-
-       vio_port_up(&port->vio);
-
-       mdesc_release(hp);
-
-       return 0;
-
-err_out_free_ldc:
-       vio_ldc_free(&port->vio);
-
-err_out_free_port:
-       kfree(port);
-
-err_out_put_mdesc:
-       mdesc_release(hp);
-       return err;
-}
-
-static int vnet_port_remove(struct vio_dev *vdev)
-{
-       struct vnet_port *port = dev_get_drvdata(&vdev->dev);
-
-       if (port) {
-               struct vnet *vp = port->vp;
-               unsigned long flags;
-
-               del_timer_sync(&port->vio.timer);
-
-               spin_lock_irqsave(&vp->lock, flags);
-               list_del(&port->list);
-               hlist_del(&port->hash);
-               spin_unlock_irqrestore(&vp->lock, flags);
-
-               vnet_port_free_tx_bufs(port);
-               vio_ldc_free(&port->vio);
-
-               dev_set_drvdata(&vdev->dev, NULL);
-
-               kfree(port);
-       }
-       return 0;
-}
-
-static const struct vio_device_id vnet_port_match[] = {
-       {
-               .type = "vnet-port",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(vio, vnet_port_match);
-
-static struct vio_driver vnet_port_driver = {
-       .id_table       = vnet_port_match,
-       .probe          = vnet_port_probe,
-       .remove         = vnet_port_remove,
-       .driver         = {
-               .name   = "vnet_port",
-               .owner  = THIS_MODULE,
-       }
-};
-
-static int __init vnet_init(void)
-{
-       return vio_register_driver(&vnet_port_driver);
-}
-
-static void __exit vnet_exit(void)
-{
-       vio_unregister_driver(&vnet_port_driver);
-}
-
-module_init(vnet_init);
-module_exit(vnet_exit);
diff --git a/drivers/net/sunvnet.h b/drivers/net/sunvnet.h
deleted file mode 100644 (file)
index d347a5b..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-#ifndef _SUNVNET_H
-#define _SUNVNET_H
-
-#define DESC_NCOOKIES(entry_size)      \
-       ((entry_size) - sizeof(struct vio_net_desc))
-
-/* length of time before we decide the hardware is borked,
- * and dev->tx_timeout() should be called to fix the problem
- */
-#define VNET_TX_TIMEOUT                        (5 * HZ)
-
-#define VNET_TX_RING_SIZE              512
-#define VNET_TX_WAKEUP_THRESH(dr)      ((dr)->pending / 4)
-
-/* VNET packets are sent in buffers with the first 6 bytes skipped
- * so that after the ethernet header the IPv4/IPv6 headers are aligned
- * properly.
- */
-#define VNET_PACKET_SKIP               6
-
-struct vnet_tx_entry {
-       void                    *buf;
-       unsigned int            ncookies;
-       struct ldc_trans_cookie cookies[2];
-};
-
-struct vnet;
-struct vnet_port {
-       struct vio_driver_state vio;
-
-       struct hlist_node       hash;
-       u8                      raddr[ETH_ALEN];
-       u8                      switch_port;
-       u8                      __pad;
-
-       struct vnet             *vp;
-
-       struct vnet_tx_entry    tx_bufs[VNET_TX_RING_SIZE];
-
-       struct list_head        list;
-};
-
-static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
-{
-       return container_of(vio, struct vnet_port, vio);
-}
-
-#define VNET_PORT_HASH_SIZE    16
-#define VNET_PORT_HASH_MASK    (VNET_PORT_HASH_SIZE - 1)
-
-static inline unsigned int vnet_hashfn(u8 *mac)
-{
-       unsigned int val = mac[4] ^ mac[5];
-
-       return val & (VNET_PORT_HASH_MASK);
-}
-
-struct vnet_mcast_entry {
-       u8                      addr[ETH_ALEN];
-       u8                      sent;
-       u8                      hit;
-       struct vnet_mcast_entry *next;
-};
-
-struct vnet {
-       /* Protects port_list and port_hash.  */
-       spinlock_t              lock;
-
-       struct net_device       *dev;
-
-       u32                     msg_enable;
-
-       struct list_head        port_list;
-
-       struct hlist_head       port_hash[VNET_PORT_HASH_SIZE];
-
-       struct vnet_mcast_entry *mcast_list;
-
-       struct list_head        list;
-       u64                     local_mac;
-};
-
-#endif /* _SUNVNET_H */