Staging: Add ServerEngines benet 10Gb ethernet driver
author	Subbu Seetharaman <subbus@serverengines.com>
Sun, 2 Nov 2008 13:09:57 +0000 (08:09 -0500)
committer	Greg Kroah-Hartman <gregkh@suse.de>
Tue, 6 Jan 2009 21:52:14 +0000 (13:52 -0800)
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
39 files changed:
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/benet/Kconfig [new file with mode: 0644]
drivers/staging/benet/MAINTAINERS [new file with mode: 0644]
drivers/staging/benet/Makefile [new file with mode: 0644]
drivers/staging/benet/TODO [new file with mode: 0644]
drivers/staging/benet/be_ethtool.c [new file with mode: 0644]
drivers/staging/benet/be_init.c [new file with mode: 0644]
drivers/staging/benet/be_int.c [new file with mode: 0644]
drivers/staging/benet/be_netif.c [new file with mode: 0644]
drivers/staging/benet/benet.h [new file with mode: 0644]
drivers/staging/benet/fw/asyncmesg.h [new file with mode: 0644]
drivers/staging/benet/fw/be_cm.h [new file with mode: 0644]
drivers/staging/benet/fw/be_common.h [new file with mode: 0644]
drivers/staging/benet/fw/cev.h [new file with mode: 0644]
drivers/staging/benet/fw/descriptors.h [new file with mode: 0644]
drivers/staging/benet/fw/doorbells.h [new file with mode: 0644]
drivers/staging/benet/fw/ep.h [new file with mode: 0644]
drivers/staging/benet/fw/etx_context.h [new file with mode: 0644]
drivers/staging/benet/fw/fwcmd_common.h [new file with mode: 0644]
drivers/staging/benet/fw/fwcmd_common_bmap.h [new file with mode: 0644]
drivers/staging/benet/fw/fwcmd_eth_bmap.h [new file with mode: 0644]
drivers/staging/benet/fw/fwcmd_hdr_bmap.h [new file with mode: 0644]
drivers/staging/benet/fw/fwcmd_mcc.h [new file with mode: 0644]
drivers/staging/benet/fw/fwcmd_opcodes.h [new file with mode: 0644]
drivers/staging/benet/fw/fwcmd_types_bmap.h [new file with mode: 0644]
drivers/staging/benet/fw/host_struct.h [new file with mode: 0644]
drivers/staging/benet/fw/mpu.h [new file with mode: 0644]
drivers/staging/benet/fw/mpu_context.h [new file with mode: 0644]
drivers/staging/benet/fw/pcicfg.h [new file with mode: 0644]
drivers/staging/benet/fw/post_codes.h [new file with mode: 0644]
drivers/staging/benet/fw/regmap.h [new file with mode: 0644]
drivers/staging/benet/hwlib/bestatus.h [new file with mode: 0644]
drivers/staging/benet/hwlib/cq.c [new file with mode: 0644]
drivers/staging/benet/hwlib/eq.c [new file with mode: 0644]
drivers/staging/benet/hwlib/eth.c [new file with mode: 0644]
drivers/staging/benet/hwlib/funcobj.c [new file with mode: 0644]
drivers/staging/benet/hwlib/hwlib.h [new file with mode: 0644]
drivers/staging/benet/hwlib/mpu.c [new file with mode: 0644]

index f9b785a039074eab9eab3f080b07add2b0205059..132403263b0fed26d7ee520ced9563288c5e3549 100644 (file)
@@ -69,5 +69,7 @@ source "drivers/staging/otus/Kconfig"
 
 source "drivers/staging/rt2860/Kconfig"
 
+source "drivers/staging/benet/Kconfig"
+
 endif # !STAGING_EXCLUDE_BUILD
 endif # STAGING
index 147a467358aa7b51354e80d3f65d38feed073b72..7c9b9b6e15af477cd4eae783f8838f97222fc555 100644 (file)
@@ -17,3 +17,4 @@ obj-$(CONFIG_POCH)            += poch/
 obj-$(CONFIG_AGNX)             += agnx/
 obj-$(CONFIG_OTUS)             += otus/
 obj-$(CONFIG_RT2860)           += rt2860/
+obj-$(CONFIG_BENET)            += benet/
diff --git a/drivers/staging/benet/Kconfig b/drivers/staging/benet/Kconfig
new file mode 100644 (file)
index 0000000..f680607
--- /dev/null
@@ -0,0 +1,7 @@
+config BENET
+       tristate "ServerEngines 10Gb NIC - BladeEngine"
+       depends on PCI && INET
+       select INET_LRO
+       help
+         This driver implements the NIC functionality for the ServerEngines
+         BladeEngine (EC 3210) 10Gb network adapter.
diff --git a/drivers/staging/benet/MAINTAINERS b/drivers/staging/benet/MAINTAINERS
new file mode 100644 (file)
index 0000000..d5ce340
--- /dev/null
@@ -0,0 +1,6 @@
+SERVER ENGINES 10GbE NIC - BLADE-ENGINE
+P:     Subbu Seetharaman
+M:     subbus@serverengines.com
+L:     netdev@vger.kernel.org
+W:     http://www.serverengines.com
+S:     Supported
diff --git a/drivers/staging/benet/Makefile b/drivers/staging/benet/Makefile
new file mode 100644 (file)
index 0000000..58c050d
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Makefile to build the network driver for ServerEngine's BladeEngine.
+#
+EXTRA_CFLAGS = -I$(src)/hwlib -I$(src)/fw
+
+obj-$(CONFIG_BENET) += benet.o
+
+benet-y :=  be_init.o be_int.o be_netif.o be_ethtool.o \
+       hwlib/funcobj.o hwlib/cq.o hwlib/eq.o hwlib/mpu.o hwlib/eth.o
diff --git a/drivers/staging/benet/TODO b/drivers/staging/benet/TODO
new file mode 100644 (file)
index 0000000..b087d7d
--- /dev/null
@@ -0,0 +1,8 @@
+TODO:
+       - remove subdirectories
+       - fix minor checkpatch.pl issues
+       - remove wrappers around common iowrite functions
+       - full netdev audit of common problems/issues
+
+Please send all patches and questions to Subbu Seetharaman
+<subbus@serverengines.com> and Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/staging/benet/be_ethtool.c b/drivers/staging/benet/be_ethtool.c
new file mode 100644 (file)
index 0000000..8c3ac90
--- /dev/null
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * be_ethtool.c
+ *
+ *     This file contains various functions that ethtool can use
+ *     to talk to the driver and the BE H/W.
+ */
+
+#include "benet.h"
+
+#include <linux/ethtool.h>
+
+static const char benet_gstrings_stats[][ETH_GSTRING_LEN] = {
+/* net_device_stats */
+       "rx_packets",
+       "tx_packets",
+       "rx_bytes",
+       "tx_bytes",
+       "rx_errors",
+       "tx_errors",
+       "rx_dropped",
+       "tx_dropped",
+       "multicast",
+       "collisions",
+       "rx_length_errors",
+       "rx_over_errors",
+       "rx_crc_errors",
+       "rx_frame_errors",
+       "rx_fifo_errors",
+       "rx_missed_errors",
+       "tx_aborted_errors",
+       "tx_carrier_errors",
+       "tx_fifo_errors",
+       "tx_heartbeat_errors",
+       "tx_window_errors",
+       "rx_compressed",
+       "tx_compressed",
+/* BE driver Stats */
+       "bes_tx_reqs",
+       "bes_tx_fails",
+       "bes_fwd_reqs",
+       "bes_tx_wrbs",
+       "bes_interrupts",
+       "bes_events",
+       "bes_tx_events",
+       "bes_rx_events",
+       "bes_tx_compl",
+       "bes_rx_compl",
+       "bes_ethrx_post_fail",
+       "bes_802_3_dropped_frames",
+       "bes_802_3_malformed_frames",
+       "bes_rx_misc_pkts",
+       "bes_eth_tx_rate",
+       "bes_eth_rx_rate",
+       "bes_rx_coal",
+       "bes_rx_flush",
+};
+
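+/*
+ * struct net_device_stats is composed entirely of unsigned long
+ * counters, so its size in unsigned longs is the number of generic
+ * netdev counters named above.
+ */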
+#define NET_DEV_STATS_LEN \
+       (sizeof(struct net_device_stats)/sizeof(unsigned long))
+
+#define BENET_STATS_LEN  ARRAY_SIZE(benet_gstrings_stats)
+
+static void
+be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+       struct be_net_object *pnob = netdev->priv;
+       struct be_adapter *adapter = pnob->adapter;
+
+       strncpy(drvinfo->driver, be_driver_name, 32);
+       strncpy(drvinfo->version, be_drvr_ver, 32);
+       strncpy(drvinfo->fw_version, be_fw_ver, 32);
+       strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+       drvinfo->testinfo_len = 0;
+       drvinfo->regdump_len = 0;
+       drvinfo->eedump_len = 0;
+}
+
+static int
+be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+       struct be_net_object *pnob = netdev->priv;
+       struct be_adapter *adapter = pnob->adapter;
+
+       coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;
+
+       coalesce->rx_coalesce_usecs = adapter->cur_eqd;
+       coalesce->rx_coalesce_usecs_high = adapter->max_eqd;
+       coalesce->rx_coalesce_usecs_low = adapter->min_eqd;
+
+       coalesce->tx_coalesce_usecs = adapter->cur_eqd;
+       coalesce->tx_coalesce_usecs_high = adapter->max_eqd;
+       coalesce->tx_coalesce_usecs_low = adapter->min_eqd;
+
+       coalesce->use_adaptive_rx_coalesce = adapter->enable_aic;
+       coalesce->use_adaptive_tx_coalesce = adapter->enable_aic;
+
+       return 0;
+}
+
+/*
+ * This routine sets the interrupt coalescing delay as well as
+ * the number of packets to coalesce for LRO.
+ */
+static int
+be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
+{
+       struct be_net_object *pnob = netdev->priv;
+       struct be_adapter *adapter = pnob->adapter;
+       struct be_eq_object *eq_objectp;
+       u32 max, min, cur;
+       int status;
+
+       adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
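+       /* clamp the request to the maximum LRO aggregation limit */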
+       if (adapter->max_rx_coal >= BE_LRO_MAX_PKTS)
+               adapter->max_rx_coal = BE_LRO_MAX_PKTS;
+
+       if (adapter->enable_aic == 0 &&
+               coalesce->use_adaptive_rx_coalesce == 1) {
+               /* if AIC is being turned on now, start with an EQD of 0 */
+               adapter->cur_eqd = 0;
+       }
+       adapter->enable_aic = coalesce->use_adaptive_rx_coalesce;
+
+       /* round off to nearest multiple of 8 */
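+       /* e.g. 0-3 -> 0, 4-11 -> 8, 12-19 -> 16 */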
+       max = (((coalesce->rx_coalesce_usecs_high + 4) >> 3) << 3);
+       min = (((coalesce->rx_coalesce_usecs_low + 4) >> 3) << 3);
+       cur = (((coalesce->rx_coalesce_usecs + 4) >> 3) << 3);
+
+       if (adapter->enable_aic) {
+               /* accept low and high if AIC is enabled */
+               if (max > MAX_EQD)
+                       max = MAX_EQD;
+               if (min > max)
+                       min = max;
+               adapter->max_eqd = max;
+               adapter->min_eqd = min;
+               if (adapter->cur_eqd > max)
+                       adapter->cur_eqd = max;
+               if (adapter->cur_eqd < min)
+                       adapter->cur_eqd = min;
+       } else {
+               /* accept specified coalesce_usecs only if AIC is disabled */
+               if (cur > MAX_EQD)
+                       cur = MAX_EQD;
+               eq_objectp = &pnob->event_q_obj;
+               status = be_eq_modify_delay(&pnob->fn_obj, 1, &eq_objectp,
+                                           &cur, NULL, NULL, NULL);
+               if (status == BE_SUCCESS)
+                       adapter->cur_eqd = cur;
+       }
+       return 0;
+}
+
+static u32 be_get_rx_csum(struct net_device *netdev)
+{
+       struct be_net_object *pnob = netdev->priv;
+       struct be_adapter *adapter = pnob->adapter;
+       return adapter->rx_csum;
+}
+
+static int be_set_rx_csum(struct net_device *netdev, uint32_t data)
+{
+       struct be_net_object *pnob = netdev->priv;
+       struct be_adapter *adapter = pnob->adapter;
+
+       if (data)
+               adapter->rx_csum = 1;
+       else
+               adapter->rx_csum = 0;
+
+       return 0;
+}
+
+static void
+be_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
+{
+       switch (stringset) {
+       case ETH_SS_STATS:
+               memcpy(data, *benet_gstrings_stats,
+                      sizeof(benet_gstrings_stats));
+               break;
+       }
+}
+
+static int be_get_stats_count(struct net_device *netdev)
+{
+       return BENET_STATS_LEN;
+}
+
+static void
+be_get_ethtool_stats(struct net_device *netdev,
+                    struct ethtool_stats *stats, uint64_t *data)
+{
+       struct be_net_object *pnob = netdev->priv;
+       struct be_adapter *adapter = pnob->adapter;
+       int i;
+
+       benet_get_stats(netdev);
+
+       for (i = 0; i < NET_DEV_STATS_LEN; i++)
+               data[i] = ((unsigned long *)&adapter->benet_stats)[i];
+
+       data[i++] = adapter->be_stat.bes_tx_reqs;
+       data[i++] = adapter->be_stat.bes_tx_fails;
+       data[i++] = adapter->be_stat.bes_fwd_reqs;
+       data[i++] = adapter->be_stat.bes_tx_wrbs;
+
+       data[i++] = adapter->be_stat.bes_ints;
+       data[i++] = adapter->be_stat.bes_events;
+       data[i++] = adapter->be_stat.bes_tx_events;
+       data[i++] = adapter->be_stat.bes_rx_events;
+       data[i++] = adapter->be_stat.bes_tx_compl;
+       data[i++] = adapter->be_stat.bes_rx_compl;
+       data[i++] = adapter->be_stat.bes_ethrx_post_fail;
+       data[i++] = adapter->be_stat.bes_802_3_dropped_frames;
+       data[i++] = adapter->be_stat.bes_802_3_malformed_frames;
+       data[i++] = adapter->be_stat.bes_rx_misc_pkts;
+       data[i++] = adapter->be_stat.bes_eth_tx_rate;
+       data[i++] = adapter->be_stat.bes_eth_rx_rate;
+       data[i++] = adapter->be_stat.bes_rx_coal;
+       data[i++] = adapter->be_stat.bes_rx_flush;
+}
+
+static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+       ecmd->speed = SPEED_10000;
+       ecmd->duplex = DUPLEX_FULL;
+       ecmd->autoneg = AUTONEG_DISABLE;
+       return 0;
+}
+
+/* Get the Ring parameters from the pnob */
+static void
+be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+{
+       struct be_net_object *pnob = netdev->priv;
+
+       /* preset maximums */
+       ring->rx_max_pending = pnob->rx_q_len;
+       ring->rx_mini_max_pending = 0;  /* mini ring is not used */
+       ring->rx_jumbo_max_pending = 0; /* jumbo ring is not used */
+       ring->tx_max_pending = pnob->tx_q_len;
+
+       /* current hardware settings */
+       ring->rx_pending = atomic_read(&pnob->rx_q_posted);
+       ring->rx_mini_pending = 0;
+       ring->rx_jumbo_pending = 0;
+       ring->tx_pending = atomic_read(&pnob->tx_q_used);
+}
+
+static void
+be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
+{
+       struct be_net_object *pnob = netdev->priv;
+       bool rxfc, txfc;
+       int status;
+
+       status = be_eth_get_flow_control(&pnob->fn_obj, &txfc, &rxfc);
+       if (status != BE_SUCCESS) {
+               dev_info(&netdev->dev, "Unable to get pause frame settings\n");
+               /* return defaults */
+               ecmd->rx_pause = 1;
+               ecmd->tx_pause = 0;
+               ecmd->autoneg = AUTONEG_ENABLE;
+               return;
+       }
+
+       ecmd->tx_pause = txfc ? 1 : 0;
+       ecmd->rx_pause = rxfc ? 1 : 0;
+
+       ecmd->autoneg = AUTONEG_ENABLE;
+}
+
+static int
+be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
+{
+       struct be_net_object *pnob = netdev->priv;
+       bool txfc, rxfc;
+       int status;
+
+       if (ecmd->autoneg != AUTONEG_ENABLE)
+               return -EINVAL;
+
+       txfc = ecmd->tx_pause ? true : false;
+       rxfc = ecmd->rx_pause ? true : false;
+
+       status = be_eth_set_flow_control(&pnob->fn_obj, txfc, rxfc);
+       if (status != BE_SUCCESS) {
+               dev_info(&netdev->dev, "Unable to set pause frame settings\n");
+               return -EIO;
+       }
+       return 0;
+}
+
+struct ethtool_ops be_ethtool_ops = {
+       .get_settings = be_get_settings,
+       .get_drvinfo = be_get_drvinfo,
+       .get_link = ethtool_op_get_link,
+       .get_coalesce = be_get_coalesce,
+       .set_coalesce = be_set_coalesce,
+       .get_ringparam = be_get_ringparam,
+       .get_pauseparam = be_get_pauseparam,
+       .set_pauseparam = be_set_pauseparam,
+       .get_rx_csum = be_get_rx_csum,
+       .set_rx_csum = be_set_rx_csum,
+       .get_tx_csum = ethtool_op_get_tx_csum,
+       .set_tx_csum = ethtool_op_set_tx_csum,
+       .get_sg = ethtool_op_get_sg,
+       .set_sg = ethtool_op_set_sg,
+       .get_tso = ethtool_op_get_tso,
+       .set_tso = ethtool_op_set_tso,
+       .get_strings = be_get_strings,
+       .get_stats_count = be_get_stats_count,
+       .get_ethtool_stats = be_get_ethtool_stats,
+};
diff --git a/drivers/staging/benet/be_init.c b/drivers/staging/benet/be_init.c
new file mode 100644 (file)
index 0000000..9e5bd08
--- /dev/null
@@ -0,0 +1,1381 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include <linux/etherdevice.h>
+#include "benet.h"
+
+#define  DRVR_VERSION  "1.0.728"
+
+static const struct pci_device_id be_device_id_table[] = {
+       {PCI_DEVICE(0x19a2, 0x0201)},
+       {0}
+};
+
+MODULE_DEVICE_TABLE(pci, be_device_id_table);
+
+MODULE_VERSION(DRVR_VERSION);
+
+#define DRV_DESCRIPTION "ServerEngines BladeEngine Network Driver Version "
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION DRVR_VERSION);
+MODULE_AUTHOR("ServerEngines");
+MODULE_LICENSE("GPL");
+
+static unsigned int msix = 1;
+module_param(msix, uint, S_IRUGO);
+MODULE_PARM_DESC(msix, "Use MSI-x interrupts");
+
+static unsigned int rxbuf_size = 2048; /* Default RX frag size */
+module_param(rxbuf_size, uint, S_IRUGO);
+MODULE_PARM_DESC(rxbuf_size, "Size of buffers to hold Rx data");
+
+const char be_drvr_ver[] = DRVR_VERSION;
+char be_fw_ver[32];            /* F/W version filled in by be_probe */
+char be_driver_name[] = "benet";
+
+/*
+ * Number of entries in each queue.
+ */
+#define EVENT_Q_LEN            1024
+#define ETH_TXQ_LEN            2048
+#define ETH_TXCQ_LEN           1024
+#define ETH_RXQ_LEN            1024    /* Does not support any other value */
+#define ETH_UC_RXCQ_LEN                1024
+#define ETH_BC_RXCQ_LEN                256
+#define MCC_Q_LEN               64     /* total size not to exceed 8 pages */
+#define MCC_CQ_LEN              256
+
+/* Bit mask describing events of interest to be traced */
+unsigned int trace_level;
+
+static int
+init_pci_be_function(struct be_adapter *adapter, struct pci_dev *pdev)
+{
+       u64 pa;
+
+       /* CSR */
+       pa = pci_resource_start(pdev, 2);
+       adapter->csr_va = ioremap_nocache(pa, pci_resource_len(pdev, 2));
+       if (adapter->csr_va == NULL)
+               return -ENOMEM;
+
+       /* Door Bell */
+       pa = pci_resource_start(pdev, 4);
+       adapter->db_va = ioremap_nocache(pa, (128 * 1024));
+       if (adapter->db_va == NULL) {
+               iounmap(adapter->csr_va);
+               return -ENOMEM;
+       }
+
+       /* PCI */
+       pa = pci_resource_start(pdev, 1);
+       adapter->pci_va = ioremap_nocache(pa, pci_resource_len(pdev, 1));
+       if (adapter->pci_va == NULL) {
+               iounmap(adapter->csr_va);
+               iounmap(adapter->db_va);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+/*
+   This function enables the interrupt corresponding to the Event
+   queue ID for the given NetObject
+*/
+void be_enable_eq_intr(struct be_net_object *pnob)
+{
+       struct CQ_DB_AMAP cqdb;
+       cqdb.dw[0] = 0;
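+       /* rearm=1 re-enables the EQ interrupt; nothing is consumed */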
+       AMAP_SET_BITS_PTR(CQ_DB, event, &cqdb, 1);
+       AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, 1);
+       AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, 0);
+       AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, pnob->event_q_id);
+       PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
+}
+
+/*
+   This function disables the interrupt corresponding to the Event
+   queue ID for the given NetObject
+*/
+void be_disable_eq_intr(struct be_net_object *pnob)
+{
+       struct CQ_DB_AMAP cqdb;
+       cqdb.dw[0] = 0;
+       AMAP_SET_BITS_PTR(CQ_DB, event, &cqdb, 1);
+       AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, 0);
+       AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, 0);
+       AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, pnob->event_q_id);
+       PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
+}
+
+/*
+    This function enables the interrupt from the network function
+    of the BladeEngine. Use the function be_enable_eq_intr()
+    to enable the interrupt from the event queue of only one specific
+    NetObject.
+*/
+void be_enable_intr(struct be_net_object *pnob)
+{
+       struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
+       u32 host_intr;
+
+       ctrl.dw[0] = PCICFG1_READ(&pnob->fn_obj, host_timer_int_ctrl);
+       host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
+                                                       hostintr, ctrl.dw);
+       if (!host_intr) {
+               AMAP_SET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
+                       hostintr, ctrl.dw, 1);
+               PCICFG1_WRITE(&pnob->fn_obj, host_timer_int_ctrl,
+                       ctrl.dw[0]);
+       }
+}
+
+/*
+   This function disables the interrupt from the network function of
+   the BladeEngine.  Use the function be_disable_eq_intr() to
+   disable the interrupt from the event queue of only one specific NetObject
+*/
+void be_disable_intr(struct be_net_object *pnob)
+{
+       struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
+       u32 host_intr;
+       ctrl.dw[0] = PCICFG1_READ(&pnob->fn_obj, host_timer_int_ctrl);
+       host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
+                                                       hostintr, ctrl.dw);
+       if (host_intr) {
+               AMAP_SET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR, hostintr,
+                       ctrl.dw, 0);
+               PCICFG1_WRITE(&pnob->fn_obj, host_timer_int_ctrl,
+                       ctrl.dw[0]);
+       }
+}
+
+static int be_enable_msix(struct be_adapter *adapter)
+{
+       int i, ret;
+
+       if (!msix)
+               return -1;
+
+       for (i = 0; i < BE_MAX_REQ_MSIX_VECTORS; i++)
+               adapter->msix_entries[i].entry = i;
+
+       ret = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+               BE_MAX_REQ_MSIX_VECTORS);
+
+       if (ret == 0)
+               adapter->msix_enabled = 1;
+       return ret;
+}
+
+static int be_register_isr(struct be_adapter *adapter,
+               struct be_net_object *pnob)
+{
+       struct net_device *netdev = pnob->netdev;
+       int intx = 0, r;
+
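+       /* try MSI-X first; fall back to legacy INTx if it is unavailable */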
+       netdev->irq = adapter->pdev->irq;
+       r = be_enable_msix(adapter);
+
+       if (r == 0) {
+               r = request_irq(adapter->msix_entries[0].vector,
+                               be_int, IRQF_SHARED, netdev->name, netdev);
+               if (r) {
+                       printk(KERN_WARNING
+                               "MSIX Request IRQ failed - Errno %d\n", r);
+                       intx = 1;
+                       pci_disable_msix(adapter->pdev);
+                       adapter->msix_enabled = 0;
+               }
+       } else {
+               intx = 1;
+       }
+
+       if (intx) {
+               r = request_irq(netdev->irq, be_int, IRQF_SHARED,
+                               netdev->name, netdev);
+               if (r) {
+                       printk(KERN_WARNING
+                               "INTx Request IRQ failed - Errno %d\n", r);
+                       return -1;
+               }
+       }
+       adapter->isr_registered = 1;
+       return 0;
+}
+
+static void be_unregister_isr(struct be_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdevp;
+       if (adapter->isr_registered) {
+               if (adapter->msix_enabled) {
+                       free_irq(adapter->msix_entries[0].vector, netdev);
+                       pci_disable_msix(adapter->pdev);
+                       adapter->msix_enabled = 0;
+               } else {
+                       free_irq(netdev->irq, netdev);
+               }
+               adapter->isr_registered = 0;
+       }
+}
+
+/*
+    This function processes the Flush Completions that are issued by the
+    ARM F/W, when a Recv Ring is destroyed.  A flush completion is
+    identified when a Rx Compl descriptor has the tcpcksum and udpcksum
+    set and the pktsize is 32.  These completions are received on the
+    Rx Completion Queue.
+*/
+static u32 be_process_rx_flush_cmpl(struct be_net_object *pnob)
+{
+       struct ETH_RX_COMPL_AMAP *rxcp;
+       unsigned int i = 0;
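+       /* drain and acknowledge all outstanding Rx CQ entries */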
+       while ((rxcp = be_get_rx_cmpl(pnob)) != NULL) {
+               be_notify_cmpl(pnob, 1, pnob->rx_cq_id, 1);
+               i++;
+       }
+       return i;
+}
+
+static void be_tx_q_clean(struct be_net_object *pnob)
+{
+       while (atomic_read(&pnob->tx_q_used))
+               process_one_tx_compl(pnob, tx_compl_lastwrb_idx_get(pnob));
+}
+
+static void be_rx_q_clean(struct be_net_object *pnob)
+{
+       if (pnob->rx_ctxt) {
+               int i;
+               struct be_rx_page_info *rx_page_info;
+               for (i = 0; i < pnob->rx_q_len; i++) {
+                       rx_page_info = &(pnob->rx_page_info[i]);
+                       if (!pnob->rx_pg_shared || rx_page_info->page_offset) {
+                               pci_unmap_page(pnob->adapter->pdev,
+                                      pci_unmap_addr(rx_page_info, bus),
+                                              pnob->rx_buf_size,
+                                              PCI_DMA_FROMDEVICE);
+                       }
+                       if (rx_page_info->page)
+                               put_page(rx_page_info->page);
+                       memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+               }
+               pnob->rx_pg_info_hd = 0;
+       }
+}
+
+static void be_destroy_netobj(struct be_net_object *pnob)
+{
+       int status;
+
+       if (pnob->tx_q_created) {
+               status = be_eth_sq_destroy(&pnob->tx_q_obj);
+               pnob->tx_q_created = 0;
+       }
+
+       if (pnob->rx_q_created) {
+               status = be_eth_rq_destroy(&pnob->rx_q_obj);
+               if (status != 0) {
+                       status = be_eth_rq_destroy_options(&pnob->rx_q_obj, 0,
+                                                     NULL, NULL);
+                       BUG_ON(status);
+               }
+               pnob->rx_q_created = 0;
+       }
+
+       be_process_rx_flush_cmpl(pnob);
+
+       if (pnob->tx_cq_created) {
+               status = be_cq_destroy(&pnob->tx_cq_obj);
+               pnob->tx_cq_created = 0;
+       }
+
+       if (pnob->rx_cq_created) {
+               status = be_cq_destroy(&pnob->rx_cq_obj);
+               pnob->rx_cq_created = 0;
+       }
+
+       if (pnob->mcc_q_created) {
+               status = be_mcc_ring_destroy(&pnob->mcc_q_obj);
+               pnob->mcc_q_created = 0;
+       }
+       if (pnob->mcc_cq_created) {
+               status = be_cq_destroy(&pnob->mcc_cq_obj);
+               pnob->mcc_cq_created = 0;
+       }
+
+       if (pnob->event_q_created) {
+               status = be_eq_destroy(&pnob->event_q_obj);
+               pnob->event_q_created = 0;
+       }
+       be_function_cleanup(&pnob->fn_obj);
+}
+
+/*
+ * free all resources associated with a pnob
+ * Called at the time of module cleanup as well a any error during
+ * module init.  Some resources may be partially allocated in a NetObj.
+ */
+static void netobject_cleanup(struct be_adapter *adapter,
+                       struct be_net_object *pnob)
+{
+       struct net_device *netdev = adapter->netdevp;
+
+       if (netif_running(netdev)) {
+               netif_stop_queue(netdev);
+               be_wait_nic_tx_cmplx_cmpl(pnob);
+               be_disable_eq_intr(pnob);
+       }
+
+       be_unregister_isr(adapter);
+
+       if (adapter->tasklet_started) {
+               tasklet_kill(&(adapter->sts_handler));
+               adapter->tasklet_started = 0;
+       }
+       if (pnob->fn_obj_created)
+               be_disable_intr(pnob);
+
+       if (adapter->dev_state != BE_DEV_STATE_NONE)
+               unregister_netdev(netdev);
+
+       if (pnob->fn_obj_created)
+               be_destroy_netobj(pnob);
+
+       adapter->net_obj = NULL;
+       adapter->netdevp = NULL;
+
+       be_rx_q_clean(pnob);
+       if (pnob->rx_ctxt) {
+               kfree(pnob->rx_page_info);
+               kfree(pnob->rx_ctxt);
+       }
+
+       be_tx_q_clean(pnob);
+       kfree(pnob->tx_ctxt);
+
+       if (pnob->mcc_q)
+               pci_free_consistent(adapter->pdev, pnob->mcc_q_size,
+                       pnob->mcc_q, pnob->mcc_q_bus);
+
+       if (pnob->mcc_wrb_ctxt)
+               free_pages((unsigned long)pnob->mcc_wrb_ctxt,
+                          get_order(pnob->mcc_wrb_ctxt_size));
+
+       if (pnob->mcc_cq)
+               pci_free_consistent(adapter->pdev, pnob->mcc_cq_size,
+                       pnob->mcc_cq, pnob->mcc_cq_bus);
+
+       if (pnob->event_q)
+               pci_free_consistent(adapter->pdev, pnob->event_q_size,
+                       pnob->event_q, pnob->event_q_bus);
+
+       if (pnob->tx_cq)
+               pci_free_consistent(adapter->pdev, pnob->tx_cq_size,
+                       pnob->tx_cq, pnob->tx_cq_bus);
+
+       if (pnob->tx_q)
+               pci_free_consistent(adapter->pdev, pnob->tx_q_size,
+                       pnob->tx_q, pnob->tx_q_bus);
+
+       if (pnob->rx_q)
+               pci_free_consistent(adapter->pdev, pnob->rx_q_size,
+                       pnob->rx_q, pnob->rx_q_bus);
+
+       if (pnob->rx_cq)
+               pci_free_consistent(adapter->pdev, pnob->rx_cq_size,
+                       pnob->rx_cq, pnob->rx_cq_bus);
+
+       if (pnob->mb_ptr)
+               pci_free_consistent(adapter->pdev, pnob->mb_size, pnob->mb_ptr,
+                       pnob->mb_bus);
+
+       free_netdev(netdev);
+}
+
+
+static int be_nob_ring_alloc(struct be_adapter *adapter,
+       struct be_net_object *pnob)
+{
+       u32 size;
+
+       /* Mailbox rd; the mailbox pointer needs to be 16-byte aligned */
+       pnob->mb_size = sizeof(struct MCC_MAILBOX_AMAP) + 16;
+       pnob->mb_ptr = pci_alloc_consistent(adapter->pdev, pnob->mb_size,
+                               &pnob->mb_bus);
+       if (!pnob->mb_ptr)
+               return -1;
+       memset(pnob->mb_ptr, 0, pnob->mb_size);
+       pnob->mb_rd.va = PTR_ALIGN(pnob->mb_ptr, 16);
+       pnob->mb_rd.pa = PTR_ALIGN(pnob->mb_bus, 16);
+       pnob->mb_rd.length = sizeof(struct MCC_MAILBOX_AMAP);
+       /*
+        * Event queue
+        */
+       pnob->event_q_len = EVENT_Q_LEN;
+       pnob->event_q_size = pnob->event_q_len * sizeof(struct EQ_ENTRY_AMAP);
+       pnob->event_q = pci_alloc_consistent(adapter->pdev, pnob->event_q_size,
+                               &pnob->event_q_bus);
+       if (!pnob->event_q)
+               return -1;
+       memset(pnob->event_q, 0, pnob->event_q_size);
+       /*
+        * Eth TX queue
+        */
+       pnob->tx_q_len = ETH_TXQ_LEN;
+       pnob->tx_q_port = 0;
+       pnob->tx_q_size =  pnob->tx_q_len * sizeof(struct ETH_WRB_AMAP);
+       pnob->tx_q = pci_alloc_consistent(adapter->pdev, pnob->tx_q_size,
+                               &pnob->tx_q_bus);
+       if (!pnob->tx_q)
+               return -1;
+       memset(pnob->tx_q, 0, pnob->tx_q_size);
+       /*
+        * Eth TX Compl queue
+        */
+       pnob->txcq_len = ETH_TXCQ_LEN;
+       pnob->tx_cq_size = pnob->txcq_len * sizeof(struct ETH_TX_COMPL_AMAP);
+       pnob->tx_cq = pci_alloc_consistent(adapter->pdev, pnob->tx_cq_size,
+                               &pnob->tx_cq_bus);
+       if (!pnob->tx_cq)
+               return -1;
+       memset(pnob->tx_cq, 0, pnob->tx_cq_size);
+       /*
+        * Eth RX queue
+        */
+       pnob->rx_q_len = ETH_RXQ_LEN;
+       pnob->rx_q_size =  pnob->rx_q_len * sizeof(struct ETH_RX_D_AMAP);
+       pnob->rx_q = pci_alloc_consistent(adapter->pdev, pnob->rx_q_size,
+                               &pnob->rx_q_bus);
+       if (!pnob->rx_q)
+               return -1;
+       memset(pnob->rx_q, 0, pnob->rx_q_size);
+       /*
+        * Eth Unicast RX Compl queue
+        */
+       pnob->rx_cq_len = ETH_UC_RXCQ_LEN;
+       pnob->rx_cq_size =  pnob->rx_cq_len *
+                       sizeof(struct ETH_RX_COMPL_AMAP);
+       pnob->rx_cq = pci_alloc_consistent(adapter->pdev, pnob->rx_cq_size,
+                               &pnob->rx_cq_bus);
+       if (!pnob->rx_cq)
+               return -1;
+       memset(pnob->rx_cq, 0, pnob->rx_cq_size);
+
+       /* TX resources */
+       size = pnob->tx_q_len * sizeof(void **);
+       pnob->tx_ctxt = kzalloc(size, GFP_KERNEL);
+       if (pnob->tx_ctxt == NULL)
+               return -1;
+
+       /* RX resources */
+       size = pnob->rx_q_len * sizeof(void *);
+       pnob->rx_ctxt = kzalloc(size, GFP_KERNEL);
+       if (pnob->rx_ctxt == NULL)
+               return -1;
+
+       size = (pnob->rx_q_len * sizeof(struct be_rx_page_info));
+       pnob->rx_page_info = kzalloc(size, GFP_KERNEL);
+       if (pnob->rx_page_info == NULL)
+               return -1;
+
+       adapter->eth_statsp = kzalloc(sizeof(struct FWCMD_ETH_GET_STATISTICS),
+                               GFP_KERNEL);
+       if (adapter->eth_statsp == NULL)
+               return -1;
+       pnob->rx_buf_size = rxbuf_size;
+       return 0;
+}
+
+/*
+    This function initializes the be_net_object for subsequent
+    network operations.
+
+    Before calling this function, the driver must have allocated
+    space for the NetObject structure, initialized the structure,
+    allocated DMAable memory for all the network queues that form
+    part of the NetObject and populated the start address (virtual)
+    and number of entries allocated for each queue in the NetObject structure.
+
+    The driver must also have allocated memory to hold the
+    mailbox structure (MCC_MAILBOX) and post the physical address,
+    virtual addresses and the size of the mailbox memory in the
+    NetObj.mb_rd.  This structure is used by BECLIB for
+    initial communication with the embedded MCC processor. BECLIB
+    uses the mailbox until MCC rings are created for more efficient
+    communication with the MCC processor.
+
+    If the driver wants to create multiple network interfaces for more
+    than one protection domain, it can call be_create_netobj()
+    multiple times, once for each protection domain.  A maximum of
+    32 protection domains is supported.
+
+*/
+static int
+be_create_netobj(struct be_net_object *pnob, u8 __iomem *csr_va,
+       u8 __iomem *db_va, u8 __iomem *pci_va)
+{
+       int status = 0;
+       bool  eventable = false, tx_no_delay = false, rx_no_delay = false;
+       struct be_eq_object *eq_objectp = NULL;
+       struct be_function_object *pfob = &pnob->fn_obj;
+       struct ring_desc rd;
+       u32 set_rxbuf_size;
+       u32 tx_cmpl_wm = CEV_WMARK_96;  /* 0xffffffff to disable */
+       u32 rx_cmpl_wm = CEV_WMARK_160; /* 0xffffffff to disable */
+       u32 eq_delay = 0; /* delay in 8usec units. 0xffffffff to disable */
+
+       memset(&rd, 0, sizeof(struct ring_desc));
+
+       status = be_function_object_create(csr_va, db_va, pci_va,
+                       BE_FUNCTION_TYPE_NETWORK, &pnob->mb_rd, pfob);
+       if (status != BE_SUCCESS)
+               return status;
+       pnob->fn_obj_created = true;
+
+       if (tx_cmpl_wm == 0xffffffff)
+               tx_no_delay = true;
+       if (rx_cmpl_wm == 0xffffffff)
+               rx_no_delay = true;
+       /*
+        * now create the necessary rings
+        * Event Queue first.
+        */
+       if (pnob->event_q_len) {
+               rd.va = pnob->event_q;
+               rd.pa = pnob->event_q_bus;
+               rd.length = pnob->event_q_size;
+
+               status = be_eq_create(pfob, &rd, 4, pnob->event_q_len,
+                               (u32) -1,       /* CEV_WMARK_* or -1 */
+                               eq_delay,       /* in 8us units, or -1 */
+                               &pnob->event_q_obj);
+               if (status != BE_SUCCESS)
+                       goto error_ret;
+               pnob->event_q_id = pnob->event_q_obj.eq_id;
+               pnob->event_q_created = 1;
+               eventable = true;
+               eq_objectp = &pnob->event_q_obj;
+       }
+       /*
+        * Now Eth Tx Compl. queue.
+        */
+       if (pnob->txcq_len) {
+               rd.va = pnob->tx_cq;
+               rd.pa = pnob->tx_cq_bus;
+               rd.length = pnob->tx_cq_size;
+
+               status = be_cq_create(pfob, &rd,
+                       pnob->txcq_len * sizeof(struct ETH_TX_COMPL_AMAP),
+                       false,  /* solicited events */
+                       tx_no_delay,    /* nodelay  */
+                       tx_cmpl_wm,     /* Watermark encodings */
+                       eq_objectp, &pnob->tx_cq_obj);
+               if (status != BE_SUCCESS)
+                       goto error_ret;
+
+               pnob->tx_cq_id = pnob->tx_cq_obj.cq_id;
+               pnob->tx_cq_created = 1;
+       }
+       /*
+        * Eth Tx queue
+        */
+       if (pnob->tx_q_len) {
+               struct be_eth_sq_parameters ex_params = { 0 };
+               u32 type;
+
+               if (pnob->tx_q_port) {
+                       /* TXQ to be bound to a specific port */
+                       type = BE_ETH_TX_RING_TYPE_BOUND;
+                       ex_params.port = pnob->tx_q_port - 1;
+               } else
+                       type = BE_ETH_TX_RING_TYPE_STANDARD;
+
+               rd.va = pnob->tx_q;
+               rd.pa = pnob->tx_q_bus;
+               rd.length = pnob->tx_q_size;
+
+               status = be_eth_sq_create_ex(pfob, &rd,
+                               pnob->tx_q_len * sizeof(struct ETH_WRB_AMAP),
+                               type, 2, &pnob->tx_cq_obj,
+                               &ex_params, &pnob->tx_q_obj);
+
+               if (status != BE_SUCCESS)
+                       goto error_ret;
+
+               pnob->tx_q_id = pnob->tx_q_obj.bid;
+               pnob->tx_q_created = 1;
+       }
+       /*
+        * Now Eth Rx compl. queue.  Always needed.
+        */
+       rd.va = pnob->rx_cq;
+       rd.pa = pnob->rx_cq_bus;
+       rd.length = pnob->rx_cq_size;
+
+       status = be_cq_create(pfob, &rd,
+                       pnob->rx_cq_len * sizeof(struct ETH_RX_COMPL_AMAP),
+                       false,  /* solicited events */
+                       rx_no_delay,    /* nodelay  */
+                       rx_cmpl_wm,     /* Watermark encodings */
+                       eq_objectp, &pnob->rx_cq_obj);
+       if (status != BE_SUCCESS)
+               goto error_ret;
+
+       pnob->rx_cq_id = pnob->rx_cq_obj.cq_id;
+       pnob->rx_cq_created = 1;
+
+       status = be_eth_rq_set_frag_size(pfob, pnob->rx_buf_size,
+                       (u32 *) &set_rxbuf_size);
+       if (status != BE_SUCCESS) {
+               be_eth_rq_get_frag_size(pfob, (u32 *) &pnob->rx_buf_size);
+               if ((pnob->rx_buf_size != 2048) && (pnob->rx_buf_size != 4096)
+                   && (pnob->rx_buf_size != 8192))
+                       goto error_ret;
+       } else {
+               if (pnob->rx_buf_size != set_rxbuf_size)
+                       pnob->rx_buf_size = set_rxbuf_size;
+       }
+       /*
+        * Eth RX queue. be_eth_rq_create() always assumes a 2-page ring size
+        */
+       rd.va = pnob->rx_q;
+       rd.pa = pnob->rx_q_bus;
+       rd.length = pnob->rx_q_size;
+
+       status = be_eth_rq_create(pfob, &rd, &pnob->rx_cq_obj,
+                            &pnob->rx_cq_obj, &pnob->rx_q_obj);
+
+       if (status != BE_SUCCESS)
+               goto error_ret;
+
+       pnob->rx_q_id = pnob->rx_q_obj.rid;
+       pnob->rx_q_created = 1;
+
+       return BE_SUCCESS;      /* All required queues created. */
+
+error_ret:
+       be_destroy_netobj(pnob);
+       return status;
+}
+
+static int be_nob_ring_init(struct be_adapter *adapter,
+                               struct be_net_object *pnob)
+{
+       int status;
+
+       pnob->event_q_tl = 0;
+
+       pnob->tx_q_hd = 0;
+       pnob->tx_q_tl = 0;
+
+       pnob->tx_cq_tl = 0;
+
+       pnob->rx_cq_tl = 0;
+
+       memset(pnob->event_q, 0, pnob->event_q_size);
+       memset(pnob->tx_cq, 0, pnob->tx_cq_size);
+       memset(pnob->tx_ctxt, 0, pnob->tx_q_len * sizeof(void **));
+       memset(pnob->rx_ctxt, 0, pnob->rx_q_len * sizeof(void *));
+       pnob->rx_pg_info_hd = 0;
+       pnob->rx_q_hd = 0;
+       atomic_set(&pnob->rx_q_posted, 0);
+
+       status = be_create_netobj(pnob, adapter->csr_va, adapter->db_va,
+                               adapter->pci_va);
+       if (status != BE_SUCCESS)
+               return -1;
+
+       be_post_eth_rx_buffs(pnob);
+       return 0;
+}
+
+/* This function handles async callback for link status */
+static void
+be_link_status_async_callback(void *context, u32 event_code, void *event)
+{
+       struct ASYNC_EVENT_LINK_STATE_AMAP *link_status = event;
+       struct be_adapter *adapter = context;
+       bool link_enable = false;
+       struct be_net_object *pnob;
+       struct ASYNC_EVENT_TRAILER_AMAP *async_trailer;
+       struct net_device *netdev;
+       u32 async_event_code, async_event_type, active_port;
+       u32 port0_link_status, port1_link_status, port0_duplex, port1_duplex;
+       u32 port0_speed, port1_speed;
+
+       if (event_code != ASYNC_EVENT_CODE_LINK_STATE) {
+               /* Not our event to handle */
+               return;
+       }
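+       /* the async event trailer occupies the end of the MCC CQ entry */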
+       async_trailer = (struct ASYNC_EVENT_TRAILER_AMAP *)
+           ((u8 *) event + sizeof(struct MCC_CQ_ENTRY_AMAP) -
+            sizeof(struct ASYNC_EVENT_TRAILER_AMAP));
+
+       async_event_code = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_code,
+                                            async_trailer);
+       BUG_ON(async_event_code != ASYNC_EVENT_CODE_LINK_STATE);
+
+       pnob = adapter->net_obj;
+       netdev = pnob->netdev;
+
+       /* Determine if this event is a switch VLD or a physical link event */
+       async_event_type = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER, event_type,
+                                            async_trailer);
+       active_port = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+                                       active_port, link_status);
+       port0_link_status = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+                                             port0_link_status, link_status);
+       port1_link_status = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+                                             port1_link_status, link_status);
+       port0_duplex = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+                                        port0_duplex, link_status);
+       port1_duplex = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+                                        port1_duplex, link_status);
+       port0_speed = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+                                       port0_speed, link_status);
+       port1_speed = AMAP_GET_BITS_PTR(ASYNC_EVENT_LINK_STATE,
+                                       port1_speed, link_status);
+       if (async_event_type == NTWK_LINK_TYPE_VIRTUAL) {
+               adapter->be_stat.bes_link_change_virtual++;
+               if (adapter->be_link_sts->active_port != active_port) {
+                       dev_notice(&netdev->dev,
+                              "Active port changed due to VLD on switch\n");
+               } else {
+                       dev_notice(&netdev->dev, "Link status update\n");
+               }
+
+       } else {
+               adapter->be_stat.bes_link_change_physical++;
+               if (adapter->be_link_sts->active_port != active_port) {
+                       dev_notice(&netdev->dev,
+                              "Active port changed due to port link"
+                              " status change\n");
+               } else {
+                       dev_notice(&netdev->dev, "Link status update\n");
+               }
+       }
+
+       memset(adapter->be_link_sts, 0, sizeof(*adapter->be_link_sts));
+
+       if ((port0_link_status == ASYNC_EVENT_LINK_UP) ||
+           (port1_link_status == ASYNC_EVENT_LINK_UP)) {
+               if ((adapter->port0_link_sts == BE_PORT_LINK_DOWN) &&
+                   (adapter->port1_link_sts == BE_PORT_LINK_DOWN)) {
+                       /* both ports were down earlier, so link is coming up */
+                       link_enable = true;
+               }
+
+               if (port0_link_status == ASYNC_EVENT_LINK_UP) {
+                       adapter->port0_link_sts = BE_PORT_LINK_UP;
+                       adapter->be_link_sts->mac0_duplex = port0_duplex;
+                       adapter->be_link_sts->mac0_speed = port0_speed;
+                       if (active_port == NTWK_PORT_A)
+                               adapter->be_link_sts->active_port = 0;
+               } else
+                       adapter->port0_link_sts = BE_PORT_LINK_DOWN;
+
+               if (port1_link_status == ASYNC_EVENT_LINK_UP) {
+                       adapter->port1_link_sts = BE_PORT_LINK_UP;
+                       adapter->be_link_sts->mac1_duplex = port1_duplex;
+                       adapter->be_link_sts->mac1_speed = port1_speed;
+                       if (active_port == NTWK_PORT_B)
+                               adapter->be_link_sts->active_port = 1;
+               } else
+                       adapter->port1_link_sts = BE_PORT_LINK_DOWN;
+
+               dev_info(&netdev->dev, "Link Properties:\n");
+               be_print_link_info(adapter->be_link_sts);
+
+               if (!link_enable)
+                       return;
+               /*
+                * Both ports were down previously, but at least one of
+                * them has come up. If this netdevice's carrier is not
+                * up yet, indicate the new link state to the stack.
+                */
+               if (!netif_carrier_ok(netdev)) {
+                       netif_start_queue(netdev);
+                       netif_carrier_on(netdev);
+               }
+               return;
+       }
+
+       /* Now both the ports are down. Tell the stack about it */
+       dev_info(&netdev->dev, "Both ports are down\n");
+       adapter->port0_link_sts = BE_PORT_LINK_DOWN;
+       adapter->port1_link_sts = BE_PORT_LINK_DOWN;
+       if (netif_carrier_ok(netdev)) {
+               netif_carrier_off(netdev);
+               netif_stop_queue(netdev);
+       }
+       return;
+}
+
+static int be_mcc_create(struct be_adapter *adapter)
+{
+       struct be_net_object *pnob;
+
+       pnob = adapter->net_obj;
+       /*
+        * Create the MCC ring so that all further communication with
+        * MCC can go through the ring. We do this at the end since
+        * we do not want to be dealing with interrupts until the
+        * initialization is complete.
+        */
+       pnob->mcc_q_len = MCC_Q_LEN;
+       pnob->mcc_q_size = pnob->mcc_q_len * sizeof(struct MCC_WRB_AMAP);
+       pnob->mcc_q =  pci_alloc_consistent(adapter->pdev, pnob->mcc_q_size,
+                               &pnob->mcc_q_bus);
+       if (!pnob->mcc_q)
+               return -1;
+       /*
+        * space for MCC WRB context
+        */
+       pnob->mcc_wrb_ctxtLen = MCC_Q_LEN;
+       pnob->mcc_wrb_ctxt_size =  pnob->mcc_wrb_ctxtLen *
+               sizeof(struct be_mcc_wrb_context);
+       pnob->mcc_wrb_ctxt = (void *)__get_free_pages(GFP_KERNEL,
+               get_order(pnob->mcc_wrb_ctxt_size));
+       if (pnob->mcc_wrb_ctxt == NULL)
+               return -1;
+       /*
+        * Space for MCC compl. ring
+        */
+       pnob->mcc_cq_len = MCC_CQ_LEN;
+       pnob->mcc_cq_size = pnob->mcc_cq_len * sizeof(struct MCC_CQ_ENTRY_AMAP);
+       pnob->mcc_cq = pci_alloc_consistent(adapter->pdev, pnob->mcc_cq_size,
+                               &pnob->mcc_cq_bus);
+       if (!pnob->mcc_cq)
+               return -1;
+       return 0;
+}
+
+/*
+    This function creates the MCC request and completion ring required
+    for communicating with the ARM processor.  The caller must have
+    allocated required amount of memory for the MCC ring and MCC
+    completion ring and posted the virtual address and number of
+    entries in the corresponding members (mcc_q and mcc_cq) in the
+    NetObject structure.
+
+    When this call is completed, all further communication with
+    ARM will switch from mailbox to this ring.
+
+    pnob       - Pointer to the NetObject structure. This NetObject should
+                 have been created using a previous call to be_create_netobj()
+*/
+int be_create_mcc_rings(struct be_net_object *pnob)
+{
+       int status = 0;
+       struct ring_desc rd;
+       struct be_function_object *pfob = &pnob->fn_obj;
+
+       memset(&rd, 0, sizeof(struct ring_desc));
+       if (pnob->mcc_cq_len) {
+               rd.va = pnob->mcc_cq;
+               rd.pa = pnob->mcc_cq_bus;
+               rd.length = pnob->mcc_cq_size;
+
+               status = be_cq_create(pfob, &rd,
+                       pnob->mcc_cq_len * sizeof(struct MCC_CQ_ENTRY_AMAP),
+                       false,  /* solicited events */
+                       true,   /* nodelay  */
+                       0,      /* 0 Watermark since Nodelay is true */
+                       &pnob->event_q_obj,
+                       &pnob->mcc_cq_obj);
+
+               if (status != BE_SUCCESS)
+                       return status;
+
+               pnob->mcc_cq_id = pnob->mcc_cq_obj.cq_id;
+               pnob->mcc_cq_created = 1;
+       }
+       if (pnob->mcc_q_len) {
+               rd.va = pnob->mcc_q;
+               rd.pa = pnob->mcc_q_bus;
+               rd.length = pnob->mcc_q_size;
+
+               status = be_mcc_ring_create(pfob, &rd,
+                               pnob->mcc_q_len * sizeof(struct MCC_WRB_AMAP),
+                               pnob->mcc_wrb_ctxt, pnob->mcc_wrb_ctxtLen,
+                               &pnob->mcc_cq_obj, &pnob->mcc_q_obj);
+
+               if (status != BE_SUCCESS)
+                       return status;
+
+               pnob->mcc_q_created = 1;
+       }
+       return BE_SUCCESS;
+}
+
+static int be_mcc_init(struct be_adapter *adapter)
+{
+       u32 r;
+       struct be_net_object *pnob;
+
+       pnob = adapter->net_obj;
+       memset(pnob->mcc_q, 0, pnob->mcc_q_size);
+       pnob->mcc_q_hd = 0;
+
+       memset(pnob->mcc_wrb_ctxt, 0, pnob->mcc_wrb_ctxt_size);
+
+       memset(pnob->mcc_cq, 0, pnob->mcc_cq_size);
+       pnob->mcc_cq_tl = 0;
+
+       r = be_create_mcc_rings(adapter->net_obj);
+       if (r != BE_SUCCESS)
+               return -1;
+
+       return 0;
+}
+
+static void be_remove(struct pci_dev *pdev)
+{
+       struct be_net_object *pnob;
+       struct be_adapter *adapter;
+
+       adapter = pci_get_drvdata(pdev);
+       if (!adapter)
+               return;
+
+       pci_set_drvdata(pdev, NULL);
+       pnob = (struct be_net_object *)adapter->net_obj;
+
+       flush_scheduled_work();
+
+       if (pnob) {
+               /* Unregister async callback function for link status updates */
+               if (pnob->mcc_q_created)
+                       be_mcc_add_async_event_callback(&pnob->mcc_q_obj,
+                                                               NULL, NULL);
+               netobject_cleanup(adapter, pnob);
+       }
+
+       if (adapter->csr_va)
+               iounmap(adapter->csr_va);
+       if (adapter->db_va)
+               iounmap(adapter->db_va);
+       if (adapter->pci_va)
+               iounmap(adapter->pci_va);
+
+       pci_release_regions(adapter->pdev);
+       pci_disable_device(adapter->pdev);
+
+       kfree(adapter->be_link_sts);
+       kfree(adapter->eth_statsp);
+
+       if (adapter->timer_ctxt.get_stats_timer.function)
+               del_timer_sync(&adapter->timer_ctxt.get_stats_timer);
+       kfree(adapter);
+}
+
+/*
+ * This function is called by the PCI sub-system when it finds a PCI
+ * device with dev/vendor IDs that match with one of our devices.
+ * All of the driver initialization is done in this function.
+ */
+static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
+{
+       int status = 0;
+       struct be_adapter *adapter;
+       struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD get_fwv;
+       struct be_net_object *pnob;
+       struct net_device *netdev;
+
+       status = pci_enable_device(pdev);
+       if (status)
+               goto error;
+
+       status = pci_request_regions(pdev, be_driver_name);
+       if (status)
+               goto error_pci_req;
+
+       pci_set_master(pdev);
+       adapter = kzalloc(sizeof(struct be_adapter), GFP_KERNEL);
+       if (adapter == NULL) {
+               status = -ENOMEM;
+               goto error_adapter;
+       }
+       adapter->dev_state = BE_DEV_STATE_NONE;
+       adapter->pdev = pdev;
+       pci_set_drvdata(pdev, adapter);
+
+       adapter->enable_aic = 1;
+       adapter->max_eqd = MAX_EQD;
+       adapter->min_eqd = 0;
+       adapter->cur_eqd = 0;
+
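+       /* prefer 64-bit DMA addressing; fall back to a 32-bit mask */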
+       status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+       if (!status) {
+               adapter->dma_64bit_cap = true;
+       } else {
+               adapter->dma_64bit_cap = false;
+               status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+               if (status != 0) {
+                       printk(KERN_ERR "Could not set PCI DMA Mask\n");
+                       goto cleanup;
+               }
+       }
+
+       status = init_pci_be_function(adapter, pdev);
+       if (status != 0) {
+               printk(KERN_ERR "Failed to map PCI BARS\n");
+               status = -ENOMEM;
+               goto cleanup;
+       }
+
+       be_trace_set_level(DL_ALWAYS | DL_ERR);
+
+       adapter->be_link_sts = kmalloc(sizeof(struct BE_LINK_STATUS),
+                                       GFP_KERNEL);
+       if (adapter->be_link_sts == NULL) {
+               printk(KERN_ERR "Memory allocation for link status "
+                      "buffer failed\n");
+               goto cleanup;
+       }
+       spin_lock_init(&adapter->txq_lock);
+
+       netdev = alloc_etherdev(sizeof(struct be_net_object));
+       if (netdev == NULL) {
+               status = -ENOMEM;
+               goto cleanup;
+       }
+       pnob = netdev->priv;
+       adapter->net_obj = pnob;
+       adapter->netdevp = netdev;
+       pnob->adapter = adapter;
+       pnob->netdev = netdev;
+
+       status = be_nob_ring_alloc(adapter, pnob);
+       if (status != 0)
+               goto cleanup;
+
+       status = be_nob_ring_init(adapter, pnob);
+       if (status != 0)
+               goto cleanup;
+
+       be_rxf_mac_address_read_write(&pnob->fn_obj, false, false, false,
+               false, false, netdev->dev_addr, NULL, NULL);
+
+       netdev->init = &benet_init;
+       netif_carrier_off(netdev);
+       netif_stop_queue(netdev);
+
+       SET_NETDEV_DEV(netdev, &(adapter->pdev->dev));
+
+       netif_napi_add(netdev, &pnob->napi, be_poll, 64);
+
+       /* if the rx_frag size is 2K, one page is shared as two RX frags */
+       pnob->rx_pg_shared = (pnob->rx_buf_size <= PAGE_SIZE / 2);
+       if (pnob->rx_buf_size != rxbuf_size) {
+               printk(KERN_WARNING
+                      "Could not set Rx buffer size to %d. Using %d\n",
+                                      rxbuf_size, pnob->rx_buf_size);
+               rxbuf_size = pnob->rx_buf_size;
+       }
+
+       tasklet_init(&(adapter->sts_handler), be_process_intr,
+                    (unsigned long)adapter);
+       adapter->tasklet_started = 1;
+       spin_lock_init(&(adapter->int_lock));
+
+       status = be_register_isr(adapter, pnob);
+       if (status != 0)
+               goto cleanup;
+
+       adapter->rx_csum = 1;
+       adapter->max_rx_coal = BE_LRO_MAX_PKTS;
+
+       memset(&get_fwv, 0,
+              sizeof(struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD));
+       printk(KERN_INFO "BladeEngine Driver version:%s. "
+              "Copyright ServerEngines Corporation 2005 - 2008\n",
+                              be_drvr_ver);
+       status = be_function_get_fw_version(&pnob->fn_obj, &get_fwv, NULL,
+                                           NULL);
+       if (status == BE_SUCCESS) {
+               strncpy(be_fw_ver, get_fwv.firmware_version_string, 32);
+               printk(KERN_INFO "BladeEngine Firmware Version:%s\n",
+                      get_fwv.firmware_version_string);
+       } else {
+               printk(KERN_WARNING "Unable to get BE Firmware Version\n");
+       }
+
+       sema_init(&adapter->get_eth_stat_sem, 0);
+       init_timer(&adapter->timer_ctxt.get_stats_timer);
+       atomic_set(&adapter->timer_ctxt.get_stat_flag, 0);
+       adapter->timer_ctxt.get_stats_timer.function =
+           &be_get_stats_timer_handler;
+
+       status = be_mcc_create(adapter);
+       if (status < 0)
+               goto cleanup;
+       status = be_mcc_init(adapter);
+       if (status < 0)
+               goto cleanup;
+
+       status = be_mcc_add_async_event_callback(&adapter->net_obj->mcc_q_obj,
+                        be_link_status_async_callback, (void *)adapter);
+       if (status != BE_SUCCESS) {
+               printk(KERN_WARNING "add_async_event_callback failed\n");
+               printk(KERN_WARNING
+                      "Link status changes may not be reflected\n");
+       }
+
+       status = register_netdev(netdev);
+       if (status != 0)
+               goto cleanup;
+       be_update_link_status(adapter);
+       adapter->dev_state = BE_DEV_STATE_INIT;
+       return 0;
+
+cleanup:
+       be_remove(pdev);
+       return status;
+error_adapter:
+       pci_release_regions(pdev);
+error_pci_req:
+       pci_disable_device(pdev);
+error:
+       printk(KERN_ERR "BladeEngine initialization failed\n");
+       return status;
+}
+
+/*
+ * Get the current link status and print the status on console
+ */
+void be_update_link_status(struct be_adapter *adapter)
+{
+       int status;
+       struct be_net_object *pnob = adapter->net_obj;
+
+       status = be_rxf_link_status(&pnob->fn_obj, adapter->be_link_sts, NULL,
+                       NULL, NULL);
+       if (status == BE_SUCCESS) {
+               if (adapter->be_link_sts->mac0_speed &&
+                   adapter->be_link_sts->mac0_duplex)
+                       adapter->port0_link_sts = BE_PORT_LINK_UP;
+               else
+                       adapter->port0_link_sts = BE_PORT_LINK_DOWN;
+
+               if (adapter->be_link_sts->mac1_speed &&
+                   adapter->be_link_sts->mac1_duplex)
+                       adapter->port1_link_sts = BE_PORT_LINK_UP;
+               else
+                       adapter->port1_link_sts = BE_PORT_LINK_DOWN;
+
+               dev_info(&pnob->netdev->dev, "Link Properties:\n");
+               be_print_link_info(adapter->be_link_sts);
+               return;
+       }
+       dev_info(&pnob->netdev->dev, "Could not get link status\n");
+}
+
+
+#ifdef CONFIG_PM
+static void
+be_pm_cleanup(struct be_adapter *adapter,
+             struct be_net_object *pnob, struct net_device *netdev)
+{
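+       /* quiesce the interface for suspend: stop TX, wait for pending
+        * transmits, silence interrupts and tear down the queues
+        */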
+       netif_carrier_off(netdev);
+       netif_stop_queue(netdev);
+
+       be_wait_nic_tx_cmplx_cmpl(pnob);
+       be_disable_eq_intr(pnob);
+
+       if (adapter->tasklet_started) {
+               tasklet_kill(&adapter->sts_handler);
+               adapter->tasklet_started = 0;
+       }
+
+       be_unregister_isr(adapter);
+       be_disable_intr(pnob);
+
+       be_tx_q_clean(pnob);
+       be_rx_q_clean(pnob);
+
+       be_destroy_netobj(pnob);
+}
+
+static int be_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct be_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev =  adapter->netdevp;
+       struct be_net_object *pnob = (struct be_net_object *)netdev->priv;
+
+       adapter->dev_pm_state = adapter->dev_state;
+       adapter->dev_state = BE_DEV_STATE_SUSPEND;
+
+       netif_device_detach(netdev);
+       if (netif_running(netdev))
+               be_pm_cleanup(adapter, pnob, netdev);
+
+       pci_enable_wake(pdev, 3, 1);
+       pci_enable_wake(pdev, 4, 1);    /* D3 Cold = 4 */
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       return 0;
+}
+
+static void be_up(struct be_adapter *adapter)
+{
+       struct be_net_object *pnob = adapter->net_obj;
+
+       if (pnob->num_vlans != 0)
+               be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
+                       pnob->vlan_tag, NULL, NULL, NULL);
+
+}
+
+static int be_resume(struct pci_dev *pdev)
+{
+       int status = 0;
+       struct be_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev =  adapter->netdevp;
+       struct be_net_object *pnob = (struct be_net_object *)netdev->priv;
+
+       netif_device_detach(netdev);
+
+       status = pci_enable_device(pdev);
+       if (status)
+               return status;
+
+       pci_set_power_state(pdev, 0);
+       pci_restore_state(pdev);
+       pci_enable_wake(pdev, 3, 0);
+       pci_enable_wake(pdev, 4, 0);    /* 4 is D3 cold */
+
+       netif_carrier_on(netdev);
+       netif_start_queue(netdev);
+
+       if (netif_running(netdev)) {
+               be_rxf_mac_address_read_write(&pnob->fn_obj, false, false,
+                       false, true, false, netdev->dev_addr, NULL, NULL);
+
+               status = be_nob_ring_init(adapter, pnob);
+               if (status < 0)
+                       return status;
+
+               tasklet_init(&(adapter->sts_handler), be_process_intr,
+                            (unsigned long)adapter);
+               adapter->tasklet_started = 1;
+
+               status = be_register_isr(adapter, pnob);
+               if (status != 0) {
+                       printk(KERN_ERR "be_register_isr failed\n");
+                       return status;
+               }
+
+               status = be_mcc_init(adapter);
+               if (status < 0) {
+                       printk(KERN_ERR "be_mcc_init failed\n");
+                       return status;
+               }
+               be_update_link_status(adapter);
+               /*
+                * Register async call back function to handle link
+                * status updates
+                */
+               status = be_mcc_add_async_event_callback(
+                               &adapter->net_obj->mcc_q_obj,
+                               be_link_status_async_callback, (void *)adapter);
+               if (status != BE_SUCCESS) {
+                       printk(KERN_WARNING "add_async_event_callback failed\n");
+                       printk(KERN_WARNING
+                              "Link status changes may not be reflected\n");
+               }
+               be_enable_intr(pnob);
+               be_enable_eq_intr(pnob);
+               be_up(adapter);
+       }
+       netif_device_attach(netdev);
+       adapter->dev_state = adapter->dev_pm_state;
+       return 0;
+}
+
+#endif
+
+/* Wait until there are no more pending transmits */
+void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *pnob)
+{
+       int i;
+
+       /* Wait for 20us * 50000 (= 1s) and no more */
+       i = 0;
+       while ((pnob->tx_q_tl != pnob->tx_q_hd) && (i < 50000)) {
+               ++i;
+               udelay(20);
+       }
+
+       /* Check for no more pending transmits */
+       if (i >= 50000) {
+               printk(KERN_WARNING
+                      "Did not receive completions for all TX requests\n");
+       }
+}
+
+static struct pci_driver be_driver = {
+       .name = be_driver_name,
+       .id_table = be_device_id_table,
+       .probe = be_probe,
+#ifdef CONFIG_PM
+       .suspend = be_suspend,
+       .resume = be_resume,
+#endif
+       .remove = be_remove
+};
+
+/*
+ * Module init entry point. Registers our device driver and returns.
+ * Our probe will be called if the device is found.
+ */
+static int __init be_init_module(void)
+{
+       int ret;
+
+       if (rxbuf_size != 8192 && rxbuf_size != 4096 && rxbuf_size != 2048) {
+               printk(KERN_WARNING
+                      "Unsupported receive buffer size (%d) requested\n",
+                      rxbuf_size);
+               printk(KERN_WARNING
+                      "Must be 2048, 4096 or 8192. Defaulting to 2048\n");
+               rxbuf_size = 2048;
+       }
+
+       ret = pci_register_driver(&be_driver);
+
+       return ret;
+}
+
+module_init(be_init_module);
+
+/*
+ * be_exit_module - Driver Exit Cleanup Routine
+ */
+static void __exit be_exit_module(void)
+{
+       pci_unregister_driver(&be_driver);
+}
+
+module_exit(be_exit_module);
diff --git a/drivers/staging/benet/be_int.c b/drivers/staging/benet/be_int.c
new file mode 100644 (file)
index 0000000..82472b4
--- /dev/null
@@ -0,0 +1,874 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include <linux/if_vlan.h>
+#include <linux/inet_lro.h>
+
+#include "benet.h"
+
+/* number of bytes of RX frame that are copied to skb->data */
+#define BE_HDR_LEN 64
+
+#define NETIF_RX(skb) netif_receive_skb(skb)
+#define VLAN_ACCEL_RX(skb, pnob, vt) \
+               vlan_hwaccel_rx(skb, pnob->vlan_grp, vt)
+
+/*
+    This function notifies BladeEngine of the number of completion
+    entries processed from the specified completion queue by writing
+    the number of popped entries to the door bell.
+
+    pnob       - Pointer to the NetObject structure
+    n          - Number of completion entries processed
+    cq_id      - Queue ID of the completion queue for which notification
+                       is being done.
+    re_arm     - 1  - re-arm the completion ring to generate an event.
+               - 0  - don't re-arm the completion ring to generate an event
+*/
+void be_notify_cmpl(struct be_net_object *pnob, int n, int cq_id, int re_arm)
+{
+       struct CQ_DB_AMAP cqdb;
+
+       cqdb.dw[0] = 0;
+       AMAP_SET_BITS_PTR(CQ_DB, qid, &cqdb, cq_id);
+       AMAP_SET_BITS_PTR(CQ_DB, rearm, &cqdb, re_arm);
+       AMAP_SET_BITS_PTR(CQ_DB, num_popped, &cqdb, n);
+       PD_WRITE(&pnob->fn_obj, cq_db, cqdb.dw[0]);
+}
+
+/*
+ * adds additional receive frags indicated by BE starting from given
+ * frag index (fi) to specified skb's frag list
+ */
+static void
+add_skb_frags(struct be_net_object *pnob, struct sk_buff *skb,
+             u32 nresid, u32 fi)
+{
+       struct be_adapter *adapter = pnob->adapter;
+       u32 sk_frag_idx, n;
+       struct be_rx_page_info *rx_page_info;
+       u32 frag_sz = pnob->rx_buf_size;
+
+       sk_frag_idx = skb_shinfo(skb)->nr_frags;
+       while (nresid) {
+               index_inc(&fi, pnob->rx_q_len);
+
+               rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
+               pnob->rx_ctxt[fi] = NULL;
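+               /* If one page is used per fragment or if this is the
+                * second half of a shared page, unmap the page here
+                */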
+               if ((rx_page_info->page_offset) ||
+                   (pnob->rx_pg_shared == false)) {
+                       pci_unmap_page(adapter->pdev,
+                                      pci_unmap_addr(rx_page_info, bus),
+                                      frag_sz, PCI_DMA_FROMDEVICE);
+               }
+
+               n = min(nresid, frag_sz);
+               skb_shinfo(skb)->frags[sk_frag_idx].page = rx_page_info->page;
+               skb_shinfo(skb)->frags[sk_frag_idx].page_offset
+                   = rx_page_info->page_offset;
+               skb_shinfo(skb)->frags[sk_frag_idx].size = n;
+
+               sk_frag_idx++;
+               skb->len += n;
+               skb->data_len += n;
+               skb_shinfo(skb)->nr_frags++;
+               nresid -= n;
+
+               memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+               atomic_dec(&pnob->rx_q_posted);
+       }
+}
+
+/*
+ * This function processes incoming nic packets over various Rx queues.
+ * This function takes the adapter, the current Rx status descriptor
+ * entry and the Rx completion queue ID as argument.
+ */
+static inline int process_nic_rx_completion(struct be_net_object *pnob,
+                                           struct ETH_RX_COMPL_AMAP *rxcp)
+{
+       struct be_adapter *adapter = pnob->adapter;
+       struct sk_buff *skb;
+       int udpcksm, tcpcksm;
+       int n;
+       u32 nresid, fi;
+       u32 frag_sz = pnob->rx_buf_size;
+       u8 *va;
+       struct be_rx_page_info *rx_page_info;
+       u32 numfrags, vtp, vtm, vlan_tag, pktsize;
+
+       fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
+       BUG_ON(fi >= pnob->rx_q_len);
+
+       rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
+       BUG_ON(!rx_page_info->page);
+       pnob->rx_ctxt[fi] = NULL;
+
+       /*
+        * If one page is used per fragment or if this is the second half
+        * of the page, unmap the page here
+        */
+       if ((rx_page_info->page_offset) || (pnob->rx_pg_shared == false)) {
+               pci_unmap_page(adapter->pdev,
+                              pci_unmap_addr(rx_page_info, bus), frag_sz,
+                              PCI_DMA_FROMDEVICE);
+       }
+
+       atomic_dec(&pnob->rx_q_posted);
+       udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
+       tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
+       pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
+       /*
+        * get rid of RX flush completions first.
+        */
+       if ((tcpcksm) && (udpcksm) && (pktsize == 32)) {
+               put_page(rx_page_info->page);
+               memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+               return 0;
+       }
+       skb = netdev_alloc_skb(pnob->netdev, BE_HDR_LEN + NET_IP_ALIGN);
+       if (skb == NULL) {
+               dev_info(&pnob->netdev->dev, "netdev_alloc_skb() failed\n");
+               put_page(rx_page_info->page);
+               memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+               goto free_frags;
+       }
+       skb_reserve(skb, NET_IP_ALIGN);
+
+       skb->dev = pnob->netdev;
+
+       n = min(pktsize, frag_sz);
+
+       va = page_address(rx_page_info->page) + rx_page_info->page_offset;
+       prefetch(va);
+
+       skb->len = skb->data_len = n;
+       if (n <= BE_HDR_LEN) {
+               memcpy(skb->data, va, n);
+               put_page(rx_page_info->page);
+               skb->data_len -= n;
+               skb->tail += n;
+       } else {
+               /* Setup the SKB with page buffer information */
+               skb_shinfo(skb)->frags[0].page = rx_page_info->page;
+               skb_shinfo(skb)->nr_frags++;
+
+               /* Copy the header into the skb_data */
+               memcpy(skb->data, va, BE_HDR_LEN);
+               skb_shinfo(skb)->frags[0].page_offset =
+                   rx_page_info->page_offset + BE_HDR_LEN;
+               skb_shinfo(skb)->frags[0].size = n - BE_HDR_LEN;
+               skb->data_len -= BE_HDR_LEN;
+               skb->tail += BE_HDR_LEN;
+       }
+       memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+       nresid = pktsize - n;
+
+       skb->protocol = eth_type_trans(skb, pnob->netdev);
+
+       if ((tcpcksm || udpcksm) && adapter->rx_csum)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       else
+               skb->ip_summed = CHECKSUM_NONE;
+       /*
+        * if we have more bytes left, the frame has been
+        * given to us in multiple fragments.  This happens
+        * with Jumbo frames. Add the remaining fragments to
+        * skb->frags[] array.
+        */
+       if (nresid)
+               add_skb_frags(pnob, skb, nresid, fi);
+
+       /* update the true size of the skb. */
+       skb->truesize = skb->len + sizeof(struct sk_buff);
+
+       /*
+        * If this is an 802.3 frame or an 802.2 LLC frame
+        * (i.e., it carries a length field in the MAC header)
+        * and the frame length is greater than 64 bytes
+        */
+       if (((skb->protocol == ntohs(ETH_P_802_2)) ||
+            (skb->protocol == ntohs(ETH_P_802_3)))
+           && (pktsize > BE_HDR_LEN)) {
+               /*
+                * If the length given in the MAC header is less than the
+                * frame size, the frame is erroneous; drop it
+                */
+               if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) < pktsize) {
+                       /* Increment Non Ether type II frames dropped */
+                       adapter->be_stat.bes_802_3_dropped_frames++;
+
+                       kfree_skb(skb);
+                       return 0;
+               }
+               /*
+                * else if the length given in the MAC header is greater than
+                * the frame size, we should not be seeing such frames;
+                * count the packet as malformed and pass it to the stack
+                */
+               else if ((ntohs(*(u16 *) (va + 12)) + ETH_HLEN) > pktsize) {
+                       /* Increment Non Ether type II frames malformed */
+                       adapter->be_stat.bes_802_3_malformed_frames++;
+               }
+       }
+
+       vtp = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
+       vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
+       if (vtp && vtm) {
+               /* Vlan tag present in pkt and BE found
+                * that the tag matched an entry in VLAN table
+                */
+               if (!pnob->vlan_grp || pnob->num_vlans == 0) {
+                       /* But we have no VLANs configured.
+                        * This should never happen.  Drop the packet.
+                        */
+                       dev_info(&pnob->netdev->dev,
+                              "BladeEngine: Unexpected vlan tagged packet\n");
+                       kfree_skb(skb);
+                       return 0;
+               }
+               /* pass the VLAN packet to stack */
+               vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
+               VLAN_ACCEL_RX(skb, pnob, be16_to_cpu(vlan_tag));
+
+       } else {
+               NETIF_RX(skb);
+       }
+       return 0;
+
+free_frags:
+       /* free all frags associated with the current rxcp */
+       numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
+       while (numfrags-- > 1) {
+               index_inc(&fi, pnob->rx_q_len);
+
+               rx_page_info = (struct be_rx_page_info *)
+                   pnob->rx_ctxt[fi];
+               pnob->rx_ctxt[fi] = (void *)NULL;
+               if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
+                       pci_unmap_page(adapter->pdev,
+                                      pci_unmap_addr(rx_page_info, bus),
+                                      frag_sz, PCI_DMA_FROMDEVICE);
+               }
+
+               put_page(rx_page_info->page);
+               memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+               atomic_dec(&pnob->rx_q_posted);
+       }
+       return -ENOMEM;
+}
+
+static void process_nic_rx_completion_lro(struct be_net_object *pnob,
+                                         struct ETH_RX_COMPL_AMAP *rxcp)
+{
+       struct be_adapter *adapter = pnob->adapter;
+       struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
+       unsigned int udpcksm, tcpcksm;
+       u32 numfrags, vlanf, vtm, vlan_tag, nresid;
+       u16 vlant;
+       unsigned int fi, idx, n;
+       struct be_rx_page_info *rx_page_info;
+       u32 frag_sz = pnob->rx_buf_size, pktsize;
+       bool rx_coal = (adapter->max_rx_coal > 1);
+       u8 err, *va;
+       __wsum csum = 0;
+
+       if (AMAP_GET_BITS_PTR(ETH_RX_COMPL, ipsec, rxcp)) {
+               /*  Drop the pkt and move to the next completion.  */
+               adapter->be_stat.bes_rx_misc_pkts++;
+               return;
+       }
+       err = AMAP_GET_BITS_PTR(ETH_RX_COMPL, err, rxcp);
+       if (err || !rx_coal) {
+               /* We won't coalesce Rx pkts if the err bit is set;
+                * take the path of normal completion processing */
+               process_nic_rx_completion(pnob, rxcp);
+               return;
+       }
+
+       fi = AMAP_GET_BITS_PTR(ETH_RX_COMPL, fragndx, rxcp);
+       BUG_ON(fi >= pnob->rx_q_len);
+       rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
+       BUG_ON(!rx_page_info->page);
+       pnob->rx_ctxt[fi] = (void *)NULL;
+       /*  If one page is used per fragment or if this is the
+        * second half of the page, unmap the page here
+        */
+       if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
+               pci_unmap_page(adapter->pdev,
+                              pci_unmap_addr(rx_page_info, bus),
+                              frag_sz, PCI_DMA_FROMDEVICE);
+       }
+
+       numfrags = AMAP_GET_BITS_PTR(ETH_RX_COMPL, numfrags, rxcp);
+       udpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, udpcksm, rxcp);
+       tcpcksm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, tcpcksm, rxcp);
+       vlan_tag = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vlan_tag, rxcp);
+       vlant = be16_to_cpu(vlan_tag);
+       vlanf = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtp, rxcp);
+       vtm = AMAP_GET_BITS_PTR(ETH_RX_COMPL, vtm, rxcp);
+       pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
+
+       atomic_dec(&pnob->rx_q_posted);
+
+       if (tcpcksm && udpcksm && pktsize == 32) {
+               /* flush completion entries */
+               put_page(rx_page_info->page);
+               memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+               return;
+       }
+       /* Only one of udpcksum and tcpcksum can be set */
+       BUG_ON(udpcksm && tcpcksm);
+
+       /* jumbo frames could come in multiple fragments */
+       BUG_ON(numfrags != ((pktsize + (frag_sz - 1)) / frag_sz));
+       n = min(pktsize, frag_sz);
+       nresid = pktsize - n;   /* will be useful for jumbo pkts */
+       idx = 0;
+
+       va = page_address(rx_page_info->page) + rx_page_info->page_offset;
+       prefetch(va);
+       rx_frags[idx].page = rx_page_info->page;
+       rx_frags[idx].page_offset = (rx_page_info->page_offset);
+       rx_frags[idx].size = n;
+       memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+
+       /* If we got multiple fragments, we have more data. */
+       while (nresid) {
+               idx++;
+               index_inc(&fi, pnob->rx_q_len);
+
+               rx_page_info = (struct be_rx_page_info *)pnob->rx_ctxt[fi];
+               pnob->rx_ctxt[fi] = (void *)NULL;
+               if (rx_page_info->page_offset || !pnob->rx_pg_shared) {
+                       pci_unmap_page(adapter->pdev,
+                                      pci_unmap_addr(rx_page_info, bus),
+                                      frag_sz, PCI_DMA_FROMDEVICE);
+               }
+
+               n = min(nresid, frag_sz);
+               rx_frags[idx].page = rx_page_info->page;
+               rx_frags[idx].page_offset = (rx_page_info->page_offset);
+               rx_frags[idx].size = n;
+
+               nresid -= n;
+               memset(rx_page_info, 0, sizeof(struct be_rx_page_info));
+               atomic_dec(&pnob->rx_q_posted);
+       }
+
+       if (likely(!(vlanf && vtm))) {
+               lro_receive_frags(&pnob->lro_mgr, rx_frags,
+                                 pktsize, pktsize,
+                                 (void *)(unsigned long)csum, csum);
+       } else {
+               /* Vlan tag present in pkt and BE found
+                * that the tag matched an entry in VLAN table
+                */
+               if (unlikely(!pnob->vlan_grp || pnob->num_vlans == 0)) {
+                       /* But we have no VLANs configured.
+                        * This should never happen.  Drop the packet.
+                        */
+                       dev_info(&pnob->netdev->dev,
+                              "BladeEngine: Unexpected vlan tagged packet\n");
+                       return;
+               }
+               /* pass the VLAN packet to stack */
+               lro_vlan_hwaccel_receive_frags(&pnob->lro_mgr,
+                                              rx_frags, pktsize, pktsize,
+                                              pnob->vlan_grp, vlant,
+                                              (void *)(unsigned long)csum,
+                                              csum);
+       }
+
+       adapter->be_stat.bes_rx_coal++;
+}
+
+struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *pnob)
+{
+       struct ETH_RX_COMPL_AMAP *rxcp = &pnob->rx_cq[pnob->rx_cq_tl];
+       u32 valid, ct;
+
+       valid = AMAP_GET_BITS_PTR(ETH_RX_COMPL, valid, rxcp);
+       if (valid == 0)
+               return NULL;
+
+       ct = AMAP_GET_BITS_PTR(ETH_RX_COMPL, ct, rxcp);
+       if (ct != 0) {
+               /* Invalid chute number; treat as an error */
+               AMAP_SET_BITS_PTR(ETH_RX_COMPL, err, rxcp, 1);
+       }
+
+       be_adv_rxcq_tl(pnob);
+       AMAP_SET_BITS_PTR(ETH_RX_COMPL, valid, rxcp, 0);
+       return rxcp;
+}
+
+static void update_rx_rate(struct be_adapter *adapter)
+{
+       /* update the rate once in two seconds */
+       if ((jiffies - adapter->eth_rx_jiffies) > 2 * (HZ)) {
+               u32 r;
+               r = adapter->eth_rx_bytes /
+                   ((jiffies - adapter->eth_rx_jiffies) / (HZ));
+               r = (r / 1000000);      /* MB/Sec */
+
+               /* Mega Bits/Sec */
+               adapter->be_stat.bes_eth_rx_rate = (r * 8);
+               adapter->eth_rx_jiffies = jiffies;
+               adapter->eth_rx_bytes = 0;
+       }
+}
+
+static int process_rx_completions(struct be_net_object *pnob, int max_work)
+{
+       struct be_adapter *adapter = pnob->adapter;
+       struct ETH_RX_COMPL_AMAP *rxcp;
+       u32 nc = 0;
+       unsigned int pktsize;
+
+       while (max_work && (rxcp = be_get_rx_cmpl(pnob))) {
+               prefetch(rxcp);
+               pktsize = AMAP_GET_BITS_PTR(ETH_RX_COMPL, pktsize, rxcp);
+               process_nic_rx_completion_lro(pnob, rxcp);
+               adapter->eth_rx_bytes += pktsize;
+               update_rx_rate(adapter);
+               nc++;
+               max_work--;
+               adapter->be_stat.bes_rx_compl++;
+       }
+       if (likely(adapter->max_rx_coal > 1)) {
+               adapter->be_stat.bes_rx_flush++;
+               lro_flush_all(&pnob->lro_mgr);
+       }
+
+       /* Refill the queue */
+       if (atomic_read(&pnob->rx_q_posted) < 900)
+               be_post_eth_rx_buffs(pnob);
+
+       return nc;
+}
+
+static struct ETH_TX_COMPL_AMAP *be_get_tx_cmpl(struct be_net_object *pnob)
+{
+       struct ETH_TX_COMPL_AMAP *txcp = &pnob->tx_cq[pnob->tx_cq_tl];
+       u32 valid;
+
+       valid = AMAP_GET_BITS_PTR(ETH_TX_COMPL, valid, txcp);
+       if (valid == 0)
+               return NULL;
+
+       AMAP_SET_BITS_PTR(ETH_TX_COMPL, valid, txcp, 0);
+       be_adv_txcq_tl(pnob);
+       return txcp;
+}
+
+void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx)
+{
+       struct be_adapter *adapter = pnob->adapter;
+       int cur_index, tx_wrbs_completed = 0;
+       struct sk_buff *skb;
+       u64 busaddr, pa, pa_lo, pa_hi;
+       struct ETH_WRB_AMAP *wrb;
+       u32 frag_len, last_index, j;
+
+       last_index = tx_compl_lastwrb_idx_get(pnob);
+       BUG_ON(last_index != end_idx);
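+       /* the start-index slot holds only the WRB count for this skb
+        * (see be_tx_wrb_info_remember); clear it before walking the WRBs
+        */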
+       pnob->tx_ctxt[pnob->tx_q_tl] = NULL;
+       do {
+               cur_index = pnob->tx_q_tl;
+               wrb = &pnob->tx_q[cur_index];
+               pa_hi = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb);
+               pa_lo = AMAP_GET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb);
+               frag_len = AMAP_GET_BITS_PTR(ETH_WRB, frag_len, wrb);
+               busaddr = (pa_hi << 32) | pa_lo;
+               if (busaddr != 0) {
+                       pa = le64_to_cpu(busaddr);
+                       pci_unmap_single(adapter->pdev, pa,
+                                        frag_len, PCI_DMA_TODEVICE);
+               }
+               if (cur_index == last_index) {
+                       skb = (struct sk_buff *)pnob->tx_ctxt[cur_index];
+                       BUG_ON(!skb);
+                       for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
+                               struct skb_frag_struct *frag;
+                               frag = &skb_shinfo(skb)->frags[j];
+                               pci_unmap_page(adapter->pdev,
+                                              (ulong) frag->page, frag->size,
+                                              PCI_DMA_TODEVICE);
+                       }
+                       kfree_skb(skb);
+                       pnob->tx_ctxt[cur_index] = NULL;
+               } else {
+                       BUG_ON(pnob->tx_ctxt[cur_index]);
+               }
+               tx_wrbs_completed++;
+               be_adv_txq_tl(pnob);
+       } while (cur_index != last_index);
+       atomic_sub(tx_wrbs_completed, &pnob->tx_q_used);
+}
+
+/* there is no need to take an SMP lock here since currently
+ * we have only one instance of the tasklet that does completion
+ * processing.
+ */
+static void process_nic_tx_completions(struct be_net_object *pnob)
+{
+       struct be_adapter *adapter = pnob->adapter;
+       struct ETH_TX_COMPL_AMAP *txcp;
+       struct net_device *netdev = pnob->netdev;
+       u32 end_idx, num_processed = 0;
+
+       adapter->be_stat.bes_tx_events++;
+
+       while ((txcp = be_get_tx_cmpl(pnob))) {
+               end_idx = AMAP_GET_BITS_PTR(ETH_TX_COMPL, wrb_index, txcp);
+               process_one_tx_compl(pnob, end_idx);
+               num_processed++;
+               adapter->be_stat.bes_tx_compl++;
+       }
+       be_notify_cmpl(pnob, num_processed, pnob->tx_cq_id, 1);
+       /*
+        * We got Tx completions and have usable WRBs.
+        * If the netdev's queue has been stopped
+        * because we had run out of WRBs, wake it now.
+        */
+       spin_lock(&adapter->txq_lock);
+       if (netif_queue_stopped(netdev)
+           && atomic_read(&pnob->tx_q_used) < pnob->tx_q_len / 2) {
+               netif_wake_queue(netdev);
+       }
+       spin_unlock(&adapter->txq_lock);
+}
+
+static u32 post_rx_buffs(struct be_net_object *pnob, struct list_head *rxbl)
+{
+       u32 nposted = 0;
+       struct ETH_RX_D_AMAP *rxd = NULL;
+       struct be_recv_buffer *rxbp;
+       void **rx_ctxp;
+       struct RQ_DB_AMAP rqdb;
+
+       rx_ctxp = pnob->rx_ctxt;
+
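+       /* fill RX descriptors while free queue slots remain; at most 255
+        * buffers are posted before the doorbell is rung once below
+        */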
+       while (!list_empty(rxbl) &&
+              (rx_ctxp[pnob->rx_q_hd] == NULL) && nposted < 255) {
+
+               rxbp = list_first_entry(rxbl, struct be_recv_buffer, rxb_list);
+               list_del(&rxbp->rxb_list);
+               rxd = pnob->rx_q + pnob->rx_q_hd;
+               AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_lo, rxd, rxbp->rxb_pa_lo);
+               AMAP_SET_BITS_PTR(ETH_RX_D, fragpa_hi, rxd, rxbp->rxb_pa_hi);
+
+               rx_ctxp[pnob->rx_q_hd] = rxbp->rxb_ctxt;
+               be_adv_rxq_hd(pnob);
+               nposted++;
+       }
+
+       if (nposted) {
+               /* Now press the door bell to notify BladeEngine. */
+               rqdb.dw[0] = 0;
+               AMAP_SET_BITS_PTR(RQ_DB, numPosted, &rqdb, nposted);
+               AMAP_SET_BITS_PTR(RQ_DB, rq, &rqdb, pnob->rx_q_id);
+               PD_WRITE(&pnob->fn_obj, erx_rq_db, rqdb.dw[0]);
+       }
+       atomic_add(nposted, &pnob->rx_q_posted);
+       return nposted;
+}
+
+void be_post_eth_rx_buffs(struct be_net_object *pnob)
+{
+       struct be_adapter *adapter = pnob->adapter;
+       u32 num_bufs, r;
+       u64 busaddr = 0, tmp_pa;
+       u32 max_bufs, pg_hd;
+       u32 frag_size;
+       struct be_recv_buffer *rxbp;
+       struct list_head rxbl;
+       struct be_rx_page_info *rx_page_info;
+       struct page *page = NULL;
+       u32 page_order = 0;
+       gfp_t alloc_flags = GFP_ATOMIC;
+
+       BUG_ON(!adapter);
+
+       max_bufs = 64;          /* should be even # <= 255. */
+
+       frag_size = pnob->rx_buf_size;
+       page_order = get_order(frag_size);
+
+       if (frag_size == 8192)
+               alloc_flags |= (gfp_t) __GFP_COMP;
+       /*
+        * Form a linked list of RECV_BUFFER structures to be posted.
+        * We will post an even number of buffers so that pages can be
+        * shared.
+        */
+       INIT_LIST_HEAD(&rxbl);
+
+       for (num_bufs = 0; num_bufs < max_bufs; ++num_bufs) {
+
+               rxbp = &pnob->eth_rx_bufs[num_bufs];
+               pg_hd = pnob->rx_pg_info_hd;
+               rx_page_info = &pnob->rx_page_info[pg_hd];
+
+               if (!page) {
+                       /*
+                        * before we allocate a page make sure that we
+                        * have space in the RX queue to post the buffer.
+                        * We check for two vacant slots since with
+                        * 2K frags, we will need two slots.
+                        */
+                       if ((pnob->rx_ctxt[(pnob->rx_q_hd + num_bufs) &
+                                          (pnob->rx_q_len - 1)] != NULL)
+                           || (pnob->rx_ctxt[(pnob->rx_q_hd + num_bufs + 1) %
+                                             pnob->rx_q_len] != NULL)) {
+                               break;
+                       }
+                       page = alloc_pages(alloc_flags, page_order);
+                       if (unlikely(page == NULL)) {
+                               adapter->be_stat.bes_ethrx_post_fail++;
+                               pnob->rxbuf_post_fail++;
+                               break;
+                       }
+                       pnob->rxbuf_post_fail = 0;
+                       busaddr = pci_map_page(adapter->pdev, page, 0,
+                                              frag_size, PCI_DMA_FROMDEVICE);
+                       rx_page_info->page_offset = 0;
+                       rx_page_info->page = page;
+                       /*
+                        * If we are sharing a page among two skbs,
+                        * alloc a new one on the next iteration
+                        */
+                       if (pnob->rx_pg_shared == false)
+                               page = NULL;
+               } else {
+                       get_page(page);
+                       rx_page_info->page_offset += frag_size;
+                       rx_page_info->page = page;
+                       /*
+                        * We are finished with the alloced page,
+                        * Alloc a new one on the next iteration
+                        */
+                       page = NULL;
+               }
+               rxbp->rxb_ctxt = (void *)rx_page_info;
+               index_inc(&pnob->rx_pg_info_hd, pnob->rx_q_len);
+
+               pci_unmap_addr_set(rx_page_info, bus, busaddr);
+               tmp_pa = busaddr + rx_page_info->page_offset;
+               rxbp->rxb_pa_lo = (tmp_pa & 0xFFFFFFFF);
+               rxbp->rxb_pa_hi = (tmp_pa >> 32);
+               rxbp->rxb_len = frag_size;
+               list_add_tail(&rxbp->rxb_list, &rxbl);
+       }                       /* End of for */
+
+       r = post_rx_buffs(pnob, &rxbl);
+       BUG_ON(r != num_bufs);
+}
+
+/*
+ * Interrupt service for network function.  We just schedule the
+ * tasklet which does all completion processing.
+ */
+irqreturn_t be_int(int irq, void *dev)
+{
+       struct net_device *netdev = dev;
+       struct be_net_object *pnob = (struct be_net_object *)(netdev->priv);
+       struct be_adapter *adapter = pnob->adapter;
+       u32 isr;
+
+       isr = CSR_READ(&pnob->fn_obj, cev.isr1);
+       if (unlikely(!isr))
+               return IRQ_NONE;
+
+       spin_lock(&adapter->int_lock);
+       adapter->isr |= isr;
+       spin_unlock(&adapter->int_lock);
+
+       adapter->be_stat.bes_ints++;
+
+       tasklet_schedule(&adapter->sts_handler);
+       return IRQ_HANDLED;
+}
+
+/*
+ * Poll function called by NAPI with a work budget.
+ * We process as many UC, BC and MC receive completions
+ * as the budget allows and return the actual number of
+ * RX statuses processed.
+ */
+int be_poll(struct napi_struct *napi, int budget)
+{
+       struct net_device *netdev = napi->dev;
+       struct be_net_object *pnob = (struct be_net_object *)netdev->priv;
+       struct be_adapter *adapter = pnob->adapter;
+       u32 work_done;
+
+       adapter->be_stat.bes_polls++;
+       work_done = process_rx_completions(pnob, budget);
+       BUG_ON(work_done > budget);
+
+       /* All consumed */
+       if (work_done < budget) {
+               netif_rx_complete(netdev, napi);
+               /* enable intr */
+               be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 1);
+       } else {
+               /* More to be consumed; continue with interrupts disabled */
+               be_notify_cmpl(pnob, work_done, pnob->rx_cq_id, 0);
+       }
+       return work_done;
+}
+
+static struct EQ_ENTRY_AMAP *get_event(struct be_net_object *pnob)
+{
+       struct EQ_ENTRY_AMAP *eqp = &(pnob->event_q[pnob->event_q_tl]);
+
+       if (!AMAP_GET_BITS_PTR(EQ_ENTRY, Valid, eqp))
+               return NULL;
+       be_adv_eq_tl(pnob);
+       return eqp;
+}
+
+/*
+ * Processes all valid events in the event ring associated with given
+ * NetObject.  Also, notifies BE the number of events processed.
+ */
+static inline u32 process_events(struct be_net_object *pnob)
+{
+       struct be_adapter *adapter = pnob->adapter;
+       struct EQ_ENTRY_AMAP *eqp;
+       u32 rid, num_events = 0;
+       struct net_device *netdev = pnob->netdev;
+
+       while ((eqp = get_event(pnob)) != NULL) {
+               adapter->be_stat.bes_events++;
+               rid = AMAP_GET_BITS_PTR(EQ_ENTRY, ResourceID, eqp);
+               if (rid == pnob->rx_cq_id) {
+                       adapter->be_stat.bes_rx_events++;
+                       netif_rx_schedule(netdev, &pnob->napi);
+               } else if (rid == pnob->tx_cq_id) {
+                       process_nic_tx_completions(pnob);
+               } else if (rid == pnob->mcc_cq_id) {
+                       be_mcc_process_cq(&pnob->mcc_q_obj, 1);
+               } else {
+                       dev_info(&netdev->dev,
+                                       "Invalid EQ ResourceID %d\n", rid);
+               }
+               AMAP_SET_BITS_PTR(EQ_ENTRY, Valid, eqp, 0);
+               AMAP_SET_BITS_PTR(EQ_ENTRY, ResourceID, eqp, 0);
+               num_events++;
+       }
+       return num_events;
+}
+
+static void update_eqd(struct be_adapter *adapter, struct be_net_object *pnob)
+{
+       int status;
+       struct be_eq_object *eq_objectp;
+
+       /* update once a second */
+       if ((jiffies - adapter->ips_jiffies) > 1 * (HZ)) {
+               /* One second elapsed since last update  */
+               u32 r, new_eqd = -1;
+               r = adapter->be_stat.bes_ints - adapter->be_stat.bes_prev_ints;
+               r = r / ((jiffies - adapter->ips_jiffies) / (HZ));
+               adapter->be_stat.bes_ips = r;
+               adapter->ips_jiffies = jiffies;
+               adapter->be_stat.bes_prev_ints = adapter->be_stat.bes_ints;
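+               /* adaptive interrupt coalescing: step the EQ delay by 8
+                * when the interrupt rate crosses a watermark
+                */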
+               if (r > IPS_HI_WM && adapter->cur_eqd < adapter->max_eqd)
+                       new_eqd = (adapter->cur_eqd + 8);
+               if (r < IPS_LO_WM && adapter->cur_eqd > adapter->min_eqd)
+                       new_eqd = (adapter->cur_eqd - 8);
+               if (adapter->enable_aic && new_eqd != -1) {
+                       eq_objectp = &pnob->event_q_obj;
+                       status = be_eq_modify_delay(&pnob->fn_obj, 1,
+                                                   &eq_objectp, &new_eqd, NULL,
+                                                   NULL, NULL);
+                       if (status == BE_SUCCESS)
+                               adapter->cur_eqd = new_eqd;
+               }
+       }
+}
+
+/*
+    This function notifies BladeEngine of how many events were processed
+    from the event queue by ringing the corresponding door bell and
+    optionally re-arms the event queue.
+    n          - number of events processed
+    re_arm     - 1 - re-arm the EQ, 0 - do not re-arm the EQ
+
+*/
+static void be_notify_event(struct be_net_object *pnob, int n, int re_arm)
+{
+       struct CQ_DB_AMAP eqdb;
+       eqdb.dw[0] = 0;
+
+       AMAP_SET_BITS_PTR(CQ_DB, qid, &eqdb, pnob->event_q_id);
+       AMAP_SET_BITS_PTR(CQ_DB, rearm, &eqdb, re_arm);
+       AMAP_SET_BITS_PTR(CQ_DB, event, &eqdb, 1);
+       AMAP_SET_BITS_PTR(CQ_DB, num_popped, &eqdb, n);
+       /*
+        * Under some situations we see an interrupt and no valid
+        * EQ entry.  To keep going, we need to ring the DB even if
+        * num_popped is 0.
+        */
+       PD_WRITE(&pnob->fn_obj, cq_db, eqdb.dw[0]);
+}
+
+/*
+ * Called from the tasklet scheduled by ISR.  All real interrupt processing
+ * is done here.
+ */
+void be_process_intr(unsigned long context)
+{
+       struct be_adapter *adapter = (struct be_adapter *)context;
+       struct be_net_object *pnob = adapter->net_obj;
+       u32 isr, n;
+       ulong flags = 0;
+
+       isr = adapter->isr;
+
+       /*
+        * we create only one NIC event queue in Linux. Event is
+        * expected only in the first event queue
+        */
+       BUG_ON(isr & 0xfffffffe);
+       if ((isr & 1) == 0)
+               return;         /* not our interrupt */
+       n = process_events(pnob);
+       /*
+        * Clear the event bit. adapter->isr is set by the
+        * hard interrupt; take the lock to prevent a race.
+        */
+       spin_lock_irqsave(&adapter->int_lock, flags);
+       adapter->isr &= ~1;
+       spin_unlock_irqrestore(&adapter->int_lock, flags);
+       be_notify_event(pnob, n, 1);
+       /*
+        * If previous allocation attempts had failed and
+        * BE has used up all posted buffers, post RX buffers here
+        */
+       if (pnob->rxbuf_post_fail && atomic_read(&pnob->rx_q_posted) == 0)
+               be_post_eth_rx_buffs(pnob);
+       update_eqd(adapter, pnob);
+}
diff --git a/drivers/staging/benet/be_netif.c b/drivers/staging/benet/be_netif.c
new file mode 100644 (file)
index 0000000..2e59044
--- /dev/null
@@ -0,0 +1,706 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * be_netif.c
+ *
+ * This file contains the various driver entry points seen by the TCP/IP stack.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/in.h>
+#include "benet.h"
+#include <linux/ip.h>
+#include <linux/inet_lro.h>
+
+/* Strings to print Link properties */
+static const char *link_speed[] = {
+       "Invalid link Speed Value",
+       "10 Mbps",
+       "100 Mbps",
+       "1 Gbps",
+       "10 Gbps"
+};
+
+static const char *link_duplex[] = {
+       "Invalid Duplex Value",
+       "Half Duplex",
+       "Full Duplex"
+};
+
+static const char *link_state[] = {
+       "",
+       "(active)"
+};
+
+void be_print_link_info(struct BE_LINK_STATUS *lnk_status)
+{
+       u16 si, di, ai;
+
+       /* Port 0 */
+       if (lnk_status->mac0_speed && lnk_status->mac0_duplex) {
+               /* Port is up and running */
+               si = (lnk_status->mac0_speed < 5) ? lnk_status->mac0_speed : 0;
+               di = (lnk_status->mac0_duplex < 3) ?
+                   lnk_status->mac0_duplex : 0;
+               ai = (lnk_status->active_port == 0) ? 1 : 0;
+               printk(KERN_INFO "PortNo. 0: Speed - %s %s %s\n",
+                      link_speed[si], link_duplex[di], link_state[ai]);
+       } else
+               printk(KERN_INFO "PortNo. 0: Down\n");
+
+       /* Port 1 */
+       if (lnk_status->mac1_speed && lnk_status->mac1_duplex) {
+               /* Port is up and running */
+               si = (lnk_status->mac1_speed < 5) ? lnk_status->mac1_speed : 0;
+               di = (lnk_status->mac1_duplex < 3) ?
+                   lnk_status->mac1_duplex : 0;
+               ai = (lnk_status->active_port == 1) ? 1 : 0;
+               printk(KERN_INFO "PortNo. 1: Speed - %s %s %s\n",
+                      link_speed[si], link_duplex[di], link_state[ai]);
+       } else
+               printk(KERN_INFO "PortNo. 1: Down\n");
+
+}
+
+static int
+be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
+                  void **ip_hdr, void **tcpudp_hdr,
+                  u64 *hdr_flags, void *priv)
+{
+       struct ethhdr *eh;
+       struct vlan_ethhdr *veh;
+       struct iphdr *iph;
+       u8 *va = page_address(frag->page) + frag->page_offset;
+       unsigned long ll_hlen;
+
+       /* find the mac header, abort if not IPv4 */
+
+       prefetch(va);
+       eh = (struct ethhdr *)va;
+       *mac_hdr = eh;
+       ll_hlen = ETH_HLEN;
+       if (eh->h_proto != htons(ETH_P_IP)) {
+               if (eh->h_proto == htons(ETH_P_8021Q)) {
+                       veh = (struct vlan_ethhdr *)va;
+                       if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
+                               return -1;
+
+                       ll_hlen += VLAN_HLEN;
+
+               } else {
+                       return -1;
+               }
+       }
+       *hdr_flags = LRO_IPV4;
+
+       iph = (struct iphdr *)(va + ll_hlen);
+       *ip_hdr = iph;
+       if (iph->protocol != IPPROTO_TCP)
+               return -1;
+       *hdr_flags |= LRO_TCP;
+       *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
+
+       return 0;
+}
+
+static int benet_open(struct net_device *netdev)
+{
+       struct be_net_object *pnob = (struct be_net_object *)netdev->priv;
+       struct be_adapter *adapter = pnob->adapter;
+       struct net_lro_mgr *lro_mgr;
+
+       if (adapter->dev_state < BE_DEV_STATE_INIT)
+               return -EAGAIN;
+
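+       /* set up the inet_lro manager for NAPI-driven receive
+        * aggregation on this interface
+        */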
+       lro_mgr = &pnob->lro_mgr;
+       lro_mgr->dev = netdev;
+
+       lro_mgr->features = LRO_F_NAPI;
+       lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
+       lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
+       lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
+       lro_mgr->lro_arr = pnob->lro_desc;
+       lro_mgr->get_frag_header = be_get_frag_header;
+       lro_mgr->max_aggr = adapter->max_rx_coal;
+       lro_mgr->frag_align_pad = 2;
+       if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
+               lro_mgr->max_aggr = MAX_SKB_FRAGS;
+
+       adapter->max_rx_coal = BE_LRO_MAX_PKTS;
+
+       be_update_link_status(adapter);
+
+       /*
+        * Set carrier on only if the physical link is up;
+        * either port's link being up signifies this
+        */
+       if ((adapter->port0_link_sts == BE_PORT_LINK_UP) ||
+           (adapter->port1_link_sts == BE_PORT_LINK_UP)) {
+               netif_start_queue(netdev);
+               netif_carrier_on(netdev);
+       }
+
+       adapter->dev_state = BE_DEV_STATE_OPEN;
+       napi_enable(&pnob->napi);
+       be_enable_intr(pnob);
+       be_enable_eq_intr(pnob);
+       /*
+        * RX completion queue may be in dis-armed state. Arm it.
+        */
+       be_notify_cmpl(pnob, 0, pnob->rx_cq_id, 1);
+
+       return 0;
+}
+
+static int benet_close(struct net_device *netdev)
+{
+       struct be_net_object *pnob = (struct be_net_object *)netdev->priv;
+       struct be_adapter *adapter = pnob->adapter;
+
+       netif_stop_queue(netdev);
+       synchronize_irq(netdev->irq);
+
+       be_wait_nic_tx_cmplx_cmpl(pnob);
+       adapter->dev_state = BE_DEV_STATE_INIT;
+       netif_carrier_off(netdev);
+
+       adapter->port0_link_sts = BE_PORT_LINK_DOWN;
+       adapter->port1_link_sts = BE_PORT_LINK_DOWN;
+       be_disable_intr(pnob);
+       be_disable_eq_intr(pnob);
+       napi_disable(&pnob->napi);
+
+       return 0;
+}
+
+/*
+ * Set a MAC address for BE.
+ * Takes netdev and a void pointer as arguments.
+ * The pointer holds the new address to be used.
+ */
+static int benet_set_mac_addr(struct net_device *netdev, void *p)
+{
+       struct sockaddr *addr = p;
+       struct be_net_object *pnob;
+
+       pnob = (struct be_net_object *)netdev->priv;
+
+       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+       be_rxf_mac_address_read_write(&pnob->fn_obj, 0, 0, false, true, false,
+                               netdev->dev_addr, NULL, NULL);
+       /*
+        * Since we are doing Active-Passive failover, both
+        * ports should have matching MAC addresses at all times.
+        */
+       be_rxf_mac_address_read_write(&pnob->fn_obj, 1, 0, false, true, false,
+                                     netdev->dev_addr, NULL, NULL);
+
+       return 0;
+}
+
+void be_get_stats_timer_handler(unsigned long context)
+{
+       struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;
+
+       if (atomic_read(&ctxt->get_stat_flag)) {
+               atomic_dec(&ctxt->get_stat_flag);
+               up((void *)ctxt->get_stat_sem_addr);
+       }
+       del_timer(&ctxt->get_stats_timer);
+}
+
+void be_get_stat_cb(void *context, int status,
+                   struct MCC_WRB_AMAP *optional_wrb)
+{
+       struct be_timer_ctxt *ctxt = (struct be_timer_ctxt *)context;
+       /*
+        * Just up the semaphore if get_stat_flag
+        * reads 1, so that the waiter can continue.
+        * If it is 0, it was already handled by the timer handler.
+        */
+       del_timer(&ctxt->get_stats_timer);
+       if (atomic_read(&ctxt->get_stat_flag)) {
+               atomic_dec(&ctxt->get_stat_flag);
+               up((void *)ctxt->get_stat_sem_addr);
+       }
+}
+
+struct net_device_stats *benet_get_stats(struct net_device *dev)
+{
+       struct be_net_object *pnob = dev->priv;
+       struct be_adapter *adapter = pnob->adapter;
+       u64 pa;
+       struct be_timer_ctxt *ctxt = &adapter->timer_ctxt;
+
+       if (adapter->dev_state != BE_DEV_STATE_OPEN) {
+               /* Return previously read stats */
+               return &(adapter->benet_stats);
+       }
+       /* Get Physical Addr */
+       pa = pci_map_single(adapter->pdev, adapter->eth_statsp,
+                           sizeof(struct FWCMD_ETH_GET_STATISTICS),
+                           PCI_DMA_FROMDEVICE);
+       ctxt->get_stat_sem_addr = (unsigned long)&adapter->get_eth_stat_sem;
+       atomic_inc(&ctxt->get_stat_flag);
+
+       be_rxf_query_eth_statistics(&pnob->fn_obj, adapter->eth_statsp,
+                                   cpu_to_le64(pa), be_get_stat_cb, ctxt,
+                                   NULL);
+
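+       /* arm a 2-second timeout; either the MCC completion callback or
+        * the timer handler will up the semaphore we block on below
+        */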
+       ctxt->get_stats_timer.data = (unsigned long)ctxt;
+       mod_timer(&ctxt->get_stats_timer, (jiffies + (HZ * 2)));
+       down((void *)ctxt->get_stat_sem_addr);  /* callback will unblock us */
+
+       /* Adding port0 and port1 stats. */
+       adapter->benet_stats.rx_packets =
+           adapter->eth_statsp->params.response.p0recvdtotalframes +
+           adapter->eth_statsp->params.response.p1recvdtotalframes;
+       adapter->benet_stats.tx_packets =
+           adapter->eth_statsp->params.response.p0xmitunicastframes +
+           adapter->eth_statsp->params.response.p1xmitunicastframes;
+       adapter->benet_stats.tx_bytes =
+           adapter->eth_statsp->params.response.p0xmitbyteslsd +
+           adapter->eth_statsp->params.response.p1xmitbyteslsd;
+       adapter->benet_stats.rx_errors =
+           adapter->eth_statsp->params.response.p0crcerrors +
+           adapter->eth_statsp->params.response.p1crcerrors;
+       adapter->benet_stats.rx_errors +=
+           adapter->eth_statsp->params.response.p0alignmentsymerrs +
+           adapter->eth_statsp->params.response.p1alignmentsymerrs;
+       adapter->benet_stats.rx_errors +=
+           adapter->eth_statsp->params.response.p0inrangelenerrors +
+           adapter->eth_statsp->params.response.p1inrangelenerrors;
+       adapter->benet_stats.rx_bytes =
+           adapter->eth_statsp->params.response.p0recvdtotalbytesLSD +
+           adapter->eth_statsp->params.response.p1recvdtotalbytesLSD;
+       adapter->benet_stats.rx_crc_errors =
+           adapter->eth_statsp->params.response.p0crcerrors +
+           adapter->eth_statsp->params.response.p1crcerrors;
+
+       adapter->benet_stats.tx_packets +=
+           adapter->eth_statsp->params.response.p0xmitmulticastframes +
+           adapter->eth_statsp->params.response.p1xmitmulticastframes;
+       adapter->benet_stats.tx_packets +=
+           adapter->eth_statsp->params.response.p0xmitbroadcastframes +
+           adapter->eth_statsp->params.response.p1xmitbroadcastframes;
+       adapter->benet_stats.tx_errors = 0;
+
+       adapter->benet_stats.multicast =
+           adapter->eth_statsp->params.response.p0xmitmulticastframes +
+           adapter->eth_statsp->params.response.p1xmitmulticastframes;
+
+       adapter->benet_stats.rx_fifo_errors =
+           adapter->eth_statsp->params.response.p0rxfifooverflowdropped +
+           adapter->eth_statsp->params.response.p1rxfifooverflowdropped;
+       adapter->benet_stats.rx_frame_errors =
+           adapter->eth_statsp->params.response.p0alignmentsymerrs +
+           adapter->eth_statsp->params.response.p1alignmentsymerrs;
+       adapter->benet_stats.rx_length_errors =
+           adapter->eth_statsp->params.response.p0inrangelenerrors +
+           adapter->eth_statsp->params.response.p1inrangelenerrors;
+       adapter->benet_stats.rx_length_errors +=
+           adapter->eth_statsp->params.response.p0outrangeerrors +
+           adapter->eth_statsp->params.response.p1outrangeerrors;
+       adapter->benet_stats.rx_length_errors +=
+           adapter->eth_statsp->params.response.p0frametoolongerrors +
+           adapter->eth_statsp->params.response.p1frametoolongerrors;
+
+       pci_unmap_single(adapter->pdev, pa,
+                        sizeof(struct FWCMD_ETH_GET_STATISTICS),
+                        PCI_DMA_FROMDEVICE);
+       return &(adapter->benet_stats);
+}
+
+static void be_start_tx(struct be_net_object *pnob, u32 nposted)
+{
+#define CSR_ETH_MAX_SQPOSTS 255
+       struct SQ_DB_AMAP sqdb;
+
+       sqdb.dw[0] = 0;
+
+       AMAP_SET_BITS_PTR(SQ_DB, cid, &sqdb, pnob->tx_q_id);
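+       /* the doorbell's numPosted field can convey at most
+        * CSR_ETH_MAX_SQPOSTS entries, so ring it in chunks
+        */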
+       while (nposted) {
+               if (nposted > CSR_ETH_MAX_SQPOSTS) {
+                       AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb,
+                                         CSR_ETH_MAX_SQPOSTS);
+                       nposted -= CSR_ETH_MAX_SQPOSTS;
+               } else {
+                       AMAP_SET_BITS_PTR(SQ_DB, numPosted, &sqdb, nposted);
+                       nposted = 0;
+               }
+               PD_WRITE(&pnob->fn_obj, etx_sq_db, sqdb.dw[0]);
+       }
+}
+
+static void update_tx_rate(struct be_adapter *adapter)
+{
+       /* update the rate once in two seconds */
+       if ((jiffies - adapter->eth_tx_jiffies) > 2 * (HZ)) {
+               u32 r;
+               r = adapter->eth_tx_bytes /
+                   ((jiffies - adapter->eth_tx_jiffies) / (HZ));
+               r = (r / 1000000);      /* M bytes/s */
+               adapter->be_stat.bes_eth_tx_rate = (r * 8); /* M bits/s */
+               adapter->eth_tx_jiffies = jiffies;
+               adapter->eth_tx_bytes = 0;
+       }
+}
+
+static int wrb_cnt_in_skb(struct sk_buff *skb)
+{
+       int cnt = 0;
+       while (skb) {
+               if (skb->len > skb->data_len)
+                       cnt++;
+               cnt += skb_shinfo(skb)->nr_frags;
+               skb = skb_shinfo(skb)->frag_list;
+       }
+       BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
+       return cnt;
+}
+
+static void wrb_fill(struct ETH_WRB_AMAP *wrb, u64 addr, int len)
+{
+       AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_hi, wrb, addr >> 32);
+       AMAP_SET_BITS_PTR(ETH_WRB, frag_pa_lo, wrb, addr & 0xFFFFFFFF);
+       AMAP_SET_BITS_PTR(ETH_WRB, frag_len, wrb, len);
+}
+
+static void wrb_fill_extra(struct ETH_WRB_AMAP *wrb, struct sk_buff *skb,
+                          struct be_net_object *pnob)
+{
+       wrb->dw[2] = wrb->dw[3] = 0;
+       AMAP_SET_BITS_PTR(ETH_WRB, crc, wrb, 1);
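+       /* request LSO for multi-segment GSO skbs; otherwise fall back
+        * to TCP/UDP checksum offload when the stack requests it
+        */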
+       if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
+               AMAP_SET_BITS_PTR(ETH_WRB, lso, wrb, 1);
+               AMAP_SET_BITS_PTR(ETH_WRB, lso_mss, wrb,
+                                 skb_shinfo(skb)->gso_size);
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               u8 proto = ((struct iphdr *)ip_hdr(skb))->protocol;
+               if (proto == IPPROTO_TCP)
+                       AMAP_SET_BITS_PTR(ETH_WRB, tcpcs, wrb, 1);
+               else if (proto == IPPROTO_UDP)
+                       AMAP_SET_BITS_PTR(ETH_WRB, udpcs, wrb, 1);
+       }
+       if (pnob->vlan_grp && vlan_tx_tag_present(skb)) {
+               AMAP_SET_BITS_PTR(ETH_WRB, vlan, wrb, 1);
+               AMAP_SET_BITS_PTR(ETH_WRB, vlan_tag, wrb, vlan_tx_tag_get(skb));
+       }
+}
+
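+/* Copy the control bits set by wrb_fill_extra() from a packet's first
+ * WRB into a subsequent one.
+ */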
+static inline void wrb_copy_extra(struct ETH_WRB_AMAP *to,
+                                 struct ETH_WRB_AMAP *from)
+{
+       to->dw[2] = from->dw[2];
+       to->dw[3] = from->dw[3];
+}
+
+/* Returns the actual count of wrbs used including a possible dummy */
+static int copy_skb_to_txq(struct be_net_object *pnob, struct sk_buff *skb,
+                          u32 wrb_cnt, u32 *copied)
+{
+       u64 busaddr;
+       struct ETH_WRB_AMAP *wrb = NULL, *first = NULL;
+       u32 i;
+       bool dummy = true;
+       struct pci_dev *pdev = pnob->adapter->pdev;
+
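+       /*
+        * The hardware expects an even number of WRBs per request; pad an
+        * odd count with one zero-length dummy WRB at the end.
+        */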
+       if (wrb_cnt & 1)
+               wrb_cnt++;
+       else
+               dummy = false;
+
+       atomic_add(wrb_cnt, &pnob->tx_q_used);
+
+       while (skb) {
+               if (skb->len > skb->data_len) {
+                       int len = skb->len - skb->data_len;
+                       busaddr = pci_map_single(pdev, skb->data, len,
+                                                PCI_DMA_TODEVICE);
+                       busaddr = cpu_to_le64(busaddr);
+                       wrb = &pnob->tx_q[pnob->tx_q_hd];
+                       if (first == NULL) {
+                               wrb_fill_extra(wrb, skb, pnob);
+                               first = wrb;
+                       } else {
+                               wrb_copy_extra(wrb, first);
+                       }
+                       wrb_fill(wrb, busaddr, len);
+                       be_adv_txq_hd(pnob);
+                       *copied += len;
+               }
+
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                       struct skb_frag_struct *frag =
+                           &skb_shinfo(skb)->frags[i];
+                       busaddr = pci_map_page(pdev, frag->page,
+                                              frag->page_offset, frag->size,
+                                              PCI_DMA_TODEVICE);
+                       busaddr = cpu_to_le64(busaddr);
+                       wrb = &pnob->tx_q[pnob->tx_q_hd];
+                       if (first == NULL) {
+                               wrb_fill_extra(wrb, skb, pnob);
+                               first = wrb;
+                       } else {
+                               wrb_copy_extra(wrb, first);
+                       }
+                       wrb_fill(wrb, busaddr, frag->size);
+                       be_adv_txq_hd(pnob);
+                       *copied += frag->size;
+               }
+               skb = skb_shinfo(skb)->frag_list;
+       }
+
+       if (dummy) {
+               wrb = &pnob->tx_q[pnob->tx_q_hd];
+               BUG_ON(first == NULL);
+               wrb_copy_extra(wrb, first);
+               wrb_fill(wrb, 0, 0);
+               be_adv_txq_hd(pnob);
+       }
+       AMAP_SET_BITS_PTR(ETH_WRB, complete, wrb, 1);
+       AMAP_SET_BITS_PTR(ETH_WRB, last, wrb, 1);
+       return wrb_cnt;
+}
+
+/* For each skb transmitted, tx_ctxt stores the number of WRBs used at the
+ * start index and the skb pointer at the end index.
+ */
+static inline void be_tx_wrb_info_remember(struct be_net_object *pnob,
+                                          struct sk_buff *skb, int wrb_cnt,
+                                          u32 start)
+{
+       *(u32 *) (&pnob->tx_ctxt[start]) = wrb_cnt;
+       index_adv(&start, wrb_cnt - 1, pnob->tx_q_len);
+       pnob->tx_ctxt[start] = skb;
+}
+
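+/*
+ * The driver's hard_start_xmit entry point: maps the skb into TX queue
+ * WRBs, records the skb for later completion processing and rings the
+ * send-queue doorbell.
+ */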
+static int benet_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct be_net_object *pnob = netdev->priv;
+       struct be_adapter *adapter = pnob->adapter;
+       u32 wrb_cnt, copied = 0;
+       u32 start = pnob->tx_q_hd;
+
+       adapter->be_stat.bes_tx_reqs++;
+
+       wrb_cnt = wrb_cnt_in_skb(skb);
+       spin_lock_bh(&adapter->txq_lock);
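+       /*
+        * Leave headroom for the dummy WRB that copy_skb_to_txq() may add
+        * when wrb_cnt is odd; stop the queue if the request won't fit.
+        */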
+       if ((pnob->tx_q_len - 2 - atomic_read(&pnob->tx_q_used)) <= wrb_cnt) {
+               netif_stop_queue(pnob->netdev);
+               spin_unlock_bh(&adapter->txq_lock);
+               adapter->be_stat.bes_tx_fails++;
+               return NETDEV_TX_BUSY;
+       }
+       spin_unlock_bh(&adapter->txq_lock);
+
+       wrb_cnt = copy_skb_to_txq(pnob, skb, wrb_cnt, &copied);
+       be_tx_wrb_info_remember(pnob, skb, wrb_cnt, start);
+
+       be_start_tx(pnob, wrb_cnt);
+
+       adapter->eth_tx_bytes += copied;
+       adapter->be_stat.bes_tx_wrbs += wrb_cnt;
+       update_tx_rate(adapter);
+       netdev->trans_start = jiffies;
+
+       return NETDEV_TX_OK;
+}
+
+/*
+ * This is the driver entry point to change the mtu of the device
+ * Returns 0 for success and errno for failure.
+ */
+static int benet_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       /*
+        * BE supports a jumbo frame size of up to 9000 bytes including the
+        * link-layer header. Allowing for the largest possible framing
+        * overhead (14-byte Ethernet header, 4-byte VLAN tag and 8-byte
+        * LLC/SNAP header, i.e. 26 bytes), the maximum possible MTU is
+        * 9000 - 26 = 8974 bytes.
+        */
+
+       if (new_mtu < (ETH_ZLEN + ETH_FCS_LEN) || (new_mtu > BE_MAX_MTU)) {
+               dev_info(&netdev->dev, "Invalid MTU requested. "
+                              "Must be between %d and %d bytes\n",
+                                      (ETH_ZLEN + ETH_FCS_LEN), BE_MAX_MTU);
+               return -EINVAL;
+       }
+       dev_info(&netdev->dev, "MTU changed from %d to %d\n",
+                                               netdev->mtu, new_mtu);
+       netdev->mtu = new_mtu;
+       return 0;
+}
+
+/*
+ * This is the driver entry point to register a vlan with the device
+ */
+static void benet_vlan_register(struct net_device *netdev,
+                               struct vlan_group *grp)
+{
+       struct be_net_object *pnob = netdev->priv;
+
+       be_disable_eq_intr(pnob);
+       pnob->vlan_grp = grp;
+       pnob->num_vlans = 0;
+       be_enable_eq_intr(pnob);
+}
+
+/*
+ * This is the driver entry point to add a vlan with id vlan_id
+ * to the device netdev
+ */
+static void benet_vlan_add_vid(struct net_device *netdev, u16 vlan_id)
+{
+       struct be_net_object *pnob = netdev->priv;
+
+       if (pnob->num_vlans == (BE_NUM_VLAN_SUPPORTED - 1)) {
+               /* no way to return an error */
+               dev_info(&netdev->dev,
+                      "BladeEngine: Cannot configure more than %d Vlans\n",
+                              BE_NUM_VLAN_SUPPORTED);
+               return;
+       }
+       /* The new vlan tag will be in the slot indicated by num_vlans. */
+       pnob->vlan_tag[pnob->num_vlans++] = vlan_id;
+       be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
+                          pnob->vlan_tag, NULL, NULL, NULL);
+}
+
+/*
+ * This is the driver entry point to remove a vlan with id vlan_id
+ * from the device netdev
+ */
+static void benet_vlan_rem_vid(struct net_device *netdev, u16 vlan_id)
+{
+       struct be_net_object *pnob = netdev->priv;
+       u32 i;
+
+       /*
+        * In BladeEngine, we support 32 vlan tag filters across both ports.
+        * To program a vlan tag, the RXF_RTPR_CSR register is used.
+        * Each 32-bit value of RXF_RTDR_CSR can address 2 vlan tag entries.
+        * The vlan table is 16 entries deep; thus we support 32 tags.
+        */
+
+       for (i = 0; i < BE_NUM_VLAN_SUPPORTED; i++) {
+               if (pnob->vlan_tag[i] == vlan_id)
+                       break;
+       }
+
+       if (i == BE_NUM_VLAN_SUPPORTED)
+               return;
+       /* Now compact the vlan tag array by removing the hole created. */
+       while ((i + 1) < BE_NUM_VLAN_SUPPORTED) {
+               pnob->vlan_tag[i] = pnob->vlan_tag[i + 1];
+               i++;
+       }
+       if ((i + 1) == BE_NUM_VLAN_SUPPORTED)
+               pnob->vlan_tag[i] = (u16) 0x0;
+       pnob->num_vlans--;
+       be_rxf_vlan_config(&pnob->fn_obj, false, pnob->num_vlans,
+                          pnob->vlan_tag, NULL, NULL, NULL);
+}
+
+/*
+ * This function is called to program multicast
+ * addresses in the multicast filter of the ASIC.
+ */
+static void be_set_multicast_filter(struct net_device *netdev)
+{
+       struct be_net_object *pnob = netdev->priv;
+       struct dev_mc_list *mc_ptr;
+       u8 mac_addr[32][ETH_ALEN];
+       int i;
+
+       if (netdev->flags & IFF_ALLMULTI) {
+               /* set BE in Multicast promiscuous */
+               be_rxf_multicast_config(&pnob->fn_obj, true, 0, NULL, NULL,
+                                       NULL, NULL);
+               return;
+       }
+
+       /* mac_addr can hold at most 32 entries; ignore any excess. */
+       for (mc_ptr = netdev->mc_list, i = 0; mc_ptr && i < 32;
+            mc_ptr = mc_ptr->next, i++) {
+               memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
+       }
+
+       /* reset the promiscuous mode also. */
+       be_rxf_multicast_config(&pnob->fn_obj, false, i,
+                               &mac_addr[0][0], NULL, NULL, NULL);
+}
+
+/*
+ * This is the driver entry point to set the multicast list
+ * with the device netdev. It is used to enable promiscuous
+ * mode, multicast promiscuous mode, or plain multicast
+ * filtering.
+ */
+static void benet_set_multicast_list(struct net_device *netdev)
+{
+       struct be_net_object *pnob = netdev->priv;
+
+       if (netdev->flags & IFF_PROMISC) {
+               be_rxf_promiscuous(&pnob->fn_obj, 1, 1, NULL, NULL, NULL);
+       } else {
+               be_rxf_promiscuous(&pnob->fn_obj, 0, 0, NULL, NULL, NULL);
+               be_set_multicast_filter(netdev);
+       }
+}
+
+int benet_init(struct net_device *netdev)
+{
+       struct be_net_object *pnob = netdev->priv;
+       struct be_adapter *adapter = pnob->adapter;
+
+       ether_setup(netdev);
+
+       netdev->open = &benet_open;
+       netdev->stop = &benet_close;
+       netdev->hard_start_xmit = &benet_xmit;
+
+       netdev->get_stats = &benet_get_stats;
+
+       netdev->set_multicast_list = &benet_set_multicast_list;
+
+       netdev->change_mtu = &benet_change_mtu;
+       netdev->set_mac_address = &benet_set_mac_addr;
+
+       netdev->vlan_rx_register = benet_vlan_register;
+       netdev->vlan_rx_add_vid = benet_vlan_add_vid;
+       netdev->vlan_rx_kill_vid = benet_vlan_rem_vid;
+
+       netdev->features =
+           NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
+           NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM;
+
+       netdev->flags |= IFF_MULTICAST;
+
+       /* If device is DAC Capable, set the HIGHDMA flag for netdevice. */
+       if (adapter->dma_64bit_cap)
+               netdev->features |= NETIF_F_HIGHDMA;
+
+       SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
+       return 0;
+}
diff --git a/drivers/staging/benet/benet.h b/drivers/staging/benet/benet.h
new file mode 100644 (file)
index 0000000..865022c
--- /dev/null
@@ -0,0 +1,429 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#ifndef _BENET_H_
+#define _BENET_H_
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/inet_lro.h>
+#include "hwlib.h"
+
+#define _SA_MODULE_NAME "net-driver"
+
+#define VLAN_VALID_BIT         0x8000
+#define BE_NUM_VLAN_SUPPORTED  32
+#define BE_PORT_LINK_DOWN       0000
+#define BE_PORT_LINK_UP         0001
+#define BE_MAX_TX_FRAG_COUNT           (30)
+
+/* Flag bits for send operation */
+#define IPCS            (1 << 0)       /* Enable IP checksum offload */
+#define UDPCS           (1 << 1)       /* Enable UDP checksum offload */
+#define TCPCS           (1 << 2)       /* Enable TCP checksum offload */
+#define LSO             (1 << 3)       /* Enable Large Segment  offload */
+#define ETHVLAN         (1 << 4)       /* Enable VLAN insert */
+#define ETHEVENT        (1 << 5)       /* Generate  event on completion */
+#define ETHCOMPLETE     (1 << 6)       /* Generate completion when done */
+#define IPSEC           (1 << 7)       /* Enable IPSEC */
+#define FORWARD         (1 << 8)       /* Send the packet in forwarding path */
+#define FIN             (1 << 9)       /* Issue FIN segment */
+
+#define BE_MAX_MTU     8974
+
+#define BE_MAX_LRO_DESCRIPTORS                 8
+#define BE_LRO_MAX_PKTS                                64
+#define BE_MAX_FRAGS_PER_FRAME                 6
+
+extern const char be_drvr_ver[];
+extern char be_fw_ver[];
+extern char be_driver_name[];
+
+extern struct ethtool_ops be_ethtool_ops;
+
+#define BE_DEV_STATE_NONE 0
+#define BE_DEV_STATE_INIT 1
+#define BE_DEV_STATE_OPEN 2
+#define BE_DEV_STATE_SUSPEND 3
+
+/* This structure is used to describe the physical fragments used
+ * for DMAing data from the NIC.
+ */
+struct be_recv_buffer {
+       struct list_head rxb_list;      /* for maintaining a linked list */
+       void *rxb_va;           /* buffer virtual address */
+       u32 rxb_pa_lo;          /* low part of physical address */
+       u32 rxb_pa_hi;          /* high part of physical address */
+       u32 rxb_len;            /* length of recv buffer */
+       void *rxb_ctxt;         /* context for OSM driver to use */
+};
+
+/*
+ * fragment list to describe scattered data.
+ */
+struct be_tx_frag_list {
+       u32 txb_len;            /* Size of this fragment */
+       u32 txb_pa_lo;          /* Lower 32 bits of 64 bit physical addr */
+       u32 txb_pa_hi;          /* Higher 32 bits of 64 bit physical addr */
+};
+
+struct be_rx_page_info {
+       struct page *page;
+       dma_addr_t bus;
+       u16 page_offset;
+};
+
+/*
+ *  This structure is the main tracking structure for a NIC interface.
+ */
+struct be_net_object {
+       /* MCC Ring - used to send fwcmds to embedded ARM processor */
+       struct MCC_WRB_AMAP *mcc_q;     /* VA of the start of the ring */
+       u32 mcc_q_len;                  /* # of WRB entries in this ring */
+       u32 mcc_q_size;
+       u32 mcc_q_hd;                   /* MCC ring head */
+       u8 mcc_q_created;               /* flag to help cleanup */
+       struct be_mcc_object mcc_q_obj; /* BECLIB's MCC ring Object */
+       dma_addr_t mcc_q_bus;           /* DMA'ble bus address */
+
+       /* MCC Completion Ring - FW responses to fwcmds sent from MCC ring */
+       struct MCC_CQ_ENTRY_AMAP *mcc_cq; /* VA of the start of the ring */
+       u32 mcc_cq_len;                 /* # of compl. entries in this ring */
+       u32 mcc_cq_size;
+       u32 mcc_cq_tl;                  /* compl. ring tail */
+       u8 mcc_cq_created;              /* flag to help cleanup */
+       struct be_cq_object mcc_cq_obj; /* BECLIB's MCC compl. ring object */
+       u32 mcc_cq_id;                  /* MCC ring ID */
+       dma_addr_t mcc_cq_bus;          /* DMA'ble bus address */
+
+       struct ring_desc mb_rd;         /* RD for MCC_MAIL_BOX */
+       void *mb_ptr;                   /* mailbox ptr to be freed  */
+       dma_addr_t mb_bus;              /* DMA'ble bus address */
+       u32 mb_size;
+
+       /* BEClib uses an array of context objects to track outstanding
+        * requests to the MCC.  We need to allocate the same number of
+        * context entries as there are entries in the MCC WRB ring.
+        */
+       u32 mcc_wrb_ctxt_size;
+       void *mcc_wrb_ctxt;             /* pointer to the context area */
+       u32 mcc_wrb_ctxtLen;            /* Number of entries in the context */
+       /*
+        * NIC send request ring - used for xmitting raw ether frames.
+        */
+       struct ETH_WRB_AMAP *tx_q;      /* VA of the start of the ring */
+       u32 tx_q_len;                   /* # of entries in the send ring */
+       u32 tx_q_size;
+       u32 tx_q_hd;                    /* Head index. Next req. goes here */
+       u32 tx_q_tl;                    /* Tail index. Oldest outstanding req. */
+       u8 tx_q_created;                /* flag to help cleanup */
+       struct be_ethsq_object tx_q_obj;/* BECLIB's send Q handle */
+       dma_addr_t tx_q_bus;            /* DMA'ble bus address */
+       u32 tx_q_id;                    /* send queue ring ID */
+       u32 tx_q_port;                  /* 0 no binding, 1 port A,  2 port B */
+       atomic_t tx_q_used;             /* # of WRBs used */
+       /* ptr to an array in which we store context info for each send req. */
+       void **tx_ctxt;
+       /*
+        * NIC Send compl. ring - completion status for all NIC frames xmitted.
+        */
+       struct ETH_TX_COMPL_AMAP *tx_cq;/* VA of start of the ring */
+       u32 txcq_len;                   /* # of entries in the ring */
+       u32 tx_cq_size;
+       /*
+        * index into compl ring where the host expects next completion entry
+        */
+       u32 tx_cq_tl;
+       u32 tx_cq_id;                   /* completion queue id */
+       u8 tx_cq_created;               /* flag to help cleanup */
+       struct be_cq_object tx_cq_obj;
+       dma_addr_t tx_cq_bus;           /* DMA'ble bus address */
+       /*
+        * Event Queue - all completion entries post events here.
+        */
+       struct EQ_ENTRY_AMAP *event_q;  /* VA of start of event queue */
+       u32 event_q_len;                /* # of entries */
+       u32 event_q_size;
+       u32 event_q_tl;                 /* Tail of the event queue */
+       u32 event_q_id;                 /* Event queue ID */
+       u8 event_q_created;             /* flag to help cleanup */
+       struct be_eq_object event_q_obj; /* Queue handle */
+       dma_addr_t event_q_bus;         /* DMA'ble bus address */
+       /*
+        * NIC receive queue - Data buffers to be used for receiving unicast,
+        * broadcast and multicast frames are posted here.
+        */
+       struct ETH_RX_D_AMAP *rx_q;     /* VA of start of the queue */
+       u32 rx_q_len;                   /* # of entries */
+       u32 rx_q_size;
+       u32 rx_q_hd;                    /* Head of the queue */
+       atomic_t rx_q_posted;           /* number of posted buffers */
+       u32 rx_q_id;                    /* queue ID */
+       u8 rx_q_created;                /* flag to help cleanup */
+       struct be_ethrq_object rx_q_obj;        /* NIC RX queue handle */
+       dma_addr_t rx_q_bus;            /* DMA'ble bus address */
+       /*
+        * Pointer to an array of opaque context object for use by OSM driver
+        */
+       void **rx_ctxt;
+       /*
+        * NIC unicast RX completion queue - all unicast ether frame completion
+        * statuses from BE come here.
+        */
+       struct ETH_RX_COMPL_AMAP *rx_cq;        /* VA of start of the queue */
+       u32 rx_cq_len;          /* # of entries */
+       u32 rx_cq_size;
+       u32 rx_cq_tl;                   /* Tail of the queue */
+       u32 rx_cq_id;                   /* queue ID */
+       u8 rx_cq_created;               /* flag to help cleanup */
+       struct be_cq_object rx_cq_obj;  /* queue handle */
+       dma_addr_t rx_cq_bus;           /* DMA'ble bus address */
+       struct be_function_object fn_obj;       /* function object   */
+       bool    fn_obj_created;
+       u32 rx_buf_size;                /* Size of the RX buffers */
+
+       struct net_device *netdev;
+       struct be_recv_buffer eth_rx_bufs[256]; /* to pass Rx buffer
+                                                          addresses */
+       struct be_adapter *adapter;     /* Pointer to OSM adapter */
+       u32 devno;              /* OSM, network dev no. */
+       u32 use_port;           /* Current active port */
+       struct be_rx_page_info *rx_page_info;   /* Array of Rx buf pages */
+       u32 rx_pg_info_hd;      /* Head of queue */
+       int rxbuf_post_fail;    /* RxBuff posting fail count */
+       bool rx_pg_shared;      /* Is an allocated page shared as two frags? */
+       struct vlan_group *vlan_grp;
+       u32 num_vlans;          /* Number of vlans in BE's filter */
+       u16 vlan_tag[BE_NUM_VLAN_SUPPORTED]; /* vlans currently configured */
+       struct napi_struct napi;
+       struct net_lro_mgr lro_mgr;
+       struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
+};
+
+#define NET_FH(np)       (&(np)->fn_obj)
+
+/*
+ * BE driver statistics.
+ */
+struct be_drvr_stat {
+       u32 bes_tx_reqs;        /* number of TX requests initiated */
+       u32 bes_tx_fails;       /* number of TX requests that failed */
+       u32 bes_fwd_reqs;       /* number of send reqs through forwarding i/f */
+       u32 bes_tx_wrbs;        /* number of tx WRBs used */
+
+       u32 bes_ints;           /* number of interrupts */
+       u32 bes_polls;          /* number of times NAPI called poll function */
+       u32 bes_events;         /* total event entries processed */
+       u32 bes_tx_events;      /* number of tx completion events  */
+       u32 bes_rx_events;      /* number of ucast rx completion events  */
+       u32 bes_tx_compl;       /* number of tx completion entries processed */
+       u32 bes_rx_compl;       /* number of rx completion entries
+                                  processed */
+       u32 bes_ethrx_post_fail;        /* number of ethrx buffer alloc
+                                          failures */
+       /*
+        * number of non ether type II frames dropped where
+        * frame len > length field of Mac Hdr
+        */
+       u32 bes_802_3_dropped_frames;
+       /*
+        * number of non ether type II frames malformed where
+        * frame len < length field of Mac Hdr
+        */
+       u32 bes_802_3_malformed_frames;
+       u32 bes_ips;            /*  interrupts / sec */
+       u32 bes_prev_ints;      /* bes_ints at last IPS calculation  */
+       u16 bes_eth_tx_rate;    /*  ETH TX rate - Mb/sec */
+       u16 bes_eth_rx_rate;    /*  ETH RX rate - Mb/sec */
+       u32 bes_rx_coal;        /* Num pkts coalesced */
+       u32 bes_rx_flush;       /* Num times coalesced pkts flushed */
+       u32 bes_link_change_physical;   /* Num of times physical link changed */
+       u32 bes_link_change_virtual;    /* Num of times virtual link changed */
+       u32 bes_rx_misc_pkts;   /* Misc pkts received */
+};
+
+/* Maximum interrupt delay (in microseconds) allowed */
+#define MAX_EQD                                120
+
+/*
+ * timer to prevent a system shutdown from hanging forever if h/w stops responding
+ */
+struct be_timer_ctxt {
+       atomic_t get_stat_flag;
+       struct timer_list get_stats_timer;
+       unsigned long get_stat_sem_addr;
+};
+
+/* This structure is the main BladeEngine driver context.  */
+struct be_adapter {
+       struct net_device *netdevp;
+       struct be_drvr_stat be_stat;
+       struct net_device_stats benet_stats;
+
+       /* PCI BAR mapped addresses */
+       u8 __iomem *csr_va;     /* CSR */
+       u8 __iomem *db_va;      /* Door  Bell  */
+       u8 __iomem *pci_va;     /* PCI Config */
+
+       struct tasklet_struct sts_handler;
+       struct timer_list cq_timer;
+       spinlock_t int_lock;
+
+       struct FWCMD_ETH_GET_STATISTICS *eth_statsp;
+       /*
+        * Controlled via ethtool: when true, the checksum result that
+        * BE reports for a received pkt is passed to the stack;
+        * otherwise the stack recalculates it.
+        */
+       bool rx_csum;
+       /*
+        * Controlled via ethtool: coalescing of Rx pkts is enabled
+        * when this is greater than 0 and less than 16, and disabled
+        * otherwise.
+        */
+       u32 max_rx_coal;
+       struct pci_dev *pdev;   /* Pointer to OS's PCI device */
+
+       spinlock_t txq_lock;
+
+       u32 isr;                /* copy of Intr status reg. */
+
+       u32 port0_link_sts;     /* Port 0 link status */
+       u32 port1_link_sts;     /* Port 1 link status */
+       struct BE_LINK_STATUS *be_link_sts;
+
+       /* pointer to the first netobject of this adapter */
+       struct be_net_object *net_obj;
+
+       /*  Flags to indicate what to clean up */
+       bool tasklet_started;
+       bool isr_registered;
+       /*
+        * adaptive interrupt coalescing (AIC) related
+        */
+       bool enable_aic;        /* 1 if AIC is enabled */
+       u16 min_eqd;            /* minimum EQ delay in usec */
+       u16 max_eqd;            /* maximum EQ delay in usec */
+       u16 cur_eqd;            /* current EQ delay in usec */
+       /*
+        * book keeping for interrupt / sec and TX/RX rate calculation
+        */
+       ulong ips_jiffies;      /* jiffies at last IPS calc */
+       u32 eth_tx_bytes;
+       ulong eth_tx_jiffies;
+       u32 eth_rx_bytes;
+       ulong eth_rx_jiffies;
+
+       struct semaphore get_eth_stat_sem;
+
+       /* timer ctxt to prevent shutdown hanging due to unresponsive BE */
+       struct be_timer_ctxt timer_ctxt;
+
+#define BE_MAX_MSIX_VECTORS             32
+#define BE_MAX_REQ_MSIX_VECTORS         1 /* only one EQ in Linux driver */
+       struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
+       bool msix_enabled;
+       bool dma_64bit_cap;     /* whether the device is DAC capable */
+       u8 dev_state;   /* The current state of the device */
+       u8 dev_pm_state; /* The State of device before going to suspend */
+};
+
+/*
+ * Every second we look at the ints/sec and adjust eq_delay
+ * between adapter->min_eqd and adapter->max_eqd to keep the ints/sec between
+ * IPS_HI_WM and IPS_LO_WM.
+ */
+#define IPS_HI_WM      18000
+#define IPS_LO_WM      8000
+
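+/*
+ * Ring indices are advanced with a mask instead of a modulo, which
+ * requires every ring length to be a power of two (checked by the
+ * BUG_ONs below).
+ */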
+static inline void index_adv(u32 *index, u32 val,  u32 limit)
+{
+       BUG_ON(limit & (limit-1));
+       *index = (*index + val) & (limit - 1);
+}
+
+static inline void index_inc(u32 *index, u32 limit)
+{
+       BUG_ON(limit & (limit-1));
+       *index = (*index + 1) & (limit - 1);
+}
+
+static inline void be_adv_eq_tl(struct be_net_object *pnob)
+{
+       index_inc(&pnob->event_q_tl, pnob->event_q_len);
+}
+
+static inline void be_adv_txq_hd(struct be_net_object *pnob)
+{
+       index_inc(&pnob->tx_q_hd, pnob->tx_q_len);
+}
+
+static inline void be_adv_txq_tl(struct be_net_object *pnob)
+{
+       index_inc(&pnob->tx_q_tl, pnob->tx_q_len);
+}
+
+static inline void be_adv_txcq_tl(struct be_net_object *pnob)
+{
+       index_inc(&pnob->tx_cq_tl, pnob->txcq_len);
+}
+
+static inline void be_adv_rxq_hd(struct be_net_object *pnob)
+{
+       index_inc(&pnob->rx_q_hd, pnob->rx_q_len);
+}
+
+static inline void be_adv_rxcq_tl(struct be_net_object *pnob)
+{
+       index_inc(&pnob->rx_cq_tl, pnob->rx_cq_len);
+}
+
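+/*
+ * Return the index of the last WRB of the request at tx_q_tl, using the
+ * WRB count that be_tx_wrb_info_remember() stashed at the start index.
+ */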
+static inline u32 tx_compl_lastwrb_idx_get(struct be_net_object *pnob)
+{
+       return (pnob->tx_q_tl + *(u32 *)&pnob->tx_ctxt[pnob->tx_q_tl] - 1)
+                   & (pnob->tx_q_len - 1);
+}
+
+int benet_init(struct net_device *);
+int be_ethtool_ioctl(struct net_device *, struct ifreq *);
+struct net_device_stats *benet_get_stats(struct net_device *);
+void be_process_intr(unsigned long context);
+irqreturn_t be_int(int irq, void *dev);
+void be_post_eth_rx_buffs(struct be_net_object *);
+void be_get_stat_cb(void *, int, struct MCC_WRB_AMAP *);
+void be_get_stats_timer_handler(unsigned long);
+void be_wait_nic_tx_cmplx_cmpl(struct be_net_object *);
+void be_print_link_info(struct BE_LINK_STATUS *);
+void be_update_link_status(struct be_adapter *);
+void be_init_procfs(struct be_adapter *);
+void be_cleanup_procfs(struct be_adapter *);
+int be_poll(struct napi_struct *, int);
+struct ETH_RX_COMPL_AMAP *be_get_rx_cmpl(struct be_net_object *);
+void be_notify_cmpl(struct be_net_object *, int, int, int);
+void be_enable_intr(struct be_net_object *);
+void be_enable_eq_intr(struct be_net_object *);
+void be_disable_intr(struct be_net_object *);
+void be_disable_eq_intr(struct be_net_object *);
+int be_set_uc_mac_adr(struct be_net_object *, u8, u8, u8,
+                   u8 *, mcc_wrb_cqe_callback, void *);
+int be_get_flow_ctl(struct be_function_object *pFnObj, bool *, bool *);
+void process_one_tx_compl(struct be_net_object *pnob, u32 end_idx);
+
+#endif /* _BENET_H_ */
diff --git a/drivers/staging/benet/fw/asyncmesg.h b/drivers/staging/benet/fw/asyncmesg.h
new file mode 100644 (file)
index 0000000..b47c9a5
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __asyncmesg_amap_h__
+#define __asyncmesg_amap_h__
+#include "fwcmd_common.h"
+
+/* --- ASYNC_EVENT_CODES --- */
+#define ASYNC_EVENT_CODE_LINK_STATE     (1)
+#define ASYNC_EVENT_CODE_ISCSI          (2)
+
+/* --- ASYNC_LINK_STATES --- */
+#define ASYNC_EVENT_LINK_DOWN           (0)    /* Link Down on a port */
+#define ASYNC_EVENT_LINK_UP             (1)    /* Link Up on a port */
+
+/*
+ * The last 4 bytes of the async events have this common format.  It allows
+ * the driver to distinguish MCC_CQ_ENTRY structs from
+ * asynchronous events.  Both arrive on the same completion queue.  This
+ * structure also contains the common fields used to decode the async event.
+ */
+struct BE_ASYNC_EVENT_TRAILER_AMAP {
+       u8 rsvd0[8];    /* DWORD 0 */
+       u8 event_code[8];       /* DWORD 0 */
+       u8 event_type[8];       /* DWORD 0 */
+       u8 rsvd1[6];    /* DWORD 0 */
+       u8 async_event; /* DWORD 0 */
+       u8 valid;               /* DWORD 0 */
+} __packed;
+struct ASYNC_EVENT_TRAILER_AMAP {
+       u32 dw[1];
+};
+
+/*
+ * Applicable in Initiator, Target and NIC modes.
+ * A link state async event is seen by all device drivers as soon as they
+ * create an MCC ring. Thereafter, anytime the link status changes the
+ * drivers will receive a link state async event. Notifications continue to
+ * be sent until a driver destroys its MCC ring. A link down event is
+ * reported when either port loses link. A link up event is reported
+ * when either port regains link. When BE's failover mechanism is enabled, a
+ * link down on the active port causes traffic to be diverted to the standby
+ * port by the BE's ARM firmware (assuming the standby port has link). In
+ * this case, the standby port assumes the active status. Note: when link is
+ * restored on the failed port, traffic continues on the currently active
+ * port. The ARM firmware does not attempt to 'fail back' traffic to
+ * the restored port.
+ */
+struct BE_ASYNC_EVENT_LINK_STATE_AMAP {
+       u8 port0_link_status[8];
+       u8 port1_link_status[8];
+       u8 active_port[8];
+       u8 rsvd0[8];    /* DWORD 0 */
+       u8 port0_duplex[8];
+       u8 port0_speed[8];
+       u8 port1_duplex[8];
+       u8 port1_speed[8];
+       u8 port0_fault[8];
+       u8 port1_fault[8];
+       u8 rsvd1[2][8]; /* DWORD 2 */
+       struct BE_ASYNC_EVENT_TRAILER_AMAP trailer;
+} __packed;
+struct ASYNC_EVENT_LINK_STATE_AMAP {
+       u32 dw[4];
+};
+#endif /* __asyncmesg_amap_h__ */
diff --git a/drivers/staging/benet/fw/be_cm.h b/drivers/staging/benet/fw/be_cm.h
new file mode 100644 (file)
index 0000000..b7a1dfd
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __be_cm_amap_h__
+#define __be_cm_amap_h__
+#include "be_common.h"
+#include "etx_context.h"
+#include "mpu_context.h"
+
+/*
+ * --- CEV_WATERMARK_ENUM ---
+ * CQ/EQ Watermark Encodings. Encoded as number of free entries in
+ * Queue when Watermark is reached.
+ */
+#define CEV_WMARK_0        (0) /* Watermark when Queue full */
+#define CEV_WMARK_16       (1) /* Watermark at 16 free entries */
+#define CEV_WMARK_32       (2) /* Watermark at 32 free entries */
+#define CEV_WMARK_48       (3) /* Watermark at 48 free entries */
+#define CEV_WMARK_64       (4) /* Watermark at 64 free entries */
+#define CEV_WMARK_80       (5) /* Watermark at 80 free entries */
+#define CEV_WMARK_96       (6) /* Watermark at 96 free entries */
+#define CEV_WMARK_112      (7) /* Watermark at 112 free entries */
+#define CEV_WMARK_128      (8) /* Watermark at 128 free entries */
+#define CEV_WMARK_144      (9) /* Watermark at 144 free entries */
+#define CEV_WMARK_160      (10)        /* Watermark at 160 free entries */
+#define CEV_WMARK_176      (11)        /* Watermark at 176 free entries */
+#define CEV_WMARK_192      (12)        /* Watermark at 192 free entries */
+#define CEV_WMARK_208      (13)        /* Watermark at 208 free entries */
+#define CEV_WMARK_224      (14)        /* Watermark at 224 free entries */
+#define CEV_WMARK_240      (15)        /* Watermark at 240 free entries */
+
+/*
+ * --- CQ_CNT_ENUM ---
+ * Completion Queue Count Encodings.
+ */
+#define CEV_CQ_CNT_256                  (0)    /* CQ has 256 entries */
+#define CEV_CQ_CNT_512                  (1)    /* CQ has 512 entries */
+#define CEV_CQ_CNT_1024                 (2)    /* CQ has 1024 entries */
+
+/*
+ * --- EQ_CNT_ENUM ---
+ * Event Queue Count Encodings.
+ */
+#define CEV_EQ_CNT_256     (0) /* EQ has 256 entries (16-byte EQEs only) */
+#define CEV_EQ_CNT_512     (1) /* EQ has 512 entries (16-byte EQEs only) */
+#define CEV_EQ_CNT_1024    (2) /* EQ has 1024 entries (4-byte or */
+                               /* 16-byte EQEs only) */
+#define CEV_EQ_CNT_2048    (3) /* EQ has 2048 entries (4-byte or */
+                               /* 16-byte EQEs only) */
+#define CEV_EQ_CNT_4096    (4) /* EQ has 4096 entries (4-byte EQEs only) */
+
+/*
+ * --- EQ_SIZE_ENUM ---
+ * Event Queue Entry Size Encoding.
+ */
+#define CEV_EQ_SIZE_4                   (0)    /* EQE is 4 bytes */
+#define CEV_EQ_SIZE_16                  (1)    /* EQE is 16 bytes */
+
+/*
+ * Completion Queue Context Table Entry. Contains the state of a CQ.
+ * Located in RAM within the CEV block.
+ */
+struct BE_CQ_CONTEXT_AMAP {
+       u8 Cidx[11];    /* DWORD 0 */
+       u8 Watermark[4];        /* DWORD 0 */
+       u8 NoDelay;             /* DWORD 0 */
+       u8 EPIdx[11];   /* DWORD 0 */
+       u8 Count[2];    /* DWORD 0 */
+       u8 valid;               /* DWORD 0 */
+       u8 SolEvent;    /* DWORD 0 */
+       u8 Eventable;   /* DWORD 0 */
+       u8 Pidx[11];    /* DWORD 1 */
+       u8 PD[10];              /* DWORD 1 */
+       u8 EQID[7];             /* DWORD 1 */
+       u8 Func;                /* DWORD 1 */
+       u8 WME;         /* DWORD 1 */
+       u8 Stalled;             /* DWORD 1 */
+       u8 Armed;               /* DWORD 1 */
+} __packed;
+struct CQ_CONTEXT_AMAP {
+       u32 dw[2];
+};
+
+/*
+ * Event Queue Context Table Entry. Contains the state of an EQ.
+ * Located in RAM in the CEV block.
+ */
+struct BE_EQ_CONTEXT_AMAP {
+       u8 Cidx[13];    /* DWORD 0 */
+       u8 rsvd0[2];    /* DWORD 0 */
+       u8 Func;                /* DWORD 0 */
+       u8 EPIdx[13];   /* DWORD 0 */
+       u8 valid;               /* DWORD 0 */
+       u8 rsvd1;               /* DWORD 0 */
+       u8 Size;                /* DWORD 0 */
+       u8 Pidx[13];    /* DWORD 1 */
+       u8 rsvd2[3];    /* DWORD 1 */
+       u8 PD[10];              /* DWORD 1 */
+       u8 Count[3];    /* DWORD 1 */
+       u8 SolEvent;    /* DWORD 1 */
+       u8 Stalled;             /* DWORD 1 */
+       u8 Armed;               /* DWORD 1 */
+       u8 Watermark[4];        /* DWORD 2 */
+       u8 WME;         /* DWORD 2 */
+       u8 rsvd3[3];    /* DWORD 2 */
+       u8 EventVect[6];        /* DWORD 2 */
+       u8 rsvd4[2];    /* DWORD 2 */
+       u8 Delay[8];    /* DWORD 2 */
+       u8 rsvd5[6];    /* DWORD 2 */
+       u8 TMR;         /* DWORD 2 */
+       u8 rsvd6;               /* DWORD 2 */
+       u8 rsvd7[32];   /* DWORD 3 */
+} __packed;
+struct EQ_CONTEXT_AMAP {
+       u32 dw[4];
+};
+
+#endif /* __be_cm_amap_h__ */
diff --git a/drivers/staging/benet/fw/be_common.h b/drivers/staging/benet/fw/be_common.h
new file mode 100644 (file)
index 0000000..7e63dc5
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __be_common_amap_h__
+#define __be_common_amap_h__
+
+/* Physical Address. */
+struct BE_PHYS_ADDR_AMAP {
+       u8 lo[32];              /* DWORD 0 */
+       u8 hi[32];              /* DWORD 1 */
+} __packed;
+struct PHYS_ADDR_AMAP {
+       u32 dw[2];
+};
+
+/* Virtual Address. */
+struct BE_VIRT_ADDR_AMAP {
+       u8 lo[32];              /* DWORD 0 */
+       u8 hi[32];              /* DWORD 1 */
+} __packed;
+struct VIRT_ADDR_AMAP {
+       u32 dw[2];
+};
+
+/* Scatter gather element. */
+struct BE_SGE_AMAP {
+       u8 addr_hi[32]; /* DWORD 0 */
+       u8 addr_lo[32]; /* DWORD 1 */
+       u8 rsvd0[32];   /* DWORD 2 */
+       u8 len[16];             /* DWORD 3 */
+       u8 rsvd1[16];   /* DWORD 3 */
+} __packed;
+struct SGE_AMAP {
+       u32 dw[4];
+};
+
+#endif /* __be_common_amap_h__ */
diff --git a/drivers/staging/benet/fw/cev.h b/drivers/staging/benet/fw/cev.h
new file mode 100644 (file)
index 0000000..3099692
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __cev_amap_h__
+#define __cev_amap_h__
+#include "ep.h"
+
+/*
+ * Host Interrupt Status Register 0. The first of four application
+ * interrupt status registers. This register contains the interrupts
+ * for Event Queues EQ0 through EQ31.
+ */
+struct BE_CEV_ISR0_CSR_AMAP {
+       u8 interrupt0;  /* DWORD 0 */
+       u8 interrupt1;  /* DWORD 0 */
+       u8 interrupt2;  /* DWORD 0 */
+       u8 interrupt3;  /* DWORD 0 */
+       u8 interrupt4;  /* DWORD 0 */
+       u8 interrupt5;  /* DWORD 0 */
+       u8 interrupt6;  /* DWORD 0 */
+       u8 interrupt7;  /* DWORD 0 */
+       u8 interrupt8;  /* DWORD 0 */
+       u8 interrupt9;  /* DWORD 0 */
+       u8 interrupt10; /* DWORD 0 */
+       u8 interrupt11; /* DWORD 0 */
+       u8 interrupt12; /* DWORD 0 */
+       u8 interrupt13; /* DWORD 0 */
+       u8 interrupt14; /* DWORD 0 */
+       u8 interrupt15; /* DWORD 0 */
+       u8 interrupt16; /* DWORD 0 */
+       u8 interrupt17; /* DWORD 0 */
+       u8 interrupt18; /* DWORD 0 */
+       u8 interrupt19; /* DWORD 0 */
+       u8 interrupt20; /* DWORD 0 */
+       u8 interrupt21; /* DWORD 0 */
+       u8 interrupt22; /* DWORD 0 */
+       u8 interrupt23; /* DWORD 0 */
+       u8 interrupt24; /* DWORD 0 */
+       u8 interrupt25; /* DWORD 0 */
+       u8 interrupt26; /* DWORD 0 */
+       u8 interrupt27; /* DWORD 0 */
+       u8 interrupt28; /* DWORD 0 */
+       u8 interrupt29; /* DWORD 0 */
+       u8 interrupt30; /* DWORD 0 */
+       u8 interrupt31; /* DWORD 0 */
+} __packed;
+struct CEV_ISR0_CSR_AMAP {
+       u32 dw[1];
+};
+
+/*
+ * Host Interrupt Status Register 1. The second of four application
+ * interrupt status registers. This register contains the interrupts
+ * for Event Queues EQ32 through EQ63.
+ */
+struct BE_CEV_ISR1_CSR_AMAP {
+       u8 interrupt32; /* DWORD 0 */
+       u8 interrupt33; /* DWORD 0 */
+       u8 interrupt34; /* DWORD 0 */
+       u8 interrupt35; /* DWORD 0 */
+       u8 interrupt36; /* DWORD 0 */
+       u8 interrupt37; /* DWORD 0 */
+       u8 interrupt38; /* DWORD 0 */
+       u8 interrupt39; /* DWORD 0 */
+       u8 interrupt40; /* DWORD 0 */
+       u8 interrupt41; /* DWORD 0 */
+       u8 interrupt42; /* DWORD 0 */
+       u8 interrupt43; /* DWORD 0 */
+       u8 interrupt44; /* DWORD 0 */
+       u8 interrupt45; /* DWORD 0 */
+       u8 interrupt46; /* DWORD 0 */
+       u8 interrupt47; /* DWORD 0 */
+       u8 interrupt48; /* DWORD 0 */
+       u8 interrupt49; /* DWORD 0 */
+       u8 interrupt50; /* DWORD 0 */
+       u8 interrupt51; /* DWORD 0 */
+       u8 interrupt52; /* DWORD 0 */
+       u8 interrupt53; /* DWORD 0 */
+       u8 interrupt54; /* DWORD 0 */
+       u8 interrupt55; /* DWORD 0 */
+       u8 interrupt56; /* DWORD 0 */
+       u8 interrupt57; /* DWORD 0 */
+       u8 interrupt58; /* DWORD 0 */
+       u8 interrupt59; /* DWORD 0 */
+       u8 interrupt60; /* DWORD 0 */
+       u8 interrupt61; /* DWORD 0 */
+       u8 interrupt62; /* DWORD 0 */
+       u8 interrupt63; /* DWORD 0 */
+} __packed;
+struct CEV_ISR1_CSR_AMAP {
+       u32 dw[1];
+};
+/*
+ * Host Interrupt Status Register 2. The third of four application
+ * interrupt status registers. This register contains the interrupts
+ * for Event Queues EQ64 through EQ95.
+ */
+struct BE_CEV_ISR2_CSR_AMAP {
+       u8 interrupt64; /* DWORD 0 */
+       u8 interrupt65; /* DWORD 0 */
+       u8 interrupt66; /* DWORD 0 */
+       u8 interrupt67; /* DWORD 0 */
+       u8 interrupt68; /* DWORD 0 */
+       u8 interrupt69; /* DWORD 0 */
+       u8 interrupt70; /* DWORD 0 */
+       u8 interrupt71; /* DWORD 0 */
+       u8 interrupt72; /* DWORD 0 */
+       u8 interrupt73; /* DWORD 0 */
+       u8 interrupt74; /* DWORD 0 */
+       u8 interrupt75; /* DWORD 0 */
+       u8 interrupt76; /* DWORD 0 */
+       u8 interrupt77; /* DWORD 0 */
+       u8 interrupt78; /* DWORD 0 */
+       u8 interrupt79; /* DWORD 0 */
+       u8 interrupt80; /* DWORD 0 */
+       u8 interrupt81; /* DWORD 0 */
+       u8 interrupt82; /* DWORD 0 */
+       u8 interrupt83; /* DWORD 0 */
+       u8 interrupt84; /* DWORD 0 */
+       u8 interrupt85; /* DWORD 0 */
+       u8 interrupt86; /* DWORD 0 */
+       u8 interrupt87; /* DWORD 0 */
+       u8 interrupt88; /* DWORD 0 */
+       u8 interrupt89; /* DWORD 0 */
+       u8 interrupt90; /* DWORD 0 */
+       u8 interrupt91; /* DWORD 0 */
+       u8 interrupt92; /* DWORD 0 */
+       u8 interrupt93; /* DWORD 0 */
+       u8 interrupt94; /* DWORD 0 */
+       u8 interrupt95; /* DWORD 0 */
+} __packed;
+struct CEV_ISR2_CSR_AMAP {
+       u32 dw[1];
+};
+
+/*
+ * Host Interrupt Status Register 3. The fourth of four application
+ * interrupt status registers. This register contains the interrupts
+ * for Event Queues EQ96 through EQ127.
+ */
+struct BE_CEV_ISR3_CSR_AMAP {
+       u8 interrupt96; /* DWORD 0 */
+       u8 interrupt97; /* DWORD 0 */
+       u8 interrupt98; /* DWORD 0 */
+       u8 interrupt99; /* DWORD 0 */
+       u8 interrupt100;        /* DWORD 0 */
+       u8 interrupt101;        /* DWORD 0 */
+       u8 interrupt102;        /* DWORD 0 */
+       u8 interrupt103;        /* DWORD 0 */
+       u8 interrupt104;        /* DWORD 0 */
+       u8 interrupt105;        /* DWORD 0 */
+       u8 interrupt106;        /* DWORD 0 */
+       u8 interrupt107;        /* DWORD 0 */
+       u8 interrupt108;        /* DWORD 0 */
+       u8 interrupt109;        /* DWORD 0 */
+       u8 interrupt110;        /* DWORD 0 */
+       u8 interrupt111;        /* DWORD 0 */
+       u8 interrupt112;        /* DWORD 0 */
+       u8 interrupt113;        /* DWORD 0 */
+       u8 interrupt114;        /* DWORD 0 */
+       u8 interrupt115;        /* DWORD 0 */
+       u8 interrupt116;        /* DWORD 0 */
+       u8 interrupt117;        /* DWORD 0 */
+       u8 interrupt118;        /* DWORD 0 */
+       u8 interrupt119;        /* DWORD 0 */
+       u8 interrupt120;        /* DWORD 0 */
+       u8 interrupt121;        /* DWORD 0 */
+       u8 interrupt122;        /* DWORD 0 */
+       u8 interrupt123;        /* DWORD 0 */
+       u8 interrupt124;        /* DWORD 0 */
+       u8 interrupt125;        /* DWORD 0 */
+       u8 interrupt126;        /* DWORD 0 */
+       u8 interrupt127;        /* DWORD 0 */
+} __packed;
+struct CEV_ISR3_CSR_AMAP {
+       u32 dw[1];
+};
+
+/*  Completions and Events block Registers.  */
+struct BE_CEV_CSRMAP_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+       u8 rsvd1[32];   /* DWORD 1 */
+       u8 rsvd2[32];   /* DWORD 2 */
+       u8 rsvd3[32];   /* DWORD 3 */
+       struct BE_CEV_ISR0_CSR_AMAP isr0;
+       struct BE_CEV_ISR1_CSR_AMAP isr1;
+       struct BE_CEV_ISR2_CSR_AMAP isr2;
+       struct BE_CEV_ISR3_CSR_AMAP isr3;
+       u8 rsvd4[32];   /* DWORD 8 */
+       u8 rsvd5[32];   /* DWORD 9 */
+       u8 rsvd6[32];   /* DWORD 10 */
+       u8 rsvd7[32];   /* DWORD 11 */
+       u8 rsvd8[32];   /* DWORD 12 */
+       u8 rsvd9[32];   /* DWORD 13 */
+       u8 rsvd10[32];  /* DWORD 14 */
+       u8 rsvd11[32];  /* DWORD 15 */
+       u8 rsvd12[32];  /* DWORD 16 */
+       u8 rsvd13[32];  /* DWORD 17 */
+       u8 rsvd14[32];  /* DWORD 18 */
+       u8 rsvd15[32];  /* DWORD 19 */
+       u8 rsvd16[32];  /* DWORD 20 */
+       u8 rsvd17[32];  /* DWORD 21 */
+       u8 rsvd18[32];  /* DWORD 22 */
+       u8 rsvd19[32];  /* DWORD 23 */
+       u8 rsvd20[32];  /* DWORD 24 */
+       u8 rsvd21[32];  /* DWORD 25 */
+       u8 rsvd22[32];  /* DWORD 26 */
+       u8 rsvd23[32];  /* DWORD 27 */
+       u8 rsvd24[32];  /* DWORD 28 */
+       u8 rsvd25[32];  /* DWORD 29 */
+       u8 rsvd26[32];  /* DWORD 30 */
+       u8 rsvd27[32];  /* DWORD 31 */
+       u8 rsvd28[32];  /* DWORD 32 */
+       u8 rsvd29[32];  /* DWORD 33 */
+       u8 rsvd30[192]; /* DWORD 34 */
+       u8 rsvd31[192]; /* DWORD 40 */
+       u8 rsvd32[160]; /* DWORD 46 */
+       u8 rsvd33[160]; /* DWORD 51 */
+       u8 rsvd34[160]; /* DWORD 56 */
+       u8 rsvd35[96];  /* DWORD 61 */
+       u8 rsvd36[192][32];     /* DWORD 64 */
+} __packed;
+struct CEV_CSRMAP_AMAP {
+       u32 dw[256];
+};
+
+#endif /* __cev_amap_h__ */
diff --git a/drivers/staging/benet/fw/descriptors.h b/drivers/staging/benet/fw/descriptors.h
new file mode 100644 (file)
index 0000000..8da438c
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __descriptors_amap_h__
+#define __descriptors_amap_h__
+
+/*
+ * --- IPC_NODE_ID_ENUM ---
+ * IPC processor id values
+ */
+#define TPOST_NODE_ID                   (0)    /* TPOST ID */
+#define TPRE_NODE_ID                    (1)    /* TPRE ID */
+#define TXULP0_NODE_ID                  (2)    /* TXULP0 ID */
+#define TXULP1_NODE_ID                  (3)    /* TXULP1 ID */
+#define TXULP2_NODE_ID                  (4)    /* TXULP2 ID */
+#define RXULP0_NODE_ID                  (5)    /* RXULP0 ID */
+#define RXULP1_NODE_ID                  (6)    /* RXULP1 ID */
+#define RXULP2_NODE_ID                  (7)    /* RXULP2 ID */
+#define MPU_NODE_ID                     (15)   /* MPU ID */
+
+/*
+ * --- MAC_ID_ENUM ---
+ * Meaning of the mac_id field in rxpp_eth_d
+ */
+#define PORT0_HOST_MAC0    (0)  /* PD 0, Port 0, host networking, MAC 0. */
+#define PORT0_HOST_MAC1    (1) /* PD 0, Port 0, host networking, MAC 1. */
+#define PORT0_STORAGE_MAC0 (2) /* PD 0, Port 0, host storage, MAC 0. */
+#define PORT0_STORAGE_MAC1 (3) /* PD 0, Port 0, host storage, MAC 1. */
+#define PORT1_HOST_MAC0    (4) /* PD 0, Port 1 host networking, MAC 0. */
+#define PORT1_HOST_MAC1    (5) /* PD 0, Port 1 host networking, MAC 1. */
+#define PORT1_STORAGE_MAC0 (6) /* PD 0, Port 1 host storage, MAC 0. */
+#define PORT1_STORAGE_MAC1 (7) /* PD 0, Port 1 host storage, MAC 1. */
+#define FIRST_VM_MAC       (8) /* PD 1 MAC. Protection domains have IDs */
+                               /* from 0x8-0x26, one per PD. */
+#define LAST_VM_MAC        (38)        /* PD 31 MAC. */
+#define MGMT_MAC           (39)        /* Management port MAC. */
+#define MARBLE_MAC0        (59)        /* Used for flushing function 0 receive */
+                                 /*
+                                  * queues before re-using a torn-down
+                                  * receive ring. the DA =
+                                  * 00-00-00-00-00-00, and the MSB of the
+                                  * SA = 00
+                                  */
+#define MARBLE_MAC1        (60)        /* Used for flushing function 1 receive */
+                                 /*
+                                  * queues before re-using a torn-down
+                                  * receive ring. the DA =
+                                  * 00-00-00-00-00-00, and the MSB of the
+                                  * SA != 00
+                                  */
+#define NULL_MAC           (61)        /* Promiscuous mode, indicates no match */
+#define MCAST_MAC          (62)        /* Multicast match. */
+#define BCAST_MATCH        (63)        /* Broadcast match. */
+
+#endif /* __descriptors_amap_h__ */
diff --git a/drivers/staging/benet/fw/doorbells.h b/drivers/staging/benet/fw/doorbells.h
new file mode 100644 (file)
index 0000000..550cc4d
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __doorbells_amap_h__
+#define __doorbells_amap_h__
+
+/* The TX/RDMA send queue doorbell. */
+struct BE_SQ_DB_AMAP {
+       u8 cid[11];             /* DWORD 0 */
+       u8 rsvd0[5];    /* DWORD 0 */
+       u8 numPosted[14];       /* DWORD 0 */
+       u8 rsvd1[2];    /* DWORD 0 */
+} __packed;
+struct SQ_DB_AMAP {
+       u32 dw[1];
+};
+
+/* The receive queue doorbell. */
+struct BE_RQ_DB_AMAP {
+       u8 rq[10];              /* DWORD 0 */
+       u8 rsvd0[13];   /* DWORD 0 */
+       u8 Invalidate;  /* DWORD 0 */
+       u8 numPosted[8];        /* DWORD 0 */
+} __packed;
+struct RQ_DB_AMAP {
+       u32 dw[1];
+};
+
+/*
+ * The CQ/EQ doorbell. Software MUST set reserved fields in this
+ * descriptor to zero, otherwise (CEV) hardware will not execute the
+ * doorbell (flagging a bad_db_qid error instead).
+ */
+struct BE_CQ_DB_AMAP {
+       u8 qid[10];             /* DWORD 0 */
+       u8 rsvd0[4];    /* DWORD 0 */
+       u8 rearm;               /* DWORD 0 */
+       u8 event;               /* DWORD 0 */
+       u8 num_popped[13];      /* DWORD 0 */
+       u8 rsvd1[3];    /* DWORD 0 */
+} __packed;
+struct CQ_DB_AMAP {
+       u32 dw[1];
+};
+
+struct BE_TPM_RQ_DB_AMAP {
+       u8 qid[10];             /* DWORD 0 */
+       u8 rsvd0[6];    /* DWORD 0 */
+       u8 numPosted[11];       /* DWORD 0 */
+       u8 mss_cnt[5];  /* DWORD 0 */
+} __packed;
+struct TPM_RQ_DB_AMAP {
+       u32 dw[1];
+};
+
+/*
+ * Post WRB Queue Doorbell Register used by the host Storage stack
+ * to notify the controller of a posted Work Request Block
+ */
+struct BE_WRB_POST_DB_AMAP {
+       u8 wrb_cid[10]; /* DWORD 0 */
+       u8 rsvd0[6];    /* DWORD 0 */
+       u8 wrb_index[8];        /* DWORD 0 */
+       u8 numberPosted[8];     /* DWORD 0 */
+} __packed;
+struct WRB_POST_DB_AMAP {
+       u32 dw[1];
+};
+
+/*
+ * Update Default PDU Queue Doorbell Register used to communicate
+ * to the controller that the driver has stopped processing the queue
+ * and where in the queue it stopped, this is
+ * a CQ Entry Type. Used by storage driver.
+ */
+struct BE_DEFAULT_PDU_DB_AMAP {
+       u8 qid[10];             /* DWORD 0 */
+       u8 rsvd0[4];    /* DWORD 0 */
+       u8 rearm;               /* DWORD 0 */
+       u8 event;               /* DWORD 0 */
+       u8 cqproc[14];  /* DWORD 0 */
+       u8 rsvd1[2];    /* DWORD 0 */
+} __packed;
+struct DEFAULT_PDU_DB_AMAP {
+       u32 dw[1];
+};
+
+/* Management Command and Controller default fragment ring */
+struct BE_MCC_DB_AMAP {
+       u8 rid[11];             /* DWORD 0 */
+       u8 rsvd0[5];    /* DWORD 0 */
+       u8 numPosted[14];       /* DWORD 0 */
+       u8 rsvd1[2];    /* DWORD 0 */
+} __packed;
+struct MCC_DB_AMAP {
+       u32 dw[1];
+};
+
+/*
+ * Used for bootstrapping the Host interface. This register is
+ * used for driver communication with the MPU when no MCC Rings exist.
+ * The software must write this register twice to post any MCC
+ * command. First, it writes the register with hi=1 and the upper bits of
+ * the physical address for the MCC_MAILBOX structure. Software must poll
+ * the ready bit until this is acknowledged. Then, software writes the
+ * register with hi=0 and the lower bits of the address. It must
+ * poll the ready bit until the MCC command is complete. Upon completion,
+ * the MCC_MAILBOX will contain a valid completion queue entry.
+ */
+struct BE_MPU_MAILBOX_DB_AMAP {
+       u8 ready;               /* DWORD 0 */
+       u8 hi;          /* DWORD 0 */
+       u8 address[30]; /* DWORD 0 */
+} __packed;
+struct MPU_MAILBOX_DB_AMAP {
+       u32 dw[1];
+};
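+
+/*
+ * An illustrative sketch of the two-phase post described above; the
+ * helper names here are hypothetical, not part of this driver:
+ *
+ *     // First write: hi = 1 plus the upper bits of the mailbox address.
+ *     iowrite32(make_mbox_db(1, upper_32_bits(mbox_pa)), db);
+ *     poll_mbox_ready(db);            // wait for the ready bit
+ *     // Second write: hi = 0 plus the lower bits of the address.
+ *     iowrite32(make_mbox_db(0, lower_32_bits(mbox_pa)), db);
+ *     poll_mbox_ready(db);            // completion is now in the mailbox
+ */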
+
+/*
+ *  This is the protection domain doorbell register map. Note that
+ *  while this map shows doorbells for all Blade Engine supported
+ *  protocols, not all of these may be valid in a given function or
+ *  protection domain. It is the responsibility of the application
+ *  accessing the doorbells to know which are valid. Each doorbell
+ *  occupies 32 bytes of space, but unless otherwise specified,
+ *  only the first 4 bytes should be written.  There are 32 instances
+ *  of these doorbells: one for the host and one for each of 31
+ *  virtual machines.  The host and each VM map only the doorbell
+ *  pages belonging to their own protection domain and cannot touch
+ *  the doorbells of another VM.  The doorbells are the only registers
+ *  directly accessible by a virtual machine. Similarly, there are 511
+ *  additional
+ *  doorbells for RDMA protection domains. PD 0 for RDMA shares
+ *  the same physical protection domain doorbell page as ETH/iSCSI.
+ *
+ */
+struct BE_PROTECTION_DOMAIN_DBMAP_AMAP {
+       u8 rsvd0[512];  /* DWORD 0 */
+       struct BE_SQ_DB_AMAP rdma_sq_db;
+       u8 rsvd1[7][32];        /* DWORD 17 */
+       struct BE_WRB_POST_DB_AMAP iscsi_wrb_post_db;
+       u8 rsvd2[7][32];        /* DWORD 25 */
+       struct BE_SQ_DB_AMAP etx_sq_db;
+       u8 rsvd3[7][32];        /* DWORD 33 */
+       struct BE_RQ_DB_AMAP rdma_rq_db;
+       u8 rsvd4[7][32];        /* DWORD 41 */
+       struct BE_DEFAULT_PDU_DB_AMAP iscsi_default_pdu_db;
+       u8 rsvd5[7][32];        /* DWORD 49 */
+       struct BE_TPM_RQ_DB_AMAP tpm_rq_db;
+       u8 rsvd6[7][32];        /* DWORD 57 */
+       struct BE_RQ_DB_AMAP erx_rq_db;
+       u8 rsvd7[7][32];        /* DWORD 65 */
+       struct BE_CQ_DB_AMAP cq_db;
+       u8 rsvd8[7][32];        /* DWORD 73 */
+       struct BE_MCC_DB_AMAP mpu_mcc_db;
+       u8 rsvd9[7][32];        /* DWORD 81 */
+       struct BE_MPU_MAILBOX_DB_AMAP mcc_bootstrap_db;
+       u8 rsvd10[935][32];     /* DWORD 89 */
+} __packed;
+struct PROTECTION_DOMAIN_DBMAP_AMAP {
+       u32 dw[1024];
+};
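+
+/*
+ * Byte-offset sketch of the map above (illustration only): each doorbell
+ * slot is 8 DWORDs (32 bytes) and the whole map is 1024 DWORDs, i.e. one
+ * 4KB page per protection domain. Assuming the per-PD doorbell pages are
+ * contiguous, a hypothetical helper for the CQ doorbell (DWORD 72) is:
+ *
+ *	static inline u32 pd_cq_db_offset(u32 pd_num)
+ *	{
+ *		return pd_num * 4096 + 72 * 4;	// byte 288 of the PD page
+ *	}
+ */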
+
+#endif /* __doorbells_amap_h__ */
diff --git a/drivers/staging/benet/fw/ep.h b/drivers/staging/benet/fw/ep.h
new file mode 100644 (file)
index 0000000..72fcf64
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __ep_amap_h__
+#define __ep_amap_h__
+
+/* General Control and Status Register. */
+struct BE_EP_CONTROL_CSR_AMAP {
+       u8 m0_RxPbuf;   /* DWORD 0 */
+       u8 m1_RxPbuf;   /* DWORD 0 */
+       u8 m2_RxPbuf;   /* DWORD 0 */
+       u8 ff_en;               /* DWORD 0 */
+       u8 rsvd0[27];   /* DWORD 0 */
+       u8 CPU_reset;   /* DWORD 0 */
+} __packed;
+struct EP_CONTROL_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Semaphore Register. */
+struct BE_EP_SEMAPHORE_CSR_AMAP {
+       u8 value[32];   /* DWORD 0 */
+} __packed;
+struct EP_SEMAPHORE_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Embedded Processor Specific Registers. */
+struct BE_EP_CSRMAP_AMAP {
+       struct BE_EP_CONTROL_CSR_AMAP ep_control;
+       u8 rsvd0[32];   /* DWORD 1 */
+       u8 rsvd1[32];   /* DWORD 2 */
+       u8 rsvd2[32];   /* DWORD 3 */
+       u8 rsvd3[32];   /* DWORD 4 */
+       u8 rsvd4[32];   /* DWORD 5 */
+       u8 rsvd5[8][128];       /* DWORD 6 */
+       u8 rsvd6[32];   /* DWORD 38 */
+       u8 rsvd7[32];   /* DWORD 39 */
+       u8 rsvd8[32];   /* DWORD 40 */
+       u8 rsvd9[32];   /* DWORD 41 */
+       u8 rsvd10[32];  /* DWORD 42 */
+       struct BE_EP_SEMAPHORE_CSR_AMAP ep_semaphore;
+       u8 rsvd11[32];  /* DWORD 44 */
+       u8 rsvd12[19][32];      /* DWORD 45 */
+} __packed;
+struct EP_CSRMAP_AMAP {
+       u32 dw[64];
+};
+
+#endif /* __ep_amap_h__ */
diff --git a/drivers/staging/benet/fw/etx_context.h b/drivers/staging/benet/fw/etx_context.h
new file mode 100644 (file)
index 0000000..554fbe5
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __etx_context_amap_h__
+#define __etx_context_amap_h__
+
+/* ETX ring  context structure. */
+struct BE_ETX_CONTEXT_AMAP {
+       u8 tx_cidx[11]; /* DWORD 0 */
+       u8 rsvd0[5];    /* DWORD 0 */
+       u8 rsvd1[16];   /* DWORD 0 */
+       u8 tx_pidx[11]; /* DWORD 1 */
+       u8 rsvd2;               /* DWORD 1 */
+       u8 tx_ring_size[4];     /* DWORD 1 */
+       u8 pd_id[5];    /* DWORD 1 */
+       u8 pd_id_not_valid;     /* DWORD 1 */
+       u8 cq_id_send[10];      /* DWORD 1 */
+       u8 rsvd3[32];   /* DWORD 2 */
+       u8 rsvd4[32];   /* DWORD 3 */
+       u8 cur_bytes[32];       /* DWORD 4 */
+       u8 max_bytes[32];       /* DWORD 5 */
+       u8 time_stamp[32];      /* DWORD 6 */
+       u8 rsvd5[11];   /* DWORD 7 */
+       u8 func;                /* DWORD 7 */
+       u8 rsvd6[20];   /* DWORD 7 */
+       u8 cur_txd_count[32];   /* DWORD 8 */
+       u8 max_txd_count[32];   /* DWORD 9 */
+       u8 rsvd7[32];   /* DWORD 10 */
+       u8 rsvd8[32];   /* DWORD 11 */
+       u8 rsvd9[32];   /* DWORD 12 */
+       u8 rsvd10[32];  /* DWORD 13 */
+       u8 rsvd11[32];  /* DWORD 14 */
+       u8 rsvd12[32];  /* DWORD 15 */
+} __packed;
+struct ETX_CONTEXT_AMAP {
+       u32 dw[16];
+};
+
+#endif /* __etx_context_amap_h__ */
diff --git a/drivers/staging/benet/fw/fwcmd_common.h b/drivers/staging/benet/fw/fwcmd_common.h
new file mode 100644 (file)
index 0000000..406e0d6
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_common_amap_h__
+#define __fwcmd_common_amap_h__
+#include "host_struct.h"
+
+/* --- PHY_LINK_DUPLEX_ENUM --- */
+#define PHY_LINK_DUPLEX_NONE            (0)
+#define PHY_LINK_DUPLEX_HALF            (1)
+#define PHY_LINK_DUPLEX_FULL            (2)
+
+/* --- PHY_LINK_SPEED_ENUM --- */
+#define PHY_LINK_SPEED_ZERO             (0)    /* No link. */
+#define PHY_LINK_SPEED_10MBPS           (1)    /* 10 Mbps */
+#define PHY_LINK_SPEED_100MBPS          (2)    /* 100 Mbps */
+#define PHY_LINK_SPEED_1GBPS            (3)    /* 1 Gbps */
+#define PHY_LINK_SPEED_10GBPS           (4)    /* 10 Gbps */
+
+/* --- PHY_LINK_FAULT_ENUM --- */
+#define PHY_LINK_FAULT_NONE             (0)    /* No fault status
+                                                       available or detected */
+#define PHY_LINK_FAULT_LOCAL            (1)    /* Local fault detected */
+#define PHY_LINK_FAULT_REMOTE           (2)    /* Remote fault detected */
+
+/* --- BE_ULP_MASK --- */
+#define BE_ULP0_MASK                    (1)
+#define BE_ULP1_MASK                    (2)
+#define BE_ULP2_MASK                    (4)
+
+/* --- NTWK_ACTIVE_PORT --- */
+#define NTWK_PORT_A                     (0)    /* Port A is currently active */
+#define NTWK_PORT_B                     (1)    /* Port B is currently active */
+#define NTWK_NO_ACTIVE_PORT             (15)   /* Both ports have lost link */
+
+/* --- NTWK_LINK_TYPE --- */
+#define NTWK_LINK_TYPE_PHYSICAL         (0)    /* link up/down event
+                                                  applies to BladeEngine's
+                                                  Physical Ports
+                                                  */
+#define NTWK_LINK_TYPE_VIRTUAL          (1)    /* Virtual link up/down event
+                                                  reported by BladeExchange.
+                                                  This applies only when the
+                                                  VLD feature is enabled
+                                                  */
+
+/*
+ * --- FWCMD_MAC_TYPE_ENUM ---
+ * This enum defines the types of MAC addresses in the RXF MAC Address Table.
+ */
+#define MAC_ADDRESS_TYPE_STORAGE        (0)    /* Storage MAC Address */
+#define MAC_ADDRESS_TYPE_NETWORK        (1)    /* Network MAC Address */
+#define MAC_ADDRESS_TYPE_PD             (2)    /* Protection Domain MAC Addr */
+#define MAC_ADDRESS_TYPE_MANAGEMENT     (3)    /* Management MAC Address */
+
+
+/* --- FWCMD_RING_TYPE_ENUM --- */
+#define FWCMD_RING_TYPE_ETH_RX          (1)    /* Ring created with */
+                                       /* FWCMD_COMMON_ETH_RX_CREATE. */
+#define FWCMD_RING_TYPE_ETH_TX          (2)    /* Ring created with */
+                                       /* FWCMD_COMMON_ETH_TX_CREATE. */
+#define FWCMD_RING_TYPE_ISCSI_WRBQ      (3)    /* Ring created with */
+                                       /* FWCMD_COMMON_ISCSI_WRBQ_CREATE. */
+#define FWCMD_RING_TYPE_ISCSI_DEFQ      (4)    /* Ring created with */
+                                       /* FWCMD_COMMON_ISCSI_DEFQ_CREATE. */
+#define FWCMD_RING_TYPE_TPM_WRBQ        (5)    /* Ring created with */
+                                       /* FWCMD_COMMON_TPM_WRBQ_CREATE. */
+#define FWCMD_RING_TYPE_TPM_DEFQ        (6)    /* Ring created with */
+                                       /* FWCMD_COMMON_TPM_DEFQ_CREATE. */
+#define FWCMD_RING_TYPE_TPM_RQ          (7)    /* Ring created with */
+                                       /* FWCMD_COMMON_TPM_RQ_CREATE. */
+#define FWCMD_RING_TYPE_MCC             (8)    /* Ring created with */
+                                       /* FWCMD_COMMON_MCC_CREATE. */
+#define FWCMD_RING_TYPE_CQ              (9)    /* Ring created with */
+                                       /* FWCMD_COMMON_CQ_CREATE. */
+#define FWCMD_RING_TYPE_EQ              (10)   /* Ring created with */
+                                       /* FWCMD_COMMON_EQ_CREATE. */
+#define FWCMD_RING_TYPE_QP              (11)   /* Ring created with */
+                                       /* FWCMD_RDMA_QP_CREATE. */
+
+
+/* --- ETH_TX_RING_TYPE_ENUM --- */
+#define ETH_TX_RING_TYPE_FORWARDING     (1)    /* Ethernet ring for
+                                                  forwarding packets */
+#define ETH_TX_RING_TYPE_STANDARD       (2)    /* Ethernet ring for sending
+                                                  network packets. */
+#define ETH_TX_RING_TYPE_BOUND          (3)    /* Ethernet ring bound to the
+                                                  port specified in the command
+                                                  header.port_number field.
+                                                  Rings of this type are
+                                                  NOT subject to the
+                                                  failover logic implemented
+                                                  in the BladeEngine.
+                                                  */
+
+/* --- FWCMD_COMMON_QOS_TYPE_ENUM --- */
+#define QOS_BITS_NIC                    (1)    /* max_bits_per_second_NIC */
+                                                 /* field is valid.  */
+#define QOS_PKTS_NIC                    (2)    /* max_packets_per_second_NIC */
+                                                 /* field is valid.  */
+#define QOS_IOPS_ISCSI                  (4)    /* max_ios_per_second_iSCSI */
+                                                 /* field is valid.  */
+#define QOS_VLAN_TAG                    (8)    /* domain_VLAN_tag field
+                                                  is valid. */
+#define QOS_FABRIC_ID                   (16)   /* fabric_domain_ID field
+                                                  is valid. */
+#define QOS_OEM_PARAMS                  (32)   /* qos_params_oem field
+                                                  is valid. */
+#define QOS_TPUT_ISCSI                  (64)   /* max_bytes_per_second_iSCSI
+                                                  field is valid.  */
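+
+/*
+ * These values are bit flags: a SET_QOS request ORs together one flag per
+ * field it wants applied, e.g. (illustration only)
+ *
+ *	req.valid_flags = QOS_BITS_NIC | QOS_VLAN_TAG;
+ */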
+
+
+/*
+ * --- FAILOVER_CONFIG_ENUM ---
+ * Failover configuration setting used in FWCMD_COMMON_FORCE_FAILOVER
+ */
+#define FAILOVER_CONFIG_NO_CHANGE       (0)    /* No change to automatic */
+                                                 /* port failover setting. */
+#define FAILOVER_CONFIG_ON              (1)    /* Automatic port failover
+                                                  on link down  is enabled. */
+#define FAILOVER_CONFIG_OFF             (2)    /* Automatic port failover
+                                                  on link down is disabled. */
+
+/*
+ * --- FAILOVER_PORT_ENUM ---
+ * Failover port setting used in FWCMD_COMMON_FORCE_FAILOVER
+ */
+#define FAILOVER_PORT_A                 (0)    /* Selects port A. */
+#define FAILOVER_PORT_B                 (1)    /* Selects port B. */
+#define FAILOVER_PORT_NONE              (15)   /* No port change requested. */
+
+
+/*
+ * --- MGMT_FLASHROM_OPCODE ---
+ * Flash ROM operation code
+ */
+#define MGMT_FLASHROM_OPCODE_FLASH      (1)    /* Commit downloaded data
+                                                  to Flash ROM */
+#define MGMT_FLASHROM_OPCODE_SAVE       (2)    /* Save downloaded data to
+                                                  ARM's DDR - do not flash */
+#define MGMT_FLASHROM_OPCODE_CLEAR      (3)    /* Erase specified component
+                                                  from FlashROM */
+#define MGMT_FLASHROM_OPCODE_REPORT     (4)    /* Read specified component
+                                                  from Flash ROM */
+#define MGMT_FLASHROM_OPCODE_IMAGE_INFO (5)    /* Returns size of a
+                                                  component */
+
+/*
+ * --- MGMT_FLASHROM_OPTYPE ---
+ * Flash ROM operation type
+ */
+#define MGMT_FLASHROM_OPTYPE_CODE_FIRMWARE (0) /* Includes ARM firmware,
+                                                  IPSec (optional) and EP
+                                                  firmware  */
+#define MGMT_FLASHROM_OPTYPE_CODE_REDBOOT (1)
+#define MGMT_FLASHROM_OPTYPE_CODE_BIOS  (2)
+#define MGMT_FLASHROM_OPTYPE_CODE_PXE_BIOS (3)
+#define MGMT_FLASHROM_OPTYPE_CODE_CTRLS (4)
+#define MGMT_FLASHROM_OPTYPE_CFG_IPSEC  (5)
+#define MGMT_FLASHROM_OPTYPE_CFG_INI    (6)
+#define MGMT_FLASHROM_OPTYPE_ROM_OFFSET_SPECIFIED (7)
+
+/*
+ * --- FLASHROM_TYPE ---
+ * Flash ROM manufacturers supported in the f/w
+ */
+#define INTEL                           (0)
+#define SPANSION                        (1)
+#define MICRON                          (2)
+
+/* --- DDR_CAS_TYPE --- */
+#define CAS_3                           (0)
+#define CAS_4                           (1)
+#define CAS_5                           (2)
+
+/* --- DDR_SIZE_TYPE --- */
+#define SIZE_256MB                      (0)
+#define SIZE_512MB                      (1)
+
+/* --- DDR_MODE_TYPE --- */
+#define DDR_NO_ECC                      (0)
+#define DDR_ECC                         (1)
+
+/* --- INTERFACE_10GB_TYPE --- */
+#define CX4_TYPE                        (0)
+#define XFP_TYPE                        (1)
+
+/* --- BE_CHIP_MAX_MTU --- */
+#define CHIP_MAX_MTU                    (9000)
+
+/* --- XAUI_STATE_ENUM --- */
+#define XAUI_STATE_ENABLE               (0)    /* This MUST be the default
+                                                  value for all requests
+                                                  which set/change
+                                                  equalization parameters. */
+#define XAUI_STATE_DISABLE              (255)  /* The XAUI for both ports
+                                                  may be disabled for EMI
+                                                  tests. There is no
+                                                  provision for turning off
+                                                  individual ports.
+                                                  */
+/* --- BE_ASIC_REVISION --- */
+#define BE_ASIC_REV_A0                  (1)
+#define BE_ASIC_REV_A1                  (2)
+
+#endif /* __fwcmd_common_amap_h__ */
diff --git a/drivers/staging/benet/fw/fwcmd_common_bmap.h b/drivers/staging/benet/fw/fwcmd_common_bmap.h
new file mode 100644 (file)
index 0000000..a007cf2
--- /dev/null
@@ -0,0 +1,717 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_common_bmap_h__
+#define __fwcmd_common_bmap_h__
+#include "fwcmd_types_bmap.h"
+#include "fwcmd_hdr_bmap.h"
+
+#if defined(__BIG_ENDIAN)
+   /* Physical Address. */
+struct PHYS_ADDR {
+       union {
+               struct {
+                       u32 lo; /* DWORD 0 */
+                       u32 hi; /* DWORD 1 */
+               } __packed;     /* unnamed struct */
+               u32 dw[2];      /* dword union */
+       };                      /* unnamed union */
+} __packed;
+
+
+#else
+   /* Physical Address. */
+struct PHYS_ADDR {
+       union {
+               struct {
+                       u32 lo; /* DWORD 0 */
+                       u32 hi; /* DWORD 1 */
+               } __packed;     /* unnamed struct */
+               u32 dw[2];      /* dword union */
+       };                      /* unnamed union */
+} __packed;
+
+struct BE_LINK_STATUS {
+       u8 mac0_duplex;
+       u8 mac0_speed;
+       u8 mac1_duplex;
+       u8 mac1_speed;
+       u8 mgmt_mac_duplex;
+       u8 mgmt_mac_speed;
+       u8 active_port;
+       u8 rsvd0;
+       u8 mac0_fault;
+       u8 mac1_fault;
+       u16 rsvd1;
+} __packed;
+#endif
+
+struct FWCMD_COMMON_ANON_170_REQUEST {
+       u32 rsvd0;
+} __packed;
+
+union LINK_STATUS_QUERY_PARAMS {
+       struct BE_LINK_STATUS response;
+       struct FWCMD_COMMON_ANON_170_REQUEST request;
+} __packed;
+
+/*
+ *  Queries the link status for all ports. The valid values below
+ *  DO NOT indicate that a particular duplex or speed is supported by
+ *  BladeEngine. These enumerations simply list all possible duplexes
+ *  and speeds for any port. Consult the BladeEngine product
+ *  documentation for the supported parameters.
+ */
+struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY {
+       union FWCMD_HEADER header;
+       union LINK_STATUS_QUERY_PARAMS params;
+} __packed;
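+
+/*
+ * Illustrative sketch of the request/response convention used by every
+ * FWCMD in this file (hypothetical code, not driver code): the params
+ * union is filled in as a request, and once the FWCMD completes the same
+ * storage is read back as the response.
+ *
+ *	struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY cmd = {0};
+ *
+ *	// fill in cmd.header.request (opcode, subsystem, lengths),
+ *	// post the FWCMD via the mailbox or an MCC ring, and wait
+ *	// for its completion ...
+ *	if (cmd.header.response.status == 0)	// 0 assumed to mean success
+ *		u8 speed = cmd.params.response.mac0_speed;
+ *						// e.g. PHY_LINK_SPEED_10GBPS
+ */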
+
+struct FWCMD_COMMON_ANON_171_REQUEST {
+       u8 type;
+       u8 port;
+       u8 mac1;
+       u8 permanent;
+} __packed;
+
+struct FWCMD_COMMON_ANON_172_RESPONSE {
+       struct MAC_ADDRESS_FORMAT mac;
+} __packed;
+
+union NTWK_MAC_QUERY_PARAMS {
+       struct FWCMD_COMMON_ANON_171_REQUEST request;
+       struct FWCMD_COMMON_ANON_172_RESPONSE response;
+} __packed;
+
+/* Queries one MAC address.  */
+struct FWCMD_COMMON_NTWK_MAC_QUERY {
+       union FWCMD_HEADER header;
+       union NTWK_MAC_QUERY_PARAMS params;
+} __packed;
+
+struct MAC_SET_PARAMS_IN {
+       u8 type;
+       u8 port;
+       u8 mac1;
+       u8 invalidate;
+       struct MAC_ADDRESS_FORMAT mac;
+} __packed;
+
+struct MAC_SET_PARAMS_OUT {
+       u32 rsvd0;
+} __packed;
+
+union MAC_SET_PARAMS {
+       struct MAC_SET_PARAMS_IN request;
+       struct MAC_SET_PARAMS_OUT response;
+} __packed;
+
+/* Sets a MAC address.  */
+struct FWCMD_COMMON_NTWK_MAC_SET {
+       union FWCMD_HEADER header;
+       union MAC_SET_PARAMS params;
+} __packed;
+
+/* MAC address list. */
+struct NTWK_MULTICAST_MAC_LIST {
+       u8 byte[6];
+} __packed;
+
+struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD {
+       u16 num_mac;
+       u8 promiscuous;
+       u8 rsvd0;
+       struct NTWK_MULTICAST_MAC_LIST mac[32];
+} __packed;
+
+struct FWCMD_COMMON_ANON_174_RESPONSE {
+       u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_173_PARAMS {
+       struct FWCMD_COMMON_NTWK_MULTICAST_SET_REQUEST_PAYLOAD request;
+       struct FWCMD_COMMON_ANON_174_RESPONSE response;
+} __packed;
+
+/*
+ *  Sets the multicast address hash. The MPU will merge the MAC address
+ *  lists from all clients, including the networking and storage functions.
+ *  This command may fail if the final merged list of MAC addresses
+ *  exceeds 32 entries.
+ */
+struct FWCMD_COMMON_NTWK_MULTICAST_SET {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_173_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD {
+       u16 num_vlan;
+       u8 promiscuous;
+       u8 rsvd0;
+       u16 vlan_tag[32];
+} __packed;
+
+struct FWCMD_COMMON_ANON_176_RESPONSE {
+       u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_175_PARAMS {
+       struct FWCMD_COMMON_NTWK_VLAN_CONFIG_REQUEST_PAYLOAD request;
+       struct FWCMD_COMMON_ANON_176_RESPONSE response;
+} __packed;
+
+/*
+ *  Sets the VLAN tag filter. The MPU will merge the VLAN tag list from
+ *  all clients, including the networking and storage functions. This
+ *  command may fail if the final vlan_tag array (from all functions)
+ *  is longer than 32 entries.
+ */
+struct FWCMD_COMMON_NTWK_VLAN_CONFIG {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_175_PARAMS params;
+} __packed;
+
+struct RING_DESTROY_REQUEST {
+       u16 ring_type;
+       u16 id;
+       u8 bypass_flush;
+       u8 rsvd0;
+       u16 rsvd1;
+} __packed;
+
+struct FWCMD_COMMON_ANON_190_RESPONSE {
+       u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_189_PARAMS {
+       struct RING_DESTROY_REQUEST request;
+       struct FWCMD_COMMON_ANON_190_RESPONSE response;
+} __packed;
+/*
+ *  Command for destroying any ring. The connection(s) using the ring
+ *  should be quiesced before destroying the ring.
+ */
+struct FWCMD_COMMON_RING_DESTROY {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_189_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_192_REQUEST {
+       u16 num_pages;
+       u16 rsvd0;
+       struct CQ_CONTEXT_AMAP context;
+       struct PHYS_ADDR pages[4];
+} __packed;
+
+struct FWCMD_COMMON_ANON_193_RESPONSE {
+       u16 cq_id;
+} __packed;
+
+union FWCMD_COMMON_ANON_191_PARAMS {
+       struct FWCMD_COMMON_ANON_192_REQUEST request;
+       struct FWCMD_COMMON_ANON_193_RESPONSE response;
+} __packed;
+
+/*
+ *  Command for creating a completion queue. A Completion Queue must span
+ *  at least 1 page and at most 4 pages. Each completion queue entry
+ *  is 16 bytes regardless of CQ entry format. Thus the ring must be
+ *  at least 256 entries deep (corresponding to 1 page) and can be at
+ *  most 1024 entries deep (corresponding to 4 pages). The number of
+ *  pages posted must contain the CQ ring size as encoded in the context.
+ */
+struct FWCMD_COMMON_CQ_CREATE {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_191_PARAMS params;
+} __packed;
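+
+/*
+ * Sizing sketch (illustration only, assuming the 4KB pages implied
+ * above): each CQ entry is 16 bytes, so one page holds 256 entries and
+ * the page count for a given ring depth is simply
+ *
+ *	num_pages = (num_entries * 16 + 4095) / 4096;	// 1..4
+ */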
+
+struct FWCMD_COMMON_ANON_198_REQUEST {
+       u16 num_pages;
+       u16 rsvd0;
+       struct EQ_CONTEXT_AMAP context;
+       struct PHYS_ADDR pages[8];
+} __packed;
+
+struct FWCMD_COMMON_ANON_199_RESPONSE {
+       u16 eq_id;
+} __packed;
+
+union FWCMD_COMMON_ANON_197_PARAMS {
+       struct FWCMD_COMMON_ANON_198_REQUEST request;
+       struct FWCMD_COMMON_ANON_199_RESPONSE response;
+} __packed;
+
+/*
+ *  Command for creating an event queue. An Event Queue must span at least
+ *  1 page and at most 8 pages. The number of pages posted must contain
+ *  the EQ ring. The ring is defined by the size of the EQ entries
+ *  (encoded in the context) and the number of EQ entries (also encoded
+ *  in the context).
+ */
+struct FWCMD_COMMON_EQ_CREATE {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_197_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_201_REQUEST {
+       u16 cq_id;
+       u16 bcmc_cq_id;
+       u16 num_pages;
+       u16 rsvd0;
+       struct PHYS_ADDR pages[2];
+} __packed;
+
+struct FWCMD_COMMON_ANON_202_RESPONSE {
+       u16 id;
+} __packed;
+
+union FWCMD_COMMON_ANON_200_PARAMS {
+       struct FWCMD_COMMON_ANON_201_REQUEST request;
+       struct FWCMD_COMMON_ANON_202_RESPONSE response;
+} __packed;
+
+/*
+ *  Command for creating an Ethernet receive ring. An ERX ring contains
+ *  ETH_RX_D entries (8 bytes each). An ERX ring must be 1024 entries
+ *  deep (corresponding to 2 pages).
+ */
+struct FWCMD_COMMON_ETH_RX_CREATE {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_200_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_204_REQUEST {
+       u16 num_pages;
+       u8 ulp_num;
+       u8 type;
+       struct ETX_CONTEXT_AMAP context;
+       struct PHYS_ADDR pages[8];
+} __packed;
+
+struct FWCMD_COMMON_ANON_205_RESPONSE {
+       u16 cid;
+       u8 ulp_num;
+       u8 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_203_PARAMS {
+       struct FWCMD_COMMON_ANON_204_REQUEST request;
+       struct FWCMD_COMMON_ANON_205_RESPONSE response;
+} __packed;
+
+/*
+ *  Command for creating an Ethernet transmit ring. An ETX ring contains
+ *  ETH_WRB entries (16 bytes each). An ETX ring must be at least 256
+ *  entries deep (corresponding to 1 page) and at most 2k entries deep
+ *  (corresponding to 8 pages).
+ */
+struct FWCMD_COMMON_ETH_TX_CREATE {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_203_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_222_REQUEST {
+       u16 num_pages;
+       u16 rsvd0;
+       struct MCC_RING_CONTEXT_AMAP context;
+       struct PHYS_ADDR pages[8];
+} __packed;
+
+struct FWCMD_COMMON_ANON_223_RESPONSE {
+       u16 id;
+} __packed;
+
+union FWCMD_COMMON_ANON_221_PARAMS {
+       struct FWCMD_COMMON_ANON_222_REQUEST request;
+       struct FWCMD_COMMON_ANON_223_RESPONSE response;
+} __packed;
+
+/*
+ *  Command for creating the MCC ring. An MCC ring must be at least 16
+ *  entries deep (corresponding to 1 page) and at most 128 entries deep
+ *  (corresponding to 8 pages).
+ */
+struct FWCMD_COMMON_MCC_CREATE {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_221_PARAMS params;
+} __packed;
+
+struct GET_QOS_IN {
+       u32 qos_params_rsvd;
+} __packed;
+
+struct GET_QOS_OUT {
+       u32 max_bits_per_second_NIC;
+       u32 max_packets_per_second_NIC;
+       u32 max_ios_per_second_iSCSI;
+       u32 max_bytes_per_second_iSCSI;
+       u16 domain_VLAN_tag;
+       u16 fabric_domain_ID;
+       u32 qos_params_oem[4];
+} __packed;
+
+union GET_QOS_PARAMS {
+       struct GET_QOS_IN request;
+       struct GET_QOS_OUT response;
+} __packed;
+
+/* QOS/Bandwidth settings per domain. Applicable only in VMs.  */
+struct FWCMD_COMMON_GET_QOS {
+       union FWCMD_HEADER header;
+       union GET_QOS_PARAMS params;
+} __packed;
+
+struct SET_QOS_IN {
+       u32 valid_flags;
+       u32 max_bits_per_second_NIC;
+       u32 max_packets_per_second_NIC;
+       u32 max_ios_per_second_iSCSI;
+       u32 max_bytes_per_second_iSCSI;
+       u16 domain_VLAN_tag;
+       u16 fabric_domain_ID;
+       u32 qos_params_oem[4];
+} __packed;
+
+struct SET_QOS_OUT {
+       u32 qos_params_rsvd;
+} __packed;
+
+union SET_QOS_PARAMS {
+       struct SET_QOS_IN request;
+       struct SET_QOS_OUT response;
+} __packed;
+
+/* QOS/Bandwidth settings per domain. Applicable only in VMs.  */
+struct FWCMD_COMMON_SET_QOS {
+       union FWCMD_HEADER header;
+       union SET_QOS_PARAMS params;
+} __packed;
+
+struct SET_FRAME_SIZE_IN {
+       u32 max_tx_frame_size;
+       u32 max_rx_frame_size;
+} __packed;
+
+struct SET_FRAME_SIZE_OUT {
+       u32 chip_max_tx_frame_size;
+       u32 chip_max_rx_frame_size;
+} __packed;
+
+union SET_FRAME_SIZE_PARAMS {
+       struct SET_FRAME_SIZE_IN request;
+       struct SET_FRAME_SIZE_OUT response;
+} __packed;
+
+/* Set frame size command. Only the host domain may issue this command.  */
+struct FWCMD_COMMON_SET_FRAME_SIZE {
+       union FWCMD_HEADER header;
+       union SET_FRAME_SIZE_PARAMS params;
+} __packed;
+
+struct FORCE_FAILOVER_IN {
+       u32 move_to_port;
+       u32 failover_config;
+} __packed;
+
+struct FWCMD_COMMON_ANON_231_RESPONSE {
+       u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_230_PARAMS {
+       struct FORCE_FAILOVER_IN request;
+       struct FWCMD_COMMON_ANON_231_RESPONSE response;
+} __packed;
+
+/*
+ *  Use this command to control failover in BladeEngine. It may be used
+ *  to failback to a restored port or to forcibly move traffic from
+ *  one port to another. It may also be used to enable or disable the
+ *  automatic failover feature. This command can only be issued by
+ *  domain 0.
+ */
+struct FWCMD_COMMON_FORCE_FAILOVER {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_230_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_240_REQUEST {
+       u64 context;
+} __packed;
+
+struct FWCMD_COMMON_ANON_241_RESPONSE {
+       u64 context;
+} __packed;
+
+union FWCMD_COMMON_ANON_239_PARAMS {
+       struct FWCMD_COMMON_ANON_240_REQUEST request;
+       struct FWCMD_COMMON_ANON_241_RESPONSE response;
+} __packed;
+
+/*
+ *  This command can be used by clients as a no-operation request. Typical
+ *  uses for drivers are as a heartbeat mechanism or as a deferred
+ *  processing catalyst. The ARM will always complete this command with
+ *  a good completion. The 64-bit parameter is not touched by the ARM
+ *  processor.
+ */
+struct FWCMD_COMMON_NOP {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_239_PARAMS params;
+} __packed;
+
+struct NTWK_RX_FILTER_SETTINGS {
+       u8 promiscuous;
+       u8 ip_cksum;
+       u8 tcp_cksum;
+       u8 udp_cksum;
+       u8 pass_err;
+       u8 pass_ckerr;
+       u8 strip_crc;
+       u8 mcast_en;
+       u8 bcast_en;
+       u8 mcast_promiscuous_en;
+       u8 unicast_en;
+       u8 vlan_promiscuous;
+} __packed;
+
+union FWCMD_COMMON_ANON_242_PARAMS {
+       struct NTWK_RX_FILTER_SETTINGS request;
+       struct NTWK_RX_FILTER_SETTINGS response;
+} __packed;
+
+/*
+ *  This command is used to modify the ethernet receive filter configuration.
+ *  Only domain 0 network function drivers may issue this command. The
+ *  applied configuration is returned in the response payload. Note:
+ *  Some receive packet filter settings are global on BladeEngine and
+ *  can affect both the storage and network function clients that the
+ *  BladeEngine hardware and firmware serve. Additionally, depending
+ *  on the revision of BladeEngine, some ethernet receive filter settings
+ *  are dependent on others. If a dependency exists between settings
+ *  for the BladeEngine revision, and the command request settings do
+ *  not meet the dependency requirement, the invalid settings will not
+ *  be applied despite the command succeeding. For example: a driver may
+ *  request to enable broadcast packets, but not enable multicast packets.
+ *  On early revisions of BladeEngine, there may be no distinction between
+ *  broadcast and multicast filters, so broadcast could not be enabled
+ *  without enabling multicast. In this scenario, the command would still
+ *  succeed, but the response payload would indicate the previously
+ *  configured broadcast and multicast setting.
+ */
+struct FWCMD_COMMON_NTWK_RX_FILTER {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_242_PARAMS params;
+} __packed;
+
+
+struct FWCMD_COMMON_ANON_244_REQUEST {
+       u32 rsvd0;
+} __packed;
+
+struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD {
+       u8 firmware_version_string[32];
+       u8 fw_on_flash_version_string[32];
+} __packed;
+
+union FWCMD_COMMON_ANON_243_PARAMS {
+       struct FWCMD_COMMON_ANON_244_REQUEST request;
+       struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD response;
+} __packed;
+
+/* This command retrieves the firmware version.  */
+struct FWCMD_COMMON_GET_FW_VERSION {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_243_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_246_REQUEST {
+       u16 tx_flow_control;
+       u16 rx_flow_control;
+} __packed;
+
+struct FWCMD_COMMON_ANON_247_RESPONSE {
+       u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_245_PARAMS {
+       struct FWCMD_COMMON_ANON_246_REQUEST request;
+       struct FWCMD_COMMON_ANON_247_RESPONSE response;
+} __packed;
+
+/*
+ *  This command is used to program BladeEngine flow control behavior.
+ *  Only the host networking driver is allowed to use this command.
+ */
+struct FWCMD_COMMON_SET_FLOW_CONTROL {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_245_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_249_REQUEST {
+       u32 rsvd0;
+} __packed;
+
+struct FWCMD_COMMON_ANON_250_RESPONSE {
+       u16 tx_flow_control;
+       u16 rx_flow_control;
+} __packed;
+
+union FWCMD_COMMON_ANON_248_PARAMS {
+       struct FWCMD_COMMON_ANON_249_REQUEST request;
+       struct FWCMD_COMMON_ANON_250_RESPONSE response;
+} __packed;
+
+/* This command is used to read BladeEngine flow control settings.  */
+struct FWCMD_COMMON_GET_FLOW_CONTROL {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_248_PARAMS params;
+} __packed;
+
+struct EQ_DELAY_PARAMS {
+       u32 eq_id;
+       u32 delay_in_microseconds;
+} __packed;
+
+struct FWCMD_COMMON_ANON_257_REQUEST {
+       u32 num_eq;
+       u32 rsvd0;
+       struct EQ_DELAY_PARAMS delay[16];
+} __packed;
+
+struct FWCMD_COMMON_ANON_258_RESPONSE {
+       u32 delay_resolution_in_microseconds;
+       u32 delay_max_in_microseconds;
+} __packed;
+
+union MODIFY_EQ_DELAY_PARAMS {
+       struct FWCMD_COMMON_ANON_257_REQUEST request;
+       struct FWCMD_COMMON_ANON_258_RESPONSE response;
+} __packed;
+
+/* This command changes the EQ delay for a given set of EQs.  */
+struct FWCMD_COMMON_MODIFY_EQ_DELAY {
+       union FWCMD_HEADER header;
+       union MODIFY_EQ_DELAY_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_260_REQUEST {
+       u32 rsvd0;
+} __packed;
+
+struct BE_FIRMWARE_CONFIG {
+       u16 be_config_number;
+       u16 asic_revision;
+       u32 nic_ulp_mask;
+       u32 tulp_mask;
+       u32 iscsi_ulp_mask;
+       u32 rdma_ulp_mask;
+       u32 rsvd0[4];
+       u32 eth_tx_id_start;
+       u32 eth_tx_id_count;
+       u32 eth_rx_id_start;
+       u32 eth_rx_id_count;
+       u32 tpm_wrbq_id_start;
+       u32 tpm_wrbq_id_count;
+       u32 tpm_defq_id_start;
+       u32 tpm_defq_id_count;
+       u32 iscsi_wrbq_id_start;
+       u32 iscsi_wrbq_id_count;
+       u32 iscsi_defq_id_start;
+       u32 iscsi_defq_id_count;
+       u32 rdma_qp_id_start;
+       u32 rdma_qp_id_count;
+       u32 rsvd1[8];
+} __packed;
+
+union FWCMD_COMMON_ANON_259_PARAMS {
+       struct FWCMD_COMMON_ANON_260_REQUEST request;
+       struct BE_FIRMWARE_CONFIG response;
+} __packed;
+
+/*
+ *  This command queries the current firmware configuration parameters.
+ *  The static configuration type is defined by be_config_number. This
+ *  differentiates different BladeEngine builds, such as iSCSI Initiator
+ *  versus iSCSI Target. For a given static configuration, the Upper
+ *  Layer Protocol (ULP) processors may be reconfigured to support
+ *  different protocols. Each ULP processor supports one or more
+ *  protocols. The masks indicate which processors are configured for
+ *  each protocol. For a given static configuration, the number of TCP
+ *  connections supported for each protocol may vary. The *_id_start and
+ *  *_id_count variables define a linear range of IDs that are available
+ *  for each supported protocol. The *_id_count may be used by the driver
+ *  to allocate the appropriate number of connection resources. The
+ *  *_id_start may be used to map the arbitrary range of IDs to a
+ *  zero-based range of indices.
+ */
+struct FWCMD_COMMON_FIRMWARE_CONFIG {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_259_PARAMS params;
+} __packed;
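+
+/*
+ * For example (illustration only), a driver can map the arbitrary ID
+ * range advertised here onto a zero-based table index:
+ *
+ *	idx = cid - fw_cfg.eth_tx_id_start;	// 0 <= idx < eth_tx_id_count
+ */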
+
+struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS {
+       u32 emph_lev_sel_port0;
+       u32 emph_lev_sel_port1;
+       u8 xaui_vo_sel;
+       u8 xaui_state;
+       u16 rsvd0;
+       u32 xaui_eq_vector;
+} __packed;
+
+struct FWCMD_COMMON_ANON_262_REQUEST {
+       u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_261_PARAMS {
+       struct FWCMD_COMMON_ANON_262_REQUEST request;
+       struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS response;
+} __packed;
+
+/*
+ *  This command can be used to read XAUI equalization parameters. The
+ *  ARM firmware applies default equalization parameters during
+ *  initialization. These parameters may be customer-specific when
+ *  derived from the SEEPROM. See SEEPROM_DATA for equalization
+ *  specific fields.
+ */
+struct FWCMD_COMMON_GET_PORT_EQUALIZATION {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_261_PARAMS params;
+} __packed;
+
+struct FWCMD_COMMON_ANON_264_RESPONSE {
+       u32 rsvd0;
+} __packed;
+
+union FWCMD_COMMON_ANON_263_PARAMS {
+       struct FWCMD_COMMON_PORT_EQUALIZATION_PARAMS request;
+       struct FWCMD_COMMON_ANON_264_RESPONSE response;
+} __packed;
+
+/*
+ *  This command can be used to set XAUI equalization parameters. The
+ *  ARM firmware applies default equalization parameters during
+ *  initialization. These parameters may be customer-specific when
+ *  derived from the SEEPROM. See SEEPROM_DATA for equalization
+ *  specific fields.
+ */
+struct FWCMD_COMMON_SET_PORT_EQUALIZATION {
+       union FWCMD_HEADER header;
+       union FWCMD_COMMON_ANON_263_PARAMS params;
+} __packed;
+
+#endif /* __fwcmd_common_bmap_h__ */
diff --git a/drivers/staging/benet/fw/fwcmd_eth_bmap.h b/drivers/staging/benet/fw/fwcmd_eth_bmap.h
new file mode 100644 (file)
index 0000000..234b179
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_eth_bmap_h__
+#define __fwcmd_eth_bmap_h__
+#include "fwcmd_hdr_bmap.h"
+#include "fwcmd_types_bmap.h"
+
+struct MIB_ETH_STATISTICS_PARAMS_IN {
+       u32 rsvd0;
+} __packed;
+
+struct BE_RXF_STATS {
+       u32 p0recvdtotalbytesLSD;       /* DWORD 0 */
+       u32 p0recvdtotalbytesMSD;       /* DWORD 1 */
+       u32 p0recvdtotalframes; /* DWORD 2 */
+       u32 p0recvdunicastframes;       /* DWORD 3 */
+       u32 p0recvdmulticastframes;     /* DWORD 4 */
+       u32 p0recvdbroadcastframes;     /* DWORD 5 */
+       u32 p0crcerrors;        /* DWORD 6 */
+       u32 p0alignmentsymerrs; /* DWORD 7 */
+       u32 p0pauseframesrecvd; /* DWORD 8 */
+       u32 p0controlframesrecvd;       /* DWORD 9 */
+       u32 p0inrangelenerrors; /* DWORD 10 */
+       u32 p0outrangeerrors;   /* DWORD 11 */
+       u32 p0frametoolongerrors;       /* DWORD 12 */
+       u32 p0droppedaddressmatch;      /* DWORD 13 */
+       u32 p0droppedvlanmismatch;      /* DWORD 14 */
+       u32 p0ipdroppedtoosmall;        /* DWORD 15 */
+       u32 p0ipdroppedtooshort;        /* DWORD 16 */
+       u32 p0ipdroppedhdrtoosmall;     /* DWORD 17 */
+       u32 p0tcpdroppedlen;    /* DWORD 18 */
+       u32 p0droppedrunt;      /* DWORD 19 */
+       u32 p0recvd64;          /* DWORD 20 */
+       u32 p0recvd65_127;      /* DWORD 21 */
+       u32 p0recvd128_256;     /* DWORD 22 */
+       u32 p0recvd256_511;     /* DWORD 23 */
+       u32 p0recvd512_1023;    /* DWORD 24 */
+       u32 p0recvd1518_1522;   /* DWORD 25 */
+       u32 p0recvd1522_2047;   /* DWORD 26 */
+       u32 p0recvd2048_4095;   /* DWORD 27 */
+       u32 p0recvd4096_8191;   /* DWORD 28 */
+       u32 p0recvd8192_9216;   /* DWORD 29 */
+       u32 p0rcvdipcksmerrs;   /* DWORD 30 */
+       u32 p0recvdtcpcksmerrs; /* DWORD 31 */
+       u32 p0recvdudpcksmerrs; /* DWORD 32 */
+       u32 p0recvdnonrsspackets;       /* DWORD 33 */
+       u32 p0recvdippackets;   /* DWORD 34 */
+       u32 p0recvdchute1packets;       /* DWORD 35 */
+       u32 p0recvdchute2packets;       /* DWORD 36 */
+       u32 p0recvdchute3packets;       /* DWORD 37 */
+       u32 p0recvdipsecpackets;        /* DWORD 38 */
+       u32 p0recvdmanagementpackets;   /* DWORD 39 */
+       u32 p0xmitbyteslsd;     /* DWORD 40 */
+       u32 p0xmitbytesmsd;     /* DWORD 41 */
+       u32 p0xmitunicastframes;        /* DWORD 42 */
+       u32 p0xmitmulticastframes;      /* DWORD 43 */
+       u32 p0xmitbroadcastframes;      /* DWORD 44 */
+       u32 p0xmitpauseframes;  /* DWORD 45 */
+       u32 p0xmitcontrolframes;        /* DWORD 46 */
+       u32 p0xmit64;           /* DWORD 47 */
+       u32 p0xmit65_127;       /* DWORD 48 */
+       u32 p0xmit128_256;      /* DWORD 49 */
+       u32 p0xmit256_511;      /* DWORD 50 */
+       u32 p0xmit512_1023;     /* DWORD 51 */
+       u32 p0xmit1518_1522;    /* DWORD 52 */
+       u32 p0xmit1522_2047;    /* DWORD 53 */
+       u32 p0xmit2048_4095;    /* DWORD 54 */
+       u32 p0xmit4096_8191;    /* DWORD 55 */
+       u32 p0xmit8192_9216;    /* DWORD 56 */
+       u32 p0rxfifooverflowdropped;    /* DWORD 57 */
+       u32 p0ipseclookupfaileddropped; /* DWORD 58 */
+       u32 p1recvdtotalbytesLSD;       /* DWORD 59 */
+       u32 p1recvdtotalbytesMSD;       /* DWORD 60 */
+       u32 p1recvdtotalframes; /* DWORD 61 */
+       u32 p1recvdunicastframes;       /* DWORD 62 */
+       u32 p1recvdmulticastframes;     /* DWORD 63 */
+       u32 p1recvdbroadcastframes;     /* DWORD 64 */
+       u32 p1crcerrors;        /* DWORD 65 */
+       u32 p1alignmentsymerrs; /* DWORD 66 */
+       u32 p1pauseframesrecvd; /* DWORD 67 */
+       u32 p1controlframesrecvd;       /* DWORD 68 */
+       u32 p1inrangelenerrors; /* DWORD 69 */
+       u32 p1outrangeerrors;   /* DWORD 70 */
+       u32 p1frametoolongerrors;       /* DWORD 71 */
+       u32 p1droppedaddressmatch;      /* DWORD 72 */
+       u32 p1droppedvlanmismatch;      /* DWORD 73 */
+       u32 p1ipdroppedtoosmall;        /* DWORD 74 */
+       u32 p1ipdroppedtooshort;        /* DWORD 75 */
+       u32 p1ipdroppedhdrtoosmall;     /* DWORD 76 */
+       u32 p1tcpdroppedlen;    /* DWORD 77 */
+       u32 p1droppedrunt;      /* DWORD 78 */
+       u32 p1recvd64;          /* DWORD 79 */
+       u32 p1recvd65_127;      /* DWORD 80 */
+       u32 p1recvd128_256;     /* DWORD 81 */
+       u32 p1recvd256_511;     /* DWORD 82 */
+       u32 p1recvd512_1023;    /* DWORD 83 */
+       u32 p1recvd1518_1522;   /* DWORD 84 */
+       u32 p1recvd1522_2047;   /* DWORD 85 */
+       u32 p1recvd2048_4095;   /* DWORD 86 */
+       u32 p1recvd4096_8191;   /* DWORD 87 */
+       u32 p1recvd8192_9216;   /* DWORD 88 */
+       u32 p1rcvdipcksmerrs;   /* DWORD 89 */
+       u32 p1recvdtcpcksmerrs; /* DWORD 90 */
+       u32 p1recvdudpcksmerrs; /* DWORD 91 */
+       u32 p1recvdnonrsspackets;       /* DWORD 92 */
+       u32 p1recvdippackets;   /* DWORD 93 */
+       u32 p1recvdchute1packets;       /* DWORD 94 */
+       u32 p1recvdchute2packets;       /* DWORD 95 */
+       u32 p1recvdchute3packets;       /* DWORD 96 */
+       u32 p1recvdipsecpackets;        /* DWORD 97 */
+       u32 p1recvdmanagementpackets;   /* DWORD 98 */
+       u32 p1xmitbyteslsd;     /* DWORD 99 */
+       u32 p1xmitbytesmsd;     /* DWORD 100 */
+       u32 p1xmitunicastframes;        /* DWORD 101 */
+       u32 p1xmitmulticastframes;      /* DWORD 102 */
+       u32 p1xmitbroadcastframes;      /* DWORD 103 */
+       u32 p1xmitpauseframes;  /* DWORD 104 */
+       u32 p1xmitcontrolframes;        /* DWORD 105 */
+       u32 p1xmit64;           /* DWORD 106 */
+       u32 p1xmit65_127;       /* DWORD 107 */
+       u32 p1xmit128_256;      /* DWORD 108 */
+       u32 p1xmit256_511;      /* DWORD 109 */
+       u32 p1xmit512_1023;     /* DWORD 110 */
+       u32 p1xmit1518_1522;    /* DWORD 111 */
+       u32 p1xmit1522_2047;    /* DWORD 112 */
+       u32 p1xmit2048_4095;    /* DWORD 113 */
+       u32 p1xmit4096_8191;    /* DWORD 114 */
+       u32 p1xmit8192_9216;    /* DWORD 115 */
+       u32 p1rxfifooverflowdropped;    /* DWORD 116 */
+       u32 p1ipseclookupfaileddropped; /* DWORD 117 */
+       u32 pxdroppednopbuf;    /* DWORD 118 */
+       u32 pxdroppednotxpb;    /* DWORD 119 */
+       u32 pxdroppednoipsecbuf;        /* DWORD 120 */
+       u32 pxdroppednoerxdescr;        /* DWORD 121 */
+       u32 pxdroppednotpredescr;       /* DWORD 122 */
+       u32 pxrecvdmanagementportpackets;       /* DWORD 123 */
+       u32 pxrecvdmanagementportbytes; /* DWORD 124 */
+       u32 pxrecvdmanagementportpauseframes;   /* DWORD 125 */
+       u32 pxrecvdmanagementporterrors;        /* DWORD 126 */
+       u32 pxxmitmanagementportpackets;        /* DWORD 127 */
+       u32 pxxmitmanagementportbytes;  /* DWORD 128 */
+       u32 pxxmitmanagementportpause;  /* DWORD 129 */
+       u32 pxxmitmanagementportrxfifooverflow; /* DWORD 130 */
+       u32 pxrecvdipsecipcksmerrs;     /* DWORD 131 */
+       u32 pxrecvdtcpsecipcksmerrs;    /* DWORD 132 */
+       u32 pxrecvdudpsecipcksmerrs;    /* DWORD 133 */
+       u32 pxipsecrunt;        /* DWORD 134 */
+       u32 pxipsecaddressmismatchdropped;      /* DWORD 135 */
+       u32 pxipsecrxfifooverflowdropped;       /* DWORD 136 */
+       u32 pxipsecframestoolong;       /* DWORD 137 */
+       u32 pxipsectotalipframes;       /* DWORD 138 */
+       u32 pxipseciptoosmall;  /* DWORD 139 */
+       u32 pxipseciptooshort;  /* DWORD 140 */
+       u32 pxipseciphdrtoosmall;       /* DWORD 141 */
+       u32 pxipsectcphdrbad;   /* DWORD 142 */
+       u32 pxrecvdipsecchute1; /* DWORD 143 */
+       u32 pxrecvdipsecchute2; /* DWORD 144 */
+       u32 pxrecvdipsecchute3; /* DWORD 145 */
+       u32 pxdropped7frags;    /* DWORD 146 */
+       u32 pxdroppedfrags;     /* DWORD 147 */
+       u32 pxdroppedinvalidfragring;   /* DWORD 148 */
+       u32 pxnumforwardedpackets;      /* DWORD 149 */
+} __packed;
+
+union MIB_ETH_STATISTICS_PARAMS {
+       struct MIB_ETH_STATISTICS_PARAMS_IN request;
+       struct BE_RXF_STATS response;
+} __packed;
+
+/*
+ *  Query ethernet statistics. All domains may issue this command. The
+ *  host domain drivers may optionally reset internal statistic counters
+ *  with a query.
+ */
+struct FWCMD_ETH_GET_STATISTICS {
+       union FWCMD_HEADER header;
+       union MIB_ETH_STATISTICS_PARAMS params;
+} __packed;
+
+
+struct FWCMD_ETH_ANON_175_REQUEST {
+       u8 port0_promiscuous;
+       u8 port1_promiscuous;
+       u16 rsvd0;
+} __packed;
+
+struct FWCMD_ETH_ANON_176_RESPONSE {
+       u32 rsvd0;
+} __packed;
+
+union FWCMD_ETH_ANON_174_PARAMS {
+       struct FWCMD_ETH_ANON_175_REQUEST request;
+       struct FWCMD_ETH_ANON_176_RESPONSE response;
+} __packed;
+
+/* Enables/Disables promiscuous ethernet receive mode.  */
+struct FWCMD_ETH_PROMISCUOUS {
+       union FWCMD_HEADER header;
+       union FWCMD_ETH_ANON_174_PARAMS params;
+} __packed;
+
+struct FWCMD_ETH_ANON_178_REQUEST {
+       u32 new_fragsize_log2;
+} __packed;
+
+struct FWCMD_ETH_ANON_179_RESPONSE {
+       u32 actual_fragsize_log2;
+} __packed;
+
+union FWCMD_ETH_ANON_177_PARAMS {
+       struct FWCMD_ETH_ANON_178_REQUEST request;
+       struct FWCMD_ETH_ANON_179_RESPONSE response;
+} __packed;
+
+/*
+ *  Sets the Ethernet RX fragment size. Only host (domain 0) networking
+ *  drivers may issue this command. This call will fail for non-host
+ *  protection domains. In this situation the MCC CQ status will indicate
+ *  a failure due to insufficient privileges. The response should be
+ *  ignored, and the driver should use FWCMD_ETH_GET_RX_FRAG_SIZE to
+ *  query the existing ethernet receive fragment size. It must use this
+ *  fragment size for all fragments in the ethernet receive ring. If
+ *  the command succeeds, the driver must use the frag size indicated
+ *  in the command response since the requested frag size may not be
+ *  applied until the next reboot. When the requested fragsize matches
+ *  the response fragsize, the request was applied immediately.
+ */
+struct FWCMD_ETH_SET_RX_FRAG_SIZE {
+       union FWCMD_HEADER header;
+       union FWCMD_ETH_ANON_177_PARAMS params;
+} __packed;
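+
+/*
+ * The fragment size travels as a log2, e.g. 2048-byte fragments are
+ * requested with new_fragsize_log2 = 11 (illustration only):
+ *
+ *	req.new_fragsize_log2 = ilog2(frag_bytes);
+ */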
+
+struct FWCMD_ETH_ANON_181_REQUEST {
+       u32 rsvd0;
+} __packed;
+
+struct FWCMD_ETH_ANON_182_RESPONSE {
+       u32 actual_fragsize_log2;
+} __packed;
+
+union FWCMD_ETH_ANON_180_PARAMS {
+       struct FWCMD_ETH_ANON_181_REQUEST request;
+       struct FWCMD_ETH_ANON_182_RESPONSE response;
+} __packed;
+
+/*
+ *  Queries the Ethernet RX fragment size. All domains may issue this
+ *  command. The driver should call this command to determine the minimum
+ *  required fragment size for the ethernet RX ring buffers. Drivers
+ *  may choose to use a larger size for each fragment buffer, but BladeEngine
+ *  will use up to the configured minimum required fragsize in each ethernet
+ *  receive fragment buffer. For example, if the ethernet receive fragment
+ *  size is configured to 4kB, and a driver uses 8kB fragments, a 6kB
+ *  ethernet packet received by BladeEngine will be split across two
+ *  of the driver's receive fragments (4kB in one fragment buffer, and
+ *  2kB in the subsequent fragment buffer).
+ */
+struct FWCMD_ETH_GET_RX_FRAG_SIZE {
+       union FWCMD_HEADER header;
+       union FWCMD_ETH_ANON_180_PARAMS params;
+} __packed;
+
+#endif /* __fwcmd_eth_bmap_h__ */
diff --git a/drivers/staging/benet/fw/fwcmd_hdr_bmap.h b/drivers/staging/benet/fw/fwcmd_hdr_bmap.h
new file mode 100644 (file)
index 0000000..28b4532
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_hdr_bmap_h__
+#define __fwcmd_hdr_bmap_h__
+
+struct FWCMD_REQUEST_HEADER {
+       u8 opcode;
+       u8 subsystem;
+       u8 port_number;
+       u8 domain;
+       u32 timeout;
+       u32 request_length;
+       u32 rsvd0;
+} __packed;
+
+struct FWCMD_RESPONSE_HEADER {
+       u8 opcode;
+       u8 subsystem;
+       u8 rsvd0;
+       u8 domain;
+       u8 status;
+       u8 additional_status;
+       u16 rsvd1;
+       u32 response_length;
+       u32 actual_response_length;
+} __packed;
+
+/*
+ *  The firmware/driver overwrites the input FWCMD_REQUEST_HEADER with
+ *  the output FWCMD_RESPONSE_HEADER.
+ */
+union FWCMD_HEADER {
+       struct FWCMD_REQUEST_HEADER request;
+       struct FWCMD_RESPONSE_HEADER response;
+} __packed;
+
+#endif /* __fwcmd_hdr_bmap_h__ */
diff --git a/drivers/staging/benet/fw/fwcmd_mcc.h b/drivers/staging/benet/fw/fwcmd_mcc.h
new file mode 100644 (file)
index 0000000..9eeca87
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_mcc_amap_h__
+#define __fwcmd_mcc_amap_h__
+#include "fwcmd_opcodes.h"
+/*
+ * Where applicable, a WRB may contain a list of scatter-gather elements.
+ * Each element supports a 64-bit address and a 32-bit length field.
+ */
+struct BE_MCC_SGE_AMAP {
+       u8 pa_lo[32];   /* DWORD 0 */
+       u8 pa_hi[32];   /* DWORD 1 */
+       u8 length[32];  /* DWORD 2 */
+} __packed;
+struct MCC_SGE_AMAP {
+       u32 dw[3];
+};
+/*
+ * The design of an MCC_SGE allows up to 19 elements (3 DWORDs each,
+ * occupying 57 of the payload's 59 DWORDs) to be embedded in a WRB,
+ * supporting 64KB data transfers (assuming a 4KB page size).
+ */
+struct BE_MCC_WRB_PAYLOAD_AMAP {
+       union {
+               struct BE_MCC_SGE_AMAP sgl[19];
+               u8 embedded[59][32];    /* DWORD 0 */
+       };
+} __packed;
+struct MCC_WRB_PAYLOAD_AMAP {
+       u32 dw[59];
+};
+
+/*
+ * This is the structure of the MCC Command WRB for commands
+ * sent to the Management Processing Unit (MPU). See the usage sketch
+ * below for embedded and non-embedded modes.
+ */
+struct BE_MCC_WRB_AMAP {
+       u8 embedded;    /* DWORD 0 */
+       u8 rsvd0[2];    /* DWORD 0 */
+       u8 sge_count[5];        /* DWORD 0 */
+       u8 rsvd1[16];   /* DWORD 0 */
+       u8 special[8];  /* DWORD 0 */
+       u8 payload_length[32];  /* DWORD 1 */
+       u8 tag[2][32];  /* DWORD 2 */
+       u8 rsvd2[32];   /* DWORD 4 */
+       struct BE_MCC_WRB_PAYLOAD_AMAP payload;
+} __packed;
+struct MCC_WRB_AMAP {
+       u32 dw[64];
+};
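+
+/*
+ * Usage sketch (illustration only; field accesses are shown symbolically,
+ * a real driver manipulates these bit fields through its AMAP accessors):
+ * in embedded mode the FWCMD is copied into payload.embedded and the
+ * embedded bit is set; in non-embedded mode the payload instead holds up
+ * to sge_count MCC_SGEs pointing at the FWCMD buffer.
+ *
+ *	wrb.embedded = 1;			// FWCMD lives in the WRB
+ *	wrb.payload_length = sizeof(fwcmd);
+ */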
+
+/*  This is the structure of the MCC Completion queue entry  */
+struct BE_MCC_CQ_ENTRY_AMAP {
+       u8 completion_status[16];       /* DWORD 0 */
+       u8 extended_status[16]; /* DWORD 0 */
+       u8 mcc_tag[2][32];      /* DWORD 1 */
+       u8 rsvd0[27];   /* DWORD 3 */
+       u8 consumed;    /* DWORD 3 */
+       u8 completed;   /* DWORD 3 */
+       u8 hpi_buffer_completion;       /* DWORD 3 */
+       u8 async_event; /* DWORD 3 */
+       u8 valid;               /* DWORD 3 */
+} __packed;
+struct MCC_CQ_ENTRY_AMAP {
+       u32 dw[4];
+};
+
+/* Mailbox structures used by the MPU during bootstrap */
+struct BE_MCC_MAILBOX_AMAP {
+       struct BE_MCC_WRB_AMAP wrb;
+       struct BE_MCC_CQ_ENTRY_AMAP cq;
+} __packed;
+struct MCC_MAILBOX_AMAP {
+       u32 dw[68];
+};
+
+#endif /* __fwcmd_mcc_amap_h__ */
diff --git a/drivers/staging/benet/fw/fwcmd_opcodes.h b/drivers/staging/benet/fw/fwcmd_opcodes.h
new file mode 100644 (file)
index 0000000..23d5693
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_opcodes_amap_h__
+#define __fwcmd_opcodes_amap_h__
+
+/*
+ * --- FWCMD_SUBSYSTEMS ---
+ * The commands are grouped into the following subsystems. The subsystem
+ * code along with the opcode uniquely identify a particular fwcmd.
+ */
+#define FWCMD_SUBSYSTEM_RSVD  (0)      /* This subsystem is reserved. It is */
+                                                 /* never used. */
+#define FWCMD_SUBSYSTEM_COMMON (1)     /* CMDs in this group are common to
+                                       * all subsystems. See
+                                       * COMMON_SUBSYSTEM_OPCODES for opcodes
+                                       * and Common Host Configuration CMDs
+                                       * for the FWCMD descriptions.
+                                       */
+#define FWCMD_SUBSYSTEM_COMMON_ISCSI    (2) /* CMDs in this group are */
+                                       /*
+                                       * common to Initiator and Target. See
+                                       * COMMON_ISCSI_SUBSYSTEM_OPCODES and
+                                       * Common iSCSI Initiator and Target
+                                       * CMDs for the command descriptions.
+                                       */
+#define FWCMD_SUBSYSTEM_ETH             (3)    /* This subsystem is used to
+                                               execute Ethernet commands. */
+
+#define FWCMD_SUBSYSTEM_TPM             (4)    /* This subsystem is used
+                                                to execute TPM commands. */
+#define FWCMD_SUBSYSTEM_PXE_UNDI        (5)    /* This subsystem is used
+                                               * to execute PXE
+                                               * and UNDI specific commands.
+                                               */
+
+#define FWCMD_SUBSYSTEM_ISCSI_INI       (6)    /* This subsystem is used to
+                                               execute iSCSI Initiator
+                                               specific commands.
+                                               */
+#define FWCMD_SUBSYSTEM_ISCSI_TGT       (7)    /* This subsystem is used
+                                               to execute iSCSI Target
+                                               specific commands between
+                                               PTL and ARM firmware.
+                                               */
+#define FWCMD_SUBSYSTEM_MILI_PTL        (8)    /* This subsystem is used to
+                                               execute iSCSI Target specific
+                                               commands between MILI
+                                               and PTL. */
+#define FWCMD_SUBSYSTEM_MILI_TMD        (9)    /* This subsystem is used to
+                                               execute iSCSI Target specific
+                                               commands between MILI
+                                               and TMD. */
+#define FWCMD_SUBSYSTEM_PROXY           (11)   /* This subsystem is used
+                                               to execute proxied commands
+                                               within the host at the
+                                               explicit request of a
+                                               non-privileged domain.
+                                               This 'subsystem' is entirely
+                                               virtual from the controller
+                                               and firmware perspective as
+                                               it is implemented in host
+                                               drivers.
+                                               */
+
+/*
+ * --- COMMON_SUBSYSTEM_OPCODES ---
+ * These opcodes are common to both networking and storage PCI
+ * functions. They are used to reserve resources and configure
+ * BladeEngine. These opcodes all use the FWCMD_SUBSYSTEM_COMMON
+ * subsystem code.
+ */
+#define OPCODE_COMMON_NTWK_MAC_QUERY    (1)
+#define SUBSYSTEM_COMMON_NTWK_MAC_QUERY (1)
+#define SUBSYSTEM_COMMON_NTWK_MAC_SET   (1)
+#define SUBSYSTEM_COMMON_NTWK_MULTICAST_SET (1)
+#define SUBSYSTEM_COMMON_NTWK_VLAN_CONFIG (1)
+#define SUBSYSTEM_COMMON_NTWK_LINK_STATUS_QUERY (1)
+#define SUBSYSTEM_COMMON_READ_FLASHROM  (1)
+#define SUBSYSTEM_COMMON_WRITE_FLASHROM (1)
+#define SUBSYSTEM_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (1)
+#define SUBSYSTEM_COMMON_ADD_PAGE_TABLES (1)
+#define SUBSYSTEM_COMMON_REMOVE_PAGE_TABLES (1)
+#define SUBSYSTEM_COMMON_RING_DESTROY   (1)
+#define SUBSYSTEM_COMMON_CQ_CREATE      (1)
+#define SUBSYSTEM_COMMON_EQ_CREATE      (1)
+#define SUBSYSTEM_COMMON_ETH_RX_CREATE  (1)
+#define SUBSYSTEM_COMMON_ETH_TX_CREATE  (1)
+#define SUBSYSTEM_COMMON_ISCSI_DEFQ_CREATE (1)
+#define SUBSYSTEM_COMMON_ISCSI_WRBQ_CREATE (1)
+#define SUBSYSTEM_COMMON_MCC_CREATE     (1)
+#define SUBSYSTEM_COMMON_JELL_CONFIG    (1)
+#define SUBSYSTEM_COMMON_FORCE_FAILOVER (1)
+#define SUBSYSTEM_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (1)
+#define SUBSYSTEM_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (1)
+#define SUBSYSTEM_COMMON_POST_ZERO_BUFFER (1)
+#define SUBSYSTEM_COMMON_GET_QOS        (1)
+#define SUBSYSTEM_COMMON_SET_QOS        (1)
+#define SUBSYSTEM_COMMON_TCP_GET_STATISTICS (1)
+#define SUBSYSTEM_COMMON_SEEPROM_READ   (1)
+#define SUBSYSTEM_COMMON_TCP_STATE_QUERY (1)
+#define SUBSYSTEM_COMMON_GET_CNTL_ATTRIBUTES (1)
+#define SUBSYSTEM_COMMON_NOP            (1)
+#define SUBSYSTEM_COMMON_NTWK_RX_FILTER (1)
+#define SUBSYSTEM_COMMON_GET_FW_VERSION (1)
+#define SUBSYSTEM_COMMON_SET_FLOW_CONTROL (1)
+#define SUBSYSTEM_COMMON_GET_FLOW_CONTROL (1)
+#define SUBSYSTEM_COMMON_SET_TCP_PARAMETERS (1)
+#define SUBSYSTEM_COMMON_SET_FRAME_SIZE (1)
+#define SUBSYSTEM_COMMON_GET_FAT        (1)
+#define SUBSYSTEM_COMMON_MODIFY_EQ_DELAY (1)
+#define SUBSYSTEM_COMMON_FIRMWARE_CONFIG (1)
+#define SUBSYSTEM_COMMON_ENABLE_DISABLE_DOMAINS (1)
+#define SUBSYSTEM_COMMON_GET_DOMAIN_CONFIG (1)
+#define SUBSYSTEM_COMMON_SET_VLD_CONFIG (1)
+#define SUBSYSTEM_COMMON_GET_VLD_CONFIG (1)
+#define SUBSYSTEM_COMMON_GET_PORT_EQUALIZATION (1)
+#define SUBSYSTEM_COMMON_SET_PORT_EQUALIZATION (1)
+#define SUBSYSTEM_COMMON_RED_CONFIG     (1)
+#define OPCODE_COMMON_NTWK_MAC_SET      (2)
+#define OPCODE_COMMON_NTWK_MULTICAST_SET (3)
+#define OPCODE_COMMON_NTWK_VLAN_CONFIG  (4)
+#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY (5)
+#define OPCODE_COMMON_READ_FLASHROM     (6)
+#define OPCODE_COMMON_WRITE_FLASHROM    (7)
+#define OPCODE_COMMON_QUERY_MAX_FWCMD_BUFFER_SIZE (8)
+#define OPCODE_COMMON_ADD_PAGE_TABLES   (9)
+#define OPCODE_COMMON_REMOVE_PAGE_TABLES (10)
+#define OPCODE_COMMON_RING_DESTROY      (11)
+#define OPCODE_COMMON_CQ_CREATE         (12)
+#define OPCODE_COMMON_EQ_CREATE         (13)
+#define OPCODE_COMMON_ETH_RX_CREATE     (14)
+#define OPCODE_COMMON_ETH_TX_CREATE     (15)
+#define OPCODE_COMMON_NET_RESERVED0     (16)   /* Reserved */
+#define OPCODE_COMMON_NET_RESERVED1     (17)   /* Reserved */
+#define OPCODE_COMMON_NET_RESERVED2     (18)   /* Reserved */
+#define OPCODE_COMMON_ISCSI_DEFQ_CREATE (19)
+#define OPCODE_COMMON_ISCSI_WRBQ_CREATE (20)
+#define OPCODE_COMMON_MCC_CREATE        (21)
+#define OPCODE_COMMON_JELL_CONFIG       (22)
+#define OPCODE_COMMON_FORCE_FAILOVER    (23)
+#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS (24)
+#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS (25)
+#define OPCODE_COMMON_POST_ZERO_BUFFER  (26)
+#define OPCODE_COMMON_GET_QOS           (27)
+#define OPCODE_COMMON_SET_QOS           (28)
+#define OPCODE_COMMON_TCP_GET_STATISTICS (29)
+#define OPCODE_COMMON_SEEPROM_READ      (30)
+#define OPCODE_COMMON_TCP_STATE_QUERY   (31)
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES (32)
+#define OPCODE_COMMON_NOP               (33)
+#define OPCODE_COMMON_NTWK_RX_FILTER    (34)
+#define OPCODE_COMMON_GET_FW_VERSION    (35)
+#define OPCODE_COMMON_SET_FLOW_CONTROL  (36)
+#define OPCODE_COMMON_GET_FLOW_CONTROL  (37)
+#define OPCODE_COMMON_SET_TCP_PARAMETERS (38)
+#define OPCODE_COMMON_SET_FRAME_SIZE    (39)
+#define OPCODE_COMMON_GET_FAT           (40)
+#define OPCODE_COMMON_MODIFY_EQ_DELAY   (41)
+#define OPCODE_COMMON_FIRMWARE_CONFIG   (42)
+#define OPCODE_COMMON_ENABLE_DISABLE_DOMAINS (43)
+#define OPCODE_COMMON_GET_DOMAIN_CONFIG (44)
+#define OPCODE_COMMON_SET_VLD_CONFIG    (45)
+#define OPCODE_COMMON_GET_VLD_CONFIG    (46)
+#define OPCODE_COMMON_GET_PORT_EQUALIZATION (47)
+#define OPCODE_COMMON_SET_PORT_EQUALIZATION (48)
+#define OPCODE_COMMON_RED_CONFIG        (49)
+
+
+
+/*
+ * --- ETH_SUBSYSTEM_OPCODES ---
+ * These opcodes are used for configuring the Ethernet interfaces. These
+ * opcodes all use the FWCMD_SUBSYSTEM_ETH subsystem code.
+ */
+#define OPCODE_ETH_RSS_CONFIG           (1)
+#define OPCODE_ETH_ACPI_CONFIG          (2)
+#define SUBSYSTEM_ETH_RSS_CONFIG        (3)
+#define SUBSYSTEM_ETH_ACPI_CONFIG       (3)
+#define OPCODE_ETH_PROMISCUOUS          (3)
+#define SUBSYSTEM_ETH_PROMISCUOUS       (3)
+#define SUBSYSTEM_ETH_GET_STATISTICS    (3)
+#define SUBSYSTEM_ETH_GET_RX_FRAG_SIZE  (3)
+#define SUBSYSTEM_ETH_SET_RX_FRAG_SIZE  (3)
+#define OPCODE_ETH_GET_STATISTICS       (4)
+#define OPCODE_ETH_GET_RX_FRAG_SIZE     (5)
+#define OPCODE_ETH_SET_RX_FRAG_SIZE     (6)
+
+
+
+
+
+/*
+ * --- MCC_STATUS_CODE ---
+ * These are the global status codes used by all subsystems
+ */
+#define MCC_STATUS_SUCCESS              (0)    /* Indicates a successful
+                                               completion of the command */
+#define MCC_STATUS_INSUFFICIENT_PRIVILEGES (1) /* The client does not have
+                                               sufficient privileges to
+                                               execute the command */
+#define MCC_STATUS_INVALID_PARAMETER    (2)    /* A parameter in the command
+                                               was invalid. The extended
+                                               status contains the index
+                                               of the parameter */
+#define MCC_STATUS_INSUFFICIENT_RESOURCES (3)  /* There are insufficient
+                                               chip resources to execute
+                                               the command */
+#define MCC_STATUS_QUEUE_FLUSHING       (4)    /* The command is completing
+                                               because the queue was
+                                               getting flushed */
+#define MCC_STATUS_DMA_FAILED           (5)    /* The command is completing
+                                               with a DMA error */
+
+/*
+ * --- MGMT_ERROR_CODES ---
+ * Error Codes returned in the status field of the FWCMD response header
+ */
+#define MGMT_STATUS_SUCCESS             (0)    /* The FWCMD completed
+                                               without errors */
+#define MGMT_STATUS_FAILED              (1)    /* Error status in the Status
+                                               field of the
+                                               struct FWCMD_RESPONSE_HEADER */
+#define MGMT_STATUS_ILLEGAL_REQUEST     (2)    /* Invalid FWCMD opcode */
+#define MGMT_STATUS_ILLEGAL_FIELD       (3)    /* Invalid parameter in
+                                               the FWCMD payload */
+
+#endif /* __fwcmd_opcodes_amap_h__ */
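
A driver consuming these definitions would typically fold the MCC_STATUS_* codes into standard errno values before returning to callers. The mapping below is only a sketch of that idea, not the driver's actual policy; the function name is hypothetical and only the status constants come from this header:

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical translation of an MCC completion status into an errno. */
static int be_mcc_status_to_errno(u32 status)
{
	switch (status) {
	case MCC_STATUS_SUCCESS:
		return 0;
	case MCC_STATUS_INSUFFICIENT_PRIVILEGES:
		return -EPERM;
	case MCC_STATUS_INVALID_PARAMETER:
		return -EINVAL;
	case MCC_STATUS_INSUFFICIENT_RESOURCES:
		return -ENOMEM;
	case MCC_STATUS_QUEUE_FLUSHING:
		return -ESHUTDOWN;	/* queue is being torn down */
	case MCC_STATUS_DMA_FAILED:
	default:
		return -EIO;
	}
}
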
diff --git a/drivers/staging/benet/fw/fwcmd_types_bmap.h b/drivers/staging/benet/fw/fwcmd_types_bmap.h
new file mode 100644 (file)
index 0000000..92217af
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __fwcmd_types_bmap_h__
+#define __fwcmd_types_bmap_h__
+
+/* MAC address format  */
+struct MAC_ADDRESS_FORMAT {
+       u16 SizeOfStructure;
+       u8 MACAddress[6];
+} __packed;
+
+#endif /* __fwcmd_types_bmap_h__ */
diff --git a/drivers/staging/benet/fw/host_struct.h b/drivers/staging/benet/fw/host_struct.h
new file mode 100644 (file)
index 0000000..3de6722
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __host_struct_amap_h__
+#define __host_struct_amap_h__
+#include "be_cm.h"
+#include "be_common.h"
+#include "descriptors.h"
+
+/* --- EQ_COMPLETION_MAJOR_CODE_ENUM --- */
+#define EQ_MAJOR_CODE_COMPLETION        (0)    /* Completion event on a */
+                                                 /* completion queue. */
+#define EQ_MAJOR_CODE_ETH               (1)    /* Affiliated Ethernet Event. */
+#define EQ_MAJOR_CODE_RESERVED          (2)    /* Reserved */
+#define EQ_MAJOR_CODE_RDMA              (3)    /* Affiliated RDMA Event. */
+#define EQ_MAJOR_CODE_ISCSI             (4)    /* Affiliated ISCSI Event */
+#define EQ_MAJOR_CODE_UNAFFILIATED      (5)    /* Unaffiliated Event */
+
+/* --- EQ_COMPLETION_MINOR_CODE_ENUM --- */
+#define EQ_MINOR_CODE_COMPLETION        (0)    /* Completion event on a */
+                                                 /* completion queue. */
+#define EQ_MINOR_CODE_OTHER             (1)    /* Other Event (TBD). */
+
+/* Queue Entry Definition for all 4 byte event queue types. */
+struct BE_EQ_ENTRY_AMAP {
+       u8 Valid;               /* DWORD 0 */
+       u8 MajorCode[3];        /* DWORD 0 */
+       u8 MinorCode[12];       /* DWORD 0 */
+       u8 ResourceID[16];      /* DWORD 0 */
+} __packed;
+struct EQ_ENTRY_AMAP {
+       u32 dw[1];
+};
+
+/*
+ * --- ETH_EVENT_CODE ---
+ * These codes are returned by the MPU when one of these events has
+ * occurred and the event is configured to report to an Event Queue.
+ */
+#define ETH_EQ_LINK_STATUS              (0)    /* Link status change event */
+                                                 /* detected. */
+#define ETH_EQ_WATERMARK                (1)    /* watermark event detected. */
+#define ETH_EQ_MAGIC_PKT                (2)    /* magic pkt event detected. */
+#define ETH_EQ_ACPI_PKT0                (3)    /* ACPI interesting packet */
+                                                 /* detected. */
+#define ETH_EQ_ACPI_PKT1                (3)    /* ACPI interesting packet */
+                                                 /* detected. */
+#define ETH_EQ_ACPI_PKT2                (3)    /* ACPI interesting packet */
+                                                 /* detected. */
+#define ETH_EQ_ACPI_PKT3                (3)    /* ACPI interesting packet */
+                                                 /* detected. */
+
+/*
+ * --- ETH_TX_COMPL_STATUS_ENUM ---
+ * Status codes contained in Ethernet TX completion descriptors.
+ */
+#define ETH_COMP_VALID                  (0)
+#define ETH_COMP_ERROR                  (1)
+#define ETH_COMP_INVALID                (15)
+
+/*
+ * --- ETH_TX_COMPL_PORT_ENUM ---
+ * Port indicator contained in Ethernet TX completion descriptors.
+ */
+#define ETH_COMP_PORT0                  (0)
+#define ETH_COMP_PORT1                  (1)
+#define ETH_COMP_MGMT                   (2)
+
+/*
+ * --- ETH_TX_COMPL_CT_ENUM ---
+ * Completion type indicator contained in Ethernet TX completion descriptors.
+ */
+#define ETH_COMP_ETH                    (0)
+
+/*
+ * Work request block that the driver issues to the chip for
+ * Ethernet transmissions. All control fields must be valid in each WRB for
+ * a message. The controller, as specified by the flags, optionally writes
+ * an entry to the Completion Ring and generates an event.
+ */
+struct BE_ETH_WRB_AMAP {
+       u8 frag_pa_hi[32];      /* DWORD 0 */
+       u8 frag_pa_lo[32];      /* DWORD 1 */
+       u8 complete;    /* DWORD 2 */
+       u8 event;               /* DWORD 2 */
+       u8 crc;         /* DWORD 2 */
+       u8 forward;             /* DWORD 2 */
+       u8 ipsec;               /* DWORD 2 */
+       u8 mgmt;                /* DWORD 2 */
+       u8 ipcs;                /* DWORD 2 */
+       u8 udpcs;               /* DWORD 2 */
+       u8 tcpcs;               /* DWORD 2 */
+       u8 lso;         /* DWORD 2 */
+       u8 last;                /* DWORD 2 */
+       u8 vlan;                /* DWORD 2 */
+       u8 dbg[3];              /* DWORD 2 */
+       u8 hash_val[3]; /* DWORD 2 */
+       u8 lso_mss[14]; /* DWORD 2 */
+       u8 frag_len[16];        /* DWORD 3 */
+       u8 vlan_tag[16];        /* DWORD 3 */
+} __packed;
+struct ETH_WRB_AMAP {
+       u32 dw[4];
+};
+
+/* This is an Ethernet transmit completion descriptor */
+struct BE_ETH_TX_COMPL_AMAP {
+       u8 user_bytes[16];      /* DWORD 0 */
+       u8 nwh_bytes[8];        /* DWORD 0 */
+       u8 lso;         /* DWORD 0 */
+       u8 rsvd0[7];    /* DWORD 0 */
+       u8 wrb_index[16];       /* DWORD 1 */
+       u8 ct[2];               /* DWORD 1 */
+       u8 port[2];             /* DWORD 1 */
+       u8 rsvd1[8];    /* DWORD 1 */
+       u8 status[4];   /* DWORD 1 */
+       u8 rsvd2[16];   /* DWORD 2 */
+       u8 ringid[11];  /* DWORD 2 */
+       u8 hash_val[4]; /* DWORD 2 */
+       u8 valid;               /* DWORD 2 */
+       u8 rsvd3[32];   /* DWORD 3 */
+} __packed;
+struct ETH_TX_COMPL_AMAP {
+       u32 dw[4];
+};
+
+/* Ethernet Receive Buffer descriptor */
+struct BE_ETH_RX_D_AMAP {
+       u8 fragpa_hi[32];       /* DWORD 0 */
+       u8 fragpa_lo[32];       /* DWORD 1 */
+} __packed;
+struct ETH_RX_D_AMAP {
+       u32 dw[2];
+};
+
+/* This is an Ethernet Receive Completion Descriptor */
+struct BE_ETH_RX_COMPL_AMAP {
+       u8 vlan_tag[16];        /* DWORD 0 */
+       u8 pktsize[14]; /* DWORD 0 */
+       u8 port;                /* DWORD 0 */
+       u8 rsvd0;               /* DWORD 0 */
+       u8 err;         /* DWORD 1 */
+       u8 rsshp;               /* DWORD 1 */
+       u8 ipf;         /* DWORD 1 */
+       u8 tcpf;                /* DWORD 1 */
+       u8 udpf;                /* DWORD 1 */
+       u8 ipcksm;              /* DWORD 1 */
+       u8 tcpcksm;             /* DWORD 1 */
+       u8 udpcksm;             /* DWORD 1 */
+       u8 macdst[6];   /* DWORD 1 */
+       u8 vtp;         /* DWORD 1 */
+       u8 vtm;         /* DWORD 1 */
+       u8 fragndx[10]; /* DWORD 1 */
+       u8 ct[2];               /* DWORD 1 */
+       u8 ipsec;               /* DWORD 1 */
+       u8 numfrags[3]; /* DWORD 1 */
+       u8 rsvd1[31];   /* DWORD 2 */
+       u8 valid;               /* DWORD 2 */
+       u8 rsshash[32]; /* DWORD 3 */
+} __packed;
+struct ETH_RX_COMPL_AMAP {
+       u32 dw[4];
+};
+
+#endif /* __host_struct_amap_h__ */
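
Following the same AMAP convention, a 32-bit event queue entry decodes directly from the BE_EQ_ENTRY_AMAP field widths: Valid in bit 0, MajorCode in bits 3:1, MinorCode in bits 15:4 and ResourceID in bits 31:16. A minimal sketch (the helper names are illustrative, and dw is assumed to be in CPU endianness):

#include <linux/types.h>

/* Illustrative decode of a single EQ_ENTRY_AMAP dword. */
static inline int eq_entry_valid(u32 dw)
{
	return dw & 0x1;		/* 'Valid', bit 0 */
}

static inline u32 eq_entry_major_code(u32 dw)
{
	return (dw >> 1) & 0x7;		/* 'MajorCode', bits 3:1 */
}

static inline u32 eq_entry_resource_id(u32 dw)
{
	return (dw >> 16) & 0xffff;	/* 'ResourceID', bits 31:16 */
}
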
diff --git a/drivers/staging/benet/fw/mpu.h b/drivers/staging/benet/fw/mpu.h
new file mode 100644 (file)
index 0000000..41f3f87
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __mpu_amap_h__
+#define __mpu_amap_h__
+#include "ep.h"
+
+/* Provide control parameters for the Management Processor Unit. */
+struct BE_MPU_CSRMAP_AMAP {
+       struct BE_EP_CSRMAP_AMAP ep;
+       u8 rsvd0[128];  /* DWORD 64 */
+       u8 rsvd1[32];   /* DWORD 68 */
+       u8 rsvd2[192];  /* DWORD 69 */
+       u8 rsvd3[192];  /* DWORD 75 */
+       u8 rsvd4[32];   /* DWORD 81 */
+       u8 rsvd5[32];   /* DWORD 82 */
+       u8 rsvd6[32];   /* DWORD 83 */
+       u8 rsvd7[32];   /* DWORD 84 */
+       u8 rsvd8[32];   /* DWORD 85 */
+       u8 rsvd9[32];   /* DWORD 86 */
+       u8 rsvd10[32];  /* DWORD 87 */
+       u8 rsvd11[32];  /* DWORD 88 */
+       u8 rsvd12[32];  /* DWORD 89 */
+       u8 rsvd13[32];  /* DWORD 90 */
+       u8 rsvd14[32];  /* DWORD 91 */
+       u8 rsvd15[32];  /* DWORD 92 */
+       u8 rsvd16[32];  /* DWORD 93 */
+       u8 rsvd17[32];  /* DWORD 94 */
+       u8 rsvd18[32];  /* DWORD 95 */
+       u8 rsvd19[32];  /* DWORD 96 */
+       u8 rsvd20[32];  /* DWORD 97 */
+       u8 rsvd21[32];  /* DWORD 98 */
+       u8 rsvd22[32];  /* DWORD 99 */
+       u8 rsvd23[32];  /* DWORD 100 */
+       u8 rsvd24[32];  /* DWORD 101 */
+       u8 rsvd25[32];  /* DWORD 102 */
+       u8 rsvd26[32];  /* DWORD 103 */
+       u8 rsvd27[32];  /* DWORD 104 */
+       u8 rsvd28[96];  /* DWORD 105 */
+       u8 rsvd29[32];  /* DWORD 108 */
+       u8 rsvd30[32];  /* DWORD 109 */
+       u8 rsvd31[32];  /* DWORD 110 */
+       u8 rsvd32[32];  /* DWORD 111 */
+       u8 rsvd33[32];  /* DWORD 112 */
+       u8 rsvd34[96];  /* DWORD 113 */
+       u8 rsvd35[32];  /* DWORD 116 */
+       u8 rsvd36[32];  /* DWORD 117 */
+       u8 rsvd37[32];  /* DWORD 118 */
+       u8 rsvd38[32];  /* DWORD 119 */
+       u8 rsvd39[32];  /* DWORD 120 */
+       u8 rsvd40[32];  /* DWORD 121 */
+       u8 rsvd41[134][32];     /* DWORD 122 */
+} __packed;
+struct MPU_CSRMAP_AMAP {
+       u32 dw[256];
+};
+
+#endif /* __mpu_amap_h__ */
diff --git a/drivers/staging/benet/fw/mpu_context.h b/drivers/staging/benet/fw/mpu_context.h
new file mode 100644 (file)
index 0000000..8ce90f9
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __mpu_context_amap_h__
+#define __mpu_context_amap_h__
+
+/*
+ * Management command and control ring context. The MPU's BTLR_CTRL1 CSR
+ * controls the writeback behavior of the producer and consumer index values.
+ */
+struct BE_MCC_RING_CONTEXT_AMAP {
+       u8 con_index[16];       /* DWORD 0 */
+       u8 ring_size[4];        /* DWORD 0 */
+       u8 cq_id[11];   /* DWORD 0 */
+       u8 rsvd0;               /* DWORD 0 */
+       u8 prod_index[16];      /* DWORD 1 */
+       u8 pdid[15];    /* DWORD 1 */
+       u8 invalid;             /* DWORD 1 */
+       u8 cmd_pending_current[7];      /* DWORD 2 */
+       u8 rsvd1[25];   /* DWORD 2 */
+       u8 hpi_port_cq_id[11];  /* DWORD 3 */
+       u8 rsvd2[5];    /* DWORD 3 */
+       u8 cmd_pending_max[7];  /* DWORD 3 */
+       u8 rsvd3[9];    /* DWORD 3 */
+} __packed;
+struct MCC_RING_CONTEXT_AMAP {
+       u32 dw[4];
+};
+
+#endif /* __mpu_context_amap_h__ */
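
From the field widths above, the consumer index occupies bits 15:0 of dw[0] and the producer index bits 15:0 of dw[1]. A sketch of reading both (the accessors are illustrative and assume the context has been fetched into CPU endianness):

#include <linux/types.h>

/* Illustrative accessors for BE_MCC_RING_CONTEXT_AMAP. */
static inline u16 mcc_ring_con_index(const struct MCC_RING_CONTEXT_AMAP *ctx)
{
	return ctx->dw[0] & 0xffff;	/* 'con_index', bits 15:0 */
}

static inline u16 mcc_ring_prod_index(const struct MCC_RING_CONTEXT_AMAP *ctx)
{
	return ctx->dw[1] & 0xffff;	/* 'prod_index', bits 15:0 */
}
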
diff --git a/drivers/staging/benet/fw/pcicfg.h b/drivers/staging/benet/fw/pcicfg.h
new file mode 100644 (file)
index 0000000..7c15684
--- /dev/null
@@ -0,0 +1,825 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __pcicfg_amap_h__
+#define __pcicfg_amap_h__
+
+/* Vendor and Device ID Register. */
+struct BE_PCICFG_ID_CSR_AMAP {
+       u8 vendorid[16];        /* DWORD 0 */
+       u8 deviceid[16];        /* DWORD 0 */
+} __packed;
+struct PCICFG_ID_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* IO Bar Register. */
+struct BE_PCICFG_IOBAR_CSR_AMAP {
+       u8 iospace;             /* DWORD 0 */
+       u8 rsvd0[7];    /* DWORD 0 */
+       u8 iobar[24];   /* DWORD 0 */
+} __packed;
+struct PCICFG_IOBAR_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Memory BAR 0 Register. */
+struct BE_PCICFG_MEMBAR0_CSR_AMAP {
+       u8 memspace;    /* DWORD 0 */
+       u8 type[2];             /* DWORD 0 */
+       u8 pf;          /* DWORD 0 */
+       u8 rsvd0[10];   /* DWORD 0 */
+       u8 membar0[18]; /* DWORD 0 */
+} __packed;
+struct PCICFG_MEMBAR0_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Memory BAR 1 - Low Address Register. */
+struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP {
+       u8 memspace;    /* DWORD 0 */
+       u8 type[2];             /* DWORD 0 */
+       u8 pf;          /* DWORD 0 */
+       u8 rsvd0[13];   /* DWORD 0 */
+       u8 membar1lo[15];       /* DWORD 0 */
+} __packed;
+struct PCICFG_MEMBAR1_LO_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Memory BAR 1 - High Address Register. */
+struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP {
+       u8 membar1hi[32];       /* DWORD 0 */
+} __packed;
+struct PCICFG_MEMBAR1_HI_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Memory BAR 2 - Low Address Register. */
+struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP {
+       u8 memspace;    /* DWORD 0 */
+       u8 type[2];             /* DWORD 0 */
+       u8 pf;          /* DWORD 0 */
+       u8 rsvd0[17];   /* DWORD 0 */
+       u8 membar2lo[11];       /* DWORD 0 */
+} __packed;
+struct PCICFG_MEMBAR2_LO_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Memory BAR 2 - High Address Register. */
+struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP {
+       u8 membar2hi[32];       /* DWORD 0 */
+} __packed;
+struct PCICFG_MEMBAR2_HI_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Subsystem Vendor and ID (Function 0) Register. */
+struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
+       u8 subsys_vendor_id[16];        /* DWORD 0 */
+       u8 subsys_id[16];       /* DWORD 0 */
+} __packed;
+struct PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Subsystem Vendor and ID (Function 1) Register. */
+struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
+       u8 subsys_vendor_id[16];        /* DWORD 0 */
+       u8 subsys_id[16];       /* DWORD 0 */
+} __packed;
+struct PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Semaphore Register. */
+struct BE_PCICFG_SEMAPHORE_CSR_AMAP {
+       u8 locked;              /* DWORD 0 */
+       u8 rsvd0[31];   /* DWORD 0 */
+} __packed;
+struct PCICFG_SEMAPHORE_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Soft Reset Register. */
+struct BE_PCICFG_SOFT_RESET_CSR_AMAP {
+       u8 rsvd0[7];    /* DWORD 0 */
+       u8 softreset;   /* DWORD 0 */
+       u8 rsvd1[16];   /* DWORD 0 */
+       u8 nec_ll_rcvdetect_i[8];       /* DWORD 0 */
+} __packed;
+struct PCICFG_SOFT_RESET_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Unrecoverable Error Status (Low) Register. Each bit corresponds to
+ * an internal Unrecoverable Error.  These are set by hardware and may be
+ * cleared by writing a one to the respective bit(s) to be cleared.  Any
+ * set bit that is also unmasked will result in Unrecoverable Error
+ * interrupt notification to the host CPU and/or Server Management chip
+ * and the transitioning of BladeEngine to an Offline state.
+ */
+struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP {
+       u8 cev_ue_status;       /* DWORD 0 */
+       u8 ctx_ue_status;       /* DWORD 0 */
+       u8 dbuf_ue_status;      /* DWORD 0 */
+       u8 erx_ue_status;       /* DWORD 0 */
+       u8 host_ue_status;      /* DWORD 0 */
+       u8 mpu_ue_status;       /* DWORD 0 */
+       u8 ndma_ue_status;      /* DWORD 0 */
+       u8 ptc_ue_status;       /* DWORD 0 */
+       u8 rdma_ue_status;      /* DWORD 0 */
+       u8 rxf_ue_status;       /* DWORD 0 */
+       u8 rxips_ue_status;     /* DWORD 0 */
+       u8 rxulp0_ue_status;    /* DWORD 0 */
+       u8 rxulp1_ue_status;    /* DWORD 0 */
+       u8 rxulp2_ue_status;    /* DWORD 0 */
+       u8 tim_ue_status;       /* DWORD 0 */
+       u8 tpost_ue_status;     /* DWORD 0 */
+       u8 tpre_ue_status;      /* DWORD 0 */
+       u8 txips_ue_status;     /* DWORD 0 */
+       u8 txulp0_ue_status;    /* DWORD 0 */
+       u8 txulp1_ue_status;    /* DWORD 0 */
+       u8 uc_ue_status;        /* DWORD 0 */
+       u8 wdma_ue_status;      /* DWORD 0 */
+       u8 txulp2_ue_status;    /* DWORD 0 */
+       u8 host1_ue_status;     /* DWORD 0 */
+       u8 p0_ob_link_ue_status;        /* DWORD 0 */
+       u8 p1_ob_link_ue_status;        /* DWORD 0 */
+       u8 host_gpio_ue_status; /* DWORD 0 */
+       u8 mbox_netw_ue_status; /* DWORD 0 */
+       u8 mbox_stor_ue_status; /* DWORD 0 */
+       u8 axgmac0_ue_status;   /* DWORD 0 */
+       u8 axgmac1_ue_status;   /* DWORD 0 */
+       u8 mpu_intpend_ue_status;       /* DWORD 0 */
+} __packed;
+struct PCICFG_UE_STATUS_LOW_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Unrecoverable Error Status (High) Register. Each bit corresponds to
+ * an internal Unrecoverable Error.  These are set by hardware and may be
+ * cleared by writing a one to the respective bit(s) to be cleared.  Any
+ * set bit that is also unmasked will result in Unrecoverable Error
+ * interrupt notification to the host CPU and/or Server Management chip
+ * and the transitioning of BladeEngine to an Offline state.
+ */
+struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP {
+       u8 jtag_ue_status;      /* DWORD 0 */
+       u8 lpcmemhost_ue_status;        /* DWORD 0 */
+       u8 mgmt_mac_ue_status;  /* DWORD 0 */
+       u8 mpu_iram_ue_status;  /* DWORD 0 */
+       u8 pcs0online_ue_status;        /* DWORD 0 */
+       u8 pcs1online_ue_status;        /* DWORD 0 */
+       u8 pctl0_ue_status;     /* DWORD 0 */
+       u8 pctl1_ue_status;     /* DWORD 0 */
+       u8 pmem_ue_status;      /* DWORD 0 */
+       u8 rr_ue_status;        /* DWORD 0 */
+       u8 rxpp_ue_status;      /* DWORD 0 */
+       u8 txpb_ue_status;      /* DWORD 0 */
+       u8 txp_ue_status;       /* DWORD 0 */
+       u8 xaui_ue_status;      /* DWORD 0 */
+       u8 arm_ue_status;       /* DWORD 0 */
+       u8 ipc_ue_status;       /* DWORD 0 */
+       u8 rsvd0[16];   /* DWORD 0 */
+} __packed;
+struct PCICFG_UE_STATUS_HI_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Unrecoverable Error Mask (Low) Register. Each bit, when set to one,
+ * will mask the associated Unrecoverable Error status bit from notification
+ * of Unrecoverable Error to the host CPU and/or Server Management chip and the
+ * transitioning of all BladeEngine units to an Offline state.
+ */
+struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
+       u8 cev_ue_mask; /* DWORD 0 */
+       u8 ctx_ue_mask; /* DWORD 0 */
+       u8 dbuf_ue_mask;        /* DWORD 0 */
+       u8 erx_ue_mask; /* DWORD 0 */
+       u8 host_ue_mask;        /* DWORD 0 */
+       u8 mpu_ue_mask; /* DWORD 0 */
+       u8 ndma_ue_mask;        /* DWORD 0 */
+       u8 ptc_ue_mask; /* DWORD 0 */
+       u8 rdma_ue_mask;        /* DWORD 0 */
+       u8 rxf_ue_mask; /* DWORD 0 */
+       u8 rxips_ue_mask;       /* DWORD 0 */
+       u8 rxulp0_ue_mask;      /* DWORD 0 */
+       u8 rxulp1_ue_mask;      /* DWORD 0 */
+       u8 rxulp2_ue_mask;      /* DWORD 0 */
+       u8 tim_ue_mask; /* DWORD 0 */
+       u8 tpost_ue_mask;       /* DWORD 0 */
+       u8 tpre_ue_mask;        /* DWORD 0 */
+       u8 txips_ue_mask;       /* DWORD 0 */
+       u8 txulp0_ue_mask;      /* DWORD 0 */
+       u8 txulp1_ue_mask;      /* DWORD 0 */
+       u8 uc_ue_mask;  /* DWORD 0 */
+       u8 wdma_ue_mask;        /* DWORD 0 */
+       u8 txulp2_ue_mask;      /* DWORD 0 */
+       u8 host1_ue_mask;       /* DWORD 0 */
+       u8 p0_ob_link_ue_mask;  /* DWORD 0 */
+       u8 p1_ob_link_ue_mask;  /* DWORD 0 */
+       u8 host_gpio_ue_mask;   /* DWORD 0 */
+       u8 mbox_netw_ue_mask;   /* DWORD 0 */
+       u8 mbox_stor_ue_mask;   /* DWORD 0 */
+       u8 axgmac0_ue_mask;     /* DWORD 0 */
+       u8 axgmac1_ue_mask;     /* DWORD 0 */
+       u8 mpu_intpend_ue_mask; /* DWORD 0 */
+} __packed;
+struct PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Unrecoverable Error Mask (High) Register. Each bit, when set to one,
+ * will mask the associated Unrecoverable Error status bit from notification
+ * of Unrecoverable Error to the host CPU and/or Server Management chip and the
+ * transitioning of all BladeEngine units to an Offline state.
+ */
+struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
+       u8 jtag_ue_mask;        /* DWORD 0 */
+       u8 lpcmemhost_ue_mask;  /* DWORD 0 */
+       u8 mgmt_mac_ue_mask;    /* DWORD 0 */
+       u8 mpu_iram_ue_mask;    /* DWORD 0 */
+       u8 pcs0online_ue_mask;  /* DWORD 0 */
+       u8 pcs1online_ue_mask;  /* DWORD 0 */
+       u8 pctl0_ue_mask;       /* DWORD 0 */
+       u8 pctl1_ue_mask;       /* DWORD 0 */
+       u8 pmem_ue_mask;        /* DWORD 0 */
+       u8 rr_ue_mask;  /* DWORD 0 */
+       u8 rxpp_ue_mask;        /* DWORD 0 */
+       u8 txpb_ue_mask;        /* DWORD 0 */
+       u8 txp_ue_mask; /* DWORD 0 */
+       u8 xaui_ue_mask;        /* DWORD 0 */
+       u8 arm_ue_mask; /* DWORD 0 */
+       u8 ipc_ue_mask; /* DWORD 0 */
+       u8 rsvd0[16];   /* DWORD 0 */
+} __packed;
+struct PCICFG_UE_STATUS_HI_MASK_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Online Control Register 0. This register controls whether various units
+ * within BladeEngine are in an Online or Offline state.
+ */
+struct BE_PCICFG_ONLINE0_CSR_AMAP {
+       u8 cev_online;  /* DWORD 0 */
+       u8 ctx_online;  /* DWORD 0 */
+       u8 dbuf_online; /* DWORD 0 */
+       u8 erx_online;  /* DWORD 0 */
+       u8 host_online; /* DWORD 0 */
+       u8 mpu_online;  /* DWORD 0 */
+       u8 ndma_online; /* DWORD 0 */
+       u8 ptc_online;  /* DWORD 0 */
+       u8 rdma_online; /* DWORD 0 */
+       u8 rxf_online;  /* DWORD 0 */
+       u8 rxips_online;        /* DWORD 0 */
+       u8 rxulp0_online;       /* DWORD 0 */
+       u8 rxulp1_online;       /* DWORD 0 */
+       u8 rxulp2_online;       /* DWORD 0 */
+       u8 tim_online;  /* DWORD 0 */
+       u8 tpost_online;        /* DWORD 0 */
+       u8 tpre_online; /* DWORD 0 */
+       u8 txips_online;        /* DWORD 0 */
+       u8 txulp0_online;       /* DWORD 0 */
+       u8 txulp1_online;       /* DWORD 0 */
+       u8 uc_online;   /* DWORD 0 */
+       u8 wdma_online; /* DWORD 0 */
+       u8 txulp2_online;       /* DWORD 0 */
+       u8 host1_online;        /* DWORD 0 */
+       u8 p0_ob_link_online;   /* DWORD 0 */
+       u8 p1_ob_link_online;   /* DWORD 0 */
+       u8 host_gpio_online;    /* DWORD 0 */
+       u8 mbox_netw_online;    /* DWORD 0 */
+       u8 mbox_stor_online;    /* DWORD 0 */
+       u8 axgmac0_online;      /* DWORD 0 */
+       u8 axgmac1_online;      /* DWORD 0 */
+       u8 mpu_intpend_online;  /* DWORD 0 */
+} __packed;
+struct PCICFG_ONLINE0_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Online Control Register 1. This register controls whether various units
+ * within BladeEngine are in an Online or Offline state.
+ */
+struct BE_PCICFG_ONLINE1_CSR_AMAP {
+       u8 jtag_online; /* DWORD 0 */
+       u8 lpcmemhost_online;   /* DWORD 0 */
+       u8 mgmt_mac_online;     /* DWORD 0 */
+       u8 mpu_iram_online;     /* DWORD 0 */
+       u8 pcs0online_online;   /* DWORD 0 */
+       u8 pcs1online_online;   /* DWORD 0 */
+       u8 pctl0_online;        /* DWORD 0 */
+       u8 pctl1_online;        /* DWORD 0 */
+       u8 pmem_online; /* DWORD 0 */
+       u8 rr_online;   /* DWORD 0 */
+       u8 rxpp_online; /* DWORD 0 */
+       u8 txpb_online; /* DWORD 0 */
+       u8 txp_online;  /* DWORD 0 */
+       u8 xaui_online; /* DWORD 0 */
+       u8 arm_online;  /* DWORD 0 */
+       u8 ipc_online;  /* DWORD 0 */
+       u8 rsvd0[16];   /* DWORD 0 */
+} __packed;
+struct PCICFG_ONLINE1_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Host Timer Register. */
+struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
+       u8 hosttimer[24];       /* DWORD 0 */
+       u8 hostintr;    /* DWORD 0 */
+       u8 rsvd0[7];    /* DWORD 0 */
+} __packed;
+struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* Scratchpad Register (for software use). */
+struct BE_PCICFG_SCRATCHPAD_CSR_AMAP {
+       u8 scratchpad[32];      /* DWORD 0 */
+} __packed;
+struct PCICFG_SCRATCHPAD_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* PCI Express Capabilities Register. */
+struct BE_PCICFG_PCIE_CAP_CSR_AMAP {
+       u8 capid[8];    /* DWORD 0 */
+       u8 nextcap[8];  /* DWORD 0 */
+       u8 capver[4];   /* DWORD 0 */
+       u8 devport[4];  /* DWORD 0 */
+       u8 rsvd0[6];    /* DWORD 0 */
+       u8 rsvd1[2];    /* DWORD 0 */
+} __packed;
+struct PCICFG_PCIE_CAP_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* PCI Express Device Capabilities Register. */
+struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP {
+       u8 payload[3];  /* DWORD 0 */
+       u8 rsvd0[3];    /* DWORD 0 */
+       u8 lo_lat[3];   /* DWORD 0 */
+       u8 l1_lat[3];   /* DWORD 0 */
+       u8 rsvd1[3];    /* DWORD 0 */
+       u8 rsvd2[3];    /* DWORD 0 */
+       u8 pwr_value[8];        /* DWORD 0 */
+       u8 pwr_scale[2];        /* DWORD 0 */
+       u8 rsvd3[4];    /* DWORD 0 */
+} __packed;
+struct PCICFG_PCIE_DEVCAP_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* PCI Express Device Control/Status Registers. */
+struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
+       u8 CorrErrReportEn;     /* DWORD 0 */
+       u8 NonFatalErrReportEn; /* DWORD 0 */
+       u8 FatalErrReportEn;    /* DWORD 0 */
+       u8 UnsuppReqReportEn;   /* DWORD 0 */
+       u8 EnableRelaxOrder;    /* DWORD 0 */
+       u8 Max_Payload_Size[3]; /* DWORD 0 */
+       u8 ExtendTagFieldEnable;        /* DWORD 0 */
+       u8 PhantomFnEnable;     /* DWORD 0 */
+       u8 AuxPwrPMEnable;      /* DWORD 0 */
+       u8 EnableNoSnoop;       /* DWORD 0 */
+       u8 Max_Read_Req_Size[3];        /* DWORD 0 */
+       u8 rsvd0;               /* DWORD 0 */
+       u8 CorrErrDetect;       /* DWORD 0 */
+       u8 NonFatalErrDetect;   /* DWORD 0 */
+       u8 FatalErrDetect;      /* DWORD 0 */
+       u8 UnsuppReqDetect;     /* DWORD 0 */
+       u8 AuxPwrDetect;        /* DWORD 0 */
+       u8 TransPending;        /* DWORD 0 */
+       u8 rsvd1[10];   /* DWORD 0 */
+} __packed;
+struct PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* PCI Express Link Capabilities Register. */
+struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP {
+       u8 MaxLinkSpeed[4];     /* DWORD 0 */
+       u8 MaxLinkWidth[6];     /* DWORD 0 */
+       u8 ASPMSupport[2];      /* DWORD 0 */
+       u8 L0sExitLat[3];       /* DWORD 0 */
+       u8 L1ExitLat[3];        /* DWORD 0 */
+       u8 rsvd0[6];    /* DWORD 0 */
+       u8 PortNum[8];  /* DWORD 0 */
+} __packed;
+struct PCICFG_PCIE_LINK_CAP_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* PCI Express Link Status Register. */
+struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
+       u8 ASPMCtl[2];  /* DWORD 0 */
+       u8 rsvd0;               /* DWORD 0 */
+       u8 ReadCmplBndry;       /* DWORD 0 */
+       u8 LinkDisable; /* DWORD 0 */
+       u8 RetrainLink; /* DWORD 0 */
+       u8 CommonClkConfig;     /* DWORD 0 */
+       u8 ExtendSync;  /* DWORD 0 */
+       u8 rsvd1[8];    /* DWORD 0 */
+       u8 LinkSpeed[4];        /* DWORD 0 */
+       u8 NegLinkWidth[6];     /* DWORD 0 */
+       u8 LinkTrainErr;        /* DWORD 0 */
+       u8 LinkTrain;   /* DWORD 0 */
+       u8 SlotClkConfig;       /* DWORD 0 */
+       u8 rsvd2[3];    /* DWORD 0 */
+} __packed;
+struct PCICFG_PCIE_LINK_STATUS_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* PCI Express MSI Configuration Register. */
+struct BE_PCICFG_MSI_CSR_AMAP {
+       u8 capid[8];    /* DWORD 0 */
+       u8 nextptr[8];  /* DWORD 0 */
+       u8 tablesize[11];       /* DWORD 0 */
+       u8 rsvd0[3];    /* DWORD 0 */
+       u8 funcmask;    /* DWORD 0 */
+       u8 en;          /* DWORD 0 */
+} __packed;
+struct PCICFG_MSI_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* MSI-X Table Offset Register. */
+struct BE_PCICFG_MSIX_TABLE_CSR_AMAP {
+       u8 tablebir[3]; /* DWORD 0 */
+       u8 offset[29];  /* DWORD 0 */
+} __packed;
+struct PCICFG_MSIX_TABLE_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* MSI-X PBA Offset Register. */
+struct BE_PCICFG_MSIX_PBA_CSR_AMAP {
+       u8 pbabir[3];   /* DWORD 0 */
+       u8 offset[29];  /* DWORD 0 */
+} __packed;
+struct PCICFG_MSIX_PBA_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* PCI Express MSI-X Message Vector Control Register. */
+struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
+       u8 vector_control;      /* DWORD 0 */
+       u8 rsvd0[31];   /* DWORD 0 */
+} __packed;
+struct PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* PCI Express MSI-X Message Data Register. */
+struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP {
+       u8 data[16];    /* DWORD 0 */
+       u8 rsvd0[16];   /* DWORD 0 */
+} __packed;
+struct PCICFG_MSIX_MSG_DATA_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* PCI Express MSI-X Message Address Register - High Part. */
+struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
+       u8 addr[32];    /* DWORD 0 */
+} __packed;
+struct PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP {
+       u32 dw[1];
+};
+
+/* PCI Express MSI-X Message Address Register - Low Part. */
+struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
+       u8 rsvd0[2];    /* DWORD 0 */
+       u8 addr[30];    /* DWORD 0 */
+} __packed;
+struct PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP {
+       u32 dw[1];
+};
+
+struct BE_PCICFG_ANON_18_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+} __packed;
+struct PCICFG_ANON_18_RSVD_AMAP {
+       u32 dw[1];
+};
+
+struct BE_PCICFG_ANON_19_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+} __packed;
+struct PCICFG_ANON_19_RSVD_AMAP {
+       u32 dw[1];
+};
+
+struct BE_PCICFG_ANON_20_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+       u8 rsvd1[25][32];       /* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_20_RSVD_AMAP {
+       u32 dw[26];
+};
+
+struct BE_PCICFG_ANON_21_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+       u8 rsvd1[1919][32];     /* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_21_RSVD_AMAP {
+       u32 dw[1920];
+};
+
+struct BE_PCICFG_ANON_22_MESSAGE_AMAP {
+       struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
+       struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
+       struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
+       struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
+} __packed;
+struct PCICFG_ANON_22_MESSAGE_AMAP {
+       u32 dw[4];
+};
+
+struct BE_PCICFG_ANON_23_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+       u8 rsvd1[895][32];      /* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_23_RSVD_AMAP {
+       u32 dw[896];
+};
+
+/* These PCI Configuration Space registers are for the Storage Function of
+ * BladeEngine (Function 0). The DWORD comments in the map below give each
+ * register's offset within the function's configuration space.
+ */
+struct BE_PCICFG0_CSRMAP_AMAP {
+       struct BE_PCICFG_ID_CSR_AMAP id;
+       u8 rsvd0[32];   /* DWORD 1 */
+       u8 rsvd1[32];   /* DWORD 2 */
+       u8 rsvd2[32];   /* DWORD 3 */
+       struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
+       struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
+       struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
+       struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
+       struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
+       struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
+       u8 rsvd3[32];   /* DWORD 10 */
+       struct BE_PCICFG_SUBSYSTEM_ID_F0_CSR_AMAP subsystem_id;
+       u8 rsvd4[32];   /* DWORD 12 */
+       u8 rsvd5[32];   /* DWORD 13 */
+       u8 rsvd6[32];   /* DWORD 14 */
+       u8 rsvd7[32];   /* DWORD 15 */
+       struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
+       struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
+       u8 rsvd8[32];   /* DWORD 21 */
+       struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
+       u8 rsvd9[32];   /* DWORD 23 */
+       u8 rsvd10[32];  /* DWORD 24 */
+       u8 rsvd11[32];  /* DWORD 25 */
+       u8 rsvd12[32];  /* DWORD 26 */
+       u8 rsvd13[32];  /* DWORD 27 */
+       u8 rsvd14[2][32];       /* DWORD 28 */
+       u8 rsvd15[32];  /* DWORD 30 */
+       u8 rsvd16[32];  /* DWORD 31 */
+       u8 rsvd17[8][32];       /* DWORD 32 */
+       struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
+       struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
+       struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
+       struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
+       struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
+       struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
+       u8 rsvd18[32];  /* DWORD 46 */
+       u8 rsvd19[32];  /* DWORD 47 */
+       u8 rsvd20[32];  /* DWORD 48 */
+       u8 rsvd21[32];  /* DWORD 49 */
+       struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
+       u8 rsvd22[32];  /* DWORD 51 */
+       struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
+       struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
+       struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
+       struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
+       struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
+       struct BE_PCICFG_MSI_CSR_AMAP msi;
+       struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
+       struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
+       u8 rsvd23[32];  /* DWORD 60 */
+       u8 rsvd24[32];  /* DWORD 61 */
+       u8 rsvd25[32];  /* DWORD 62 */
+       u8 rsvd26[32];  /* DWORD 63 */
+       u8 rsvd27[32];  /* DWORD 64 */
+       u8 rsvd28[32];  /* DWORD 65 */
+       u8 rsvd29[32];  /* DWORD 66 */
+       u8 rsvd30[32];  /* DWORD 67 */
+       u8 rsvd31[32];  /* DWORD 68 */
+       u8 rsvd32[32];  /* DWORD 69 */
+       u8 rsvd33[32];  /* DWORD 70 */
+       u8 rsvd34[32];  /* DWORD 71 */
+       u8 rsvd35[32];  /* DWORD 72 */
+       u8 rsvd36[32];  /* DWORD 73 */
+       u8 rsvd37[32];  /* DWORD 74 */
+       u8 rsvd38[32];  /* DWORD 75 */
+       u8 rsvd39[32];  /* DWORD 76 */
+       u8 rsvd40[32];  /* DWORD 77 */
+       u8 rsvd41[32];  /* DWORD 78 */
+       u8 rsvd42[32];  /* DWORD 79 */
+       u8 rsvd43[32];  /* DWORD 80 */
+       u8 rsvd44[32];  /* DWORD 81 */
+       u8 rsvd45[32];  /* DWORD 82 */
+       u8 rsvd46[32];  /* DWORD 83 */
+       u8 rsvd47[32];  /* DWORD 84 */
+       u8 rsvd48[32];  /* DWORD 85 */
+       u8 rsvd49[32];  /* DWORD 86 */
+       u8 rsvd50[32];  /* DWORD 87 */
+       u8 rsvd51[32];  /* DWORD 88 */
+       u8 rsvd52[32];  /* DWORD 89 */
+       u8 rsvd53[32];  /* DWORD 90 */
+       u8 rsvd54[32];  /* DWORD 91 */
+       u8 rsvd55[32];  /* DWORD 92 */
+       u8 rsvd56[832]; /* DWORD 93 */
+       u8 rsvd57[32];  /* DWORD 119 */
+       u8 rsvd58[32];  /* DWORD 120 */
+       u8 rsvd59[32];  /* DWORD 121 */
+       u8 rsvd60[32];  /* DWORD 122 */
+       u8 rsvd61[32];  /* DWORD 123 */
+       u8 rsvd62[32];  /* DWORD 124 */
+       u8 rsvd63[32];  /* DWORD 125 */
+       u8 rsvd64[32];  /* DWORD 126 */
+       u8 rsvd65[32];  /* DWORD 127 */
+       u8 rsvd66[61440];       /* DWORD 128 */
+       struct BE_PCICFG_ANON_22_MESSAGE_AMAP message[32];
+       u8 rsvd67[28672];       /* DWORD 2176 */
+       u8 rsvd68[32];  /* DWORD 3072 */
+       u8 rsvd69[1023][32];    /* DWORD 3073 */
+} __packed;
+struct PCICFG0_CSRMAP_AMAP {
+       u32 dw[4096];
+};
+
+struct BE_PCICFG_ANON_24_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+} __packed;
+struct PCICFG_ANON_24_RSVD_AMAP {
+       u32 dw[1];
+};
+
+struct BE_PCICFG_ANON_25_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+} __packed;
+struct PCICFG_ANON_25_RSVD_AMAP {
+       u32 dw[1];
+};
+
+struct BE_PCICFG_ANON_26_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+} __packed;
+struct PCICFG_ANON_26_RSVD_AMAP {
+       u32 dw[1];
+};
+
+struct BE_PCICFG_ANON_27_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+       u8 rsvd1[32];   /* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_27_RSVD_AMAP {
+       u32 dw[2];
+};
+
+struct BE_PCICFG_ANON_28_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+       u8 rsvd1[3][32];        /* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_28_RSVD_AMAP {
+       u32 dw[4];
+};
+
+struct BE_PCICFG_ANON_29_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+       u8 rsvd1[36][32];       /* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_29_RSVD_AMAP {
+       u32 dw[37];
+};
+
+struct BE_PCICFG_ANON_30_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+       u8 rsvd1[1930][32];     /* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_30_RSVD_AMAP {
+       u32 dw[1931];
+};
+
+struct BE_PCICFG_ANON_31_MESSAGE_AMAP {
+       struct BE_PCICFG_MSIX_VECTOR_CONTROL_CSR_AMAP vec_ctrl;
+       struct BE_PCICFG_MSIX_MSG_DATA_CSR_AMAP msg_data;
+       struct BE_PCICFG_MSIX_MSG_ADDR_HI_CSR_AMAP addr_hi;
+       struct BE_PCICFG_MSIX_MSG_ADDR_LO_CSR_AMAP addr_low;
+} __packed;
+struct PCICFG_ANON_31_MESSAGE_AMAP {
+       u32 dw[4];
+};
+
+struct BE_PCICFG_ANON_32_RSVD_AMAP {
+       u8 rsvd0[32];   /* DWORD 0 */
+       u8 rsvd1[895][32];      /* DWORD 1 */
+} __packed;
+struct PCICFG_ANON_32_RSVD_AMAP {
+       u32 dw[896];
+};
+
+/* This PCI configuration space register map is for the Networking Function of
+ * BladeEngine (Function 1).
+ */
+struct BE_PCICFG1_CSRMAP_AMAP {
+       struct BE_PCICFG_ID_CSR_AMAP id;
+       u8 rsvd0[32];   /* DWORD 1 */
+       u8 rsvd1[32];   /* DWORD 2 */
+       u8 rsvd2[32];   /* DWORD 3 */
+       struct BE_PCICFG_IOBAR_CSR_AMAP iobar;
+       struct BE_PCICFG_MEMBAR0_CSR_AMAP membar0;
+       struct BE_PCICFG_MEMBAR1_LO_CSR_AMAP membar1_lo;
+       struct BE_PCICFG_MEMBAR1_HI_CSR_AMAP membar1_hi;
+       struct BE_PCICFG_MEMBAR2_LO_CSR_AMAP membar2_lo;
+       struct BE_PCICFG_MEMBAR2_HI_CSR_AMAP membar2_hi;
+       u8 rsvd3[32];   /* DWORD 10 */
+       struct BE_PCICFG_SUBSYSTEM_ID_F1_CSR_AMAP subsystem_id;
+       u8 rsvd4[32];   /* DWORD 12 */
+       u8 rsvd5[32];   /* DWORD 13 */
+       u8 rsvd6[32];   /* DWORD 14 */
+       u8 rsvd7[32];   /* DWORD 15 */
+       struct BE_PCICFG_SEMAPHORE_CSR_AMAP semaphore[4];
+       struct BE_PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
+       u8 rsvd8[32];   /* DWORD 21 */
+       struct BE_PCICFG_SCRATCHPAD_CSR_AMAP scratchpad;
+       u8 rsvd9[32];   /* DWORD 23 */
+       u8 rsvd10[32];  /* DWORD 24 */
+       u8 rsvd11[32];  /* DWORD 25 */
+       u8 rsvd12[32];  /* DWORD 26 */
+       u8 rsvd13[32];  /* DWORD 27 */
+       u8 rsvd14[2][32];       /* DWORD 28 */
+       u8 rsvd15[32];  /* DWORD 30 */
+       u8 rsvd16[32];  /* DWORD 31 */
+       u8 rsvd17[8][32];       /* DWORD 32 */
+       struct BE_PCICFG_UE_STATUS_LOW_CSR_AMAP ue_status_low;
+       struct BE_PCICFG_UE_STATUS_HI_CSR_AMAP ue_status_hi;
+       struct BE_PCICFG_UE_STATUS_LOW_MASK_CSR_AMAP ue_status_low_mask;
+       struct BE_PCICFG_UE_STATUS_HI_MASK_CSR_AMAP ue_status_hi_mask;
+       struct BE_PCICFG_ONLINE0_CSR_AMAP online0;
+       struct BE_PCICFG_ONLINE1_CSR_AMAP online1;
+       u8 rsvd18[32];  /* DWORD 46 */
+       u8 rsvd19[32];  /* DWORD 47 */
+       u8 rsvd20[32];  /* DWORD 48 */
+       u8 rsvd21[32];  /* DWORD 49 */
+       struct BE_PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP host_timer_int_ctrl;
+       u8 rsvd22[32];  /* DWORD 51 */
+       struct BE_PCICFG_PCIE_CAP_CSR_AMAP pcie_cap;
+       struct BE_PCICFG_PCIE_DEVCAP_CSR_AMAP pcie_devcap;
+       struct BE_PCICFG_PCIE_CONTROL_STATUS_CSR_AMAP pcie_control_status;
+       struct BE_PCICFG_PCIE_LINK_CAP_CSR_AMAP pcie_link_cap;
+       struct BE_PCICFG_PCIE_LINK_STATUS_CSR_AMAP pcie_link_status;
+       struct BE_PCICFG_MSI_CSR_AMAP msi;
+       struct BE_PCICFG_MSIX_TABLE_CSR_AMAP msix_table_offset;
+       struct BE_PCICFG_MSIX_PBA_CSR_AMAP msix_pba_offset;
+       u8 rsvd23[64];  /* DWORD 60 */
+       u8 rsvd24[32];  /* DWORD 62 */
+       u8 rsvd25[32];  /* DWORD 63 */
+       u8 rsvd26[32];  /* DWORD 64 */
+       u8 rsvd27[32];  /* DWORD 65 */
+       u8 rsvd28[32];  /* DWORD 66 */
+       u8 rsvd29[32];  /* DWORD 67 */
+       u8 rsvd30[32];  /* DWORD 68 */
+       u8 rsvd31[32];  /* DWORD 69 */
+       u8 rsvd32[32];  /* DWORD 70 */
+       u8 rsvd33[32];  /* DWORD 71 */
+       u8 rsvd34[32];  /* DWORD 72 */
+       u8 rsvd35[32];  /* DWORD 73 */
+       u8 rsvd36[32];  /* DWORD 74 */
+       u8 rsvd37[128]; /* DWORD 75 */
+       u8 rsvd38[32];  /* DWORD 79 */
+       u8 rsvd39[1184];        /* DWORD 80 */
+       u8 rsvd40[61792];       /* DWORD 117 */
+       struct BE_PCICFG_ANON_31_MESSAGE_AMAP message[32];
+       u8 rsvd41[28672];       /* DWORD 2176 */
+       u8 rsvd42[32];  /* DWORD 3072 */
+       u8 rsvd43[1023][32];    /* DWORD 3073 */
+} __packed;
+struct PCICFG1_CSRMAP_AMAP {
+       u32 dw[4096];
+};
+
+#endif /* __pcicfg_amap_h__ */
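
As one worked example from the maps above: in both PCICFG0 and PCICFG1 the semaphore[4] array occupies DWORDs 16-19, i.e. byte offsets 0x40-0x4c, with the 'locked' flag in bit 0 of each register. The sketch below assumes these CSRs are reachable through ordinary PCI config cycles, which this header alone does not guarantee; the offset macro and helper are illustrative:

#include <linux/pci.h>

#define BE_PCICFG_SEMAPHORE0_OFFSET	0x40	/* DWORD 16 in the CSR map */

/* Illustrative test of the 'locked' bit of semaphore 0. */
static int be_semaphore_locked(struct pci_dev *pdev)
{
	u32 val;

	pci_read_config_dword(pdev, BE_PCICFG_SEMAPHORE0_OFFSET, &val);
	return val & 0x1;		/* 'locked' is bit 0 */
}
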
diff --git a/drivers/staging/benet/fw/post_codes.h b/drivers/staging/benet/fw/post_codes.h
new file mode 100644 (file)
index 0000000..6d1621f
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __post_codes_amap_h__
+#define __post_codes_amap_h__
+
+/* --- MGMT_HBA_POST_STAGE_ENUM --- */
+#define POST_STAGE_POWER_ON_RESET   (0)        /* State after a cold or warm boot. */
+#define POST_STAGE_AWAITING_HOST_RDY (1)       /* ARM boot code awaiting a
+                                               go-ahead from the host. */
+#define POST_STAGE_HOST_RDY (2)        /* Host has given go-ahead to ARM. */
+#define POST_STAGE_BE_RESET (3)        /* Host wants to reset the chip
+                                               (chip workaround). */
+#define POST_STAGE_SEEPROM_CS_START (256)      /* SEEPROM checksum
+                                               test start. */
+#define POST_STAGE_SEEPROM_CS_DONE  (257)      /* SEEPROM checksum test
+                                                       done. */
+#define POST_STAGE_DDR_CONFIG_START (512)      /* DDR configuration start. */
+#define POST_STAGE_DDR_CONFIG_DONE  (513)      /* DDR configuration done. */
+#define POST_STAGE_DDR_CALIBRATE_START  (768)  /* DDR calibration start. */
+#define POST_STAGE_DDR_CALIBRATE_DONE   (769)  /* DDR calibration done. */
+#define POST_STAGE_DDR_TEST_START   (1024)     /* DDR memory test start. */
+#define POST_STAGE_DDR_TEST_DONE    (1025)     /* DDR memory test done. */
+#define POST_STAGE_REDBOOT_INIT_START   (1536) /* Redboot starts execution. */
+#define POST_STAGE_REDBOOT_INIT_DONE (1537)    /* Redboot completed execution. */
+#define POST_STAGE_FW_IMAGE_LOAD_START (1792)  /* Firmware image load to
+                                                       DDR start. */
+#define POST_STAGE_FW_IMAGE_LOAD_DONE   (1793) /* Firmware image load
+                                                       to DDR done. */
+#define POST_STAGE_ARMFW_START          (2048) /* ARMfw runtime code
+                                               starts execution. */
+#define POST_STAGE_DHCP_QUERY_START     (2304) /* DHCP server query start. */
+#define POST_STAGE_DHCP_QUERY_DONE      (2305) /* DHCP server query done. */
+#define POST_STAGE_BOOT_TARGET_DISCOVERY_START (2560)  /* Boot Target
+                                               Discovery Start. */
+#define POST_STAGE_BOOT_TARGET_DISCOVERY_DONE (2561)   /* Boot Target
+                                               Discovery Done. */
+#define POST_STAGE_RC_OPTION_SET        (2816) /* Remote configuration
+                                               option is set in SEEPROM */
+#define POST_STAGE_SWITCH_LINK          (2817) /* Wait for link up on switch */
+#define POST_STAGE_SEND_ICDS_MESSAGE    (2818) /* Send the ICDS message
+                                               to switch */
+#define POST_STAGE_PERFROM_TFTP         (2819) /* Download XML file using TFTP */
+#define POST_STAGE_PARSE_XML            (2820) /* Parse XML file */
+#define POST_STAGE_DOWNLOAD_IMAGE       (2821) /* Download IMAGE from
+                                               TFTP server */
+#define POST_STAGE_FLASH_IMAGE          (2822) /* Flash the IMAGE */
+#define POST_STAGE_RC_DONE              (2823) /* Remote configuration
+                                               complete */
+#define POST_STAGE_REBOOT_SYSTEM        (2824) /* Upgrade IMAGE done,
+                                               reboot required */
+#define POST_STAGE_MAC_ADDRESS          (3072) /* MAC Address Check */
+#define POST_STAGE_ARMFW_READY          (49152)        /* ARMfw is done with POST
+                                               and ready. */
+#define POST_STAGE_ARMFW_UE             (61440)        /* ARMfw has asserted an
+                                               unrecoverable error. The
+                                               lower 3 hex digits of the
+                                               stage code identify the
+                                               unique error code.
+                                               */
+
+/* This structure defines the format of the MPU semaphore
+ * register when used for POST.
+ */
+struct BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP {
+       u8 stage[16];   /* DWORD 0 */
+       u8 rsvd0[10];   /* DWORD 0 */
+       u8 iscsi_driver_loaded; /* DWORD 0 */
+       u8 option_rom_installed;        /* DWORD 0 */
+       u8 iscsi_ip_conflict;   /* DWORD 0 */
+       u8 iscsi_no_ip; /* DWORD 0 */
+       u8 backup_fw;   /* DWORD 0 */
+       u8 error;               /* DWORD 0 */
+} __packed;
+struct MGMT_HBA_POST_STATUS_STRUCT_AMAP {
+       u32 dw[1];
+};
+
+/* --- MGMT_HBA_POST_DUMMY_BITS_ENUM --- */
+#define POST_BIT_ISCSI_LOADED           (26)
+#define POST_BIT_OPTROM_INST            (27)
+#define POST_BIT_BAD_IP_ADDR            (28)
+#define POST_BIT_NO_IP_ADDR             (29)
+#define POST_BIT_BACKUP_FW              (30)
+#define POST_BIT_ERROR                  (31)
+
+/* --- MGMT_HBA_POST_DUMMY_VALUES_ENUM --- */
+#define POST_ISCSI_DRIVER_LOADED        (67108864)
+#define POST_OPTROM_INSTALLED           (134217728)
+#define POST_ISCSI_IP_ADDRESS_CONFLICT  (268435456)
+#define POST_ISCSI_NO_IP_ADDRESS        (536870912)
+#define POST_BACKUP_FW_LOADED           (1073741824)
+#define POST_FATAL_ERROR                (2147483648)
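+
+/*
+ * Illustrative sketch (an assumption, not part of the definitions above):
+ * a host driver that has read the MPU semaphore register into "sem" could
+ * decode the POST state as follows.  Per
+ * BE_MGMT_HBA_POST_STATUS_STRUCT_AMAP, the stage occupies bits 0-15 and
+ * the flags the upper bits.
+ *
+ *     u32 stage = sem & 0xFFFF;               // a POST_STAGE_* value
+ *
+ *     if (sem & POST_FATAL_ERROR)
+ *             // lower 3 hex digits identify the unique error code
+ *             handle_ue(stage & 0xFFF);       // handle_ue() is hypothetical
+ *     else if (stage == POST_STAGE_ARMFW_READY)
+ *             continue_init();                // hypothetical
+ */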
+
+#endif /* __post_codes_amap_h__ */
diff --git a/drivers/staging/benet/fw/regmap.h b/drivers/staging/benet/fw/regmap.h
new file mode 100644 (file)
index 0000000..e816ba2
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+/*
+ * Autogenerated by srcgen version: 0127
+ */
+#ifndef __regmap_amap_h__
+#define __regmap_amap_h__
+#include "pcicfg.h"
+#include "ep.h"
+#include "cev.h"
+#include "mpu.h"
+#include "doorbells.h"
+
+/*
+ * This is the control and status register map for BladeEngine, showing
+ * the relative size and offset of each sub-module. The CSR registers
+ * are identical for the network and storage PCI functions. The
+ * CSR map is shown below, followed by details of each block,
+ * in sub-sections.  The sub-sections begin with a description
+ * of CSRs that are instantiated in multiple blocks.
+ */
+struct BE_BLADE_ENGINE_CSRMAP_AMAP {
+       struct BE_MPU_CSRMAP_AMAP mpu;
+       u8 rsvd0[8192]; /* DWORD 256 */
+       u8 rsvd1[8192]; /* DWORD 512 */
+       struct BE_CEV_CSRMAP_AMAP cev;
+       u8 rsvd2[8192]; /* DWORD 1024 */
+       u8 rsvd3[8192]; /* DWORD 1280 */
+       u8 rsvd4[8192]; /* DWORD 1536 */
+       u8 rsvd5[8192]; /* DWORD 1792 */
+       u8 rsvd6[8192]; /* DWORD 2048 */
+       u8 rsvd7[8192]; /* DWORD 2304 */
+       u8 rsvd8[8192]; /* DWORD 2560 */
+       u8 rsvd9[8192]; /* DWORD 2816 */
+       u8 rsvd10[8192];        /* DWORD 3072 */
+       u8 rsvd11[8192];        /* DWORD 3328 */
+       u8 rsvd12[8192];        /* DWORD 3584 */
+       u8 rsvd13[8192];        /* DWORD 3840 */
+       u8 rsvd14[8192];        /* DWORD 4096 */
+       u8 rsvd15[8192];        /* DWORD 4352 */
+       u8 rsvd16[8192];        /* DWORD 4608 */
+       u8 rsvd17[8192];        /* DWORD 4864 */
+       u8 rsvd18[8192];        /* DWORD 5120 */
+       u8 rsvd19[8192];        /* DWORD 5376 */
+       u8 rsvd20[8192];        /* DWORD 5632 */
+       u8 rsvd21[8192];        /* DWORD 5888 */
+       u8 rsvd22[8192];        /* DWORD 6144 */
+       u8 rsvd23[17152][32];   /* DWORD 6400 */
+} __packed;
+struct BLADE_ENGINE_CSRMAP_AMAP {
+       u32 dw[23552];
+};
+
+#endif /* __regmap_amap_h__ */
diff --git a/drivers/staging/benet/hwlib/bestatus.h b/drivers/staging/benet/hwlib/bestatus.h
new file mode 100644 (file)
index 0000000..59c7a4b
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#ifndef _BESTATUS_H_
+#define _BESTATUS_H_
+
+#define BE_SUCCESS                      (0x00000000L)
+/*
+ * MessageId: BE_PENDING
+ *  The BladeEngine driver call succeeded, and the operation is pending.
+ */
+#define BE_PENDING                       (0x20070001L)
+#define BE_STATUS_PENDING                (BE_PENDING)
+/*
+ * MessageId: BE_NOT_OK
+ *  An error occurred.
+ */
+#define BE_NOT_OK                        (0xE0070002L)
+/*
+ * MessageId: BE_STATUS_SYSTEM_RESOURCES
+ *  Insufficient host system resources exist to complete the API.
+ */
+#define BE_STATUS_SYSTEM_RESOURCES       (0xE0070003L)
+/*
+ * MessageId: BE_STATUS_CHIP_RESOURCES
+ *  Insufficient chip resources exist to complete the API.
+ */
+#define BE_STATUS_CHIP_RESOURCES         (0xE0070004L)
+/*
+ * MessageId: BE_STATUS_NO_RESOURCE
+ *  Insufficient resources to complete request.
+ */
+#define BE_STATUS_NO_RESOURCE            (0xE0070005L)
+/*
+ * MessageId: BE_STATUS_BUSY
+ *  Resource is currently busy.
+ */
+#define BE_STATUS_BUSY                   (0xE0070006L)
+/*
+ * MessageId: BE_STATUS_INVALID_PARAMETER
+ *  Invalid Parameter in request.
+ */
+#define BE_STATUS_INVALID_PARAMETER      (0xE0000007L)
+/*
+ * MessageId: BE_STATUS_NOT_SUPPORTED
+ *  Requested operation is not supported.
+ */
+#define BE_STATUS_NOT_SUPPORTED          (0xE000000DL)
+
+/*
+ * ***************************************************************************
+ *                     E T H E R N E T   S T A T U S
+ * ***************************************************************************
+ */
+
+/*
+ * MessageId: BE_ETH_TX_ERROR
+ *  The Ethernet device driver failed to transmit a packet.
+ */
+#define BE_ETH_TX_ERROR                  (0xE0070101L)
+
+/*
+ * ***************************************************************************
+ *                     S H A R E D   S T A T U S
+ * ***************************************************************************
+ */
+
+/*
+ * MessageId: BE_STATUS_VBD_INVALID_VERSION
+ *  The device driver is not compatible with this version of the VBD.
+ */
+#define BE_STATUS_INVALID_VERSION    (0xE0070402L)
+/*
+ * MessageId: BE_STATUS_DOMAIN_DENIED
+ *  The operation failed to complete due to insufficient access
+ *  rights for the requesting domain.
+ */
+#define BE_STATUS_DOMAIN_DENIED          (0xE0070403L)
+/*
+ * MessageId: BE_STATUS_TCP_NOT_STARTED
+ *  The embedded TCP/IP stack has not been started.
+ */
+#define BE_STATUS_TCP_NOT_STARTED        (0xE0070409L)
+/*
+ * MessageId: BE_STATUS_NO_MCC_WRB
+ *  No free MCC WRBs are available for posting the request.
+ */
+#define BE_STATUS_NO_MCC_WRB                 (0xE0070414L)
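+
+/*
+ * Illustrative usage sketch (an assumption, not part of this header): hwlib
+ * calls return BE_SUCCESS (0) on immediate completion, BE_PENDING (a
+ * positive value) when the FWCMD completion will arrive later via an MCC
+ * callback, and a negative BE_STATUS_* code on failure, so callers
+ * typically branch three ways (my_cb and my_ctx are hypothetical caller
+ * names):
+ *
+ *     status = be_eth_rq_destroy_options(eth_rq, true, my_cb, my_ctx);
+ *     if (status == BE_SUCCESS)
+ *             ;                       // completed inline
+ *     else if (status == BE_PENDING)
+ *             ;                       // my_cb runs on MCC completion
+ *     else
+ *             TRACE(DL_ERR, "fwcmd failed: 0x%x", status);
+ */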
+
+#endif /* _BESTATUS_H_ */
diff --git a/drivers/staging/benet/hwlib/cq.c b/drivers/staging/benet/hwlib/cq.c
new file mode 100644 (file)
index 0000000..6504586
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include "hwlib.h"
+#include "bestatus.h"
+
+/*
+ * Completion Queue Objects
+ */
+/*
+ *============================================================================
+ *                  P U B L I C  R O U T I N E S
+ *============================================================================
+ */
+
+/*
+    This routine creates a completion queue based on the client completion
+    queue configuration information.
+
+
+    FunctionObject      - Handle to a function object
+    CqBaseVa            - Base VA for the CQ ring
+    NumEntries          - CEV_CQ_CNT_* values
+    solEventEnable      - 0 = All CQEs can generate Events if CQ is eventable
+                       1 = only CQEs with solicited bit set are eventable
+    eventable           - Eventable CQ, generates interrupts.
+    nodelay             - 1 = Force interrupt, relevant if CQ is eventable.
+                       Interrupt is asserted immediately after EQE
+                       write is confirmed, regardless of EQ Timer
+                       or watermark settings.
+    wme                 - Enable watermark based coalescing
+    wmThresh            - High watermark (CQ fullness at which an event
+                       or interrupt should be asserted).  These are the
+                       CEV_WATERMARK encoded values.
+    EqObject            - EQ Handle to assign to this CQ
+    ppCqObject          - Internal CQ Handle returned.
+
+    Returns BE_SUCCESS if successful, otherwise a useful error code is
+       returned.
+
+    IRQL < DISPATCH_LEVEL
+
+*/
+int be_cq_create(struct be_function_object *pfob,
+       struct ring_desc *rd, u32 length, bool solicited_eventable,
+       bool no_delay, u32 wm_thresh,
+       struct be_eq_object *eq_object, struct be_cq_object *cq_object)
+{
+       int status = BE_SUCCESS;
+       u32 num_entries_encoding;
+       u32 num_entries = length / sizeof(struct MCC_CQ_ENTRY_AMAP);
+       struct FWCMD_COMMON_CQ_CREATE *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       u32 n;
+       unsigned long irql;
+
+       ASSERT(rd);
+       ASSERT(cq_object);
+       ASSERT(length % sizeof(struct MCC_CQ_ENTRY_AMAP) == 0);
+
+       switch (num_entries) {
+       case 256:
+               num_entries_encoding = CEV_CQ_CNT_256;
+               break;
+       case 512:
+               num_entries_encoding = CEV_CQ_CNT_512;
+               break;
+       case 1024:
+               num_entries_encoding = CEV_CQ_CNT_1024;
+               break;
+       default:
+               ASSERT(0);
+               return BE_STATUS_INVALID_PARAMETER;
+       }
+
+       /*
+        * All CQ entries are the same size.  Use the iSCSI version
+        * as a test for the proper rd length.
+        */
+       memset(cq_object, 0, sizeof(*cq_object));
+
+       atomic_set(&cq_object->ref_count, 0);
+       cq_object->parent_function = pfob;
+       cq_object->eq_object = eq_object;
+       cq_object->num_entries = num_entries;
+       /* save for MCC cq processing */
+       cq_object->va = rd->va;
+
+       /* map into UT. */
+       length = num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP);
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               ASSERT(wrb);
+               TRACE(DL_ERR, "No free MCC WRBs in create CQ.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto Error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_CQ_CREATE);
+
+       fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
+                                                                       length);
+
+       AMAP_SET_BITS_PTR(CQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
+       n = pfob->pci_function_number;
+       AMAP_SET_BITS_PTR(CQ_CONTEXT, Func, &fwcmd->params.request.context, n);
+
+       n = (eq_object != NULL);
+       AMAP_SET_BITS_PTR(CQ_CONTEXT, Eventable,
+                               &fwcmd->params.request.context, n);
+       AMAP_SET_BITS_PTR(CQ_CONTEXT, Armed, &fwcmd->params.request.context, 1);
+
+       n = eq_object ? eq_object->eq_id : 0;
+       AMAP_SET_BITS_PTR(CQ_CONTEXT, EQID, &fwcmd->params.request.context, n);
+       AMAP_SET_BITS_PTR(CQ_CONTEXT, Count,
+                       &fwcmd->params.request.context, num_entries_encoding);
+
+       n = 0; /* Protection Domain is always 0 in Linux driver */
+       AMAP_SET_BITS_PTR(CQ_CONTEXT, PD, &fwcmd->params.request.context, n);
+       AMAP_SET_BITS_PTR(CQ_CONTEXT, NoDelay,
+                               &fwcmd->params.request.context, no_delay);
+       AMAP_SET_BITS_PTR(CQ_CONTEXT, SolEvent,
+                       &fwcmd->params.request.context, solicited_eventable);
+
+       n = (wm_thresh != 0xFFFFFFFF);
+       AMAP_SET_BITS_PTR(CQ_CONTEXT, WME, &fwcmd->params.request.context, n);
+
+       n = (n ? wm_thresh : 0);
+       AMAP_SET_BITS_PTR(CQ_CONTEXT, Watermark,
+                               &fwcmd->params.request.context, n);
+       /* Create a page list for the FWCMD. */
+       be_rd_to_pa_list(rd, fwcmd->params.request.pages,
+                         ARRAY_SIZE(fwcmd->params.request.pages));
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+                       NULL, NULL, fwcmd, NULL);
+       if (status != BE_SUCCESS) {
+               TRACE(DL_ERR, "MCC to create CQ failed.");
+               goto Error;
+       }
+       /* Remember the CQ id. */
+       cq_object->cq_id = fwcmd->params.response.cq_id;
+
+       /* insert this cq into eq_object reference */
+       if (eq_object) {
+               atomic_inc(&eq_object->ref_count);
+               list_add_tail(&cq_object->cqlist_for_eq,
+                                       &eq_object->cq_list_head);
+       }
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
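+
+/*
+ * Illustrative usage sketch (an assumption, not part of this file): the
+ * caller allocates a DMA-coherent ring, describes it with a struct
+ * ring_desc, and creates a 256-entry eventable CQ bound to an existing EQ.
+ * Passing 0xFFFFFFFF for wm_thresh leaves watermark coalescing disabled.
+ *
+ *     struct be_cq_object cq;
+ *     struct ring_desc rd;    // va/pa/length filled by the caller
+ *
+ *     status = be_cq_create(pfob, &rd,
+ *                           256 * sizeof(struct MCC_CQ_ENTRY_AMAP),
+ *                           false,        // all CQEs eventable
+ *                           false,        // no forced interrupt
+ *                           0xFFFFFFFF,   // watermark disabled
+ *                           &eq, &cq);
+ */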
+
+/*
+
+    Dereferences the given object. Once the object's reference count drops to
+    zero, the object is destroyed and all resources that are held by this object
+    are released.  The on-chip context is also destroyed along with the queue
+    ID, and any mappings made into the UT.
+
+    cq_object            - CQ handle returned from cq_object_create.
+
+    Returns BE_SUCCESS.
+
+    IRQL: IRQL < DISPATCH_LEVEL
+*/
+int be_cq_destroy(struct be_cq_object *cq_object)
+{
+       int status = 0;
+
+       /* Nothing should reference this CQ at this point. */
+       ASSERT(atomic_read(&cq_object->ref_count) == 0);
+
+       /* Send fwcmd to destroy the CQ. */
+       status = be_function_ring_destroy(cq_object->parent_function,
+                    cq_object->cq_id, FWCMD_RING_TYPE_CQ,
+                                       NULL, NULL, NULL, NULL);
+       ASSERT(status == 0);
+
+       /* Remove reference if this is an eventable CQ. */
+       if (cq_object->eq_object) {
+               atomic_dec(&cq_object->eq_object->ref_count);
+               list_del(&cq_object->cqlist_for_eq);
+       }
+       return BE_SUCCESS;
+}
+
diff --git a/drivers/staging/benet/hwlib/eq.c b/drivers/staging/benet/hwlib/eq.c
new file mode 100644 (file)
index 0000000..db92ccd
--- /dev/null
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include "hwlib.h"
+#include "bestatus.h"
+/*
+    This routine creates an event queue based on the client completion
+    queue configuration information.
+
+    FunctionObject      - Handle to a function object
+    EqBaseVa            - Base VA for the EQ ring
+    SizeEncoding        - The encoded size for the EQ entries. This value is
+                       either CEV_EQ_SIZE_4 or CEV_EQ_SIZE_16
+    NumEntries          - CEV_CQ_CNT_* values.
+    Watermark           - Enables watermark based coalescing.  This parameter
+                       must be of the type CEV_WMARK_* if watermarks
+                       are enabled.  If watermarks are to be disabled,
+                       this value should be -1.
+    TimerDelay          - If a timer delay is enabled this value should be the
+                       time of the delay in 8 microsecond units.  If
+                       delays are not used this parameter should be
+                       set to -1.
+    ppEqObject          - Internal EQ Handle returned.
+
+    Returns BE_SUCCESS if successful, otherwise a useful error code
+       is returned.
+
+    IRQL < DISPATCH_LEVEL
+*/
+int
+be_eq_create(struct be_function_object *pfob,
+               struct ring_desc *rd, u32 eqe_size, u32 num_entries,
+               u32 watermark,  /* CEV_WMARK_* or -1 */
+               u32 timer_delay,        /* in 8us units, or -1 */
+               struct be_eq_object *eq_object)
+{
+       int status = BE_SUCCESS;
+       u32 num_entries_encoding, eqe_size_encoding, length;
+       struct FWCMD_COMMON_EQ_CREATE *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       u32 n;
+       unsigned long irql;
+
+       ASSERT(rd);
+       ASSERT(eq_object);
+
+       switch (num_entries) {
+       case 256:
+               num_entries_encoding = CEV_EQ_CNT_256;
+               break;
+       case 512:
+               num_entries_encoding = CEV_EQ_CNT_512;
+               break;
+       case 1024:
+               num_entries_encoding = CEV_EQ_CNT_1024;
+               break;
+       case 2048:
+               num_entries_encoding = CEV_EQ_CNT_2048;
+               break;
+       case 4096:
+               num_entries_encoding = CEV_EQ_CNT_4096;
+               break;
+       default:
+               ASSERT(0);
+               return BE_STATUS_INVALID_PARAMETER;
+       }
+
+       switch (eqe_size) {
+       case 4:
+               eqe_size_encoding = CEV_EQ_SIZE_4;
+               break;
+       case 16:
+               eqe_size_encoding = CEV_EQ_SIZE_16;
+               break;
+       default:
+               ASSERT(0);
+               return BE_STATUS_INVALID_PARAMETER;
+       }
+
+       if ((eqe_size == 4 && num_entries < 1024) ||
+           (eqe_size == 16 && num_entries == 4096)) {
+               TRACE(DL_ERR, "Bad EQ size. eqe_size:%d num_entries:%d",
+                     eqe_size, num_entries);
+               ASSERT(0);
+               return BE_STATUS_INVALID_PARAMETER;
+       }
+
+       memset(eq_object, 0, sizeof(*eq_object));
+
+       atomic_set(&eq_object->ref_count, 0);
+       eq_object->parent_function = pfob;
+       eq_object->eq_id = 0xFFFFFFFF;
+
+       INIT_LIST_HEAD(&eq_object->cq_list_head);
+
+       length = num_entries * eqe_size;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               ASSERT(wrb);
+               TRACE(DL_ERR, "No free MCC WRBs in create EQ.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto Error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_EQ_CREATE);
+
+       fwcmd->params.request.num_pages = PAGES_SPANNED(OFFSET_IN_PAGE(rd->va),
+                                                                       length);
+       n = pfob->pci_function_number;
+       AMAP_SET_BITS_PTR(EQ_CONTEXT, Func, &fwcmd->params.request.context, n);
+
+       AMAP_SET_BITS_PTR(EQ_CONTEXT, valid, &fwcmd->params.request.context, 1);
+
+       AMAP_SET_BITS_PTR(EQ_CONTEXT, Size,
+                       &fwcmd->params.request.context, eqe_size_encoding);
+
+       n = 0; /* Protection Domain is always 0 in Linux driver */
+       AMAP_SET_BITS_PTR(EQ_CONTEXT, PD, &fwcmd->params.request.context, n);
+
+       /* Let the caller ARM the EQ with the doorbell. */
+       AMAP_SET_BITS_PTR(EQ_CONTEXT, Armed, &fwcmd->params.request.context, 0);
+
+       AMAP_SET_BITS_PTR(EQ_CONTEXT, Count, &fwcmd->params.request.context,
+                                       num_entries_encoding);
+
+       n = pfob->pci_function_number * 32;
+       AMAP_SET_BITS_PTR(EQ_CONTEXT, EventVect,
+                               &fwcmd->params.request.context, n);
+       if (watermark != -1) {
+               AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
+                               &fwcmd->params.request.context, 1);
+               AMAP_SET_BITS_PTR(EQ_CONTEXT, Watermark,
+                               &fwcmd->params.request.context, watermark);
+               ASSERT(watermark <= CEV_WMARK_240);
+       } else
+               AMAP_SET_BITS_PTR(EQ_CONTEXT, WME,
+                                       &fwcmd->params.request.context, 0);
+       if (timer_delay != -1) {
+               AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
+                                       &fwcmd->params.request.context, 1);
+
+               ASSERT(timer_delay <= 250);     /* max value according to EAS */
+               timer_delay = min(timer_delay, (u32)250);
+
+               AMAP_SET_BITS_PTR(EQ_CONTEXT, Delay,
+                               &fwcmd->params.request.context, timer_delay);
+       } else {
+               AMAP_SET_BITS_PTR(EQ_CONTEXT, TMR,
+                               &fwcmd->params.request.context, 0);
+       }
+       /* Create a page list for the FWCMD. */
+       be_rd_to_pa_list(rd, fwcmd->params.request.pages,
+                         ARRAY_SIZE(fwcmd->params.request.pages));
+
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+                                       NULL, NULL, fwcmd, NULL);
+       if (status != BE_SUCCESS) {
+               TRACE(DL_ERR, "MCC to create EQ failed.");
+               goto Error;
+       }
+       /* Get the EQ id.  The MPU allocates the IDs. */
+       eq_object->eq_id = fwcmd->params.response.eq_id;
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
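+
+/*
+ * Illustrative usage sketch (an assumption, not part of this file): create
+ * a 1024-entry EQ of 4-byte EQEs with watermarks disabled and an 8us
+ * timer.  Note the size check above: 4-byte EQEs require at least 1024
+ * entries.
+ *
+ *     struct be_eq_object eq;
+ *     struct ring_desc rd;    // describes 1024 * 4 bytes of ring memory
+ *
+ *     status = be_eq_create(pfob, &rd, 4, 1024,
+ *                           -1,   // watermark disabled
+ *                           1,    // timer delay in 8 microsecond units
+ *                           &eq);
+ */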
+
+/*
+    Dereferences the given object. Once the object's reference count drops to
+    zero, the object is destroyed and all resources that are held by this
+    object are released.  The on-chip context is also destroyed along with
+    the queue ID, and any mappings made into the UT.
+
+    eq_object            - EQ handle returned from eq_object_create.
+
+    Returns BE_SUCCESS if successful, otherwise a useful error code
+       is returned.
+
+    IRQL: IRQL < DISPATCH_LEVEL
+*/
+int be_eq_destroy(struct be_eq_object *eq_object)
+{
+       int status = 0;
+
+       ASSERT(atomic_read(&eq_object->ref_count) == 0);
+       /* no CQs should reference this EQ now */
+       ASSERT(list_empty(&eq_object->cq_list_head));
+
+       /* Send fwcmd to destroy the EQ. */
+       status = be_function_ring_destroy(eq_object->parent_function,
+                            eq_object->eq_id, FWCMD_RING_TYPE_EQ,
+                                       NULL, NULL, NULL, NULL);
+       ASSERT(status == 0);
+
+       return BE_SUCCESS;
+}
+/*
+ *---------------------------------------------------------------------------
+ * Function: be_eq_modify_delay
+ *   Changes the EQ delay for a group of EQs.
+ * num_eq             - The number of EQs in the eq_array to adjust.
+ *                     This also is the number of delay values in
+ *                     the eq_delay_array.
+ * eq_array           - Array of struct be_eq_object pointers to adjust.
+ * eq_delay_array     - Array of "num_eq" timer delays in units
+ *                     of microseconds. The be_eq_query_delay_range
+ *                     fwcmd returns the resolution and range of
+ *                      legal EQ delays.
+ * cb           - Optional. Callback invoked when the FWCMD completes.
+ * cb_context   - Optional. Context passed to the callback.
+ * q_ctxt             - Optional. Pointer to a previously allocated
+ *                     struct. If the MCC WRB ring is full, this
+ *                     structure is used to queue the operation. It
+ *                      will be posted to the MCC ring when space
+ *                      becomes available. All queued commands will
+ *                      be posted to the ring in the order they are
+ *                      received. It is always valid to pass a pointer to
+ *                      a generic struct be_generic_q_ctxt. However,
+ *                      the specific context structs
+ *                      are generally smaller than the generic struct.
+ * return pend_status - BE_SUCCESS (0) on success.
+ *                     BE_PENDING (positive value) if the FWCMD
+ *                      completion is pending. Negative error code on failure.
+ *-------------------------------------------------------------------------
+ */
+int
+be_eq_modify_delay(struct be_function_object *pfob,
+                  u32 num_eq, struct be_eq_object **eq_array,
+                  u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
+                  void *cb_context, struct be_eq_modify_delay_q_ctxt *q_ctxt)
+{
+       struct FWCMD_COMMON_MODIFY_EQ_DELAY *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       struct be_generic_q_ctxt *gen_ctxt = NULL;
+       u32 i;
+       unsigned long irql;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               if (q_ctxt && cb) {
+                       wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+                       gen_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+                       gen_ctxt->context.bytes = sizeof(*q_ctxt);
+               } else {
+                       status = BE_STATUS_NO_MCC_WRB;
+                       goto Error;
+               }
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MODIFY_EQ_DELAY);
+
+       ASSERT(num_eq > 0);
+       ASSERT(num_eq <= ARRAY_SIZE(fwcmd->params.request.delay));
+       fwcmd->params.request.num_eq = num_eq;
+       for (i = 0; i < num_eq; i++) {
+               fwcmd->params.request.delay[i].eq_id = eq_array[i]->eq_id;
+               fwcmd->params.request.delay[i].delay_in_microseconds =
+                   eq_delay_array[i];
+       }
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, gen_ctxt,
+                       cb, cb_context, NULL, NULL, fwcmd, NULL);
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
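+
+/*
+ * Illustrative usage sketch (an assumption, not part of this file): raise
+ * the coalescing delay on a single EQ to 96 microseconds, posting
+ * synchronously (no callback, no queuing context).
+ *
+ *     struct be_eq_object *eqs[1] = { &eq };
+ *     u32 delays[1] = { 96 };
+ *
+ *     status = be_eq_modify_delay(pfob, 1, eqs, delays, NULL, NULL, NULL);
+ */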
+
diff --git a/drivers/staging/benet/hwlib/eth.c b/drivers/staging/benet/hwlib/eth.c
new file mode 100644 (file)
index 0000000..f641b62
--- /dev/null
@@ -0,0 +1,1273 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include <linux/if_ether.h>
+#include "hwlib.h"
+#include "bestatus.h"
+
+/*
+ *---------------------------------------------------------
+ * Function: be_eth_sq_create_ex
+ *   Creates an ethernet send ring - extended version with
+ *   additional parameters.
+ * pfob -
+ * rd             - ring address
+ * length_in_bytes -
+ * type            - The type of ring to create.
+ * ulp             - The requested ULP number for the ring.
+ *                  This should be zero based, i.e. 0,1,2. This must
+ *                  be valid NIC ULP based on the firmware config.
+ *                   All doorbells for this ring must be sent to
+ *                   this ULP. The first network ring allocated for
+ *                   each ULP is higher performance than subsequent rings.
+ * cq_object       - cq object for completions
+ * ex_parameters   - Additional parameters (that may increase in
+ *                  future revisions). These parameters are only used
+ *                  for certain ring types -- see
+ *                   struct be_eth_sq_parameters for details.
+ * eth_sq          -
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *---------------------------------------------------------
+ */
+int
+be_eth_sq_create_ex(struct be_function_object *pfob, struct ring_desc *rd,
+               u32 length, u32 type, u32 ulp, struct be_cq_object *cq_object,
+               struct be_eth_sq_parameters *ex_parameters,
+               struct be_ethsq_object *eth_sq)
+{
+       struct FWCMD_COMMON_ETH_TX_CREATE *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       u32 n;
+       unsigned long irql;
+
+       ASSERT(rd);
+       ASSERT(eth_sq);
+       ASSERT(ex_parameters);
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       memset(eth_sq, 0, sizeof(*eth_sq));
+
+       eth_sq->parent_function = pfob;
+       eth_sq->bid = 0xFFFFFFFF;
+       eth_sq->cq_object = cq_object;
+
+       /* Translate hwlib interface to arm interface. */
+       switch (type) {
+       case BE_ETH_TX_RING_TYPE_FORWARDING:
+               type = ETH_TX_RING_TYPE_FORWARDING;
+               break;
+       case BE_ETH_TX_RING_TYPE_STANDARD:
+               type = ETH_TX_RING_TYPE_STANDARD;
+               break;
+       case BE_ETH_TX_RING_TYPE_BOUND:
+               ASSERT(ex_parameters->port < 2);
+               type = ETH_TX_RING_TYPE_BOUND;
+               break;
+       default:
+               TRACE(DL_ERR, "Invalid eth tx ring type:%d", type);
+               /* Do not return directly; the post_lock is held. */
+               status = BE_NOT_OK;
+               goto Error;
+       }
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               ASSERT(wrb);
+               TRACE(DL_ERR, "No free MCC WRBs in create eth tx ring.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto Error;
+       }
+       /* NIC must be supported by the current config. */
+       ASSERT(pfob->fw_config.nic_ulp_mask);
+
+       /*
+        * The ulp parameter must select a valid NIC ULP
+        * for the current config.
+        */
+       ASSERT((1 << ulp) & pfob->fw_config.nic_ulp_mask);
+
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_ETH_TX_CREATE);
+       fwcmd->header.request.port_number = ex_parameters->port;
+
+       AMAP_SET_BITS_PTR(ETX_CONTEXT, pd_id,
+                               &fwcmd->params.request.context, 0);
+
+       n = be_ring_length_to_encoding(length, sizeof(struct ETH_WRB_AMAP));
+       AMAP_SET_BITS_PTR(ETX_CONTEXT, tx_ring_size,
+                                       &fwcmd->params.request.context, n);
+
+       AMAP_SET_BITS_PTR(ETX_CONTEXT, cq_id_send,
+                       &fwcmd->params.request.context, cq_object->cq_id);
+
+       n = pfob->pci_function_number;
+       AMAP_SET_BITS_PTR(ETX_CONTEXT, func, &fwcmd->params.request.context, n);
+
+       fwcmd->params.request.type = type;
+       fwcmd->params.request.ulp_num  = (1 << ulp);
+       fwcmd->params.request.num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+       ASSERT(PAGES_SPANNED(rd->va, rd->length) >=
+                               fwcmd->params.request.num_pages);
+
+       /* Create a page list for the FWCMD. */
+       be_rd_to_pa_list(rd, fwcmd->params.request.pages,
+                         ARRAY_SIZE(fwcmd->params.request.pages));
+
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+                                       NULL, NULL, fwcmd, NULL);
+       if (status != BE_SUCCESS) {
+               TRACE(DL_ERR, "MCC to create etx queue failed.");
+               goto Error;
+       }
+       /* save the butler ID */
+       eth_sq->bid = fwcmd->params.response.cid;
+
+       /* add a reference to the corresponding CQ */
+       atomic_inc(&cq_object->ref_count);
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
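+
+/*
+ * Illustrative usage sketch (an assumption, not part of this file): create
+ * a standard ethernet send ring on NIC ULP 0, completing into an existing
+ * CQ.  The ring length is expressed in ETH_WRB_AMAP entries; the port
+ * field of the extended parameters only matters for BOUND rings.
+ *
+ *     struct be_ethsq_object txq;
+ *     struct be_eth_sq_parameters params = { 0 };
+ *
+ *     status = be_eth_sq_create_ex(pfob, &rd,
+ *                                  256 * sizeof(struct ETH_WRB_AMAP),
+ *                                  BE_ETH_TX_RING_TYPE_STANDARD, 0,
+ *                                  &cq, &params, &txq);
+ */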
+
+
+/*
+    This routine destroys an ethernet send queue
+
+    EthSq - EthSq Handle returned from EthSqCreate
+
+    This function always returns BE_SUCCESS.
+
+    This function frees memory allocated by EthSqCreate for the EthSq Object.
+
+*/
+int be_eth_sq_destroy(struct be_ethsq_object *eth_sq)
+{
+       int status = 0;
+
+       /* Send fwcmd to destroy the queue. */
+       status = be_function_ring_destroy(eth_sq->parent_function, eth_sq->bid,
+                    FWCMD_RING_TYPE_ETH_TX, NULL, NULL, NULL, NULL);
+       ASSERT(status == 0);
+
+       /* Dereference any associated CQs. */
+       atomic_dec(&eth_sq->cq_object->ref_count);
+       return status;
+}
+/*
+    This routine attempts to set the transmit flow control parameters.
+
+    FunctionObject      - Handle to a function object
+
+    txfc_enable         - transmit flow control enable - true for
+                         enable, false for disable
+
+    rxfc_enable         - receive flow control enable - true for
+                               enable, false for disable
+
+    Returns BE_SUCCESS if successful, otherwise a useful int error
+    code is returned.
+
+    IRQL: < DISPATCH_LEVEL
+
+    This function always fails in non-privileged machine context.
+*/
+int
+be_eth_set_flow_control(struct be_function_object *pfob,
+                       bool txfc_enable, bool rxfc_enable)
+{
+       struct FWCMD_COMMON_SET_FLOW_CONTROL *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       unsigned long irql;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               TRACE(DL_ERR, "MCC wrb peek failed.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_FLOW_CONTROL);
+
+       fwcmd->params.request.rx_flow_control = rxfc_enable;
+       fwcmd->params.request.tx_flow_control = txfc_enable;
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+                                       NULL, NULL, fwcmd, NULL);
+
+       if (status != 0) {
+               TRACE(DL_ERR, "set flow control fwcmd failed.");
+               goto error;
+       }
+
+error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
+
+/*
+    This routine attempts to get the transmit flow control parameters.
+
+    pfob      - Handle to a function object
+
+    txfc_enable         - transmit flow control enable - true for
+                       enable, false for disable
+
+    rxfc_enable         - receive flow control enable - true for enable,
+                       false for disable
+
+    Returns BE_SUCCESS if successful, otherwise a useful int error code
+                       is returned.
+
+    IRQL: < DISPATCH_LEVEL
+
+    This function always fails in non-privileged machine context.
+*/
+int
+be_eth_get_flow_control(struct be_function_object *pfob,
+                       bool *txfc_enable, bool *rxfc_enable)
+{
+       struct FWCMD_COMMON_GET_FLOW_CONTROL *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       unsigned long irql;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               TRACE(DL_ERR, "MCC wrb peek failed.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FLOW_CONTROL);
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+                                               NULL, NULL, fwcmd, NULL);
+
+       if (status != 0) {
+               TRACE(DL_ERR, "get flow control fwcmd failed.");
+               goto error;
+       }
+
+       *txfc_enable = fwcmd->params.response.tx_flow_control;
+       *rxfc_enable = fwcmd->params.response.rx_flow_control;
+
+error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
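+
+/*
+ * Illustrative usage sketch (an assumption, not part of this file): enable
+ * transmit pause while preserving the current receive setting by pairing
+ * the get and set calls.
+ *
+ *     bool txfc, rxfc;
+ *
+ *     status = be_eth_get_flow_control(pfob, &txfc, &rxfc);
+ *     if (status == BE_SUCCESS)
+ *             status = be_eth_set_flow_control(pfob, true, rxfc);
+ */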
+
+/*
+ *---------------------------------------------------------
+ * Function: be_eth_set_qos
+ *   This function sets the ethernet transmit Quality of Service (QoS)
+ *   characteristics of BladeEngine for the domain. All ethernet
+ *   transmit rings of the domain will evenly share the bandwidth.
+ *   The exception to sharing is the host primary (super) ethernet
+ *   transmit ring as well as the host ethernet forwarding ring
+ *   for missed offload data.
+ * pfob -
+ * max_bps         - the maximum bits per second in units of
+ *                     10 Mbps (valid 0-100)
+ * max_pps         - the maximum packets per second in units
+ *                     of 1 Kpps (0 indicates no limit)
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *---------------------------------------------------------
+ */
+int
+be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps)
+{
+       struct FWCMD_COMMON_SET_QOS *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       unsigned long irql;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               TRACE(DL_ERR, "MCC wrb peek failed.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_QOS);
+
+       /* Set fields in fwcmd */
+       fwcmd->params.request.max_bits_per_second_NIC = max_bps;
+       fwcmd->params.request.max_packets_per_second_NIC = max_pps;
+       fwcmd->params.request.valid_flags = QOS_BITS_NIC | QOS_PKTS_NIC;
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+                                       NULL, NULL, fwcmd, NULL);
+
+       if (status != 0)
+               TRACE(DL_ERR, "network set qos fwcmd failed.");
+
+error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
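+
+/*
+ * Illustrative usage sketch (an assumption, not part of this file): limit
+ * the domain's transmit bandwidth with no packet-rate cap.  max_bps is in
+ * units of 10 Mbps (valid 0-100, per the comment above) and max_pps of 0
+ * means no limit.
+ *
+ *     status = be_eth_set_qos(pfob, 100, 0);  // 100 * 10 Mbps
+ */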
+
+/*
+ *---------------------------------------------------------
+ * Function: be_eth_get_qos
+ *   This function retrieves the ethernet transmit Quality of Service (QoS)
+ *   characteristics for the domain.
+ * max_bps         - the maximum bits per second in units of
+ *                     10 Mbps (valid 0-100)
+ * max_pps         - the maximum packets per second in units of
+ *                     1 Kpps (0 indicates no limit)
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *---------------------------------------------------------
+ */
+int
+be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps)
+{
+       struct FWCMD_COMMON_GET_QOS *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       unsigned long irql;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               TRACE(DL_ERR, "MCC wrb peek failed.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_QOS);
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+                                       NULL, NULL, fwcmd, NULL);
+
+       if (status != 0) {
+               TRACE(DL_ERR, "network get qos fwcmd failed.");
+               goto error;
+       }
+
+       *max_bps = fwcmd->params.response.max_bits_per_second_NIC;
+       *max_pps = fwcmd->params.response.max_packets_per_second_NIC;
+
+error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
+
+/*
+ *---------------------------------------------------------
+ * Function: be_eth_set_frame_size
+ *   This function sets the ethernet maximum frame size. The previous
+ *   values are returned.
+ * pfob -
+ * tx_frame_size   - maximum transmit frame size in bytes
+ * rx_frame_size   - maximum receive frame size in bytes
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *---------------------------------------------------------
+ */
+int
+be_eth_set_frame_size(struct be_function_object *pfob,
+                     u32 *tx_frame_size, u32 *rx_frame_size)
+{
+       struct FWCMD_COMMON_SET_FRAME_SIZE *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       unsigned long irql;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               TRACE(DL_ERR, "MCC wrb peek failed.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_SET_FRAME_SIZE);
+       fwcmd->params.request.max_tx_frame_size = *tx_frame_size;
+       fwcmd->params.request.max_rx_frame_size = *rx_frame_size;
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+                                               NULL, NULL, fwcmd, NULL);
+
+       if (status != 0) {
+               TRACE(DL_ERR, "network set frame size fwcmd failed.");
+               goto error;
+       }
+
+       *tx_frame_size = fwcmd->params.response.chip_max_tx_frame_size;
+       *rx_frame_size = fwcmd->params.response.chip_max_rx_frame_size;
+
+error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
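+
+/*
+ * Illustrative usage sketch (an assumption, not part of this file): the
+ * frame size parameters are in/out -- on return they hold the chip's
+ * maximum frame sizes, so a caller can detect clamping of its request.
+ *
+ *     u32 tx_size = 9000, rx_size = 9000;     // request jumbo frames
+ *
+ *     status = be_eth_set_frame_size(pfob, &tx_size, &rx_size);
+ *     if (status == BE_SUCCESS && rx_size < 9000)
+ *             TRACE(DL_INFO, "rx frame size clamped to %d", rx_size);
+ */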
+
+
+/*
+    This routine creates an Ethernet receive ring.
+
+    pfob      - handle to a function object
+    rq_base_va            - base VA for the default receive ring. this must be
+                       exactly 8K in length and contiguous physical memory.
+    cq_object            - handle to a previously created CQ to be associated
+                       with the RQ.
+    pp_eth_rq             - pointer to an opaque handle where an eth
+                       receive object is returned.
+    Returns BE_SUCCESS if successful, otherwise a useful
+    int error code is returned.
+
+    IRQL: < DISPATCH_LEVEL
+    this function allocates a struct be_ethrq_object *object.
+    there must be no more than 1 of these per function object, unless the
+    function object supports RSS (is networking and on the host).
+    the rq_base_va must point to a buffer of exactly 8K.
+    the erx::host_cqid (or host_stor_cqid) register and erx::ring_page registers
+    will be updated as appropriate on return
+*/
+int
+be_eth_rq_create(struct be_function_object *pfob,
+                       struct ring_desc *rd, struct be_cq_object *cq_object,
+                       struct be_cq_object *bcmc_cq_object,
+                       struct be_ethrq_object *eth_rq)
+{
+       int status = 0;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       struct FWCMD_COMMON_ETH_RX_CREATE *fwcmd = NULL;
+       unsigned long irql;
+
+       /* MPU will set the  */
+       ASSERT(rd);
+       ASSERT(eth_rq);
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       eth_rq->parent_function = pfob;
+       eth_rq->cq_object = cq_object;
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               TRACE(DL_ERR, "MCC wrb peek failed.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto Error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_ETH_RX_CREATE);
+
+       fwcmd->params.request.num_pages = 2;    /* required length */
+       fwcmd->params.request.cq_id = cq_object->cq_id;
+
+       if (bcmc_cq_object)
+               fwcmd->params.request.bcmc_cq_id = bcmc_cq_object->cq_id;
+       else
+               fwcmd->params.request.bcmc_cq_id = 0xFFFF;
+
+       /* Create a page list for the FWCMD. */
+       be_rd_to_pa_list(rd, fwcmd->params.request.pages,
+                         ARRAY_SIZE(fwcmd->params.request.pages));
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+                                               NULL, NULL, fwcmd, NULL);
+       if (status != BE_SUCCESS) {
+               TRACE(DL_ERR, "fwcmd to map eth rxq frags failed.");
+               goto Error;
+       }
+       /* Save the ring ID for cleanup. */
+       eth_rq->rid = fwcmd->params.response.id;
+
+       atomic_inc(&cq_object->ref_count);
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
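+
+/*
+ * Illustrative usage sketch (an assumption, not part of this file): the
+ * receive ring buffer must be exactly 8K (the fwcmd always maps two
+ * pages), and the broadcast/multicast CQ is optional -- passing NULL
+ * selects the 0xFFFF "no bcmc CQ" encoding above.
+ *
+ *     struct be_ethrq_object rxq;
+ *
+ *     // rd describes an 8K, physically contiguous buffer
+ *     status = be_eth_rq_create(pfob, &rd, &cq, NULL, &rxq);
+ */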
+
+/*
+    This routine destroys an Ethernet receive queue
+
+    eth_rq - ethernet receive queue handle returned from eth_rq_create
+
+    Returns BE_SUCCESS on success and an appropriate int on failure.
+
+    This function frees resources allocated by EthRqCreate.
+    The erx::host_cqid (or host_stor_cqid) register and erx::ring_page
+    registers will be updated as appropriate on return
+    IRQL: < DISPATCH_LEVEL
+*/
+
+static void be_eth_rq_destroy_internal_cb(void *context, int status,
+                                        struct MCC_WRB_AMAP *wrb)
+{
+       struct be_ethrq_object *eth_rq = (struct be_ethrq_object *) context;
+
+       if (status != BE_SUCCESS) {
+               TRACE(DL_ERR, "Destroy eth rq failed in internal callback.\n");
+       } else {
+               /* Dereference any CQs associated with this queue. */
+               atomic_dec(&eth_rq->cq_object->ref_count);
+       }
+
+       return;
+}
+
+int be_eth_rq_destroy(struct be_ethrq_object *eth_rq)
+{
+       int status = BE_SUCCESS;
+
+       /* Send fwcmd to destroy the RQ. */
+       status = be_function_ring_destroy(eth_rq->parent_function,
+                       eth_rq->rid, FWCMD_RING_TYPE_ETH_RX, NULL, NULL,
+                       be_eth_rq_destroy_internal_cb, eth_rq);
+
+       return status;
+}
+
+/*
+ *---------------------------------------------------------------------------
+ * Function: be_eth_rq_destroy_options
+ *   Destroys an ethernet receive ring with finer granularity options
+ *   than the standard be_eth_rq_destroy() API function.
+ * eth_rq           -
+ * flush            - Set to 1 to flush the ring, set to 0 to bypass the flush
+ * cb               - Callback function on completion
+ * cb_context       - Callback context
+ * return status    - BE_SUCCESS (0) on success. Negative error code on failure.
+ *----------------------------------------------------------------------------
+ */
+int
+be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
+               mcc_wrb_cqe_callback cb, void *cb_context)
+{
+       struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = BE_SUCCESS;
+       struct be_function_object *pfob = NULL;
+       unsigned long irql;
+
+       pfob = eth_rq->parent_function;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       TRACE(DL_INFO, "Destroy eth_rq ring id:%d, flush:%d", eth_rq->rid,
+             flush);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               ASSERT(wrb);
+               TRACE(DL_ERR, "No free MCC WRBs in destroy eth_rq ring.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto Error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);
+
+       fwcmd->params.request.id = eth_rq->rid;
+       fwcmd->params.request.ring_type = FWCMD_RING_TYPE_ETH_RX;
+       fwcmd->params.request.bypass_flush = ((0 == flush) ? 1 : 0);
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
+                       be_eth_rq_destroy_internal_cb, eth_rq, fwcmd, NULL);
+
+       if (status != BE_SUCCESS && status != BE_PENDING) {
+               TRACE(DL_ERR, "eth_rq ring destroy failed. id:%d, flush:%d",
+                     eth_rq->rid, flush);
+               goto Error;
+       }
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
+
+/*
+    This routine queries the frag size for erx.
+
+    pfob      - handle to a function object
+
+    frag_size_bytes       - erx frag size in bytes that is/was set.
+
+    Returns BE_SUCCESS if successful, otherwise a useful int error
+    code is returned.
+
+    IRQL: < DISPATCH_LEVEL
+
+*/
+int
+be_eth_rq_get_frag_size(struct be_function_object *pfob, u32 *frag_size_bytes)
+{
+       struct FWCMD_ETH_GET_RX_FRAG_SIZE *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       unsigned long irql;
+
+       ASSERT(frag_size_bytes);
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               TRACE(DL_ERR, "MCC wrb peek failed.");
+               /* Do not return directly; the post_lock is held. */
+               status = BE_STATUS_NO_MCC_WRB;
+               goto error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_GET_RX_FRAG_SIZE);
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+                               NULL, NULL, fwcmd, NULL);
+
+       if (status != 0) {
+               TRACE(DL_ERR, "get frag size fwcmd failed.");
+               goto error;
+       }
+
+       *frag_size_bytes = 1 << fwcmd->params.response.actual_fragsize_log2;
+
+error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
+
+/*
+    This routine attempts to set the frag size for erx.  If the frag size is
+    already set, the attempt fails and the current frag size is returned.
+
+    pfob      - Handle to a function object
+
+    frag_size       - Erx frag size in bytes that is/was set.
+
+    current_frag_size_bytes    - Pointer to location where the current frag
+                                size is to be returned
+
+    Returns BE_SUCCESS if successful, otherwise a useful int error
+    code is returned.
+
+    IRQL: < DISPATCH_LEVEL
+
+    This function always fails in non-privileged machine context.
+*/
+int
+be_eth_rq_set_frag_size(struct be_function_object *pfob,
+                       u32 frag_size, u32 *frag_size_bytes)
+{
+       struct FWCMD_ETH_SET_RX_FRAG_SIZE *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       unsigned long irql;
+
+       ASSERT(frag_size_bytes);
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               TRACE(DL_ERR, "MCC wrb peek failed.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_SET_RX_FRAG_SIZE);
+
+       ASSERT(frag_size >= 128 && frag_size <= 16 * 1024);
+
+       /* This is the log2 of the fragsize.  This is not the exact
+        * ERX encoding. */
+       fwcmd->params.request.new_fragsize_log2 = __ilog2_u32(frag_size);
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+                               NULL, NULL, fwcmd, NULL);
+
+       if (status != 0) {
+               TRACE(DL_ERR, "set frag size fwcmd failed.");
+               goto error;
+       }
+
+       *frag_size_bytes = 1 << fwcmd->params.response.actual_fragsize_log2;
+error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
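+
+/*
+ * Illustrative usage sketch (an assumption, not part of this file):
+ * request a 2048-byte erx frag size.  The size must lie between 128 bytes
+ * and 16K (see the ASSERT above), and on success the out-parameter holds
+ * the frag size actually in effect.
+ *
+ *     u32 actual = 0;
+ *
+ *     status = be_eth_rq_set_frag_size(pfob, 2048, &actual);
+ *     if (status == BE_SUCCESS)
+ *             ;       // "actual" is now the programmed frag size
+ */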
+
+
+/*
+    This routine gets or sets a mac address for a domain
+    given the port and mac.
+
+    FunctionObject  - Function object handle.
+    port1           - Set to TRUE if this function will set/get the Port 1
+                       address.  Only the host may set this to TRUE.
+    mac1            - Set to TRUE if this function will set/get the
+                       MAC 1 address.  Only the host may set this to TRUE.
+    write           - Set to TRUE if this function should write the mac address.
+    mac_address      - Buffer of the mac address to read or write.
+
+    Returns BE_SUCCESS if successful, otherwise a useful int is returned.
+
+    IRQL: < DISPATCH_LEVEL
+*/
+int be_rxf_mac_address_read_write(struct be_function_object *pfob,
+               bool port1,     /* VM must always set to false */
+               bool mac1,      /* VM must always set to false */
+               bool mgmt, bool write,
+               bool permanent, u8 *mac_address,
+               mcc_wrb_cqe_callback cb,        /* optional */
+               void *cb_context)       /* optional */
+{
+       int status = BE_SUCCESS;
+       union {
+               struct FWCMD_COMMON_NTWK_MAC_QUERY *query;
+               struct FWCMD_COMMON_NTWK_MAC_SET *set;
+       } fwcmd = {NULL};
+       struct MCC_WRB_AMAP *wrb = NULL;
+       u32 type = 0;
+       unsigned long irql;
+       struct be_mcc_wrb_response_copy rc;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       ASSERT(mac_address);
+
+       ASSERT(port1 == false);
+       ASSERT(mac1 == false);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               TRACE(DL_ERR, "MCC wrb peek failed.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto Error;
+       }
+
+       if (mgmt) {
+               type = MAC_ADDRESS_TYPE_MANAGEMENT;
+       } else {
+               if (pfob->type == BE_FUNCTION_TYPE_NETWORK)
+                       type = MAC_ADDRESS_TYPE_NETWORK;
+               else
+                       type = MAC_ADDRESS_TYPE_STORAGE;
+       }
+
+       if (write) {
+               /* Prepares an embedded fwcmd, including
+                * request/response sizes.
+                */
+               fwcmd.set = BE_PREPARE_EMBEDDED_FWCMD(pfob,
+                                              wrb, COMMON_NTWK_MAC_SET);
+
+               fwcmd.set->params.request.invalidate = 0;
+               fwcmd.set->params.request.mac1 = (mac1 ? 1 : 0);
+               fwcmd.set->params.request.port = (port1 ? 1 : 0);
+               fwcmd.set->params.request.type = type;
+
+               /* Copy the mac address to set. */
+               fwcmd.set->params.request.mac.SizeOfStructure =
+                           sizeof(fwcmd.set->params.request.mac);
+               memcpy(fwcmd.set->params.request.mac.MACAddress,
+                       mac_address, ETH_ALEN);
+
+               /* Post the f/w command */
+               status = be_function_post_mcc_wrb(pfob, wrb, NULL,
+                               cb, cb_context, NULL, NULL, fwcmd.set, NULL);
+
+       } else {
+
+               /*
+                * Prepares an embedded fwcmd, including
+                * request/response sizes.
+                */
+               fwcmd.query = BE_PREPARE_EMBEDDED_FWCMD(pfob,
+                                              wrb, COMMON_NTWK_MAC_QUERY);
+
+               fwcmd.query->params.request.mac1 = (mac1 ? 1 : 0);
+               fwcmd.query->params.request.port = (port1 ? 1 : 0);
+               fwcmd.query->params.request.type = type;
+               fwcmd.query->params.request.permanent = permanent;
+
+               rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_MAC_QUERY,
+                                               params.response.mac.MACAddress);
+               rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_MAC_QUERY,
+                                               params.response.mac.MACAddress);
+               rc.va = mac_address;
+               /* Post the f/w command (with a copy for the response) */
+               status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
+                               cb_context, NULL, NULL, fwcmd.query, &rc);
+       }
+
+       if (status < 0) {
+               TRACE(DL_ERR, "mac set/query failed.");
+               goto Error;
+       }
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
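+
+/*
+ * Usage sketch (illustrative only, not part of the driver): read the
+ * currently active MAC address synchronously, i.e. with no completion
+ * callback.  "pfob" is assumed to be an initialized function object.
+ *
+ *     u8 mac[ETH_ALEN];
+ *     int status;
+ *
+ *     status = be_rxf_mac_address_read_write(pfob, false, false,
+ *                     false, false, false, mac, NULL, NULL);
+ *
+ * The five false arguments are port1, mac1, mgmt, write and permanent,
+ * i.e. read the current network MAC address.
+ */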
+
+/*
+    This routine configures the multicast filter settings.
+
+    pfob        - Function object handle.
+    promiscuous - Set to TRUE to receive all multicast frames.
+    num         - Number of addresses in mac_table.
+    mac_table   - Array of multicast MAC addresses to accept.
+
+    Returns BE_SUCCESS if successful, otherwise a useful error code
+    is returned.
+
+    IRQL: < DISPATCH_LEVEL
+*/
+
+int be_rxf_multicast_config(struct be_function_object *pfob,
+               bool promiscuous, u32 num, u8 *mac_table,
+               mcc_wrb_cqe_callback cb,        /* optional */
+               void *cb_context,
+               struct be_multicast_q_ctxt *q_ctxt)
+{
+       int status = BE_SUCCESS;
+       struct FWCMD_COMMON_NTWK_MULTICAST_SET *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       struct be_generic_q_ctxt *generic_ctxt = NULL;
+       unsigned long irql;
+
+       ASSERT(num <= ARRAY_SIZE(fwcmd->params.request.mac));
+
+       if (num > ARRAY_SIZE(fwcmd->params.request.mac)) {
+               TRACE(DL_ERR, "Too many multicast addresses. BE supports %d.",
+                     (int) ARRAY_SIZE(fwcmd->params.request.mac));
+               return BE_NOT_OK;
+       }
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               if (q_ctxt && cb) {
+                       wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+                       generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+                       generic_ctxt->context.bytes = sizeof(*q_ctxt);
+               } else {
+                       status = BE_STATUS_NO_MCC_WRB;
+                       goto Error;
+               }
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_MULTICAST_SET);
+
+       fwcmd->params.request.promiscuous = promiscuous;
+       if (!promiscuous) {
+               fwcmd->params.request.num_mac = num;
+               if (num > 0) {
+                       ASSERT(mac_table);
+                       memcpy(fwcmd->params.request.mac,
+                                               mac_table, ETH_ALEN * num);
+               }
+       }
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
+                       cb, cb_context, NULL, NULL, fwcmd, NULL);
+       if (status < 0) {
+               TRACE(DL_ERR, "multicast fwcmd failed.");
+               goto Error;
+       }
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
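+
+/*
+ * Usage sketch (illustrative only): accept a caller-built list of two
+ * multicast addresses.  "mc_list" (2 * ETH_ALEN bytes) is an assumed
+ * caller-owned buffer.
+ *
+ *     status = be_rxf_multicast_config(pfob, false, 2, mc_list,
+ *                                      NULL, NULL, NULL);
+ */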
+
+/*
+    This routine configures VLAN tag filtering in the rxf table.
+
+    pfob           - Function object handle.
+    promiscuous    - Set to TRUE to accept frames with any VLAN tag.
+    num            - Number of tags in vlan_tag_array.
+    vlan_tag_array - VLAN tags to accept.
+
+    Returns BE_SUCCESS if successful, otherwise a useful error code
+    is returned.
+
+    IRQL: < DISPATCH_LEVEL
+*/
+int be_rxf_vlan_config(struct be_function_object *pfob,
+               bool promiscuous, u32 num, u16 *vlan_tag_array,
+               mcc_wrb_cqe_callback cb,        /* optional */
+               void *cb_context,
+               struct be_vlan_q_ctxt *q_ctxt)  /* optional */
+{
+       int status = BE_SUCCESS;
+       struct FWCMD_COMMON_NTWK_VLAN_CONFIG *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       struct be_generic_q_ctxt *generic_ctxt = NULL;
+       unsigned long irql;
+
+       if (num > ARRAY_SIZE(fwcmd->params.request.vlan_tag)) {
+               TRACE(DL_ERR, "Too many VLAN tags.");
+               return BE_NOT_OK;
+       }
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               if (q_ctxt && cb) {
+                       wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+                       generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+                       generic_ctxt->context.bytes = sizeof(*q_ctxt);
+               } else {
+                       status = BE_STATUS_NO_MCC_WRB;
+                       goto Error;
+               }
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_VLAN_CONFIG);
+
+       fwcmd->params.request.promiscuous = promiscuous;
+       if (!promiscuous) {
+               fwcmd->params.request.num_vlan = num;
+
+               if (num > 0) {
+                       ASSERT(vlan_tag_array);
+                       memcpy(fwcmd->params.request.vlan_tag, vlan_tag_array,
+                                 num * sizeof(vlan_tag_array[0]));
+               }
+       }
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
+                       cb, cb_context, NULL, NULL, fwcmd, NULL);
+       if (status < 0) {
+               TRACE(DL_ERR, "vlan fwcmd failed.");
+               goto Error;
+       }
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
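+
+/*
+ * Usage sketch (illustrative only): accept a single VLAN tag.
+ *
+ *     u16 tag = 100;
+ *
+ *     status = be_rxf_vlan_config(pfob, false, 1, &tag,
+ *                                 NULL, NULL, NULL);
+ */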
+
+
+int be_rxf_link_status(struct be_function_object *pfob,
+               struct BE_LINK_STATUS *link_status,
+               mcc_wrb_cqe_callback cb,
+               void *cb_context,
+               struct be_link_status_q_ctxt *q_ctxt)
+{
+       struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       struct be_generic_q_ctxt *generic_ctxt = NULL;
+       unsigned long irql;
+       struct be_mcc_wrb_response_copy rc;
+
+       ASSERT(link_status);
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+
+       if (!wrb) {
+               if (q_ctxt && cb) {
+                       wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+                       generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+                       generic_ctxt->context.bytes = sizeof(*q_ctxt);
+               } else {
+                       status = BE_STATUS_NO_MCC_WRB;
+                       goto Error;
+               }
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb,
+                                              COMMON_NTWK_LINK_STATUS_QUERY);
+
+       rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY,
+                                       params.response);
+       rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY,
+                                       params.response);
+       rc.va = link_status;
+       /* Post or queue the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
+                       cb, cb_context, NULL, NULL, fwcmd, &rc);
+
+       if (status < 0) {
+               TRACE(DL_ERR, "link status fwcmd failed.");
+               goto Error;
+       }
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
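+
+/*
+ * Usage sketch (illustrative only): synchronous link state query into
+ * a caller-owned structure.
+ *
+ *     struct BE_LINK_STATUS link;
+ *
+ *     status = be_rxf_link_status(pfob, &link, NULL, NULL, NULL);
+ */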
+
+int
+be_rxf_query_eth_statistics(struct be_function_object *pfob,
+                   struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
+                   u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
+                   void *cb_context,
+                   struct be_nonembedded_q_ctxt *q_ctxt)
+{
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       struct be_generic_q_ctxt *generic_ctxt = NULL;
+       unsigned long irql;
+
+       ASSERT(va_for_fwcmd);
+       ASSERT(pa_for_fwcmd);
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+
+       if (!wrb) {
+               if (q_ctxt && cb) {
+                       wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+                       generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+                       generic_ctxt->context.bytes = sizeof(*q_ctxt);
+               } else {
+                       status = BE_STATUS_NO_MCC_WRB;
+                       goto Error;
+               }
+       }
+
+       TRACE(DL_INFO, "Query eth stats. fwcmd va:%p pa:0x%08x_%08x",
+             va_for_fwcmd, upper_32_bits(pa_for_fwcmd), (u32)pa_for_fwcmd);
+
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       va_for_fwcmd = BE_PREPARE_NONEMBEDDED_FWCMD(pfob, wrb,
+                         va_for_fwcmd, pa_for_fwcmd, ETH_GET_STATISTICS);
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
+               cb, cb_context, NULL, NULL, va_for_fwcmd, NULL);
+       if (status < 0) {
+               TRACE(DL_ERR, "eth stats fwcmd failed.");
+               goto Error;
+       }
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
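+
+/*
+ * Usage sketch (illustrative only): this FWCMD is non-embedded, so the
+ * caller must supply a DMA-able buffer and its bus address.  Allocating
+ * it with pci_alloc_consistent() is an assumption here, not a driver
+ * requirement.
+ *
+ *     struct FWCMD_ETH_GET_STATISTICS *stats;
+ *     dma_addr_t pa;
+ *
+ *     stats = pci_alloc_consistent(pdev, sizeof(*stats), &pa);
+ *     if (stats)
+ *             status = be_rxf_query_eth_statistics(pfob, stats, pa,
+ *                                                  NULL, NULL, NULL);
+ */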
+
+int
+be_rxf_promiscuous(struct be_function_object *pfob,
+                  bool enable_port0, bool enable_port1,
+                  mcc_wrb_cqe_callback cb, void *cb_context,
+                  struct be_promiscuous_q_ctxt *q_ctxt)
+{
+       struct FWCMD_ETH_PROMISCUOUS *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       struct be_generic_q_ctxt *generic_ctxt = NULL;
+       unsigned long irql;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+
+       if (!wrb) {
+               if (q_ctxt && cb) {
+                       wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+                       generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+                       generic_ctxt->context.bytes = sizeof(*q_ctxt);
+               } else {
+                       status = BE_STATUS_NO_MCC_WRB;
+                       goto Error;
+               }
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, ETH_PROMISCUOUS);
+
+       fwcmd->params.request.port0_promiscuous = enable_port0;
+       fwcmd->params.request.port1_promiscuous = enable_port1;
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
+                       cb, cb_context, NULL, NULL, fwcmd, NULL);
+
+       if (status < 0) {
+               TRACE(DL_ERR, "promiscuous fwcmd failed.");
+               goto Error;
+       }
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
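+
+/*
+ * Usage sketch (illustrative only): enable promiscuous reception on
+ * port 0 but not port 1.
+ *
+ *     status = be_rxf_promiscuous(pfob, true, false, NULL, NULL, NULL);
+ */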
+
+
+/*
+ *-------------------------------------------------------------------------
+ * Function: be_rxf_filter_config
+ *   Configures BladeEngine ethernet receive filter settings.
+ * pfob               - Function object handle.
+ * settings           - Pointer to the requested filter settings.
+ *                     The response from BladeEngine will be placed back
+ *                     in this structure.
+ * cb                 - optional
+ * cb_context         - optional
+ * q_ctxt             - Optional. Pointer to a previously allocated struct.
+ *                     If the MCC WRB ring is full, this structure is
+ *                     used to queue the operation. It will be posted
+ *                     to the MCC ring when space becomes available. All
+ *                      queued commands will be posted to the ring in
+ *                      the order they are received. It is always valid
+ *                      to pass a pointer to a generic
+ *                      be_generic_q_ctxt. However, the specific
+ *                      context structs are generally smaller than
+ *                      the generic struct.
+ * return pend_status - BE_SUCCESS (0) on success.
+ *                     BE_PENDING (positive value) if the FWCMD
+ *                      completion is pending. Negative error code on failure.
+ *---------------------------------------------------------------------------
+ */
+int
+be_rxf_filter_config(struct be_function_object *pfob,
+                    struct NTWK_RX_FILTER_SETTINGS *settings,
+                    mcc_wrb_cqe_callback cb, void *cb_context,
+                    struct be_rxf_filter_q_ctxt *q_ctxt)
+{
+       struct FWCMD_COMMON_NTWK_RX_FILTER *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       struct be_generic_q_ctxt *generic_ctxt = NULL;
+       unsigned long irql;
+       struct be_mcc_wrb_response_copy rc;
+
+       ASSERT(settings);
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+
+       if (!wrb) {
+               if (q_ctxt && cb) {
+                       wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+                       generic_ctxt = (struct be_generic_q_ctxt *) q_ctxt;
+                       generic_ctxt->context.bytes = sizeof(*q_ctxt);
+               } else {
+                       status = BE_STATUS_NO_MCC_WRB;
+                       goto Error;
+               }
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_NTWK_RX_FILTER);
+       memcpy(&fwcmd->params.request, settings, sizeof(*settings));
+
+       rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_NTWK_RX_FILTER,
+                                       params.response);
+       rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_NTWK_RX_FILTER,
+                                       params.response);
+       rc.va = settings;
+       /* Post or queue the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, generic_ctxt,
+                       cb, cb_context, NULL, NULL, fwcmd, &rc);
+
+       if (status < 0) {
+               TRACE(DL_ERR, "RXF/ERX filter config fwcmd failed.");
+               goto Error;
+       }
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
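+
+/*
+ * Usage sketch (illustrative only): because a q_ctxt and callback are
+ * supplied, this request is queued and completes asynchronously when
+ * the MCC WRB ring is full.  "my_cb" and "my_ctx" are assumed
+ * caller-supplied callback and context.
+ *
+ *     struct be_rxf_filter_q_ctxt q_ctxt;
+ *     struct NTWK_RX_FILTER_SETTINGS settings;
+ *
+ *     memset(&settings, 0, sizeof(settings));
+ *     status = be_rxf_filter_config(pfob, &settings, my_cb, my_ctx,
+ *                                   &q_ctxt);
+ *
+ * A BE_PENDING return means my_cb will be invoked on completion.
+ */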
diff --git a/drivers/staging/benet/hwlib/funcobj.c b/drivers/staging/benet/hwlib/funcobj.c
new file mode 100644 (file)
index 0000000..6eabe96
--- /dev/null
@@ -0,0 +1,565 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include "hwlib.h"
+#include "bestatus.h"
+
+
+int
+be_function_internal_query_firmware_config(struct be_function_object *pfob,
+                                  struct BE_FIRMWARE_CONFIG *config)
+{
+       struct FWCMD_COMMON_FIRMWARE_CONFIG *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       unsigned long irql;
+       struct be_mcc_wrb_response_copy rc;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               TRACE(DL_ERR, "MCC wrb peek failed.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_FIRMWARE_CONFIG);
+
+       rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_FIRMWARE_CONFIG,
+                                       params.response);
+       rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_FIRMWARE_CONFIG,
+                                       params.response);
+       rc.va = config;
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL,
+                                       NULL, NULL, NULL, fwcmd, &rc);
+error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
+
+/*
+    This allocates and initializes a function object based on the information
+    provided by upper layer drivers.
+
+    Returns BE_SUCCESS on success and an appropriate int on failure.
+
+    A function object represents a single BladeEngine (logical) PCI function.
+    That is, a function object represents either the networking side
+    or the iSCSI side of BladeEngine.
+
+    This routine will also detect and create an appropriate PD object for the
+    PCI function as needed.
+*/
+int
+be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
+               u8 __iomem *pci_va, u32 function_type,
+               struct ring_desc *mailbox, struct be_function_object *pfob)
+{
+       int status;
+
+       ASSERT(pfob);   /* not a magic assert */
+       ASSERT(function_type <= 2);
+
+       TRACE(DL_INFO, "Create function object. type:%s object:0x%p",
+             (function_type == BE_FUNCTION_TYPE_ISCSI ? "iSCSI" :
+              (function_type == BE_FUNCTION_TYPE_NETWORK ? "Network" :
+               "Arm")), pfob);
+
+       memset(pfob, 0, sizeof(*pfob));
+
+       pfob->type = function_type;
+       pfob->csr_va = csr_va;
+       pfob->db_va = db_va;
+       pfob->pci_va = pci_va;
+
+       spin_lock_init(&pfob->cq_lock);
+       spin_lock_init(&pfob->post_lock);
+       spin_lock_init(&pfob->mcc_context_lock);
+
+       pfob->pci_function_number = 1;
+
+       pfob->emulate = false;
+       TRACE(DL_NOTE, "Non-emulation mode");
+       status = be_drive_POST(pfob);
+       if (status != BE_SUCCESS) {
+               TRACE(DL_ERR, "BladeEngine POST failed.");
+               goto error;
+       }
+
+       /* Initialize the mailbox */
+       status = be_mpu_init_mailbox(pfob, mailbox);
+       if (status != BE_SUCCESS) {
+               TRACE(DL_ERR, "Failed to initialize mailbox.");
+               goto error;
+       }
+       /*
+        * Cache the firmware config for ASSERTs in hwlib and later
+        * driver queries.
+        */
+       status = be_function_internal_query_firmware_config(pfob,
+                                              &pfob->fw_config);
+       if (status != BE_SUCCESS) {
+               TRACE(DL_ERR, "Failed to query firmware config.");
+               goto error;
+       }
+
+error:
+       if (status != BE_SUCCESS) {
+               /* No cleanup necessary */
+               TRACE(DL_ERR, "Failed to create function.");
+               memset(pfob, 0, sizeof(*pfob));
+       }
+       return status;
+}
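+
+/*
+ * Usage sketch (illustrative only): create a network function object.
+ * csr_va/db_va/pci_va are assumed to be the ioremap()ed BARs and
+ * "mbox_rd" a ring_desc describing DMA-able mailbox memory.
+ *
+ *     struct be_function_object fobj;
+ *
+ *     status = be_function_object_create(csr_va, db_va, pci_va,
+ *                     BE_FUNCTION_TYPE_NETWORK, &mbox_rd, &fobj);
+ */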
+
+/*
+    This routine drops the reference count on a given function object. Once
+    the reference count falls to zero, the function object is destroyed and all
+    resources held are freed.
+
+    FunctionObject      - The function object to drop the reference to.
+*/
+int be_function_object_destroy(struct be_function_object *pfob)
+{
+       TRACE(DL_INFO, "Destroy pfob. Object:0x%p", pfob);
+
+       ASSERT(pfob->mcc == NULL);
+
+       return BE_SUCCESS;
+}
+
+int be_function_cleanup(struct be_function_object *pfob)
+{
+       int status = 0;
+       u32 isr;
+       u32 host_intr;
+       struct PCICFG_HOST_TIMER_INT_CTRL_CSR_AMAP ctrl;
+
+       if (pfob->type == BE_FUNCTION_TYPE_NETWORK) {
+               status = be_rxf_multicast_config(pfob, false, 0,
+                                               NULL, NULL, NULL, NULL);
+               ASSERT(status == BE_SUCCESS);
+       }
+       /* VLAN */
+       status = be_rxf_vlan_config(pfob, false, 0, NULL, NULL, NULL, NULL);
+       ASSERT(status == BE_SUCCESS);
+       /*
+        * MCC Queue -- Switches to mailbox mode.  May want to destroy
+        * all but the MCC CQ before this call if polling CQ is much better
+        * performance than polling mailbox register.
+        */
+       if (pfob->mcc)
+               status = be_mcc_ring_destroy(pfob->mcc);
+       /*
+        * If interrupts are disabled, clear any CEV interrupt assertions that
+        * fired after we stopped processing EQs.
+        */
+       ctrl.dw[0] = PCICFG1_READ(pfob, host_timer_int_ctrl);
+       host_intr = AMAP_GET_BITS_PTR(PCICFG_HOST_TIMER_INT_CTRL_CSR,
+                                                       hostintr, ctrl.dw);
+       if (!host_intr) {
+               if (pfob->type == BE_FUNCTION_TYPE_NETWORK)
+                       isr = CSR_READ(pfob, cev.isr1);
+               else
+                       isr = CSR_READ(pfob, cev.isr0);
+       } else {
+               /* This should never happen... */
+               TRACE(DL_ERR, "function_cleanup called with interrupt enabled");
+       }
+       /* Function object destroy */
+       status = be_function_object_destroy(pfob);
+       ASSERT(status == BE_SUCCESS);
+
+       return status;
+}
+
+
+void *
+be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
+       struct MCC_WRB_AMAP *wrb, u32 payld_len, u32 request_length,
+       u32 response_length, u32 opcode, u32 subsystem)
+{
+       struct FWCMD_REQUEST_HEADER *header = NULL;
+       u32 n;
+
+       ASSERT(wrb);
+
+       n = AMAP_BYTE_OFFSET(MCC_WRB, payload);
+       AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 1);
+       AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, min(payld_len, n));
+       header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
+
+       header->timeout = 0;
+       header->domain = 0;
+       header->request_length = max(request_length, response_length);
+       header->opcode = opcode;
+       header->subsystem = subsystem;
+
+       return header;
+}
+
+void *
+be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
+       struct MCC_WRB_AMAP *wrb,
+       void *fwcmd_va, u64 fwcmd_pa,
+       u32 payld_len,
+       u32 request_length,
+       u32 response_length,
+       u32 opcode, u32 subsystem)
+{
+       struct FWCMD_REQUEST_HEADER *header = NULL;
+       u32 n;
+       struct MCC_WRB_PAYLOAD_AMAP *plp;
+
+       ASSERT(wrb);
+       ASSERT(fwcmd_va);
+
+       header = (struct FWCMD_REQUEST_HEADER *) fwcmd_va;
+
+       AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 0);
+       AMAP_SET_BITS_PTR(MCC_WRB, payload_length, wrb, payld_len);
+
+       /*
+        * Assume one fragment. The caller may override the SGL by
+        * rewriting the 0th length and adding more entries.  They
+        * will also need to update the sge_count.
+        */
+       AMAP_SET_BITS_PTR(MCC_WRB, sge_count, wrb, 1);
+
+       n = AMAP_BYTE_OFFSET(MCC_WRB, payload);
+       plp = (struct MCC_WRB_PAYLOAD_AMAP *)((u8 *)wrb + n);
+       AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].length, plp, payld_len);
+       AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_lo, plp, (u32)fwcmd_pa);
+       AMAP_SET_BITS_PTR(MCC_WRB_PAYLOAD, sgl[0].pa_hi, plp,
+                                       upper_32_bits(fwcmd_pa));
+
+       header->timeout = 0;
+       header->domain = 0;
+       header->request_length = max(request_length, response_length);
+       header->opcode = opcode;
+       header->subsystem = subsystem;
+
+       return header;
+}
+
+struct MCC_WRB_AMAP *
+be_function_peek_mcc_wrb(struct be_function_object *pfob)
+{
+       struct MCC_WRB_AMAP *wrb = NULL;
+       u32 offset;
+
+       if (pfob->mcc) {
+               wrb = _be_mpu_peek_ring_wrb(pfob->mcc, false);
+       } else {
+               offset = AMAP_BYTE_OFFSET(MCC_MAILBOX, wrb);
+               wrb = (struct MCC_WRB_AMAP *) ((u8 *) pfob->mailbox.va +
+                               offset);
+       }
+
+       if (wrb)
+               memset(wrb, 0, sizeof(struct MCC_WRB_AMAP));
+
+       return wrb;
+}
+
+#if defined(BE_DEBUG)
+void be_function_debug_print_wrb(struct be_function_object *pfob,
+               struct MCC_WRB_AMAP *wrb, void *optional_fwcmd_va,
+               struct be_mcc_wrb_context *wrb_context)
+{
+
+       struct FWCMD_REQUEST_HEADER *header = NULL;
+       u8 embedded;
+       u32 n;
+
+       embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, wrb);
+
+       if (embedded) {
+               n = AMAP_BYTE_OFFSET(MCC_WRB, payload);
+               header = (struct FWCMD_REQUEST_HEADER *)((u8 *)wrb + n);
+       } else {
+               header = (struct FWCMD_REQUEST_HEADER *) optional_fwcmd_va;
+       }
+
+       /* Save the opcode and subsystem of this FWCMD for debug asserts. */
+
+       if (header) {
+               wrb_context->opcode = header->opcode;
+               wrb_context->subsystem = header->subsystem;
+
+       } else {
+               wrb_context->opcode = 0;
+               wrb_context->subsystem = 0;
+       }
+}
+#else
+#define be_function_debug_print_wrb(a_, b_, c_, d_)
+#endif
+
+int
+be_function_post_mcc_wrb(struct be_function_object *pfob,
+               struct MCC_WRB_AMAP *wrb,
+               struct be_generic_q_ctxt *q_ctxt,
+               mcc_wrb_cqe_callback cb, void *cb_context,
+               mcc_wrb_cqe_callback internal_cb,
+               void *internal_cb_context, void *optional_fwcmd_va,
+               struct be_mcc_wrb_response_copy *rc)
+{
+       int status;
+       struct be_mcc_wrb_context *wrb_context = NULL;
+       u64 *p;
+
+       if (q_ctxt) {
+               /* Initialize context.         */
+               q_ctxt->context.internal_cb = internal_cb;
+               q_ctxt->context.internal_cb_context = internal_cb_context;
+               q_ctxt->context.cb = cb;
+               q_ctxt->context.cb_context = cb_context;
+               if (rc) {
+                       q_ctxt->context.copy.length = rc->length;
+                       q_ctxt->context.copy.fwcmd_offset = rc->fwcmd_offset;
+                       q_ctxt->context.copy.va = rc->va;
+               } else
+                       q_ctxt->context.copy.length = 0;
+
+               q_ctxt->context.optional_fwcmd_va = optional_fwcmd_va;
+
+               /* Queue this request */
+               status = be_function_queue_mcc_wrb(pfob, q_ctxt);
+
+               goto Error;
+       }
+       /*
+        * Allocate a WRB context struct to hold the callback pointers,
+        * status, etc.  This is required if commands complete out of order.
+        */
+       wrb_context = _be_mcc_allocate_wrb_context(pfob);
+       if (!wrb_context) {
+               TRACE(DL_WARN, "Failed to allocate MCC WRB context.");
+               status = BE_STATUS_SYSTEM_RESOURCES;
+               goto Error;
+       }
+       /* Initialize context. */
+       memset(wrb_context, 0, sizeof(*wrb_context));
+       wrb_context->internal_cb = internal_cb;
+       wrb_context->internal_cb_context = internal_cb_context;
+       wrb_context->cb = cb;
+       wrb_context->cb_context = cb_context;
+       if (rc) {
+               wrb_context->copy.length = rc->length;
+               wrb_context->copy.fwcmd_offset = rc->fwcmd_offset;
+               wrb_context->copy.va = rc->va;
+       } else
+               wrb_context->copy.length = 0;
+       wrb_context->wrb = wrb;
+
+       /*
+        * Copy the context pointer into the WRB opaque tag field;
+        * the tag is assumed to be wide enough to hold a pointer.
+        */
+       p = (u64 *) ((u8 *)wrb + AMAP_BYTE_OFFSET(MCC_WRB, tag));
+       *p = (u64)(size_t)wrb_context;
+
+       /* Print info about this FWCMD for debug builds. */
+       be_function_debug_print_wrb(pfob, wrb, optional_fwcmd_va, wrb_context);
+
+       /*
+        * issue the WRB to the MPU as appropriate
+        */
+       if (pfob->mcc) {
+               /*
+                * we're in WRB mode, pass to the mcc layer
+                */
+               status = _be_mpu_post_wrb_ring(pfob->mcc, wrb, wrb_context);
+       } else {
+               /*
+                * we're in mailbox mode
+                */
+               status = _be_mpu_post_wrb_mailbox(pfob, wrb, wrb_context);
+
+               /* mailbox mode always completes synchronously */
+               ASSERT(status != BE_STATUS_PENDING);
+       }
+
+Error:
+
+       return status;
+}
+
+int
+be_function_ring_destroy(struct be_function_object *pfob,
+               u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
+               void *cb_context, mcc_wrb_cqe_callback internal_cb,
+               void *internal_cb_context)
+{
+
+       struct FWCMD_COMMON_RING_DESTROY *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       int status = 0;
+       unsigned long irql;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       TRACE(DL_INFO, "Destroy ring id:%d type:%d", id, ring_type);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               ASSERT(wrb);
+               TRACE(DL_ERR, "No free MCC WRBs in destroy ring.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto Error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_RING_DESTROY);
+
+       fwcmd->params.request.id = id;
+       fwcmd->params.request.ring_type = ring_type;
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb, cb_context,
+                               internal_cb, internal_cb_context, fwcmd, NULL);
+       if (status != BE_SUCCESS && status != BE_PENDING) {
+               TRACE(DL_ERR, "Ring destroy fwcmd failed. id:%d ring_type:%d",
+                       id, ring_type);
+               goto Error;
+       }
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
+
+void
+be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list, u32 max_num)
+{
+       u32 num_pages = PAGES_SPANNED(rd->va, rd->length);
+       u32 i = 0;
+       u64 pa = rd->pa;
+       __le64 lepa;
+
+       ASSERT(pa_list);
+       ASSERT(pa);
+
+       for (i = 0; i < min(num_pages, max_num); i++) {
+               lepa = cpu_to_le64(pa);
+               pa_list[i].lo = (u32)lepa;
+               pa_list[i].hi = upper_32_bits(lepa);
+               pa += PAGE_SIZE;
+       }
+}
+
+
+
+/*-----------------------------------------------------------------------------
+ * Function: be_function_get_fw_version
+ *   Retrieves the firmware version on the adapter. If the callback is
+ *   NULL this call executes synchronously. If the callback is not NULL,
+ *   the returned status will be BE_PENDING if the command was issued
+ *   successfully.
+ * pfob        - Function object handle.
+ * fwv         - Pointer to response buffer if callback is NULL.
+ * cb           - Callback function invoked when the FWCMD completes.
+ * cb_context   - Passed to the callback function.
+ * return pend_status - BE_SUCCESS (0) on success.
+ *                     BE_PENDING (positive value) if the FWCMD
+ *                      completion is pending. Negative error code on failure.
+ *---------------------------------------------------------------------------
+ */
+int
+be_function_get_fw_version(struct be_function_object *pfob,
+               struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fwv,
+               mcc_wrb_cqe_callback cb, void *cb_context)
+{
+       int status = BE_SUCCESS;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       struct FWCMD_COMMON_GET_FW_VERSION *fwcmd = NULL;
+       unsigned long irql;
+       struct be_mcc_wrb_response_copy rc;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               TRACE(DL_ERR, "MCC wrb peek failed.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto Error;
+       }
+
+       if (!cb && !fwv) {
+               TRACE(DL_ERR, "callback and response buffer NULL!");
+               status = BE_NOT_OK;
+               goto Error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FW_VERSION);
+
+       rc.length = FIELD_SIZEOF(struct FWCMD_COMMON_GET_FW_VERSION,
+                                       params.response);
+       rc.fwcmd_offset = offsetof(struct FWCMD_COMMON_GET_FW_VERSION,
+                                       params.response);
+       rc.va = fwv;
+
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, cb,
+                               cb_context, NULL, NULL, fwcmd, &rc);
+
+Error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
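+
+/*
+ * Usage sketch (illustrative only): synchronous query, so the callback
+ * is NULL and a response buffer must be supplied.
+ *
+ *     struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD fwv;
+ *
+ *     status = be_function_get_fw_version(pfob, &fwv, NULL, NULL);
+ */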
+
+int
+be_function_queue_mcc_wrb(struct be_function_object *pfob,
+                         struct be_generic_q_ctxt *q_ctxt)
+{
+       int status;
+
+       ASSERT(q_ctxt);
+
+       /*
+        * issue the WRB to the MPU as appropriate
+        */
+       if (pfob->mcc) {
+
+               /* We're in ring mode.  Queue this item. */
+               pfob->mcc->backlog_length++;
+               list_add_tail(&q_ctxt->context.list, &pfob->mcc->backlog);
+               status = BE_PENDING;
+       } else {
+               status = BE_NOT_OK;
+       }
+       return status;
+}
+
diff --git a/drivers/staging/benet/hwlib/hwlib.h b/drivers/staging/benet/hwlib/hwlib.h
new file mode 100644 (file)
index 0000000..0cffe8f
--- /dev/null
@@ -0,0 +1,856 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#ifndef __hwlib_h__
+#define __hwlib_h__
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "regmap.h"            /* srcgen array map output */
+
+#include "asyncmesg.h"
+#include "fwcmd_opcodes.h"
+#include "post_codes.h"
+#include "fwcmd_mcc.h"
+
+#include "fwcmd_types_bmap.h"
+#include "fwcmd_common_bmap.h"
+#include "fwcmd_eth_bmap.h"
+#include "bestatus.h"
+/*
+ * Macros for reading/writing protection domain or CSR registers
+ * in BladeEngine.
+ */
+#define PD_READ(_fo_, _field_)                                 \
+       ioread32((_fo_)->db_va +                                \
+               AMAP_BYTE_OFFSET(PROTECTION_DOMAIN_DBMAP, _field_))
+
+#define PD_WRITE(_fo_, _field_, _value_)                       \
+       iowrite32((_value_), (_fo_)->db_va +                    \
+               AMAP_BYTE_OFFSET(PROTECTION_DOMAIN_DBMAP, _field_))
+
+#define CSR_READ(_fo_, _field_)                                \
+       ioread32((_fo_)->csr_va +                       \
+               AMAP_BYTE_OFFSET(BLADE_ENGINE_CSRMAP, _field_))
+
+#define CSR_WRITE(_fo_, _field_, _value_)                      \
+       iowrite32((_value_), (_fo_)->csr_va +                   \
+               AMAP_BYTE_OFFSET(BLADE_ENGINE_CSRMAP, _field_))
+
+#define PCICFG0_READ(_fo_, _field_)                            \
+       ioread32((_fo_)->pci_va +                               \
+               AMAP_BYTE_OFFSET(PCICFG0_CSRMAP, _field_))
+
+#define PCICFG0_WRITE(_fo_, _field_, _value_)                  \
+       iowrite32((_value_), (_fo_)->pci_va +                   \
+               AMAP_BYTE_OFFSET(PCICFG0_CSRMAP, _field_))
+
+#define PCICFG1_READ(_fo_, _field_)                            \
+       ioread32((_fo_)->pci_va +                               \
+               AMAP_BYTE_OFFSET(PCICFG1_CSRMAP, _field_))
+
+#define PCICFG1_WRITE(_fo_, _field_, _value_)                  \
+       iowrite32((_value_), (_fo_)->pci_va +                   \
+               AMAP_BYTE_OFFSET(PCICFG1_CSRMAP, _field_))
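+
+/*
+ * Usage sketch (illustrative only): read the host timer interrupt
+ * control CSR from PCI config space 1, as be_function_cleanup() does.
+ *
+ *     u32 ctrl = PCICFG1_READ(pfob, host_timer_int_ctrl);
+ */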
+
+#ifdef BE_DEBUG
+#define ASSERT(c)       BUG_ON(!(c));
+#else
+#define ASSERT(c)
+#endif
+
+/* debug levels */
+enum BE_DEBUG_LEVELS {
+       DL_ALWAYS = 0,          /* cannot be masked */
+       DL_ERR = 0x1,           /* errors that should never happen */
+       DL_WARN = 0x2,          /* something questionable.
+                                  recoverable errors */
+       DL_NOTE = 0x4,          /* infrequent, important debug info */
+       DL_INFO = 0x8,          /* debug information */
+       DL_VERBOSE = 0x10,      /* detailed info, such as buffer traces */
+       BE_DL_MIN_VALUE = 0x1,  /* this is the min value used */
+       BE_DL_MAX_VALUE = 0x80  /* this is the highest value used */
+} ;
+
+extern unsigned int trace_level;
+
+#define TRACE(lm, fmt, args...)  {                             \
+               if (trace_level & lm) {                         \
+                       printk(KERN_NOTICE "BE: %s:%d " fmt "\n", \
+                       __FILE__ , __LINE__ , ## args);         \
+               }                                               \
+       }
+
+static inline unsigned int be_trace_set_level(unsigned int level)
+{
+       unsigned int old_level = trace_level;
+       trace_level = level;
+       return old_level;
+}
+
+#define be_trace_get_level()   trace_level
+/*
+ * Returns number of pages spanned by the size of data
+ * starting at the given address.
+ */
+#define PAGES_SPANNED(_address, _size) \
+   ((u32)((((size_t)(_address) & (PAGE_SIZE - 1)) + \
+               (_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
+/* Byte offset into the page corresponding to given address */
+#define OFFSET_IN_PAGE(_addr_) ((size_t)(_addr_) & (PAGE_SIZE-1))
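+
+/*
+ * Example (illustrative): with 4KB pages, a 100-byte buffer starting
+ * 16 bytes before a page boundary spans two pages, so
+ * PAGES_SPANNED(addr, 100) evaluates to 2 for such an address.
+ */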
+
+/*
+ * circular subtract.
+ * Returns a - b assuming a circular number system, where a and b are
+ * in range (0, maxValue-1). If a==b, zero is returned so the
+ * highest value possible with this subtraction is maxValue-1.
+ */
+static inline u32 be_subc(u32 a, u32 b, u32 max)
+{
+       ASSERT(a <= max && b <= max);
+       ASSERT(max > 0);
+       return (a >= b ? (a - b) : (max - b + a));
+}
+
+static inline u32 be_addc(u32 a, u32 b, u32 max)
+{
+       ASSERT(a < max);
+       ASSERT(max > 0);
+       return ((max - a > b) ? (a + b) : (b + a - max));
+}
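+
+/*
+ * Example (illustrative): with max == 8 the indices wrap modulo 8,
+ * so be_addc(6, 5, 8) == 3 and be_subc(2, 5, 8) == 5.
+ */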
+
+/* descriptor for a physically contiguous memory used for ring */
+struct ring_desc {
+       u32 length;     /* length in bytes */
+       void *va;       /* virtual address */
+       u64 pa;         /* bus address */
+} ;
+
+/*
+ * This structure stores information about a ring shared between hardware
+ * and software.  Each ring is allocated by the driver in the uncached
+ * extension and mapped into BladeEngine's unified table.
+ */
+struct mp_ring {
+       u32 pages;              /* queue size in pages */
+       u32 id;                 /* queue id assigned by beklib */
+       u32 num;                /* number of elements in queue */
+       u32 cidx;               /* consumer index */
+       u32 pidx;               /* producer index -- not used by most rings */
+       u32 itemSize;           /* size in bytes of one object */
+
+       void *va;               /* The virtual address of the ring.
+                                  This should be last to allow 32 & 64
+                                  bit debugger extensions to work. */
+} ;
+
+/*-----------  amap bit field get / set macros and functions -----*/
+/*
+ * Structures defined in the map header files (under fw/amap/) with names
+ * in the format BE_<name>_AMAP are pseudo structures with members
+ * of type u8. These structures are templates that are used in
+ * conjunction with the structures with names in the format
+ * <name>_AMAP to calculate the bit masks and bit offsets to get or set
+ * bit fields in structures. The structures <name>_AMAP are arrays
+ * of 32-bit words and have the correct size.  The following macros
+ * provide convenient ways to get and set the various members
+ * in the structures without using structures with bit fields.
+ * Always use the AMAP_GET_BITS_PTR and AMAP_SET_BITS_PTR macros
+ * to extract and set the various members; a short usage sketch
+ * follows the macro definitions below.
+ */
+
+/*
+ * Returns a bit mask for the register that is NOT shifted into position.
+ * That means return values always look like: 0x1, 0xFF, 0x7FF, etc...
+ */
+static inline u32 amap_mask(u32 bit_size)
+{
+    return (bit_size == 32 ? 0xFFFFFFFF : (1 << bit_size) - 1);
+}
+
+#define AMAP_BIT_MASK(_struct_, _register_)       \
+       amap_mask(AMAP_BIT_SIZE(_struct_, _register_))
+
+/*
+ * Non-optimized set-bits function: it first clears the bits, then assigns them.
+ * This does not require knowledge of the particular DWORD you are setting.
+ * e.g. AMAP_SET_BITS_PTR (struct, field1, &contextMemory, 123);
+ */
+static inline void
+amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
+{
+       u32 *dw = (u32 *)ptr;
+       *(dw + dw_offset) &= ~(mask << offset);
+       *(dw + dw_offset) |= (mask & value) << offset;
+}
+
+#define AMAP_SET_BITS_PTR(_struct_, _register_, _structPtr_, _value_)  \
+       amap_set(_structPtr_, AMAP_WORD_OFFSET(_struct_, _register_),   \
+               AMAP_BIT_MASK(_struct_, _register_),                    \
+               AMAP_BIT_OFFSET(_struct_, _register_), _value_)
+
+/*
+ * Non-optimized routine that gets the bits without knowing the correct DWORD.
+ * e.g. fieldValue = AMAP_GET_BITS_PTR (struct, field1, &contextMemory);
+ */
+static inline u32
+amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
+{
+       u32 *dw = (u32 *)ptr;
+       return mask & (*(dw + dw_offset) >> offset);
+}
+#define AMAP_GET_BITS_PTR(_struct_, _register_, _structPtr_)           \
+       amap_get(_structPtr_, AMAP_WORD_OFFSET(_struct_, _register_),   \
+               AMAP_BIT_MASK(_struct_, _register_),                    \
+                       AMAP_BIT_OFFSET(_struct_, _register_))
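+
+/*
+ * Usage sketch (illustrative only): set a field and read it back with
+ * the MCC_WRB template, as the posting code in funcobj.c does.
+ *
+ *     AMAP_SET_BITS_PTR(MCC_WRB, embedded, wrb, 1);
+ *     embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, wrb);
+ */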
+
+/* Returns 0-31 representing bit offset within a DWORD of a bitfield. */
+#define AMAP_BIT_OFFSET(_struct_, _register_)                  \
+    (((size_t)&(((struct BE_ ## _struct_ ## _AMAP*)0)->_register_))%32)
+
+/* Returns 0-n representing byte offset of the bitfield within the structure. */
+#define AMAP_BYTE_OFFSET(_struct_, _register_)                  \
+       (((size_t)&(((struct BE_ ## _struct_ ## _AMAP *)0)->_register_))/8)
+
+/* Returns 0-n representing DWORD offset of bitfield within the structure. */
+#define AMAP_WORD_OFFSET(_struct_, _register_)  \
+                 (AMAP_BYTE_OFFSET(_struct_, _register_)/4)
+
+/*
+ * Gets a pointer to a field within a structure
+ * The field must be byte aligned.
+ */
+#define AMAP_GET_PTR(_struct_, _register_, _structPtr_)                     \
+    (void *) ((u8 *)(_structPtr_) + AMAP_BYTE_OFFSET(_struct_, _register_))
+
+/* Returns size of bitfield in bits. */
+#define AMAP_BIT_SIZE(_struct_, _register_) \
+               sizeof(((struct BE_ ## _struct_ ## _AMAP*)0)->_register_)
+
+/* Returns size of the AMAP structure in bytes. */
+#define AMAP_BYTE_SIZE(_struct_) (sizeof(struct BE_ ## _struct_ ## _AMAP)/8)
+
+/* Returns size of the AMAP structure in DWORDS. */
+#define AMAP_WORD_SIZE(_struct_) (AMAP_BYTE_SIZE(_struct_)/4)
+
+struct be_mcc_wrb_response_copy {
+       u16 length;             /* bytes in response */
+       u16 fwcmd_offset;       /* offset within the wrb of the response */
+       void *va;               /* user's va to copy response into */
+
+} ;
+typedef void (*mcc_wrb_cqe_callback) (void *context, int status,
+                               struct MCC_WRB_AMAP *optional_wrb);
+struct be_mcc_wrb_context {
+
+       mcc_wrb_cqe_callback internal_cb;       /* Function to call on
+                                               completion */
+       void *internal_cb_context;      /* Parameter to pass
+                                                  to completion function */
+
+       mcc_wrb_cqe_callback cb;        /* Function to call on completion */
+       void *cb_context;       /* Parameter to pass to completion function */
+
+       int *users_final_status;        /* pointer to a local
+                                               variable for synchronous
+                                               commands */
+       struct MCC_WRB_AMAP *wrb;       /* pointer to original wrb for embedded
+                                               commands only */
+       struct list_head next;  /* links context structs together in
+                                  free list */
+
+       struct be_mcc_wrb_response_copy copy;   /* Optional parameters to copy
+                                          embedded response to user's va */
+
+#if defined(BE_DEBUG)
+       u16 subsystem, opcode;  /* Track this FWCMD for debug builds. */
+       struct MCC_WRB_AMAP *ring_wrb;
+       u32 consumed_count;
+#endif
+} ;
+
+/*
+    Represents a function object for network or storage.  This
+    is used to manage per-function resources like MCC CQs, etc.
+*/
+struct be_function_object {
+
+       u32 magic;              /*!< magic for detecting memory corruption. */
+
+       /* PCI BAR mapped addresses */
+       u8 __iomem *csr_va;     /* CSR */
+       u8 __iomem *db_va;      /* Door Bell */
+       u8 __iomem *pci_va;     /* PCI config space */
+       u32 emulate;            /* if set, MPU is not available.
+                                 Emulate everything.     */
+       u32 pend_queue_driving; /* if set, drive the queued WRBs
+                                  after releasing the WRB lock */
+
+       spinlock_t post_lock;   /* lock for verifying one thread posting wrbs */
+       spinlock_t cq_lock;     /* lock for verifying one thread
+                                  processing cq */
+       spinlock_t mcc_context_lock;    /* lock for protecting mcc
+                                          context free list */
+       unsigned long post_irq;
+       unsigned long cq_irq;
+
+       u32 type;
+       u32 pci_function_number;
+
+       struct be_mcc_object *mcc;      /* mcc rings. */
+
+       struct {
+               struct MCC_MAILBOX_AMAP *va;    /* VA to the mailbox */
+               u64 pa; /* PA to the mailbox */
+               u32 length;     /* byte length of mailbox */
+
+               /* One default context struct used for posting at
+                * least one MCC_WRB
+                */
+               struct be_mcc_wrb_context default_context;
+               bool default_context_allocated;
+       } mailbox;
+
+       struct {
+
+               /* Wake on lans configured. */
+               u32 wol_bitmask;        /* bits 0,1,2,3 are set if
+                                          corresponding index is enabled */
+       } config;
+
+
+       struct BE_FIRMWARE_CONFIG fw_config;
+} ;
+
+/*
+      Represents an Event Queue
+*/
+struct be_eq_object {
+       u32 magic;
+       atomic_t ref_count;
+
+       struct be_function_object *parent_function;
+
+       struct list_head eq_list;
+       struct list_head cq_list_head;
+
+       u32 eq_id;
+       void *cb_context;
+
+} ;
+
+/*
+    Manages a completion queue
+*/
+struct be_cq_object {
+       u32 magic;
+       atomic_t ref_count;
+
+       struct be_function_object *parent_function;
+       struct be_eq_object *eq_object;
+
+       struct list_head cq_list;
+       struct list_head cqlist_for_eq;
+
+       void *va;
+       u32 num_entries;
+
+       void *cb_context;
+
+       u32 cq_id;
+
+} ;
+
+/*
+    Manages an ethernet send queue
+*/
+struct be_ethsq_object {
+       u32 magic;
+
+       struct list_head list;
+
+       struct be_function_object *parent_function;
+       struct be_cq_object *cq_object;
+       u32 bid;
+
+} ;
+
+/*
+@brief
+    Manages an ethernet receive queue
+*/
+struct be_ethrq_object {
+       u32 magic;
+       struct list_head list;
+       struct be_function_object *parent_function;
+       u32 rid;
+       struct be_cq_object *cq_object;
+       struct be_cq_object *rss_cq_object[4];
+
+} ;
+
+/*
+    Manages an MCC
+*/
+typedef void (*mcc_async_event_callback) (void *context, u32 event_code,
+                               void *event);
+struct be_mcc_object {
+       u32 magic;
+
+       struct be_function_object *parent_function;
+       struct list_head mcc_list;
+
+       struct be_cq_object *cq_object;
+
+       /* Async event callback for MCC CQ. */
+       mcc_async_event_callback async_cb;
+       void *async_context;
+
+       struct {
+               struct be_mcc_wrb_context *base;
+               u32 num;
+               struct list_head list_head;
+       } wrb_context;
+
+       struct {
+               struct ring_desc *rd;
+               struct mp_ring ring;
+       } sq;
+
+       struct {
+               struct mp_ring ring;
+       } cq;
+
+       u32 processing;         /* flag indicating that one thread
+                                  is processing CQ */
+       u32 rearm;              /* doorbell rearm setting to make
+                                  sure the active processing thread
+                                  rearms the CQ if any of the threads
+                                  requested it. */
+
+       struct list_head backlog;
+       u32 backlog_length;
+       u32 driving_backlog;
+       u32 consumed_index;
+
+} ;
+
+
+/* Queue context header -- the required software information for
+ * queueing a WRB.
+ */
+struct be_queue_driver_context {
+       mcc_wrb_cqe_callback internal_cb;       /* Function to call on
+                                                  completion */
+       void *internal_cb_context;      /* Parameter to pass
+                                                  to completion function */
+
+       mcc_wrb_cqe_callback cb;        /* Function to call on completion */
+       void *cb_context;       /* Parameter to pass to completion function */
+
+       struct be_mcc_wrb_response_copy copy;   /* Optional parameters to copy
+                                          embedded response to user's va */
+       void *optional_fwcmd_va;
+       struct list_head list;
+       u32 bytes;
+} ;
+
+/*
+ * Common MCC WRB header that all commands require.
+ */
+struct be_mcc_wrb_header {
+       u8 rsvd[AMAP_BYTE_OFFSET(MCC_WRB, payload)];
+} ;
+
+/*
+ * All non embedded commands supported by hwlib functions only allow
+ * 1 SGE.  This queue context handles them all.
+ */
+struct be_nonembedded_q_ctxt {
+       struct be_queue_driver_context context;
+       struct be_mcc_wrb_header wrb_header;
+       struct MCC_SGE_AMAP sge[1];
+} ;
+
+/*
+ * ------------------------------------------------------------------------
+ *  This section contains the specific queue struct for each command.
+ *  The user could always provide a be_generic_q_ctxt but this is a
+ *  rather large struct.  By using the specific struct, memory consumption
+ *  can be reduced.
+ * ------------------------------------------------------------------------
+ */
+
+struct be_link_status_q_ctxt {
+       struct be_queue_driver_context context;
+       struct be_mcc_wrb_header wrb_header;
+       struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY fwcmd;
+} ;
+
+struct be_multicast_q_ctxt {
+       struct be_queue_driver_context context;
+       struct be_mcc_wrb_header wrb_header;
+       struct FWCMD_COMMON_NTWK_MULTICAST_SET fwcmd;
+} ;
+
+
+struct be_vlan_q_ctxt {
+       struct be_queue_driver_context context;
+       struct be_mcc_wrb_header wrb_header;
+       struct FWCMD_COMMON_NTWK_VLAN_CONFIG fwcmd;
+} ;
+
+struct be_promiscuous_q_ctxt {
+       struct be_queue_driver_context context;
+       struct be_mcc_wrb_header wrb_header;
+       struct FWCMD_ETH_PROMISCUOUS fwcmd;
+} ;
+
+struct be_force_failover_q_ctxt {
+       struct be_queue_driver_context context;
+       struct be_mcc_wrb_header wrb_header;
+       struct FWCMD_COMMON_FORCE_FAILOVER fwcmd;
+} ;
+
+
+struct be_rxf_filter_q_ctxt {
+       struct be_queue_driver_context context;
+       struct be_mcc_wrb_header wrb_header;
+       struct FWCMD_COMMON_NTWK_RX_FILTER fwcmd;
+} ;
+
+struct be_eq_modify_delay_q_ctxt {
+       struct be_queue_driver_context context;
+       struct be_mcc_wrb_header wrb_header;
+       struct FWCMD_COMMON_MODIFY_EQ_DELAY fwcmd;
+} ;
+
+/*
+ * The generic context is the largest size that would be required.
+ * It is the software context plus an entire WRB.
+ */
+struct be_generic_q_ctxt {
+       struct be_queue_driver_context context;
+       struct be_mcc_wrb_header wrb_header;
+       struct MCC_WRB_PAYLOAD_AMAP payload;
+} ;
+
+/*
+ * Types for the BE_QUEUE_CONTEXT object.
+ */
+#define BE_QUEUE_INVALID       (0)
+#define BE_QUEUE_LINK_STATUS   (0xA006)
+#define BE_QUEUE_ETH_STATS     (0xA007)
+#define BE_QUEUE_TPM_STATS     (0xA008)
+#define BE_QUEUE_TCP_STATS     (0xA009)
+#define BE_QUEUE_MULTICAST     (0xA00A)
+#define BE_QUEUE_VLAN          (0xA00B)
+#define BE_QUEUE_RSS           (0xA00C)
+#define BE_QUEUE_FORCE_FAILOVER        (0xA00D)
+#define BE_QUEUE_PROMISCUOUS   (0xA00E)
+#define BE_QUEUE_WAKE_ON_LAN   (0xA00F)
+#define BE_QUEUE_NOP           (0xA010)
+
+/* --- BE_FUNCTION_ENUM --- */
+#define BE_FUNCTION_TYPE_ISCSI          (0)
+#define BE_FUNCTION_TYPE_NETWORK        (1)
+#define BE_FUNCTION_TYPE_ARM            (2)
+
+/* --- BE_ETH_TX_RING_TYPE_ENUM --- */
+#define BE_ETH_TX_RING_TYPE_FORWARDING  (1)    /* Ether ring for forwarding */
+#define BE_ETH_TX_RING_TYPE_STANDARD    (2)    /* Ether ring for sending */
+                                               /* network packets. */
+#define BE_ETH_TX_RING_TYPE_BOUND       (3)    /* Ethernet ring for sending */
+                                               /* network packets, bound */
+                                               /* to a physical port. */
+/*
+ * ----------------------------------------------------------------------
+ *   API MACROS
+ * ----------------------------------------------------------------------
+ */
+#define BE_FWCMD_NAME(_short_name_)     struct FWCMD_##_short_name_
+#define BE_OPCODE_NAME(_short_name_)    OPCODE_##_short_name_
+#define BE_SUBSYSTEM_NAME(_short_name_) SUBSYSTEM_##_short_name_
+
+
+#define BE_PREPARE_EMBEDDED_FWCMD(_pfob_, _wrb_, _short_name_) \
+       ((BE_FWCMD_NAME(_short_name_) *)                                \
+       be_function_prepare_embedded_fwcmd(_pfob_, _wrb_,       \
+               sizeof(BE_FWCMD_NAME(_short_name_)),            \
+               FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
+               FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
+               BE_OPCODE_NAME(_short_name_),                           \
+               BE_SUBSYSTEM_NAME(_short_name_)));
+
+#define BE_PREPARE_NONEMBEDDED_FWCMD(_pfob_, _wrb_, _iva_, _ipa_, _short_name_)\
+       ((BE_FWCMD_NAME(_short_name_) *)                                \
+       be_function_prepare_nonembedded_fwcmd(_pfob_, _wrb_, (_iva_), (_ipa_), \
+               sizeof(BE_FWCMD_NAME(_short_name_)),            \
+               FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
+               FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
+               BE_OPCODE_NAME(_short_name_),                           \
+               BE_SUBSYSTEM_NAME(_short_name_)));
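+
+/*
+ * A hedged usage sketch for the macros above (the real call sites live
+ * in the hwlib .c files): the short name is token-pasted into the FWCMD
+ * struct, opcode, and subsystem names, e.g.
+ *
+ *     struct MCC_WRB_AMAP *wrb = be_function_peek_mcc_wrb(pfob);
+ *     struct FWCMD_COMMON_GET_FW_VERSION *fwcmd =
+ *             BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FW_VERSION);
+ */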
+
+int be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
+       u8 __iomem *pci_va, u32 function_type, struct ring_desc *mailbox_rd,
+         struct be_function_object *pfob);
+
+int be_function_object_destroy(struct be_function_object *pfob);
+int be_function_cleanup(struct be_function_object *pfob);
+
+
+int be_function_get_fw_version(struct be_function_object *pfob,
+       struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fw_version,
+       mcc_wrb_cqe_callback cb, void *cb_context);
+
+
+int be_eq_modify_delay(struct be_function_object *pfob,
+                  u32 num_eq, struct be_eq_object **eq_array,
+                  u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
+                  void *cb_context,
+                  struct be_eq_modify_delay_q_ctxt *q_ctxt);
+
+
+
+int be_eq_create(struct be_function_object *pfob,
+            struct ring_desc *rd, u32 eqe_size, u32 num_entries,
+            u32 watermark, u32 timer_delay, struct be_eq_object *eq_object);
+
+int be_eq_destroy(struct be_eq_object *eq);
+
+int be_cq_create(struct be_function_object *pfob,
+       struct ring_desc *rd, u32 length,
+       bool solicited_eventable, bool no_delay,
+       u32 wm_thresh, struct be_eq_object *eq_object,
+       struct be_cq_object *cq_object);
+
+int be_cq_destroy(struct be_cq_object *cq);
+
+int be_mcc_ring_create(struct be_function_object *pfob,
+                  struct ring_desc *rd, u32 length,
+                  struct be_mcc_wrb_context *context_array,
+                  u32 num_context_entries,
+                  struct be_cq_object *cq, struct be_mcc_object *mcc);
+int be_mcc_ring_destroy(struct be_mcc_object *mcc_object);
+
+int be_mcc_process_cq(struct be_mcc_object *mcc_object, bool rearm);
+
+int be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
+               mcc_async_event_callback cb, void *cb_context);
+
+int be_pci_soft_reset(struct be_function_object *pfob);
+
+
+int be_drive_POST(struct be_function_object *pfob);
+
+
+int be_eth_sq_create(struct be_function_object *pfob,
+               struct ring_desc *rd, u32 length_in_bytes,
+               u32 type, u32 ulp, struct be_cq_object *cq_object,
+               struct be_ethsq_object *eth_sq);
+
+struct be_eth_sq_parameters {
+       u32 port;
+       u32 rsvd0[2];
+};
+
+int be_eth_sq_create_ex(struct be_function_object *pfob,
+                   struct ring_desc *rd, u32 length_in_bytes,
+                   u32 type, u32 ulp, struct be_cq_object *cq_object,
+                   struct be_eth_sq_parameters *ex_parameters,
+                   struct be_ethsq_object *eth_sq);
+int be_eth_sq_destroy(struct be_ethsq_object *eth_sq);
+
+int be_eth_set_flow_control(struct be_function_object *pfob,
+                       bool txfc_enable, bool rxfc_enable);
+
+int be_eth_get_flow_control(struct be_function_object *pfob,
+                       bool *txfc_enable, bool *rxfc_enable);
+int be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps);
+
+int be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps);
+
+int be_eth_set_frame_size(struct be_function_object *pfob,
+                     u32 *tx_frame_size, u32 *rx_frame_size);
+
+int be_eth_rq_create(struct be_function_object *pfob,
+                struct ring_desc *rd, struct be_cq_object *cq_object,
+                struct be_cq_object *bcmc_cq_object,
+                struct be_ethrq_object *eth_rq);
+
+int be_eth_rq_destroy(struct be_ethrq_object *eth_rq);
+
+int be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
+               mcc_wrb_cqe_callback cb, void *cb_context);
+int be_eth_rq_set_frag_size(struct be_function_object *pfob,
+               u32 new_frag_size_bytes, u32 *actual_frag_size_bytes);
+int be_eth_rq_get_frag_size(struct be_function_object *pfob,
+                                               u32 *frag_size_bytes);
+
+void *be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
+                  struct MCC_WRB_AMAP *wrb,
+                  u32 payload_length, u32 request_length,
+                  u32 response_length, u32 opcode, u32 subsystem);
+void *be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
+       struct MCC_WRB_AMAP *wrb, void *fwcmd_header_va, u64 fwcmd_header_pa,
+       u32 payload_length, u32 request_length, u32 response_length,
+       u32 opcode, u32 subsystem);
+
+
+struct MCC_WRB_AMAP *
+be_function_peek_mcc_wrb(struct be_function_object *pfob);
+
+int be_rxf_mac_address_read_write(struct be_function_object *pfob,
+             bool port1, bool mac1, bool mgmt,
+             bool write, bool permanent, u8 *mac_address,
+             mcc_wrb_cqe_callback cb,
+             void *cb_context);
+
+int be_rxf_multicast_config(struct be_function_object *pfob,
+                       bool promiscuous, u32 num, u8 *mac_table,
+                       mcc_wrb_cqe_callback cb,
+                       void *cb_context,
+                       struct be_multicast_q_ctxt *q_ctxt);
+
+int be_rxf_vlan_config(struct be_function_object *pfob,
+          bool promiscuous, u32 num, u16 *vlan_tag_array,
+          mcc_wrb_cqe_callback cb, void *cb_context,
+          struct be_vlan_q_ctxt *q_ctxt);
+
+
+int be_rxf_link_status(struct be_function_object *pfob,
+                  struct BE_LINK_STATUS *link_status,
+                  mcc_wrb_cqe_callback cb,
+                  void *cb_context,
+                  struct be_link_status_q_ctxt *q_ctxt);
+
+
+int be_rxf_query_eth_statistics(struct be_function_object *pfob,
+               struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
+               u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
+               void *cb_context,
+               struct be_nonembedded_q_ctxt *q_ctxt);
+
+int be_rxf_promiscuous(struct be_function_object *pfob,
+                  bool enable_port0, bool enable_port1,
+                  mcc_wrb_cqe_callback cb, void *cb_context,
+                  struct be_promiscuous_q_ctxt *q_ctxt);
+
+
+int be_rxf_filter_config(struct be_function_object *pfob,
+                    struct NTWK_RX_FILTER_SETTINGS *settings,
+                    mcc_wrb_cqe_callback cb,
+                    void *cb_context,
+                    struct be_rxf_filter_q_ctxt *q_ctxt);
+
+/*
+ * ------------------------------------------------------
+ *  internal functions used by hwlib
+ * ------------------------------------------------------
+ */
+
+
+int be_function_ring_destroy(struct be_function_object *pfob,
+                      u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
+                      void *cb_context,
+                      mcc_wrb_cqe_callback internal_cb,
+                      void *internal_callback_context);
+
+int be_function_post_mcc_wrb(struct be_function_object *pfob,
+               struct MCC_WRB_AMAP *wrb,
+               struct be_generic_q_ctxt *q_ctxt,
+               mcc_wrb_cqe_callback cb, void *cb_context,
+               mcc_wrb_cqe_callback internal_cb,
+               void *internal_cb_context, void *optional_fwcmd_va,
+               struct be_mcc_wrb_response_copy *response_copy);
+
+int be_function_queue_mcc_wrb(struct be_function_object *pfob,
+                         struct be_generic_q_ctxt *q_ctxt);
+
+/*
+ * ------------------------------------------------------
+ *  MCC QUEUE
+ * ------------------------------------------------------
+ */
+
+int be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *rd);
+
+
+struct MCC_WRB_AMAP *
+_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue);
+
+struct be_mcc_wrb_context *
+_be_mcc_allocate_wrb_context(struct be_function_object *pfob);
+
+void _be_mcc_free_wrb_context(struct be_function_object *pfob,
+                        struct be_mcc_wrb_context *context);
+
+int _be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
+        struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
+
+int _be_mpu_post_wrb_ring(struct be_mcc_object *mcc,
+       struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);
+
+void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc);
+
+
+/*
+ * ------------------------------------------------------
+ *  Ring Sizes
+ * ------------------------------------------------------
+ */
+static inline u32 be_ring_encoding_to_length(u32 encoding, u32 object_size)
+{
+       ASSERT(encoding != 1);  /* 1 is rsvd */
+       ASSERT(encoding < 16);
+       ASSERT(object_size > 0);
+
+       if (encoding == 0)      /* 32k deep */
+               encoding = 16;
+
+       return (1 << (encoding - 1)) * object_size;
+}
+
+static inline
+u32 be_ring_length_to_encoding(u32 length_in_bytes, u32 object_size)
+{
+       u32 count, encoding;
+
+       ASSERT(object_size > 0);
+       ASSERT(length_in_bytes % object_size == 0);
+
+       count = length_in_bytes / object_size;
+
+       ASSERT(count > 1);
+       ASSERT(count <= 32 * 1024);
+       ASSERT(length_in_bytes <= 8 * PAGE_SIZE); /* max ring size in UT */
+
+       encoding = __ilog2_u32(count) + 1;
+
+       if (encoding == 16)
+               encoding = 0;   /* 32k deep */
+
+       return encoding;
+}
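+
+/*
+ * Worked example of the encoding above: an encoding of n stands for
+ * 2^(n-1) ring entries, with 0 denoting the deepest (32k) ring:
+ *
+ *     encoding 2 ->     2 entries
+ *     encoding 5 ->    16 entries
+ *     encoding 0 -> 32768 entries
+ *
+ * so a 16-WRB MCC ring encodes as
+ * be_ring_length_to_encoding(16 * sizeof(struct MCC_WRB_AMAP),
+ *                            sizeof(struct MCC_WRB_AMAP)) == 5.
+ */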
+
+void be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list,
+                                               u32 max_num);
+#endif /* __hwlib_h__ */
diff --git a/drivers/staging/benet/hwlib/mpu.c b/drivers/staging/benet/hwlib/mpu.c
new file mode 100644 (file)
index 0000000..bec8dc9
--- /dev/null
@@ -0,0 +1,1362 @@
+/*
+ * Copyright (C) 2005 - 2008 ServerEngines
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.  The full GNU General
+ * Public License is included in this distribution in the file called COPYING.
+ *
+ * Contact Information:
+ * linux-drivers@serverengines.com
+ *
+ * ServerEngines
+ * 209 N. Fair Oaks Ave
+ * Sunnyvale, CA 94085
+ */
+#include "hwlib.h"
+#include "bestatus.h"
+
+static
+inline void mp_ring_create(struct mp_ring *ring, u32 num, u32 size, void *va)
+{
+       ASSERT(ring);
+       memset(ring, 0, sizeof(struct mp_ring));
+       ring->num = num;
+       ring->pages = DIV_ROUND_UP(num * size, PAGE_SIZE);
+       ring->itemSize = size;
+       ring->va = va;
+}
+
+/*
+ * -----------------------------------------------------------------------
+ * Interface for 2 index rings. i.e. consumer/producer rings
+ * --------------------------------------------------------------------------
+ */
+
+/* Returns the number of items pending on the ring. */
+static inline u32 mp_ring_num_pending(struct mp_ring *ring)
+{
+       ASSERT(ring);
+       if (ring->num == 0)
+               return 0;
+       return be_subc(ring->pidx, ring->cidx, ring->num);
+}
+
+/* Returns the number of free slots on the ring. */
+static inline u32 mp_ring_num_empty(struct mp_ring *ring)
+{
+       ASSERT(ring);
+       return ring->num - 1 - mp_ring_num_pending(ring);
+}
+
+/* Consume 1 item */
+static inline void mp_ring_consume(struct mp_ring *ring)
+{
+       ASSERT(ring);
+       ASSERT(ring->pidx != ring->cidx);
+
+       ring->cidx = be_addc(ring->cidx, 1, ring->num);
+}
+
+/* Produce 1 item */
+static inline void mp_ring_produce(struct mp_ring *ring)
+{
+       ASSERT(ring);
+       ring->pidx = be_addc(ring->pidx, 1, ring->num);
+}
+
+/* Consume count items */
+static inline void mp_ring_consume_multiple(struct mp_ring *ring, u32 count)
+{
+       ASSERT(ring);
+       ASSERT(mp_ring_num_pending(ring) >= count);
+       ring->cidx = be_addc(ring->cidx, count, ring->num);
+}
+
+static inline void *mp_ring_item(struct mp_ring *ring, u32 index)
+{
+       ASSERT(ring);
+       ASSERT(index < ring->num);
+       ASSERT(ring->itemSize > 0);
+       return (u8 *) ring->va + index * ring->itemSize;
+}
+
+/* Ptr to produce item */
+static inline void *mp_ring_producer_ptr(struct mp_ring *ring)
+{
+       ASSERT(ring);
+       return mp_ring_item(ring, ring->pidx);
+}
+
+/*
+ * Returns a pointer to the current location in the ring.
+ * This is used for rings with 1 index.
+ */
+static inline void *mp_ring_current(struct mp_ring *ring)
+{
+       ASSERT(ring);
+       ASSERT(ring->pidx == 0);        /* not used */
+
+       return mp_ring_item(ring, ring->cidx);
+}
+
+/*
+ * Increment index for rings with only 1 index.
+ * This is used for rings with 1 index.
+ */
+static inline void *mp_ring_next(struct mp_ring *ring)
+{
+       ASSERT(ring);
+       ASSERT(ring->num > 0);
+       ASSERT(ring->pidx == 0);        /* not used */
+
+       ring->cidx = be_addc(ring->cidx, 1, ring->num);
+       return mp_ring_current(ring);
+}
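+
+/*
+ * A minimal sketch of the two-index interface (this mirrors what
+ * _be_mpu_post_wrb_ring and be_mcc_process_cq do further below):
+ *
+ *     if (mp_ring_num_empty(ring) > 0) {
+ *             struct MCC_WRB_AMAP *wrb = mp_ring_producer_ptr(ring);
+ *             ... fill in *wrb ...
+ *             mp_ring_produce(ring);          (advances pidx)
+ *     }
+ *     ... later, once a completion reports consumption ...
+ *     mp_ring_consume(ring);                  (advances cidx)
+ */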
+
+/*
+    This routine waits for a previously posted mailbox WRB to be completed.
+    Specifically, it waits for the mailbox to say that it's ready to accept
+    more data by setting the LSB of the mailbox pd register to 1.
+
+    pfob             - The function object whose mailbox is polled
+
+    IRQL < DISPATCH_LEVEL
+*/
+static void be_mcc_mailbox_wait(struct be_function_object *pfob)
+{
+       struct MPU_MAILBOX_DB_AMAP mailbox_db;
+       u32 i = 0;
+       u32 ready;
+
+       if (pfob->emulate) {
+               /* No waiting for mailbox in emulated mode. */
+               return;
+       }
+
+       mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db);
+       ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db);
+
+       while (!ready) {
+               if ((++i & 0x3FFFF) == 0) {
+                       TRACE(DL_WARN, "Waiting for mailbox ready - %dk polls",
+                                                               i / 1000);
+               }
+               udelay(1);
+               mailbox_db.dw[0] = PD_READ(pfob, mcc_bootstrap_db);
+               ready = AMAP_GET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db);
+       }
+}
+
+/*
+    This routine tells the MCC mailbox that there is data to be processed
+    in the mailbox. It does this by setting the physical address for the
+    mailbox location and clearing the LSB.  This routine returns immediately
+    and does not wait for the WRB to be processed.
+
+    pfob             - The function object to post this data to
+
+    IRQL < DISPATCH_LEVEL
+
+*/
+static void be_mcc_mailbox_notify(struct be_function_object *pfob)
+{
+       struct MPU_MAILBOX_DB_AMAP mailbox_db;
+       u32 pa;
+
+       ASSERT(pfob->mailbox.pa);
+       ASSERT(pfob->mailbox.va);
+
+       /* If emulated, do not ring the mailbox */
+       if (pfob->emulate) {
+               TRACE(DL_WARN, "MPU disabled. Skipping mailbox notify.");
+               return;
+       }
+
+       /* form the higher bits in the address */
+       mailbox_db.dw[0] = 0;   /* init */
+       AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 1);
+       AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0);
+
+       /* bits 34 to 63 */
+       pa = (u32) (pfob->mailbox.pa >> 34);
+       AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa);
+
+       /* Wait for the MPU to be ready */
+       be_mcc_mailbox_wait(pfob);
+
+       /* Ring doorbell 1st time */
+       PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]);
+
+       /* Wait for 1st write to be acknowledged. */
+       be_mcc_mailbox_wait(pfob);
+
+       /* lower bits 30 bits from 4th bit (bits 4 to 33)*/
+       pa = (u32) (pfob->mailbox.pa >> 4) & 0x3FFFFFFF;
+
+       AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, hi, &mailbox_db, 0);
+       AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, ready, &mailbox_db, 0);
+       AMAP_SET_BITS_PTR(MPU_MAILBOX_DB, address, &mailbox_db, pa);
+
+       /* Ring doorbell 2nd time */
+       PD_WRITE(pfob, mcc_bootstrap_db, mailbox_db.dw[0]);
+}
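+
+/*
+ * Note on the split above: the doorbell is a single 32-bit register
+ * whose low bits carry the 'ready' and 'hi' flags, so the 64-bit,
+ * 16-byte-aligned mailbox address must be delivered in two writes:
+ * the high bits (34..63) first with 'hi' set, then bits 4..33 with
+ * 'hi' clear.
+ */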
+
+/*
+    This routine tells the MCC mailbox that there is data to be processed
+    in the mailbox. It does this by setting the physical address for the
+    mailbox location and clearing the LSB.  This routine spins until the
+    MPU writes a 1 into the LSB indicating that the data has been received
+    and is ready to be processed.
+
+    pfob             - The function object to post this data to
+
+    IRQL < DISPATCH_LEVEL
+*/
+static void
+be_mcc_mailbox_notify_and_wait(struct be_function_object *pfob)
+{
+       /*
+        * Notify it
+        */
+       be_mcc_mailbox_notify(pfob);
+       /*
+        * Now wait for completion of WRB
+        */
+       be_mcc_mailbox_wait(pfob);
+}
+
+void
+be_mcc_process_cqe(struct be_function_object *pfob,
+                               struct MCC_CQ_ENTRY_AMAP *cqe)
+{
+       struct be_mcc_wrb_context *wrb_context = NULL;
+       u32 offset, status;
+       u8 *p;
+
+       ASSERT(cqe);
+       /*
+        * A command completed.  Commands complete out-of-order.
+        * Determine which command completed from the TAG.
+        */
+       offset = AMAP_BYTE_OFFSET(MCC_CQ_ENTRY, mcc_tag);
+       p = (u8 *) cqe + offset;
+       wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p);
+
+       ASSERT(wrb_context);
+       /*
+        * Perform a response copy if requested.
+        * Only copy data if the FWCMD is successful.
+        */
+       status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, cqe);
+       if (status == MGMT_STATUS_SUCCESS && wrb_context->copy.length > 0) {
+               ASSERT(wrb_context->wrb);
+               ASSERT(wrb_context->copy.va);
+               p = (u8 *)wrb_context->wrb + AMAP_BYTE_OFFSET(MCC_WRB, payload);
+               memcpy(wrb_context->copy.va,
+                         (u8 *)p + wrb_context->copy.fwcmd_offset,
+                         wrb_context->copy.length);
+       }
+
+       if (status)
+               status = BE_NOT_OK;
+       /* internal callback */
+       if (wrb_context->internal_cb) {
+               wrb_context->internal_cb(wrb_context->internal_cb_context,
+                                               status, wrb_context->wrb);
+       }
+
+       /* callback */
+       if (wrb_context->cb) {
+               wrb_context->cb(wrb_context->cb_context,
+                                             status, wrb_context->wrb);
+       }
+       /* Free the context structure */
+       _be_mcc_free_wrb_context(pfob, wrb_context);
+}
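+
+/*
+ * Note on the tag recovery above (a sketch of the flow; the post side
+ * lives in funcobj.c): the poster presumably stores the 64-bit
+ * be_mcc_wrb_context pointer in the WRB's mcc_tag field, the hardware
+ * echoes the tag back in the CQE, and be_mcc_process_cqe() uses it to
+ * match an out-of-order completion to its context.
+ */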
+
+void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc)
+{
+       struct be_function_object *pfob = NULL;
+       int status = BE_PENDING;
+       struct be_generic_q_ctxt *q_ctxt;
+       struct MCC_WRB_AMAP *wrb;
+       struct MCC_WRB_AMAP *queue_wrb;
+       u32 length, payload_length, sge_count, embedded;
+       unsigned long irql;
+
+       BUILD_BUG_ON((sizeof(struct be_generic_q_ctxt) <
+                         sizeof(struct be_queue_driver_context) +
+                                       sizeof(struct MCC_WRB_AMAP)));
+       pfob = mcc->parent_function;
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       if (mcc->driving_backlog) {
+               spin_unlock_irqrestore(&pfob->post_lock, irql);
+               if (pfob->pend_queue_driving && pfob->mcc) {
+                       pfob->pend_queue_driving = 0;
+                       be_drive_mcc_wrb_queue(pfob->mcc);
+               }
+               return;
+       }
+       /* Acquire the flag to limit 1 thread to redrive posts. */
+       mcc->driving_backlog = 1;
+
+       while (!list_empty(&mcc->backlog)) {
+               wrb = _be_mpu_peek_ring_wrb(mcc, true); /* Driving the queue */
+               if (!wrb)
+                       break;  /* No space in the ring yet. */
+               /* Get the next queued entry to process. */
+               q_ctxt = list_first_entry(&mcc->backlog,
+                               struct be_generic_q_ctxt, context.list);
+               list_del(&q_ctxt->context.list);
+               pfob->mcc->backlog_length--;
+               /*
+                * Compute the required length of the WRB.
+                * Since the queue element may be smaller than
+                * the complete WRB, copy only the required number of bytes.
+                */
+               queue_wrb = (struct MCC_WRB_AMAP *) &q_ctxt->wrb_header;
+               embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, queue_wrb);
+               if (embedded) {
+                       payload_length = AMAP_GET_BITS_PTR(MCC_WRB,
+                                               payload_length, queue_wrb);
+                       length = sizeof(struct be_mcc_wrb_header) +
+                                                               payload_length;
+               } else {
+                       sge_count = AMAP_GET_BITS_PTR(MCC_WRB, sge_count,
+                                                               queue_wrb);
+                       ASSERT(sge_count == 1); /* only 1 frag. */
+                       length = sizeof(struct be_mcc_wrb_header) +
+                           sge_count * sizeof(struct MCC_SGE_AMAP);
+               }
+
+               /*
+                * Truncate the length based on the size of the
+                * queue element.  Some elements that have output parameters
+                * can be smaller than the payload_length field would
+                * indicate.  We really only need to copy the request
+                * parameters, not the response.
+                */
+               length = min(length, (u32) (q_ctxt->context.bytes -
+                       offsetof(struct be_generic_q_ctxt, wrb_header)));
+
+               /* Copy the queue element WRB into the ring. */
+               memcpy(wrb, &q_ctxt->wrb_header, length);
+
+               /* Post the wrb.  This should not fail assuming we have
+                * enough context structs. */
+               status = be_function_post_mcc_wrb(pfob, wrb, NULL,
+                          q_ctxt->context.cb, q_ctxt->context.cb_context,
+                          q_ctxt->context.internal_cb,
+                          q_ctxt->context.internal_cb_context,
+                          q_ctxt->context.optional_fwcmd_va,
+                          &q_ctxt->context.copy);
+
+               if (status == BE_SUCCESS) {
+                       /*
+                        * Synchronous completion. Since it was queued,
+                        * we will invoke the callback.
+                        * To the user, this is an asynchronous request.
+                        */
+                       spin_unlock_irqrestore(&pfob->post_lock, irql);
+                       if (pfob->pend_queue_driving && pfob->mcc) {
+                               pfob->pend_queue_driving = 0;
+                               be_drive_mcc_wrb_queue(pfob->mcc);
+                       }
+
+                       ASSERT(q_ctxt->context.cb);
+
+                       q_ctxt->context.cb(
+                               q_ctxt->context.cb_context,
+                                               BE_SUCCESS, NULL);
+
+                       spin_lock_irqsave(&pfob->post_lock, irql);
+
+               } else if (status != BE_PENDING) {
+                       /*
+                        * Another resource failed.  Should never happen
+                        * if we have sufficient MCC_WRB_CONTEXT structs.
+                        * Return to head of the queue.
+                        */
+                       TRACE(DL_WARN, "Failed to post a queued WRB. 0x%x",
+                             status);
+                       list_add(&q_ctxt->context.list, &mcc->backlog);
+                       pfob->mcc->backlog_length++;
+                       break;
+               }
+       }
+
+       /* Free the flag to limit 1 thread to redrive posts. */
+       mcc->driving_backlog = 0;
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+}
+
+/* This function asserts that the WRB was consumed in order. */
+#ifdef BE_DEBUG
+u32 be_mcc_wrb_consumed_in_order(struct be_mcc_object *mcc,
+                                       struct MCC_CQ_ENTRY_AMAP *cqe)
+{
+       struct be_mcc_wrb_context *wrb_context = NULL;
+       u32 wrb_index;
+       u32 wrb_consumed_in_order;
+       u32 offset;
+       u8 *p;
+
+       ASSERT(cqe);
+       /*
+        * A command completed.  Commands complete out-of-order.
+        * Determine which command completed from the TAG.
+        */
+       offset = AMAP_BYTE_OFFSET(MCC_CQ_ENTRY, mcc_tag);
+       p = (u8 *) cqe + offset;
+       wrb_context = (struct be_mcc_wrb_context *)(void *)(size_t)(*(u64 *)p);
+
+       ASSERT(wrb_context);
+
+       wrb_index = (u32) (((u64)(size_t)wrb_context->ring_wrb -
+               (u64)(size_t)mcc->sq.ring.va) / sizeof(struct MCC_WRB_AMAP));
+
+       ASSERT(wrb_index < mcc->sq.ring.num);
+
+       wrb_consumed_in_order = (u32) (wrb_index == mcc->consumed_index);
+       mcc->consumed_index = be_addc(mcc->consumed_index, 1, mcc->sq.ring.num);
+       return wrb_consumed_in_order;
+}
+#endif
+
+int be_mcc_process_cq(struct be_mcc_object *mcc, bool rearm)
+{
+       struct be_function_object *pfob = NULL;
+       struct MCC_CQ_ENTRY_AMAP *cqe;
+       struct CQ_DB_AMAP db;
+       struct mp_ring *cq_ring = &mcc->cq.ring;
+       struct mp_ring *mp_ring = &mcc->sq.ring;
+       u32 num_processed = 0;
+       u32 consumed = 0, valid, completed, cqe_consumed, async_event;
+
+       pfob = mcc->parent_function;
+
+       spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq);
+
+       /*
+        * Verify that only one thread is processing the CQ at once.
+        * We cannot hold the lock while processing the CQ due to
+        * the callbacks into the OS.  Therefore, this flag is used
+        * to control it.  If any of the threads want to
+        * rearm the CQ, we need to honor that.
+        */
+       if (mcc->processing != 0) {
+               mcc->rearm = mcc->rearm || rearm;
+               goto Error;
+       } else {
+               mcc->processing = 1;    /* lock processing for this thread. */
+               mcc->rearm = rearm;     /* set our rearm setting */
+       }
+
+       spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq);
+
+       cqe = mp_ring_current(cq_ring);
+       valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe);
+       while (valid) {
+
+               if (num_processed >= 8) {
+                       /* coalesce doorbells, but free space in cq
+                        * ring while processing. */
+                       db.dw[0] = 0;   /* clear */
+                       AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id);
+                       AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, false);
+                       AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
+                       AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db,
+                                                       num_processed);
+                       num_processed = 0;
+
+                       PD_WRITE(pfob, cq_db, db.dw[0]);
+               }
+
+               async_event = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, async_event, cqe);
+               if (async_event) {
+                       /* This is an asynchronous event. */
+                       struct ASYNC_EVENT_TRAILER_AMAP *async_trailer =
+                           (struct ASYNC_EVENT_TRAILER_AMAP *)
+                           ((u8 *) cqe + sizeof(struct MCC_CQ_ENTRY_AMAP) -
+                            sizeof(struct ASYNC_EVENT_TRAILER_AMAP));
+                       u32 event_code;
+                       async_event = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
+                                               async_event, async_trailer);
+                       ASSERT(async_event == 1);
+
+
+                       valid = AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
+                                               valid, async_trailer);
+                       ASSERT(valid == 1);
+
+                       /* Call the async event handler if it is installed. */
+                       if (mcc->async_cb) {
+                               event_code =
+                                       AMAP_GET_BITS_PTR(ASYNC_EVENT_TRAILER,
+                                               event_code, async_trailer);
+                               mcc->async_cb(mcc->async_context,
+                                           (u32) event_code, (void *) cqe);
+                       }
+
+               } else {
+                       /* This is a completion entry. */
+
+                       /* No vm forwarding in this driver. */
+
+                       cqe_consumed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY,
+                                               consumed, cqe);
+                       if (cqe_consumed) {
+                               /*
+                                * A command on the MCC ring was consumed.
+                                * Update the consumer index.
+                                * These occur in order.
+                                */
+                               ASSERT(be_mcc_wrb_consumed_in_order(mcc, cqe));
+                               consumed++;
+                       }
+
+                       completed = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY,
+                                       completed, cqe);
+                       if (completed) {
+                               /* A command completed.  Use tag to
+                                * determine which command.  */
+                               be_mcc_process_cqe(pfob, cqe);
+                       }
+               }
+
+               /* Reset the CQE */
+               AMAP_SET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe, false);
+               num_processed++;
+
+               /* Update our tracking for the CQ ring. */
+               cqe = mp_ring_next(cq_ring);
+               valid = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, valid, cqe);
+       }
+
+       TRACE(DL_INFO, "num_processed:0x%x, and consumed:0x%x",
+             num_processed, consumed);
+       /*
+        * Grab the CQ lock to synchronize the "rearm" setting for
+        * the doorbell, and for clearing the "processing" flag.
+        */
+       spin_lock_irqsave(&pfob->cq_lock, pfob->cq_irq);
+
+       /*
+        * Rearm the cq.  This is done based on the global mcc->rearm
+        * flag which combines the rearm parameter from the current
+        * call to process_cq and any other threads
+        * that tried to process the CQ while this one was active.
+        * This handles the situation where a sync. fwcmd was processing
+        * the CQ while the interrupt/dpc tries to process it.
+        * The sync process gets to continue -- but it is now
+        * responsible for the rearming.
+        */
+       if (num_processed > 0 || mcc->rearm) {
+               db.dw[0] = 0;   /* clear */
+               AMAP_SET_BITS_PTR(CQ_DB, qid, &db, cq_ring->id);
+               AMAP_SET_BITS_PTR(CQ_DB, rearm, &db, mcc->rearm);
+               AMAP_SET_BITS_PTR(CQ_DB, event, &db, false);
+               AMAP_SET_BITS_PTR(CQ_DB, num_popped, &db, num_processed);
+
+               PD_WRITE(pfob, cq_db, db.dw[0]);
+       }
+       /*
+        * Update the consumer index after ringing the CQ doorbell.
+        * We don't want another thread to post more WRBs before we
+        * have CQ space available.
+        */
+       mp_ring_consume_multiple(mp_ring, consumed);
+
+       /* Clear the processing flag. */
+       mcc->processing = 0;
+
+Error:
+       spin_unlock_irqrestore(&pfob->cq_lock, pfob->cq_irq);
+       /*
+        * Use the rearm parameter to detect whether the current thread
+        * may hold the WRB post lock.  If rearm is false, this is
+        * either a synchronous command, or the upper layer driver is polling
+        * from a thread.  We do not drive the queue from that
+        * context since the driver may hold the
+        * wrb post lock already.
+        */
+       if (rearm)
+               be_drive_mcc_wrb_queue(mcc);
+       else
+               pfob->pend_queue_driving = 1;
+
+       return BE_SUCCESS;
+}
+
+/*
+ *============================================================================
+ *                  P U B L I C  R O U T I N E S
+ *============================================================================
+ */
+
+/*
+    This routine creates an MCC object.  This object contains an MCC send queue
+    and a CQ private to the MCC.
+
+    pfob             - Handle to a function object
+
+    cq               - CQ object that will receive this MCC's completions
+
+    mcc              - Pointer to the internal MCC object to initialize.
+
+    Returns BE_SUCCESS if successful, otherwise a useful error code
+       is returned.
+
+    IRQL < DISPATCH_LEVEL
+
+*/
+int
+be_mcc_ring_create(struct be_function_object *pfob,
+                  struct ring_desc *rd, u32 length,
+                  struct be_mcc_wrb_context *context_array,
+                  u32 num_context_entries,
+                  struct be_cq_object *cq, struct be_mcc_object *mcc)
+{
+       int status = 0;
+
+       struct FWCMD_COMMON_MCC_CREATE *fwcmd = NULL;
+       struct MCC_WRB_AMAP *wrb = NULL;
+       u32 num_entries_encoded, n, i;
+       void *va = NULL;
+       unsigned long irql;
+
+       if (length < sizeof(struct MCC_WRB_AMAP) * 2) {
+               TRACE(DL_ERR, "Invalid MCC ring length:%d", length);
+               return BE_NOT_OK;
+       }
+       /*
+        * Reduce the actual ring size to be less than the number
+        * of context entries.  This ensures that we run out of
+        * ring WRBs first so the queuing works correctly.  We never
+        * queue based on context structs.
+        */
+       if (num_context_entries + 1 <
+                       length / sizeof(struct MCC_WRB_AMAP) - 1) {
+
+               u32 max_length =
+                   (num_context_entries + 2) * sizeof(struct MCC_WRB_AMAP);
+
+               if (is_power_of_2(max_length))
+                       length = __roundup_pow_of_two(max_length+1) / 2;
+               else
+                       length = __roundup_pow_of_two(max_length) / 2;
+
+               ASSERT(length <= max_length);
+
+               TRACE(DL_WARN,
+                       "MCC ring length reduced based on context entries."
+                       " length:%d wrbs:%d context_entries:%d", length,
+                       (int) (length / sizeof(struct MCC_WRB_AMAP)),
+                       num_context_entries);
+       }
+
+       spin_lock_irqsave(&pfob->post_lock, irql);
+
+       num_entries_encoded =
+           be_ring_length_to_encoding(length, sizeof(struct MCC_WRB_AMAP));
+
+       /* Init MCC object. */
+       memset(mcc, 0, sizeof(*mcc));
+       mcc->parent_function = pfob;
+       mcc->cq_object = cq;
+
+       INIT_LIST_HEAD(&mcc->backlog);
+
+       wrb = be_function_peek_mcc_wrb(pfob);
+       if (!wrb) {
+               ASSERT(wrb);
+               TRACE(DL_ERR, "No free MCC WRBs in create EQ.");
+               status = BE_STATUS_NO_MCC_WRB;
+               goto error;
+       }
+       /* Prepares an embedded fwcmd, including request/response sizes. */
+       fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_MCC_CREATE);
+
+       fwcmd->params.request.num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+       /*
+        * Program MCC ring context
+        */
+       AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, pdid,
+                       &fwcmd->params.request.context, 0);
+       AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, invalid,
+                       &fwcmd->params.request.context, false);
+       AMAP_SET_BITS_PTR(MCC_RING_CONTEXT, ring_size,
+                       &fwcmd->params.request.context, num_entries_encoded);
+
+       n = cq->cq_id;
+       AMAP_SET_BITS_PTR(MCC_RING_CONTEXT,
+                               cq_id, &fwcmd->params.request.context, n);
+       be_rd_to_pa_list(rd, fwcmd->params.request.pages,
+                               ARRAY_SIZE(fwcmd->params.request.pages));
+       /* Post the f/w command */
+       status = be_function_post_mcc_wrb(pfob, wrb, NULL, NULL, NULL,
+                                               NULL, NULL, fwcmd, NULL);
+       if (status != BE_SUCCESS) {
+               TRACE(DL_ERR, "MCC to create CQ failed.");
+               goto error;
+       }
+       /*
+        * Create a linked list of context structures
+        */
+       mcc->wrb_context.base = context_array;
+       mcc->wrb_context.num = num_context_entries;
+       INIT_LIST_HEAD(&mcc->wrb_context.list_head);
+       memset(context_array, 0,
+                   sizeof(struct be_mcc_wrb_context) * num_context_entries);
+       for (i = 0; i < mcc->wrb_context.num; i++) {
+               list_add_tail(&context_array[i].next,
+                                       &mcc->wrb_context.list_head);
+       }
+
+       /*
+        * Create an mp_ring for tracking the WRB hw ring.
+        */
+       va = rd->va;
+       ASSERT(va);
+       mp_ring_create(&mcc->sq.ring, length / sizeof(struct MCC_WRB_AMAP),
+                               sizeof(struct MCC_WRB_AMAP), va);
+       mcc->sq.ring.id = fwcmd->params.response.id;
+       /*
+        * Init an mp_ring for tracking the MCC CQ.
+        */
+       ASSERT(cq->va);
+       mp_ring_create(&mcc->cq.ring, cq->num_entries,
+                      sizeof(struct MCC_CQ_ENTRY_AMAP), cq->va);
+       mcc->cq.ring.id = cq->cq_id;
+
+       /* Force zeroing of CQ. */
+       memset(cq->va, 0, cq->num_entries * sizeof(struct MCC_CQ_ENTRY_AMAP));
+
+       /* Initialize debug index. */
+       mcc->consumed_index = 0;
+
+       atomic_inc(&cq->ref_count);
+       pfob->mcc = mcc;
+
+       TRACE(DL_INFO, "MCC ring created. id:%d bytes:%d cq_id:%d cq_entries:%d"
+             " num_context:%d", mcc->sq.ring.id, length,
+             cq->cq_id, cq->num_entries, num_context_entries);
+
+error:
+       spin_unlock_irqrestore(&pfob->post_lock, irql);
+       if (pfob->pend_queue_driving && pfob->mcc) {
+               pfob->pend_queue_driving = 0;
+               be_drive_mcc_wrb_queue(pfob->mcc);
+       }
+       return status;
+}
+
+/*
+    This routine destroys an MCC send queue
+
+    mcc             - Internal MCC object to be destroyed.
+
+    Returns BE_SUCCESS if successful, otherwise an error code is returned.
+
+    IRQL < DISPATCH_LEVEL
+
+    The caller of this routine must ensure that no other WRB may be posted
+    until this routine returns.
+
+*/
+int be_mcc_ring_destroy(struct be_mcc_object *mcc)
+{
+       int status = 0;
+       struct be_function_object *pfob = mcc->parent_function;
+
+
+       ASSERT(mcc->processing == 0);
+
+       /*
+        * Remove the ring from the function object.
+        * This transitions back to mailbox mode.
+        */
+       pfob->mcc = NULL;
+
+       /* Send fwcmd to destroy the queue.  (Using the mailbox.) */
+       status = be_function_ring_destroy(mcc->parent_function, mcc->sq.ring.id,
+                            FWCMD_RING_TYPE_MCC, NULL, NULL, NULL, NULL);
+       ASSERT(status == 0);
+
+       /* Release the SQ reference to the CQ */
+       atomic_dec(&mcc->cq_object->ref_count);
+
+       return status;
+}
+
+static void
+mcc_wrb_sync_cb(void *context, int status, struct MCC_WRB_AMAP *wrb)
+{
+       struct be_mcc_wrb_context *wrb_context =
+                               (struct be_mcc_wrb_context *) context;
+       ASSERT(wrb_context);
+       *wrb_context->users_final_status = status;
+}
+
+/*
+    This routine posts a command to the MCC send queue
+
+    mcc             - Internal MCC object to post to.
+
+    wrb             - wrb to post.
+
+    Returns BE_SUCCESS if successful, otherwise an error code is returned.
+
+    IRQL < DISPATCH_LEVEL if CompletionCallback is not NULL
+    IRQL <= DISPATCH_LEVEL if CompletionCallback is NULL
+
+    If this routine is called with CompletionCallback != NULL the
+    call is considered to be asynchronous and will return as soon
+    as the WRB is posted to the MCC with BE_PENDING.
+
+    If CompletionCallback is NULL, then this routine will not return until
+    a completion for this MCC command has been processed.
+    If called at DISPATCH_LEVEL the CompletionCallback must be NULL.
+
+    This routine should only be called if the MPU has been bootstrapped past
+    mailbox mode.
+
+
+*/
+int
+_be_mpu_post_wrb_ring(struct be_mcc_object *mcc, struct MCC_WRB_AMAP *wrb,
+                               struct be_mcc_wrb_context *wrb_context)
+{
+
+       struct MCC_WRB_AMAP *ring_wrb = NULL;
+       int status = BE_PENDING;
+       int final_status = BE_PENDING;
+       mcc_wrb_cqe_callback cb = NULL;
+       struct MCC_DB_AMAP mcc_db;
+       u32 embedded;
+
+       ASSERT(mp_ring_num_empty(&mcc->sq.ring) > 0);
+       /*
+        * Input wrb is most likely the next wrb in the ring, since the client
+        * can peek at the address.
+        */
+       ring_wrb = mp_ring_producer_ptr(&mcc->sq.ring);
+       if (wrb != ring_wrb) {
+               /* If not equal, copy it into the ring. */
+               memcpy(ring_wrb, wrb, sizeof(struct MCC_WRB_AMAP));
+       }
+#ifdef BE_DEBUG
+       wrb_context->ring_wrb = ring_wrb;
+#endif
+       embedded = AMAP_GET_BITS_PTR(MCC_WRB, embedded, ring_wrb);
+       if (embedded) {
+               /* embedded commands will have the response within the WRB. */
+               wrb_context->wrb = ring_wrb;
+       } else {
+               /*
+                * non-embedded commands will not have the response
+                * within the WRB, and they may complete out-of-order.
+                * The WRB will not be valid to inspect
+                * during the completion.
+                */
+               wrb_context->wrb = NULL;
+       }
+       cb = wrb_context->cb;
+
+       if (cb == NULL) {
+               /* Assign our internal callback if this is a
+                * synchronous call. */
+               wrb_context->cb = mcc_wrb_sync_cb;
+               wrb_context->cb_context = wrb_context;
+               wrb_context->users_final_status = &final_status;
+       }
+       /* Increment producer index */
+
+       mcc_db.dw[0] = 0;               /* initialize */
+       AMAP_SET_BITS_PTR(MCC_DB, rid, &mcc_db, mcc->sq.ring.id);
+       AMAP_SET_BITS_PTR(MCC_DB, numPosted, &mcc_db, 1);
+
+       mp_ring_produce(&mcc->sq.ring);
+       PD_WRITE(mcc->parent_function, mpu_mcc_db, mcc_db.dw[0]);
+       TRACE(DL_INFO, "pidx: %x and cidx: %x.", mcc->sq.ring.pidx,
+             mcc->sq.ring.cidx);
+
+       if (cb == NULL) {
+               int polls = 0;  /* At >= 1 us per poll   */
+               /* Wait until this command completes, polling the CQ. */
+               do {
+                       TRACE(DL_INFO, "FWCMD submitted in the poll mode.");
+                       /* Do not rearm CQ in this context. */
+                       be_mcc_process_cq(mcc, false);
+
+                       if (final_status == BE_PENDING) {
+                               if ((++polls & 0x7FFFF) == 0) {
+                                       TRACE(DL_WARN,
+                                             "Warning : polling MCC CQ for %d"
+                                             "ms.", polls / 1000);
+                               }
+
+                               udelay(1);
+                       }
+
+                       /* final_status changed when the command completes */
+               } while (final_status == BE_PENDING);
+
+               status = final_status;
+       }
+
+       return status;
+}
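+
+/*
+ * Hedged usage sketch of the synchronous path above (not a verbatim
+ * call site): a caller that leaves the context callback NULL blocks in
+ * the polling loop until the completion arrives:
+ *
+ *     struct be_mcc_wrb_context *ctx = _be_mcc_allocate_wrb_context(pfob);
+ *     ctx->cb = NULL;         (a NULL callback selects the polling path)
+ *     status = _be_mpu_post_wrb_ring(mcc, wrb, ctx);
+ */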
+
+struct MCC_WRB_AMAP *
+_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue)
+{
+       /* If we have queued items, do not allow a post to bypass the queue. */
+       if (!driving_queue && !list_empty(&mcc->backlog))
+               return NULL;
+
+       if (mp_ring_num_empty(&mcc->sq.ring) <= 0)
+               return NULL;
+       return (struct MCC_WRB_AMAP *) mp_ring_producer_ptr(&mcc->sq.ring);
+}
+
+int
+be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *mailbox)
+{
+       ASSERT(mailbox);
+       pfob->mailbox.va = mailbox->va;
+       pfob->mailbox.pa =  cpu_to_le64(mailbox->pa);
+       pfob->mailbox.length = mailbox->length;
+
+       ASSERT(((u32)(size_t)pfob->mailbox.va & 0xf) == 0);
+       ASSERT(((u32)(size_t)pfob->mailbox.pa & 0xf) == 0);
+       /*
+        * Issue the WRB to set MPU endianness
+        */
+       {
+               u64 *endian_check = (u64 *) (pfob->mailbox.va +
+                                       AMAP_BYTE_OFFSET(MCC_MAILBOX, wrb));
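+               /*
+                * Distinctive byte pattern; the MPU presumably inspects
+                * how the 0x12 0x34 0x56 0x78 bytes land in memory to
+                * infer the host's endianness before parsing real WRBs.
+                */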
+               *endian_check = 0xFF1234FFFF5678FFULL;
+       }
+
+       be_mcc_mailbox_notify_and_wait(pfob);
+
+       return BE_SUCCESS;
+}
+
+
+/*
+    This routine posts a command to the MCC mailbox.
+
+    FuncObj         - Function Object to post the WRB on behalf of.
+    wrb             - wrb to post.
+    CompletionCallback  - Address of a callback routine to invoke once the WRB
+                               is completed.
+    CompletionCallbackContext - Opaque context to be passed during the call to
+                               the CompletionCallback.
+    Returns BE_SUCCESS if successful, otherwise an error code is returned.
+
+    IRQL <=DISPATCH_LEVEL if CompletionCallback is  NULL
+
+    This routine will block until a completion for this MCC command has been
+    processed. If called at DISPATCH_LEVEL the CompletionCallback must be NULL.
+
+    This routine should only be called if the MPU has not been bootstrapped past
+    mailbox mode.
+*/
+int
+_be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
+        struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context)
+{
+       struct MCC_MAILBOX_AMAP *mailbox = NULL;
+       struct MCC_WRB_AMAP *mb_wrb;
+       struct MCC_CQ_ENTRY_AMAP *mb_cq;
+       u32 offset, status;
+
+       ASSERT(pfob->mcc == NULL);
+       mailbox = pfob->mailbox.va;
+       ASSERT(mailbox);
+
+       offset = AMAP_BYTE_OFFSET(MCC_MAILBOX, wrb);
+       mb_wrb = (struct MCC_WRB_AMAP *) ((u8 *)mailbox + offset);
+       if (mb_wrb != wrb) {
+               memset(mailbox, 0, sizeof(*mailbox));
+               memcpy(mb_wrb, wrb, sizeof(struct MCC_WRB_AMAP));
+       }
+       /* The callback can inspect the final WRB to get output parameters. */
+       wrb_context->wrb = mb_wrb;
+
+       be_mcc_mailbox_notify_and_wait(pfob);
+
+       /* A command completed.  Use tag to determine which command. */
+       offset = AMAP_BYTE_OFFSET(MCC_MAILBOX, cq);
+       mb_cq = (struct MCC_CQ_ENTRY_AMAP *) ((u8 *)mailbox + offset);
+       be_mcc_process_cqe(pfob, mb_cq);
+
+       status = AMAP_GET_BITS_PTR(MCC_CQ_ENTRY, completion_status, mb_cq);
+       if (status)
+               status = BE_NOT_OK;
+       return status;
+}
+
+struct be_mcc_wrb_context *
+_be_mcc_allocate_wrb_context(struct be_function_object *pfob)
+{
+       struct be_mcc_wrb_context *context = NULL;
+       unsigned long irq;
+
+       spin_lock_irqsave(&pfob->mcc_context_lock, irq);
+
+       if (!pfob->mailbox.default_context_allocated) {
+               /* Use the single default context that we
+                * always have allocated. */
+               pfob->mailbox.default_context_allocated = true;
+               context = &pfob->mailbox.default_context;
+       } else if (pfob->mcc) {
+               /* Get a context from the free list. If any are available. */
+               if (!list_empty(&pfob->mcc->wrb_context.list_head)) {
+                       context = list_first_entry(
+                               &pfob->mcc->wrb_context.list_head,
+                                        struct be_mcc_wrb_context, next);
+               }
+       }
+
+       spin_unlock_irqrestore(&pfob->mcc_context_lock, irq);
+
+       return context;
+}
+
+void
+_be_mcc_free_wrb_context(struct be_function_object *pfob,
+                        struct be_mcc_wrb_context *context)
+{
+       unsigned long irq;
+
+       ASSERT(context);
+       /*
+        * Zero during free to try and catch any bugs where the context
+        * is accessed after a free.
+        */
+       memset(context, 0, sizeof(*context));
+
+       spin_lock_irqsave(&pfob->mcc_context_lock, irq);
+
+       if (context == &pfob->mailbox.default_context) {
+               /* Free the default context. */
+               ASSERT(pfob->mailbox.default_context_allocated);
+               pfob->mailbox.default_context_allocated = false;
+       } else {
+               /* Add to free list. */
+               ASSERT(pfob->mcc);
+               list_add_tail(&context->next,
+                               &pfob->mcc->wrb_context.list_head);
+       }
+
+       spin_unlock_irqrestore(&pfob->mcc_context_lock, irq);
+}
+
+int
+be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
+               mcc_async_event_callback cb, void *cb_context)
+{
+       /* Lock against anyone trying to change the callback/context pointers
+        * while being used. */
+       spin_lock_irqsave(&mcc_object->parent_function->cq_lock,
+               mcc_object->parent_function->cq_irq);
+
+       /* Assign the async callback. */
+       mcc_object->async_context = cb_context;
+       mcc_object->async_cb = cb;
+
+       spin_unlock_irqrestore(&mcc_object->parent_function->cq_lock,
+                                       mcc_object->parent_function->cq_irq);
+
+       return BE_SUCCESS;
+}
+
+#define MPU_EP_CONTROL 0
+#define MPU_EP_SEMAPHORE 0xac
+
+/*
+ *-------------------------------------------------------------------
+ * Function: be_wait_for_POST_complete
+ *   Waits until the BladeEngine POST completes (either in error or success).
+ * pfob -
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *-------------------------------------------------------------------
+ */
+static int be_wait_for_POST_complete(struct be_function_object *pfob)
+{
+       struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
+       int s;
+       u32 post_error, post_stage;
+
+       const u32 us_per_loop = 1000;   /* 1000us */
+       const u32 print_frequency_loops = 1000000 / us_per_loop;
+       const u32 max_loops = 60 * print_frequency_loops;
+       u32 loops = 0;
+
+       /*
+        * Wait for the ARM firmware to indicate it is done, or for a fatal
+        * error.  Note: POST can take some time to complete depending on
+        * configuration settings (consider the ARM attempting to acquire
+        * an IP address over DHCP).
+        *
+        */
+       do {
+               status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
+               post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
+                                               error, &status);
+               post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
+                                               stage, &status);
+               if (0 == (loops % print_frequency_loops)) {
+                       /* Print current status */
+                       TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)",
+                               status.dw[0], post_stage);
+               }
+               udelay(us_per_loop);
+       } while ((post_error != 1) &&
+                (post_stage != POST_STAGE_ARMFW_READY) &&
+                (++loops < max_loops));
+
+       if (post_error == 1) {
+               TRACE(DL_ERR, "POST error! Status = 0x%x (stage = 0x%x)",
+                     status.dw[0], post_stage);
+               s = BE_NOT_OK;
+       } else if (post_stage != POST_STAGE_ARMFW_READY) {
+               TRACE(DL_ERR, "POST time-out! Status = 0x%x (stage = 0x%x)",
+                     status.dw[0], post_stage);
+               s = BE_NOT_OK;
+       } else {
+               s = BE_SUCCESS;
+       }
+       return s;
+}
+
+/*
+ *-------------------------------------------------------------------
+ * Function: be_kickoff_and_wait_for_POST
+ *   Interacts with the BladeEngine management processor to initiate POST, and
+ *   subsequently waits until POST completes (either in error or success).
+ *   The caller must acquire the reset semaphore before initiating POST
+ *   to prevent multiple drivers interacting with the management processor.
+ *   Once POST is complete the caller must release the reset semaphore.
+ *   Callers who only want to wait for POST complete may call
+ *   be_wait_for_POST_complete.
+ * pfob -
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *-------------------------------------------------------------------
+ */
+static int
+be_kickoff_and_wait_for_POST(struct be_function_object *pfob)
+{
+       struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
+       int s;
+
+       const u32 us_per_loop = 1000;   /* 1000us */
+       const u32 print_frequency_loops = 1000000 / us_per_loop;
+       const u32 max_loops = 5 * print_frequency_loops;
+       u32 loops = 0;
+       u32 post_error, post_stage;
+
+       /* Wait until ARM fw signals awaiting host ready, or a fatal error. */
+       TRACE(DL_INFO, "Wait for BladeEngine ready to POST");
+       do {
+               status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
+               post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
+                                               error, &status);
+               post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT,
+                                               stage, &status);
+               if (0 == (loops % print_frequency_loops)) {
+                       /* Print current status */
+                       TRACE(DL_INFO, "POST status = 0x%x (stage = 0x%x)",
+                             status.dw[0], post_stage);
+               }
+               udelay(us_per_loop);
+       } while ((post_error != 1) &&
+                (post_stage < POST_STAGE_AWAITING_HOST_RDY) &&
+                (++loops < max_loops));
+
+       if (post_error == 1) {
+               TRACE(DL_ERR, "Pre-POST error! Status = 0x%x (stage = 0x%x)",
+                     status.dw[0], post_stage);
+               s = BE_NOT_OK;
+       } else if (post_stage == POST_STAGE_AWAITING_HOST_RDY) {
+               iowrite32(POST_STAGE_HOST_RDY, pfob->csr_va + MPU_EP_SEMAPHORE);
+
+               /* Wait for POST to complete */
+               s = be_wait_for_POST_complete(pfob);
+       } else {
+               /*
+                * Either a timeout waiting for host ready signal or POST has
+                * moved ahead without requiring a host ready signal.
+                * Might as well give POST a chance to complete
+                * (or timeout again).
+                */
+               s = be_wait_for_POST_complete(pfob);
+       }
+       return s;
+}
+
+/*
+ *-------------------------------------------------------------------
+ * Function: be_pci_soft_reset
+ *   This function is called to issue a BladeEngine soft reset.
+ *   Callers should acquire the soft reset semaphore before calling this
+ *   function. Additionally, callers should ensure they cannot be preempted
+ *   while the routine executes. Upon completion of this routine, callers
+ *   should release the reset semaphore. This routine implicitly waits
+ *   for BladeEngine POST to complete.
+ * pfob -
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *-------------------------------------------------------------------
+ */
+int be_pci_soft_reset(struct be_function_object *pfob)
+{
+       struct PCICFG_SOFT_RESET_CSR_AMAP soft_reset;
+       struct PCICFG_ONLINE0_CSR_AMAP pciOnline0;
+       struct PCICFG_ONLINE1_CSR_AMAP pciOnline1;
+       struct EP_CONTROL_CSR_AMAP epControlCsr;
+       int status = BE_SUCCESS;
+       u32 i, soft_reset_bit;
+
+       TRACE(DL_NOTE, "PCI reset...");
+
+       /* Issue soft reset #1 to get BladeEngine into a known state. */
+       soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
+       AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1);
+       PCICFG0_WRITE(pfob, host_timer_int_ctrl, soft_reset.dw[0]);
+       /*
+        * wait until the soft reset is deasserted - hardware
+        * deasserts after some time.
+        */
+       i = 0;
+       do {
+               udelay(50);
+               soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
+               soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR,
+                                       softreset, soft_reset.dw);
+       } while (soft_reset_bit && (i++ < 1024));
+       if (soft_reset_bit != 0) {
+               TRACE(DL_ERR, "Soft-reset #1 did not deassert as expected.");
+               status = BE_NOT_OK;
+               goto error;
+       }
+       /* Mask all unrecoverable-error (UE) status bits. */
+       PCICFG0_WRITE(pfob, ue_status_low_mask, 0xFFFFFFFF);
+       PCICFG0_WRITE(pfob, ue_status_hi_mask, 0xFFFFFFFF);
+       /*
+        * Set everything offline except MPU IRAM (it is offline with
+        * the soft-reset, but soft-reset does not reset the PCICFG registers!)
+        */
+       pci_online0.dw[0] = 0;
+       pci_online1.dw[0] = 0;
+       AMAP_SET_BITS_PTR(PCICFG_ONLINE1_CSR, mpu_iram_online,
+                               pci_online1.dw, 1);
+       PCICFG0_WRITE(pfob, online0, pci_online0.dw[0]);
+       PCICFG0_WRITE(pfob, online1, pci_online1.dw[0]);
+
+       mdelay(20);
+
+       /* Issue soft reset #2. */
+       AMAP_SET_BITS_PTR(PCICFG_SOFT_RESET_CSR, softreset, soft_reset.dw, 1);
+       PCICFG0_WRITE(pfob, soft_reset, soft_reset.dw[0]);
+       /*
+        * Wait until the soft reset deasserts - the hardware clears it
+        * after some time (we poll for up to ~50 ms here).
+        */
+       i = 0;
+       do {
+               udelay(50);
+               soft_reset.dw[0] = PCICFG0_READ(pfob, soft_reset);
+               soft_reset_bit = AMAP_GET_BITS_PTR(PCICFG_SOFT_RESET_CSR,
+                                       softreset, soft_reset.dw);
+       } while (soft_reset_bit && (i++ < 1024));
+       if (soft_reset_bit != 0) {
+               TRACE(DL_ERR, "Soft-reset #1 did not deassert as expected.");
+               status = BE_NOT_OK;
+               goto error;
+       }
+
+       mdelay(20);
+
+       /* Take the MPU out of reset. */
+       ep_control_csr.dw[0] = ioread32(pfob->csr_va + MPU_EP_CONTROL);
+       AMAP_SET_BITS_PTR(EP_CONTROL_CSR, CPU_reset, &ep_control_csr, 0);
+       iowrite32((u32)ep_control_csr.dw[0], pfob->csr_va + MPU_EP_CONTROL);
+
+       /* Kick off BE POST and wait for it to complete. */
+       status = be_kickoff_and_wait_for_POST(pfob);
+
+error:
+       return status;
+}
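+
+/*
+ * Usage sketch (hypothetical; not called anywhere in this file): per the
+ * header comment on be_pci_soft_reset(), callers must hold the soft-reset
+ * semaphore and be non-preemptible, e.g.:
+ *
+ *     spin_lock_irqsave(&reset_lock, flags);
+ *     status = be_pci_soft_reset(pfob);
+ *     spin_unlock_irqrestore(&reset_lock, flags);
+ *
+ * reset_lock here is a stand-in for whatever lock the caller uses to
+ * serialize resets.
+ */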
+
+/*
+ *-------------------------------------------------------------------
+ * Function: be_pci_reset_required
+ *   This private function is called to detect if a host entity is
+ *   required to issue a PCI soft reset and subsequently drive
+ *   BladeEngine POST. Scenarios where this is required:
+ *   1) BIOS-less configuration
+ *   2) Hot-swap/plug/power-on
+ * pfob -
+ * return   true if a reset is required, false otherwise
+ *-------------------------------------------------------------------
+ */
+static bool be_pci_reset_required(struct be_function_object *pfob)
+{
+       struct MGMT_HBA_POST_STATUS_STRUCT_AMAP status;
+       bool do_reset = false;
+       u32 post_error, post_stage;
+
+       /* Read the POST status register. */
+       status.dw[0] = ioread32(pfob->csr_va + MPU_EP_SEMAPHORE);
+       post_error = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, error,
+                                                               &status);
+       post_stage = AMAP_GET_BITS_PTR(MGMT_HBA_POST_STATUS_STRUCT, stage,
+                                                               &status);
+       if (post_stage <= POST_STAGE_AWAITING_HOST_RDY) {
+               /*
+                * If BladeEngine is waiting for host ready indication,
+                * we want to do a PCI reset.
+                */
+               do_reset = true;
+       }
+
+       return do_reset;
+}
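+
+/*
+ * Note: the EP semaphore register packs an error bit and a POST stage.
+ * Any stage at or before POST_STAGE_AWAITING_HOST_RDY means firmware has
+ * not been driven past its host handshake (e.g. BIOS-less boot or
+ * hot-plug), so the host must reset the chip and drive POST itself.
+ */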
+
+/*
+ *-------------------------------------------------------------------
+ * Function: be_drive_POST
+ *   This function is called to drive BladeEngine POST. Callers should
+ *   ensure they cannot be pre-empted while this routine executes.
+ * pfob -
+ * return status   - BE_SUCCESS (0) on success. Negative error code on failure.
+ *-------------------------------------------------------------------
+ */
+int be_drive_POST(struct be_function_object *pfob)
+{
+       int status;
+
+       if (be_pci_reset_required(pfob)) {
+               /* PCI reset is needed (implicitly starts and waits for POST) */
+               status = be_pci_soft_reset(pfob);
+       } else {
+               /* No PCI reset is needed, start POST */
+               status = be_kickoff_and_wait_for_POST(pfob);
+       }
+
+       return status;
+}
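+
+/*
+ * Usage sketch (hypothetical caller, not part of this file): probe code
+ * would drive POST once, before any other hardware setup, e.g.:
+ *
+ *     status = be_drive_POST(pfob);
+ *     if (status != BE_SUCCESS)
+ *             return -EIO;
+ */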