mlx4: Move the Mellanox driver
author		Jeff Kirsher <jeffrey.t.kirsher@intel.com>
		Fri, 13 May 2011 08:32:22 +0000 (01:32 -0700)
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
		Thu, 11 Aug 2011 09:41:35 +0000 (02:41 -0700)
Move the Mellanox driver into drivers/net/ethernet/mellanox/ and
make the necessary Kconfig and Makefile changes.

CC: Roland Dreier <roland@kernel.org>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
73 files changed:
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/mellanox/Kconfig [new file with mode: 0644]
drivers/net/ethernet/mellanox/Makefile [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/Kconfig [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/Makefile [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/alloc.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/catas.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/cmd.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/cq.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_cq.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_main.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_netdev.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_port.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_port.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_resources.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_rx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_selftest.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/en_tx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/eq.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/fw.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/fw.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/icm.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/icm.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/intf.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/main.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/mcg.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/mlx4.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/mr.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/pd.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/port.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/profile.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/qp.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/reset.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/sense.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx4/srq.c [new file with mode: 0644]
drivers/net/mlx4/Makefile [deleted file]
drivers/net/mlx4/alloc.c [deleted file]
drivers/net/mlx4/catas.c [deleted file]
drivers/net/mlx4/cmd.c [deleted file]
drivers/net/mlx4/cq.c [deleted file]
drivers/net/mlx4/en_cq.c [deleted file]
drivers/net/mlx4/en_ethtool.c [deleted file]
drivers/net/mlx4/en_main.c [deleted file]
drivers/net/mlx4/en_netdev.c [deleted file]
drivers/net/mlx4/en_port.c [deleted file]
drivers/net/mlx4/en_port.h [deleted file]
drivers/net/mlx4/en_resources.c [deleted file]
drivers/net/mlx4/en_rx.c [deleted file]
drivers/net/mlx4/en_selftest.c [deleted file]
drivers/net/mlx4/en_tx.c [deleted file]
drivers/net/mlx4/eq.c [deleted file]
drivers/net/mlx4/fw.c [deleted file]
drivers/net/mlx4/fw.h [deleted file]
drivers/net/mlx4/icm.c [deleted file]
drivers/net/mlx4/icm.h [deleted file]
drivers/net/mlx4/intf.c [deleted file]
drivers/net/mlx4/main.c [deleted file]
drivers/net/mlx4/mcg.c [deleted file]
drivers/net/mlx4/mlx4.h [deleted file]
drivers/net/mlx4/mlx4_en.h [deleted file]
drivers/net/mlx4/mr.c [deleted file]
drivers/net/mlx4/pd.c [deleted file]
drivers/net/mlx4/port.c [deleted file]
drivers/net/mlx4/profile.c [deleted file]
drivers/net/mlx4/qp.c [deleted file]
drivers/net/mlx4/reset.c [deleted file]
drivers/net/mlx4/sense.c [deleted file]
drivers/net/mlx4/srq.c [deleted file]
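
The move is recorded as paired deletions and additions, so per-file history
is easiest to follow with git's rename detection enabled, e.g. (commit id
elided):

    git show -M --summary <commit>
    git log --follow drivers/net/ethernet/mellanox/mlx4/main.c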

index 2701ee9190a18a665314b820e3c8e5c3b81160c1..56c033a32bdfcad9e9bcc89bab6a9afbb0d3b308 100644 (file)
@@ -1554,30 +1554,6 @@ config PASEMI_MAC
          This driver supports the on-chip 1/10Gbit Ethernet controller on
          PA Semi's PWRficient line of chips.
 
-config MLX4_EN
-       tristate "Mellanox Technologies 10Gbit Ethernet support"
-       depends on PCI && INET
-       select MLX4_CORE
-       select INET_LRO
-       help
-         This driver supports Mellanox Technologies ConnectX Ethernet
-         devices.
-
-config MLX4_CORE
-       tristate
-       depends on PCI
-       default n
-
-config MLX4_DEBUG
-       bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
-       depends on MLX4_CORE
-       default y
-       ---help---
-         This option causes debugging code to be compiled into the
-         mlx4_core driver.  The output can be turned on via the
-         debug_level module parameter (which can also be set after
-         the driver is loaded through sysfs).
-
 config TEHUTI
        tristate "Tehuti Networks 10G Ethernet"
        depends on PCI
index c8a176f585a3688ba68b65c91994d962f24edfe4..73e357e1d16e454b4e3a75cd54cbfd7244dddfd2 100644 (file)
@@ -157,7 +157,6 @@ obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
 pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
-obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_ENC28J60) += enc28j60.o
 obj-$(CONFIG_ETHOC) += ethoc.o
 obj-$(CONFIG_GRETH) += greth.o
index 225918df224d13a708764eb1db5ba8bbc8e39027..8bbddc94ef2ec7ea9c8ea731dcca59294859ac31 100644 (file)
@@ -20,6 +20,7 @@ source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/emulex/Kconfig"
 source "drivers/net/ethernet/intel/Kconfig"
 source "drivers/net/ethernet/i825xx/Kconfig"
+source "drivers/net/ethernet/mellanox/Kconfig"
 source "drivers/net/ethernet/qlogic/Kconfig"
 source "drivers/net/ethernet/racal/Kconfig"
 source "drivers/net/ethernet/sfc/Kconfig"
index 734f7c9d6649eadde0bc76a8dd8b637cdbffa19d..e5f2954f7c74ab57422696b67848da4a05dd6c2c 100644 (file)
@@ -11,6 +11,7 @@ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
 obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
 obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
+obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
 obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
 obj-$(CONFIG_NET_VENDOR_RACAL) += racal/
 obj-$(CONFIG_SFC) += sfc/
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
new file mode 100644 (file)
index 0000000..e069491
--- /dev/null
@@ -0,0 +1,22 @@
+#
+# Mellanox driver configuration
+#
+
+config NET_VENDOR_MELLANOX
+       bool "Mellanox devices"
+       depends on PCI && INET
+       ---help---
+         If you have a network (Ethernet) card belonging to this class, say Y
+         and read the Ethernet-HOWTO, available from
+         <http://www.tldp.org/docs.html#howto>.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about Mellanox cards. If you say Y, you will be asked
+         for your specific card in the following questions.
+
+if NET_VENDOR_MELLANOX
+
+source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
+
+endif # NET_VENDOR_MELLANOX
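
NET_VENDOR_MELLANOX is only a visibility gate: a bool that wraps the
per-driver questions, while the drivers themselves stay tristate in
mlx4/Kconfig. As a sketch, a .config fragment building the Ethernet driver
as a module after this change would look like (MLX4_CORE is pulled in
automatically via select):

    CONFIG_NET_VENDOR_MELLANOX=y
    CONFIG_MLX4_CORE=m
    CONFIG_MLX4_EN=m
    CONFIG_MLX4_DEBUG=y
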
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
new file mode 100644 (file)
index 0000000..37afb96
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the Mellanox device drivers.
+#
+
+obj-$(CONFIG_MLX4_CORE) += mlx4/
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
new file mode 100644 (file)
index 0000000..1bb9353
--- /dev/null
@@ -0,0 +1,27 @@
+#
+# Mellanox driver configuration
+#
+
+config MLX4_EN
+       tristate "Mellanox Technologies 10Gbit Ethernet support"
+       depends on PCI && INET
+       select MLX4_CORE
+       select INET_LRO
+       ---help---
+         This driver supports Mellanox Technologies ConnectX Ethernet
+         devices.
+
+config MLX4_CORE
+       tristate
+       depends on PCI
+       default n
+
+config MLX4_DEBUG
+       bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
+       depends on MLX4_CORE
+       default y
+       ---help---
+         This option causes debugging code to be compiled into the
+         mlx4_core driver.  The output can be turned on via the
+         debug_level module parameter (which can also be set after
+         the driver is loaded through sysfs).
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
new file mode 100644 (file)
index 0000000..d1aa45a
--- /dev/null
@@ -0,0 +1,9 @@
+obj-$(CONFIG_MLX4_CORE)                += mlx4_core.o
+
+mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
+               mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
+
+obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
+
+mlx4_en-y :=   en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
+               en_resources.o en_netdev.o en_selftest.o
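
The Makefile relies on kbuild's composite-object syntax: obj-$(CONFIG_...)
chooses built-in (y) versus module (m), and the <name>-y list gives the
per-file objects linked into mlx4_core.o and mlx4_en.o. The general
pattern, with hypothetical names:

    obj-$(CONFIG_FOO) += foo.o    # foo.ko when CONFIG_FOO=m
    foo-y := bar.o baz.o          # objects linked into foo.o
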
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
new file mode 100644 (file)
index 0000000..116cae3
--- /dev/null
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/bitmap.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+
+#include "mlx4.h"
+
+u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
+{
+       u32 obj;
+
+       spin_lock(&bitmap->lock);
+
+       obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
+       if (obj >= bitmap->max) {
+               bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+                               & bitmap->mask;
+               obj = find_first_zero_bit(bitmap->table, bitmap->max);
+       }
+
+       if (obj < bitmap->max) {
+               set_bit(obj, bitmap->table);
+               bitmap->last = (obj + 1);
+               if (bitmap->last == bitmap->max)
+                       bitmap->last = 0;
+               obj |= bitmap->top;
+       } else
+               obj = -1;
+
+       if (obj != -1)
+               --bitmap->avail;
+
+       spin_unlock(&bitmap->lock);
+
+       return obj;
+}
+
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
+{
+       mlx4_bitmap_free_range(bitmap, obj, 1);
+}
+
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
+{
+       u32 obj;
+
+       if (likely(cnt == 1 && align == 1))
+               return mlx4_bitmap_alloc(bitmap);
+
+       spin_lock(&bitmap->lock);
+
+       obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+                               bitmap->last, cnt, align - 1);
+       if (obj >= bitmap->max) {
+               bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+                               & bitmap->mask;
+               obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
+                                               0, cnt, align - 1);
+       }
+
+       if (obj < bitmap->max) {
+               bitmap_set(bitmap->table, obj, cnt);
+               if (obj == bitmap->last) {
+                       bitmap->last = (obj + cnt);
+                       if (bitmap->last >= bitmap->max)
+                               bitmap->last = 0;
+               }
+               obj |= bitmap->top;
+       } else
+               obj = -1;
+
+       if (obj != -1)
+               bitmap->avail -= cnt;
+
+       spin_unlock(&bitmap->lock);
+
+       return obj;
+}
+
+u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
+{
+       return bitmap->avail;
+}
+
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
+{
+       obj &= bitmap->max + bitmap->reserved_top - 1;
+
+       spin_lock(&bitmap->lock);
+       bitmap_clear(bitmap->table, obj, cnt);
+       bitmap->last = min(bitmap->last, obj);
+       bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+                       & bitmap->mask;
+       bitmap->avail += cnt;
+       spin_unlock(&bitmap->lock);
+}
+
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+                    u32 reserved_bot, u32 reserved_top)
+{
+       /* num must be a power of 2 */
+       if (num != roundup_pow_of_two(num))
+               return -EINVAL;
+
+       bitmap->last = 0;
+       bitmap->top  = 0;
+       bitmap->max  = num - reserved_top;
+       bitmap->mask = mask;
+       bitmap->reserved_top = reserved_top;
+       bitmap->avail = num - reserved_top - reserved_bot;
+       spin_lock_init(&bitmap->lock);
+       bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
+                               sizeof (long), GFP_KERNEL);
+       if (!bitmap->table)
+               return -ENOMEM;
+
+       bitmap_set(bitmap->table, 0, reserved_bot);
+
+       return 0;
+}
+
+void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
+{
+       kfree(bitmap->table);
+}
+
+/*
+ * Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0.  If the
+ * requested size is > max_direct, we split the allocation into
+ * multiple pages, so we don't require too much contiguous memory.
+ */
+
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+                  struct mlx4_buf *buf)
+{
+       dma_addr_t t;
+
+       if (size <= max_direct) {
+               buf->nbufs        = 1;
+               buf->npages       = 1;
+               buf->page_shift   = get_order(size) + PAGE_SHIFT;
+               buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
+                                                      size, &t, GFP_KERNEL);
+               if (!buf->direct.buf)
+                       return -ENOMEM;
+
+               buf->direct.map = t;
+
+               while (t & ((1 << buf->page_shift) - 1)) {
+                       --buf->page_shift;
+                       buf->npages *= 2;
+               }
+
+               memset(buf->direct.buf, 0, size);
+       } else {
+               int i;
+
+               buf->direct.buf  = NULL;
+               buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+               buf->npages      = buf->nbufs;
+               buf->page_shift  = PAGE_SHIFT;
+               buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
+                                          GFP_KERNEL);
+               if (!buf->page_list)
+                       return -ENOMEM;
+
+               for (i = 0; i < buf->nbufs; ++i) {
+                       buf->page_list[i].buf =
+                               dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                                  &t, GFP_KERNEL);
+                       if (!buf->page_list[i].buf)
+                               goto err_free;
+
+                       buf->page_list[i].map = t;
+
+                       memset(buf->page_list[i].buf, 0, PAGE_SIZE);
+               }
+
+               if (BITS_PER_LONG == 64) {
+                       struct page **pages;
+                       pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+                       if (!pages)
+                               goto err_free;
+                       for (i = 0; i < buf->nbufs; ++i)
+                               pages[i] = virt_to_page(buf->page_list[i].buf);
+                       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+                       kfree(pages);
+                       if (!buf->direct.buf)
+                               goto err_free;
+               }
+       }
+
+       return 0;
+
+err_free:
+       mlx4_buf_free(dev, size, buf);
+
+       return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
+
+void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
+{
+       int i;
+
+       if (buf->nbufs == 1)
+               dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+                                 buf->direct.map);
+       else {
+               if (BITS_PER_LONG == 64 && buf->direct.buf)
+                       vunmap(buf->direct.buf);
+
+               for (i = 0; i < buf->nbufs; ++i)
+                       if (buf->page_list[i].buf)
+                               dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                                 buf->page_list[i].buf,
+                                                 buf->page_list[i].map);
+               kfree(buf->page_list);
+       }
+}
+EXPORT_SYMBOL_GPL(mlx4_buf_free);
+
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
+{
+       struct mlx4_db_pgdir *pgdir;
+
+       pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
+       if (!pgdir)
+               return NULL;
+
+       bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
+       pgdir->bits[0] = pgdir->order0;
+       pgdir->bits[1] = pgdir->order1;
+       pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
+                                           &pgdir->db_dma, GFP_KERNEL);
+       if (!pgdir->db_page) {
+               kfree(pgdir);
+               return NULL;
+       }
+
+       return pgdir;
+}
+
+static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
+                                   struct mlx4_db *db, int order)
+{
+       int o;
+       int i;
+
+       for (o = order; o <= 1; ++o) {
+               i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
+               if (i < MLX4_DB_PER_PAGE >> o)
+                       goto found;
+       }
+
+       return -ENOMEM;
+
+found:
+       clear_bit(i, pgdir->bits[o]);
+
+       i <<= o;
+
+       if (o > order)
+               set_bit(i ^ 1, pgdir->bits[order]);
+
+       db->u.pgdir = pgdir;
+       db->index   = i;
+       db->db      = pgdir->db_page + db->index;
+       db->dma     = pgdir->db_dma  + db->index * 4;
+       db->order   = order;
+
+       return 0;
+}
+
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_db_pgdir *pgdir;
+       int ret = 0;
+
+       mutex_lock(&priv->pgdir_mutex);
+
+       list_for_each_entry(pgdir, &priv->pgdir_list, list)
+               if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
+                       goto out;
+
+       pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
+       if (!pgdir) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       list_add(&pgdir->list, &priv->pgdir_list);
+
+       /* This should never fail -- we just allocated an empty page: */
+       WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
+
+out:
+       mutex_unlock(&priv->pgdir_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_db_alloc);
+
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int o;
+       int i;
+
+       mutex_lock(&priv->pgdir_mutex);
+
+       o = db->order;
+       i = db->index;
+
+       if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
+               clear_bit(i ^ 1, db->u.pgdir->order0);
+               ++o;
+       }
+       i >>= o;
+       set_bit(i, db->u.pgdir->bits[o]);
+
+       if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
+               dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                 db->u.pgdir->db_page, db->u.pgdir->db_dma);
+               list_del(&db->u.pgdir->list);
+               kfree(db->u.pgdir);
+       }
+
+       mutex_unlock(&priv->pgdir_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_db_free);
+
+int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+                      int size, int max_direct)
+{
+       int err;
+
+       err = mlx4_db_alloc(dev, &wqres->db, 1);
+       if (err)
+               return err;
+
+       *wqres->db.db = 0;
+
+       err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
+       if (err)
+               goto err_db;
+
+       err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
+                           &wqres->mtt);
+       if (err)
+               goto err_buf;
+
+       err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
+       if (err)
+               goto err_mtt;
+
+       return 0;
+
+err_mtt:
+       mlx4_mtt_cleanup(dev, &wqres->mtt);
+err_buf:
+       mlx4_buf_free(dev, size, &wqres->buf);
+err_db:
+       mlx4_db_free(dev, &wqres->db);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
+
+void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+                      int size)
+{
+       mlx4_mtt_cleanup(dev, &wqres->mtt);
+       mlx4_buf_free(dev, size, &wqres->buf);
+       mlx4_db_free(dev, &wqres->db);
+}
+EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
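
For reference, a minimal hypothetical in-driver caller of the bitmap
allocator above follows the same init/alloc/free/cleanup pattern as the
resource tables (compare mlx4_init_cq_table() in cq.c below, which passes
num_cqs, num_cqs - 1, reserved_cqs, 0):

    static int example_bitmap_use(void)
    {
            struct mlx4_bitmap bitmap;
            u32 obj;
            int err;

            /* num must be a power of two; reserve the bottom 8 entries */
            err = mlx4_bitmap_init(&bitmap, 128, 127, 8, 0);
            if (err)
                    return err;

            obj = mlx4_bitmap_alloc(&bitmap); /* (u32) -1 when exhausted */
            if (obj != (u32) -1)
                    mlx4_bitmap_free(&bitmap, obj);

            mlx4_bitmap_cleanup(&bitmap);
            return 0;
    }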
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
new file mode 100644 (file)
index 0000000..32f9471
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/workqueue.h>
+
+#include "mlx4.h"
+
+enum {
+       MLX4_CATAS_POLL_INTERVAL        = 5 * HZ,
+};
+
+static DEFINE_SPINLOCK(catas_lock);
+
+static LIST_HEAD(catas_list);
+static struct work_struct catas_work;
+
+static int internal_err_reset = 1;
+module_param(internal_err_reset, int, 0644);
+MODULE_PARM_DESC(internal_err_reset,
+                "Reset device on internal errors if non-zero (default 1)");
+
+static void dump_err_buf(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       int i;
+
+       mlx4_err(dev, "Internal error detected:\n");
+       for (i = 0; i < priv->fw.catas_size; ++i)
+               mlx4_err(dev, "  buf[%02x]: %08x\n",
+                        i, swab32(readl(priv->catas_err.map + i)));
+}
+
+static void poll_catas(unsigned long dev_ptr)
+{
+       struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       if (readl(priv->catas_err.map)) {
+               dump_err_buf(dev);
+
+               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
+
+               if (internal_err_reset) {
+                       spin_lock(&catas_lock);
+                       list_add(&priv->catas_err.list, &catas_list);
+                       spin_unlock(&catas_lock);
+
+                       queue_work(mlx4_wq, &catas_work);
+               }
+       } else
+               mod_timer(&priv->catas_err.timer,
+                         round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
+}
+
+static void catas_reset(struct work_struct *work)
+{
+       struct mlx4_priv *priv, *tmppriv;
+       struct mlx4_dev *dev;
+
+       LIST_HEAD(tlist);
+       int ret;
+
+       spin_lock_irq(&catas_lock);
+       list_splice_init(&catas_list, &tlist);
+       spin_unlock_irq(&catas_lock);
+
+       list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
+               struct pci_dev *pdev = priv->dev.pdev;
+
+               ret = mlx4_restart_one(priv->dev.pdev);
+               /* 'priv' now is not valid */
+               if (ret)
+                       pr_err("mlx4 %s: Reset failed (%d)\n",
+                              pci_name(pdev), ret);
+               else {
+                       dev  = pci_get_drvdata(pdev);
+                       mlx4_dbg(dev, "Reset succeeded\n");
+               }
+       }
+}
+
+void mlx4_start_catas_poll(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       phys_addr_t addr;
+
+       INIT_LIST_HEAD(&priv->catas_err.list);
+       init_timer(&priv->catas_err.timer);
+       priv->catas_err.map = NULL;
+
+       addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
+               priv->fw.catas_offset;
+
+       priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
+       if (!priv->catas_err.map) {
+               mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
+                         (unsigned long long) addr);
+               return;
+       }
+
+       priv->catas_err.timer.data     = (unsigned long) dev;
+       priv->catas_err.timer.function = poll_catas;
+       priv->catas_err.timer.expires  =
+               round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
+       add_timer(&priv->catas_err.timer);
+}
+
+void mlx4_stop_catas_poll(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       del_timer_sync(&priv->catas_err.timer);
+
+       if (priv->catas_err.map)
+               iounmap(priv->catas_err.map);
+
+       spin_lock_irq(&catas_lock);
+       list_del(&priv->catas_err.list);
+       spin_unlock_irq(&catas_lock);
+}
+
+void __init mlx4_catas_init(void)
+{
+       INIT_WORK(&catas_work, catas_reset);
+}
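
Since internal_err_reset is registered with mode 0644, the
reset-on-internal-error behaviour can be toggled both at load time and at
runtime through sysfs, e.g.:

    modprobe mlx4_core internal_err_reset=0
    echo 1 > /sys/module/mlx4_core/parameters/internal_err_reset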
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
new file mode 100644 (file)
index 0000000..23cee7b
--- /dev/null
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include <asm/io.h>
+
+#include "mlx4.h"
+
+#define CMD_POLL_TOKEN 0xffff
+
+enum {
+       /* command completed successfully: */
+       CMD_STAT_OK             = 0x00,
+       /* Internal error (such as a bus error) occurred while processing command: */
+       CMD_STAT_INTERNAL_ERR   = 0x01,
+       /* Operation/command not supported or opcode modifier not supported: */
+       CMD_STAT_BAD_OP         = 0x02,
+       /* Parameter not supported or parameter out of range: */
+       CMD_STAT_BAD_PARAM      = 0x03,
+       /* System not enabled or bad system state: */
+       CMD_STAT_BAD_SYS_STATE  = 0x04,
+       /* Attempt to access reserved or unallocated resource: */
+       CMD_STAT_BAD_RESOURCE   = 0x05,
+       /* Requested resource is currently executing a command, or is otherwise busy: */
+       CMD_STAT_RESOURCE_BUSY  = 0x06,
+       /* Required capability exceeds device limits: */
+       CMD_STAT_EXCEED_LIM     = 0x08,
+       /* Resource is not in the appropriate state or ownership: */
+       CMD_STAT_BAD_RES_STATE  = 0x09,
+       /* Index out of range: */
+       CMD_STAT_BAD_INDEX      = 0x0a,
+       /* FW image corrupted: */
+       CMD_STAT_BAD_NVMEM      = 0x0b,
+       /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
+       CMD_STAT_ICM_ERROR      = 0x0c,
+       /* Attempt to modify a QP/EE which is not in the presumed state: */
+       CMD_STAT_BAD_QP_STATE   = 0x10,
+       /* Bad segment parameters (Address/Size): */
+       CMD_STAT_BAD_SEG_PARAM  = 0x20,
+       /* Memory Region has Memory Windows bound to it: */
+       CMD_STAT_REG_BOUND      = 0x21,
+       /* HCA local attached memory not present: */
+       CMD_STAT_LAM_NOT_PRE    = 0x22,
+       /* Bad management packet (silently discarded): */
+       CMD_STAT_BAD_PKT        = 0x30,
+       /* More outstanding CQEs in CQ than new CQ size: */
+       CMD_STAT_BAD_SIZE       = 0x40,
+       /* Multi Function device support required: */
+       CMD_STAT_MULTI_FUNC_REQ = 0x50,
+};
+
+enum {
+       HCR_IN_PARAM_OFFSET     = 0x00,
+       HCR_IN_MODIFIER_OFFSET  = 0x08,
+       HCR_OUT_PARAM_OFFSET    = 0x0c,
+       HCR_TOKEN_OFFSET        = 0x14,
+       HCR_STATUS_OFFSET       = 0x18,
+
+       HCR_OPMOD_SHIFT         = 12,
+       HCR_T_BIT               = 21,
+       HCR_E_BIT               = 22,
+       HCR_GO_BIT              = 23
+};
+
+enum {
+       GO_BIT_TIMEOUT_MSECS    = 10000
+};
+
+struct mlx4_cmd_context {
+       struct completion       done;
+       int                     result;
+       int                     next;
+       u64                     out_param;
+       u16                     token;
+};
+
+static int mlx4_status_to_errno(u8 status)
+{
+       static const int trans_table[] = {
+               [CMD_STAT_INTERNAL_ERR]   = -EIO,
+               [CMD_STAT_BAD_OP]         = -EPERM,
+               [CMD_STAT_BAD_PARAM]      = -EINVAL,
+               [CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
+               [CMD_STAT_BAD_RESOURCE]   = -EBADF,
+               [CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
+               [CMD_STAT_EXCEED_LIM]     = -ENOMEM,
+               [CMD_STAT_BAD_RES_STATE]  = -EBADF,
+               [CMD_STAT_BAD_INDEX]      = -EBADF,
+               [CMD_STAT_BAD_NVMEM]      = -EFAULT,
+               [CMD_STAT_ICM_ERROR]      = -ENFILE,
+               [CMD_STAT_BAD_QP_STATE]   = -EINVAL,
+               [CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
+               [CMD_STAT_REG_BOUND]      = -EBUSY,
+               [CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
+               [CMD_STAT_BAD_PKT]        = -EINVAL,
+               [CMD_STAT_BAD_SIZE]       = -ENOMEM,
+               [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
+       };
+
+       if (status >= ARRAY_SIZE(trans_table) ||
+           (status != CMD_STAT_OK && trans_table[status] == 0))
+               return -EIO;
+
+       return trans_table[status];
+}
+
+static int cmd_pending(struct mlx4_dev *dev)
+{
+       u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
+
+       return (status & swab32(1 << HCR_GO_BIT)) ||
+               (mlx4_priv(dev)->cmd.toggle ==
+                !!(status & swab32(1 << HCR_T_BIT)));
+}
+
+static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
+                        u32 in_modifier, u8 op_modifier, u16 op, u16 token,
+                        int event)
+{
+       struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+       u32 __iomem *hcr = cmd->hcr;
+       int ret = -EAGAIN;
+       unsigned long end;
+
+       mutex_lock(&cmd->hcr_mutex);
+
+       end = jiffies;
+       if (event)
+               end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
+
+       while (cmd_pending(dev)) {
+               if (time_after_eq(jiffies, end))
+                       goto out;
+               cond_resched();
+       }
+
+       /*
+        * We use writel (instead of something like memcpy_toio)
+        * because writes of less than 32 bits to the HCR don't work
+        * (and some architectures such as ia64 implement memcpy_toio
+        * in terms of writeb).
+        */
+       __raw_writel((__force u32) cpu_to_be32(in_param >> 32),           hcr + 0);
+       __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
+       __raw_writel((__force u32) cpu_to_be32(in_modifier),              hcr + 2);
+       __raw_writel((__force u32) cpu_to_be32(out_param >> 32),          hcr + 3);
+       __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
+       __raw_writel((__force u32) cpu_to_be32(token << 16),              hcr + 5);
+
+       /* __raw_writel may not order writes. */
+       wmb();
+
+       __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
+                                              (cmd->toggle << HCR_T_BIT)       |
+                                              (event ? (1 << HCR_E_BIT) : 0)   |
+                                              (op_modifier << HCR_OPMOD_SHIFT) |
+                                              op),                       hcr + 6);
+
+       /*
+        * Make sure that our HCR writes don't get mixed in with
+        * writes from another CPU starting a FW command.
+        */
+       mmiowb();
+
+       cmd->toggle = cmd->toggle ^ 1;
+
+       ret = 0;
+
+out:
+       mutex_unlock(&cmd->hcr_mutex);
+       return ret;
+}
+
+static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+                        int out_is_imm, u32 in_modifier, u8 op_modifier,
+                        u16 op, unsigned long timeout)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       void __iomem *hcr = priv->cmd.hcr;
+       int err = 0;
+       unsigned long end;
+
+       down(&priv->cmd.poll_sem);
+
+       err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
+                           in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
+       if (err)
+               goto out;
+
+       end = msecs_to_jiffies(timeout) + jiffies;
+       while (cmd_pending(dev) && time_before(jiffies, end))
+               cond_resched();
+
+       if (cmd_pending(dev)) {
+               err = -ETIMEDOUT;
+               goto out;
+       }
+
+       if (out_is_imm)
+               *out_param =
+                       (u64) be32_to_cpu((__force __be32)
+                                         __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
+                       (u64) be32_to_cpu((__force __be32)
+                                         __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
+
+       err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
+                                              __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
+
+out:
+       up(&priv->cmd.poll_sem);
+       return err;
+}
+
+void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cmd_context *context =
+               &priv->cmd.context[token & priv->cmd.token_mask];
+
+       /* previously timed out command completing at long last */
+       if (token != context->token)
+               return;
+
+       context->result    = mlx4_status_to_errno(status);
+       context->out_param = out_param;
+
+       complete(&context->done);
+}
+
+static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+                        int out_is_imm, u32 in_modifier, u8 op_modifier,
+                        u16 op, unsigned long timeout)
+{
+       struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+       struct mlx4_cmd_context *context;
+       int err = 0;
+
+       down(&cmd->event_sem);
+
+       spin_lock(&cmd->context_lock);
+       BUG_ON(cmd->free_head < 0);
+       context = &cmd->context[cmd->free_head];
+       context->token += cmd->token_mask + 1;
+       cmd->free_head = context->next;
+       spin_unlock(&cmd->context_lock);
+
+       init_completion(&context->done);
+
+       mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
+                     in_modifier, op_modifier, op, context->token, 1);
+
+       if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       err = context->result;
+       if (err)
+               goto out;
+
+       if (out_is_imm)
+               *out_param = context->out_param;
+
+out:
+       spin_lock(&cmd->context_lock);
+       context->next = cmd->free_head;
+       cmd->free_head = context - cmd->context;
+       spin_unlock(&cmd->context_lock);
+
+       up(&cmd->event_sem);
+       return err;
+}
+
+int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+              int out_is_imm, u32 in_modifier, u8 op_modifier,
+              u16 op, unsigned long timeout)
+{
+       if (mlx4_priv(dev)->cmd.use_events)
+               return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
+                                    in_modifier, op_modifier, op, timeout);
+       else
+               return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
+                                    in_modifier, op_modifier, op, timeout);
+}
+EXPORT_SYMBOL_GPL(__mlx4_cmd);
+
+int mlx4_cmd_init(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       mutex_init(&priv->cmd.hcr_mutex);
+       sema_init(&priv->cmd.poll_sem, 1);
+       priv->cmd.use_events = 0;
+       priv->cmd.toggle     = 1;
+
+       priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
+                               MLX4_HCR_SIZE);
+       if (!priv->cmd.hcr) {
+               mlx4_err(dev, "Couldn't map command register.");
+               return -ENOMEM;
+       }
+
+       priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
+                                        MLX4_MAILBOX_SIZE,
+                                        MLX4_MAILBOX_SIZE, 0);
+       if (!priv->cmd.pool) {
+               iounmap(priv->cmd.hcr);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+void mlx4_cmd_cleanup(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       pci_pool_destroy(priv->cmd.pool);
+       iounmap(priv->cmd.hcr);
+}
+
+/*
+ * Switch to using events to issue FW commands (can only be called
+ * after event queue for command events has been initialized).
+ */
+int mlx4_cmd_use_events(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i;
+
+       priv->cmd.context = kmalloc(priv->cmd.max_cmds *
+                                  sizeof (struct mlx4_cmd_context),
+                                  GFP_KERNEL);
+       if (!priv->cmd.context)
+               return -ENOMEM;
+
+       for (i = 0; i < priv->cmd.max_cmds; ++i) {
+               priv->cmd.context[i].token = i;
+               priv->cmd.context[i].next  = i + 1;
+       }
+
+       priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
+       priv->cmd.free_head = 0;
+
+       sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
+       spin_lock_init(&priv->cmd.context_lock);
+
+       for (priv->cmd.token_mask = 1;
+            priv->cmd.token_mask < priv->cmd.max_cmds;
+            priv->cmd.token_mask <<= 1)
+               ; /* nothing */
+       --priv->cmd.token_mask;
+
+       priv->cmd.use_events = 1;
+
+       down(&priv->cmd.poll_sem);
+
+       return 0;
+}
+
+/*
+ * Switch back to polling (used when shutting down the device)
+ */
+void mlx4_cmd_use_polling(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i;
+
+       priv->cmd.use_events = 0;
+
+       for (i = 0; i < priv->cmd.max_cmds; ++i)
+               down(&priv->cmd.event_sem);
+
+       kfree(priv->cmd.context);
+
+       up(&priv->cmd.poll_sem);
+}
+
+struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+
+       mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
+       if (!mailbox)
+               return ERR_PTR(-ENOMEM);
+
+       mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
+                                     &mailbox->dma);
+       if (!mailbox->buf) {
+               kfree(mailbox);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       return mailbox;
+}
+EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
+
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
+{
+       if (!mailbox)
+               return;
+
+       pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
+       kfree(mailbox);
+}
+EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
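
All firmware commands funnel through __mlx4_cmd(); callers use the
mlx4_cmd()/mlx4_cmd_box() wrappers from <linux/mlx4/cmd.h> and, when a
command takes an input block, pass the DMA address of a mailbox. A sketch
of the typical shape (mirroring mlx4_SW2HW_CQ() in cq.c below; cq_num is
the caller's CQ number):

    struct mlx4_cmd_mailbox *mailbox;
    int err;

    mailbox = mlx4_alloc_cmd_mailbox(dev);
    if (IS_ERR(mailbox))
            return PTR_ERR(mailbox);

    /* ... fill mailbox->buf with the command's input layout ... */

    err = mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
                   MLX4_CMD_TIME_CLASS_A);

    mlx4_free_cmd_mailbox(dev, mailbox);
    return err;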
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
new file mode 100644 (file)
index 0000000..bd8ef9f
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
+
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/cq.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+struct mlx4_cq_context {
+       __be32                  flags;
+       u16                     reserved1[3];
+       __be16                  page_offset;
+       __be32                  logsize_usrpage;
+       __be16                  cq_period;
+       __be16                  cq_max_count;
+       u8                      reserved2[3];
+       u8                      comp_eqn;
+       u8                      log_page_size;
+       u8                      reserved3[2];
+       u8                      mtt_base_addr_h;
+       __be32                  mtt_base_addr_l;
+       __be32                  last_notified_index;
+       __be32                  solicit_producer_index;
+       __be32                  consumer_index;
+       __be32                  producer_index;
+       u32                     reserved4[2];
+       __be64                  db_rec_addr;
+};
+
+#define MLX4_CQ_STATUS_OK              ( 0 << 28)
+#define MLX4_CQ_STATUS_OVERFLOW                ( 9 << 28)
+#define MLX4_CQ_STATUS_WRITE_FAIL      (10 << 28)
+#define MLX4_CQ_FLAG_CC                        ( 1 << 18)
+#define MLX4_CQ_FLAG_OI                        ( 1 << 17)
+#define MLX4_CQ_STATE_ARMED            ( 9 <<  8)
+#define MLX4_CQ_STATE_ARMED_SOL                ( 6 <<  8)
+#define MLX4_EQ_STATE_FIRED            (10 <<  8)
+
+void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
+{
+       struct mlx4_cq *cq;
+
+       cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
+                              cqn & (dev->caps.num_cqs - 1));
+       if (!cq) {
+               mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+               return;
+       }
+
+       ++cq->arm_sn;
+
+       cq->comp(cq);
+}
+
+void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
+{
+       struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
+       struct mlx4_cq *cq;
+
+       spin_lock(&cq_table->lock);
+
+       cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
+       if (cq)
+               atomic_inc(&cq->refcount);
+
+       spin_unlock(&cq_table->lock);
+
+       if (!cq) {
+               mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+               return;
+       }
+
+       cq->event(cq, event_type);
+
+       if (atomic_dec_and_test(&cq->refcount))
+               complete(&cq->free);
+}
+
+static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+                        int cq_num)
+{
+       return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
+                       MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+                        int cq_num, u32 opmod)
+{
+       return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
+                       MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+                        int cq_num)
+{
+       return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
+                           mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+                           MLX4_CMD_TIME_CLASS_A);
+}
+
+int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
+                  u16 count, u16 period)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_cq_context *cq_context;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       cq_context = mailbox->buf;
+       memset(cq_context, 0, sizeof *cq_context);
+
+       cq_context->cq_max_count = cpu_to_be16(count);
+       cq_context->cq_period    = cpu_to_be16(period);
+
+       err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_modify);
+
+int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
+                  int entries, struct mlx4_mtt *mtt)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_cq_context *cq_context;
+       u64 mtt_addr;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       cq_context = mailbox->buf;
+       memset(cq_context, 0, sizeof *cq_context);
+
+       cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
+       cq_context->log_page_size   = mtt->page_shift - 12;
+       mtt_addr = mlx4_mtt_addr(dev, mtt);
+       cq_context->mtt_base_addr_h = mtt_addr >> 32;
+       cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+
+       err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_resize);
+
+int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
+                 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
+                 unsigned vector, int collapsed)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cq_table *cq_table = &priv->cq_table;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_cq_context *cq_context;
+       u64 mtt_addr;
+       int err;
+
+       if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
+               return -EINVAL;
+
+       cq->vector = vector;
+
+       cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+       if (cq->cqn == -1)
+               return -ENOMEM;
+
+       err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
+       if (err)
+               goto err_out;
+
+       err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+       if (err)
+               goto err_put;
+
+       spin_lock_irq(&cq_table->lock);
+       err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
+       spin_unlock_irq(&cq_table->lock);
+       if (err)
+               goto err_cmpt_put;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               goto err_radix;
+       }
+
+       cq_context = mailbox->buf;
+       memset(cq_context, 0, sizeof *cq_context);
+
+       cq_context->flags           = cpu_to_be32(!!collapsed << 18);
+       cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
+       cq_context->comp_eqn        = priv->eq_table.eq[vector].eqn;
+       cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
+
+       mtt_addr = mlx4_mtt_addr(dev, mtt);
+       cq_context->mtt_base_addr_h = mtt_addr >> 32;
+       cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+       cq_context->db_rec_addr     = cpu_to_be64(db_rec);
+
+       err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       if (err)
+               goto err_radix;
+
+       cq->cons_index = 0;
+       cq->arm_sn     = 1;
+       cq->uar        = uar;
+       atomic_set(&cq->refcount, 1);
+       init_completion(&cq->free);
+
+       return 0;
+
+err_radix:
+       spin_lock_irq(&cq_table->lock);
+       radix_tree_delete(&cq_table->tree, cq->cqn);
+       spin_unlock_irq(&cq_table->lock);
+
+err_cmpt_put:
+       mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
+
+err_put:
+       mlx4_table_put(dev, &cq_table->table, cq->cqn);
+
+err_out:
+       mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_alloc);
+
+void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cq_table *cq_table = &priv->cq_table;
+       int err;
+
+       err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
+       if (err)
+               mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
+
+       synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+
+       spin_lock_irq(&cq_table->lock);
+       radix_tree_delete(&cq_table->tree, cq->cqn);
+       spin_unlock_irq(&cq_table->lock);
+
+       if (atomic_dec_and_test(&cq->refcount))
+               complete(&cq->free);
+       wait_for_completion(&cq->free);
+
+       mlx4_table_put(dev, &cq_table->table, cq->cqn);
+       mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+}
+EXPORT_SYMBOL_GPL(mlx4_cq_free);
+
+int mlx4_init_cq_table(struct mlx4_dev *dev)
+{
+       struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
+       int err;
+
+       spin_lock_init(&cq_table->lock);
+       INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+
+       err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
+                              dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
+{
+       /* Nothing to do to clean up radix_tree */
+       mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
+}
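
mlx4_cq_modify() above is the interrupt-moderation hook: count and period
land in the cq_max_count/cq_period fields of the CQ context. A hedged
sketch (values hypothetical; mlx4_en derives the real ones from the
ethtool coalescing settings):

    /* event after 44 completions, or when the 16-unit period expires */
    err = mlx4_cq_modify(dev, cq, 44, 16); /* cq: struct mlx4_cq * */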
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
new file mode 100644 (file)
index 0000000..ec4b6d0
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_en.h"
+
+static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
+{
+       return;
+}
+
+
+int mlx4_en_create_cq(struct mlx4_en_priv *priv,
+                     struct mlx4_en_cq *cq,
+                     int entries, int ring, enum cq_type mode)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err;
+
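+       /* An RX CQ needs one CQE per entry; a TX CQ is collapsed, so the
+        * hardware keeps overwriting a single CQE (its index is reset
+        * when the CQ is activated at port start) */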
+       cq->size = entries;
+       if (mode == RX)
+               cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
+       else
+               cq->buf_size = sizeof(struct mlx4_cqe);
+
+       cq->ring = ring;
+       cq->is_tx = mode;
+       spin_lock_init(&cq->lock);
+
+       err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
+                               cq->buf_size, 2 * PAGE_SIZE);
+       if (err)
+               return err;
+
+       err = mlx4_en_map_buffer(&cq->wqres.buf);
+       if (err)
+               mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+       else
+               cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+
+       return err;
+}
+
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err = 0;
+       char name[25];
+
+       cq->dev = mdev->pndev[priv->port];
+       cq->mcq.set_ci_db  = cq->wqres.db.db;
+       cq->mcq.arm_db     = cq->wqres.db.db + 1;
+       *cq->mcq.set_ci_db = 0;
+       *cq->mcq.arm_db    = 0;
+       memset(cq->buf, 0, cq->buf_size);
+
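+       /* Choose the interrupt vector: RX CQs try to take a dedicated
+        * MSI-X vector from the completion pool and fall back to the
+        * legacy vectors otherwise; TX CQs use the vector assigned when
+        * the port is started */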
+       if (cq->is_tx == RX) {
+               if (mdev->dev->caps.comp_pool) {
+                       if (!cq->vector) {
+                               sprintf(name, "%s-rx-%d", priv->dev->name, cq->ring);
+                               if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
+                                       cq->vector = (cq->ring + 1 + priv->port) %
+                                               mdev->dev->caps.num_comp_vectors;
+                                       mlx4_warn(mdev, "Failed assigning an EQ to "
+                                                 "%s-rx-%d, falling back to legacy EQs\n",
+                                                 priv->dev->name, cq->ring);
+                               }
+                       }
+               } else {
+                       cq->vector = (cq->ring + 1 + priv->port) %
+                               mdev->dev->caps.num_comp_vectors;
+               }
+       } else {
+               if (!cq->vector || !mdev->dev->caps.comp_pool) {
+                       /* Fall back to the legacy pool when no dedicated vector is assigned */
+                       cq->vector   = 0;
+               }
+       }
+
+       if (!cq->is_tx)
+               cq->size = priv->rx_ring[cq->ring].actual_size;
+
+       err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
+                           cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
+       if (err)
+               return err;
+
+       cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
+       cq->mcq.event = mlx4_en_cq_event;
+
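+       /* TX completions are reaped from a polling timer, RX completions
+        * via NAPI */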
+       if (cq->is_tx) {
+               init_timer(&cq->timer);
+               cq->timer.function = mlx4_en_poll_tx_cq;
+               cq->timer.data = (unsigned long) cq;
+       } else {
+               netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+               napi_enable(&cq->napi);
+       }
+
+       return 0;
+}
+
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+                       bool reserve_vectors)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+
+       mlx4_en_unmap_buffer(&cq->wqres.buf);
+       mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+       if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors)
+               mlx4_release_eq(priv->mdev->dev, cq->vector);
+       cq->buf_size = 0;
+       cq->buf = NULL;
+}
+
+void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+
+       if (cq->is_tx)
+               del_timer(&cq->timer);
+       else {
+               napi_disable(&cq->napi);
+               netif_napi_del(&cq->napi);
+       }
+
+       mlx4_cq_free(mdev->dev, &cq->mcq);
+}
+
+/* Set rx cq moderation parameters */
+int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+       return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
+                             cq->moder_cnt, cq->moder_time);
+}
+
+int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+       mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
+                   &priv->mdev->uar_lock);
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
new file mode 100644 (file)
index 0000000..eb09625
--- /dev/null
@@ -0,0 +1,477 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "mlx4_en.h"
+#include "en_port.h"
+
+static void
+mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+
+       strncpy(drvinfo->driver, DRV_NAME, 32);
+       strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
+       sprintf(drvinfo->fw_version, "%d.%d.%d",
+               (u16) (mdev->dev->caps.fw_ver >> 32),
+               (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
+               (u16) (mdev->dev->caps.fw_ver & 0xffff));
+       strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+       drvinfo->n_stats = 0;
+       drvinfo->regdump_len = 0;
+       drvinfo->eedump_len = 0;
+}
+
+static const char main_strings[][ETH_GSTRING_LEN] = {
+       "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+       "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+       "rx_length_errors", "rx_over_errors", "rx_crc_errors",
+       "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+       "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+       "tx_heartbeat_errors", "tx_window_errors",
+
+       /* port statistics */
+       "tso_packets",
+       "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
+       "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
+
+       /* packet statistics */
+       "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
+       "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
+       "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
+       "tx_prio_6", "tx_prio_7",
+};
+#define NUM_MAIN_STATS 21
+#define NUM_ALL_STATS  (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
+
+static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
+       "Interrupt Test",
+       "Link Test",
+       "Speed Test",
+       "Register Test",
+       "Loopback Test",
+};
+
+static u32 mlx4_en_get_msglevel(struct net_device *dev)
+{
+       return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
+}
+
+static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
+{
+       ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
+}
+
+static void mlx4_en_get_wol(struct net_device *netdev,
+                           struct ethtool_wolinfo *wol)
+{
+       struct mlx4_en_priv *priv = netdev_priv(netdev);
+       int err = 0;
+       u64 config = 0;
+
+       if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
+               wol->supported = 0;
+               wol->wolopts = 0;
+               return;
+       }
+
+       err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
+       if (err) {
+               en_err(priv, "Failed to get WoL information\n");
+               return;
+       }
+
+       if (config & MLX4_EN_WOL_MAGIC)
+               wol->supported = WAKE_MAGIC;
+       else
+               wol->supported = 0;
+
+       if (config & MLX4_EN_WOL_ENABLED)
+               wol->wolopts = WAKE_MAGIC;
+       else
+               wol->wolopts = 0;
+}
+
+static int mlx4_en_set_wol(struct net_device *netdev,
+                           struct ethtool_wolinfo *wol)
+{
+       struct mlx4_en_priv *priv = netdev_priv(netdev);
+       u64 config = 0;
+       int err = 0;
+
+       if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
+               return -EOPNOTSUPP;
+
+       if (wol->supported & ~WAKE_MAGIC)
+               return -EINVAL;
+
+       err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
+       if (err) {
+               en_err(priv, "Failed to get WoL info, unable to modify\n");
+               return err;
+       }
+
+       if (wol->wolopts & WAKE_MAGIC) {
+               config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
+                               MLX4_EN_WOL_MAGIC;
+       } else {
+               config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
+               config |= MLX4_EN_WOL_DO_MODIFY;
+       }
+
+       err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
+       if (err)
+               en_err(priv, "Failed to set WoL information\n");
+
+       return err;
+}
+
+static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+
+       switch (sset) {
+       case ETH_SS_STATS:
+               return NUM_ALL_STATS +
+                       (priv->tx_ring_num + priv->rx_ring_num) * 2;
+       case ETH_SS_TEST:
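+               /* The last two self-tests require unicast loopback
+                * support and are hidden when the device lacks it */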
+               return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
+                                       & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void mlx4_en_get_ethtool_stats(struct net_device *dev,
+               struct ethtool_stats *stats, uint64_t *data)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       int index = 0;
+       int i;
+
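+       /* The output order must match mlx4_en_get_strings(): main stats,
+        * port stats, per-ring TX/RX counters, then packet stats */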
+       spin_lock_bh(&priv->stats_lock);
+
+       for (i = 0; i < NUM_MAIN_STATS; i++)
+               data[index++] = ((unsigned long *) &priv->stats)[i];
+       for (i = 0; i < NUM_PORT_STATS; i++)
+               data[index++] = ((unsigned long *) &priv->port_stats)[i];
+       for (i = 0; i < priv->tx_ring_num; i++) {
+               data[index++] = priv->tx_ring[i].packets;
+               data[index++] = priv->tx_ring[i].bytes;
+       }
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               data[index++] = priv->rx_ring[i].packets;
+               data[index++] = priv->rx_ring[i].bytes;
+       }
+       for (i = 0; i < NUM_PKT_STATS; i++)
+               data[index++] = ((unsigned long *) &priv->pkstats)[i];
+       spin_unlock_bh(&priv->stats_lock);
+}
+
+static void mlx4_en_self_test(struct net_device *dev,
+                             struct ethtool_test *etest, u64 *buf)
+{
+       mlx4_en_ex_selftest(dev, &etest->flags, buf);
+}
+
+static void mlx4_en_get_strings(struct net_device *dev,
+                               uint32_t stringset, uint8_t *data)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       int index = 0;
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_TEST:
+               for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
+                       strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+               if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
+                       for (; i < MLX4_EN_NUM_SELF_TEST; i++)
+                               strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+               break;
+
+       case ETH_SS_STATS:
+               /* Add main counters */
+               for (i = 0; i < NUM_MAIN_STATS; i++)
+                       strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
+               for (i = 0; i < NUM_PORT_STATS; i++)
+                       strcpy(data + (index++) * ETH_GSTRING_LEN,
+                              main_strings[i + NUM_MAIN_STATS]);
+               for (i = 0; i < priv->tx_ring_num; i++) {
+                       sprintf(data + (index++) * ETH_GSTRING_LEN,
+                               "tx%d_packets", i);
+                       sprintf(data + (index++) * ETH_GSTRING_LEN,
+                               "tx%d_bytes", i);
+               }
+               for (i = 0; i < priv->rx_ring_num; i++) {
+                       sprintf(data + (index++) * ETH_GSTRING_LEN,
+                               "rx%d_packets", i);
+                       sprintf(data + (index++) * ETH_GSTRING_LEN,
+                               "rx%d_bytes", i);
+               }
+               for (i = 0; i < NUM_PKT_STATS; i++)
+                       strcpy(data + (index++) * ETH_GSTRING_LEN,
+                              main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
+               break;
+       }
+}
+
+static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       int trans_type;
+
+       cmd->autoneg = AUTONEG_DISABLE;
+       cmd->supported = SUPPORTED_10000baseT_Full;
+       cmd->advertising = ADVERTISED_10000baseT_Full;
+
+       if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+               return -ENOMEM;
+
+       trans_type = priv->port_state.transciver;
+       if (netif_carrier_ok(dev)) {
+               ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
+               cmd->duplex = DUPLEX_FULL;
+       } else {
+               ethtool_cmd_speed_set(cmd, -1);
+               cmd->duplex = -1;
+       }
+
+       if (trans_type > 0 && trans_type <= 0xC) {
+               cmd->port = PORT_FIBRE;
+               cmd->transceiver = XCVR_EXTERNAL;
+               cmd->supported |= SUPPORTED_FIBRE;
+               cmd->advertising |= ADVERTISED_FIBRE;
+       } else if (trans_type == 0x80 || trans_type == 0) {
+               cmd->port = PORT_TP;
+               cmd->transceiver = XCVR_INTERNAL;
+               cmd->supported |= SUPPORTED_TP;
+               cmd->advertising |= ADVERTISED_TP;
+       } else  {
+               cmd->port = -1;
+               cmd->transceiver = -1;
+       }
+       return 0;
+}
+
+static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       if ((cmd->autoneg == AUTONEG_ENABLE) ||
+           (ethtool_cmd_speed(cmd) != SPEED_10000) ||
+           (cmd->duplex != DUPLEX_FULL))
+               return -EINVAL;
+
+       /* Nothing to change */
+       return 0;
+}
+
+static int mlx4_en_get_coalesce(struct net_device *dev,
+                             struct ethtool_coalesce *coal)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+
+       coal->tx_coalesce_usecs = 0;
+       coal->tx_max_coalesced_frames = 0;
+       coal->rx_coalesce_usecs = priv->rx_usecs;
+       coal->rx_max_coalesced_frames = priv->rx_frames;
+
+       coal->pkt_rate_low = priv->pkt_rate_low;
+       coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
+       coal->pkt_rate_high = priv->pkt_rate_high;
+       coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
+       coal->rate_sample_interval = priv->sample_interval;
+       coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+       return 0;
+}
+
+static int mlx4_en_set_coalesce(struct net_device *dev,
+                             struct ethtool_coalesce *coal)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       int err, i;
+
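+       /* MLX4_EN_AUTO_CONF is a sentinel requesting the driver default
+        * frame count and interval */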
+       priv->rx_frames = (coal->rx_max_coalesced_frames ==
+                          MLX4_EN_AUTO_CONF) ?
+                               MLX4_EN_RX_COAL_TARGET :
+                               coal->rx_max_coalesced_frames;
+       priv->rx_usecs = (coal->rx_coalesce_usecs ==
+                         MLX4_EN_AUTO_CONF) ?
+                               MLX4_EN_RX_COAL_TIME :
+                               coal->rx_coalesce_usecs;
+
+       /* Set adaptive coalescing params */
+       priv->pkt_rate_low = coal->pkt_rate_low;
+       priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
+       priv->pkt_rate_high = coal->pkt_rate_high;
+       priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
+       priv->sample_interval = coal->rate_sample_interval;
+       priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+       priv->last_moder_time = MLX4_EN_AUTO_CONF;
+       if (priv->adaptive_rx_coal)
+               return 0;
+
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               priv->rx_cq[i].moder_cnt = priv->rx_frames;
+               priv->rx_cq[i].moder_time = priv->rx_usecs;
+               err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+static int mlx4_en_set_pauseparam(struct net_device *dev,
+                               struct ethtool_pauseparam *pause)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err;
+
+       priv->prof->tx_pause = pause->tx_pause != 0;
+       priv->prof->rx_pause = pause->rx_pause != 0;
+       err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+                                   priv->rx_skb_size + ETH_FCS_LEN,
+                                   priv->prof->tx_pause,
+                                   priv->prof->tx_ppp,
+                                   priv->prof->rx_pause,
+                                   priv->prof->rx_ppp);
+       if (err)
+               en_err(priv, "Failed setting pause params\n");
+
+       return err;
+}
+
+static void mlx4_en_get_pauseparam(struct net_device *dev,
+                                struct ethtool_pauseparam *pause)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+
+       pause->tx_pause = priv->prof->tx_pause;
+       pause->rx_pause = priv->prof->rx_pause;
+}
+
+static int mlx4_en_set_ringparam(struct net_device *dev,
+                                struct ethtool_ringparam *param)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       u32 rx_size, tx_size;
+       int port_up = 0;
+       int err = 0;
+
+       if (param->rx_jumbo_pending || param->rx_mini_pending)
+               return -EINVAL;
+
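+       /* Ring sizes must be powers of two within the driver limits, so
+        * round and clamp the requested values before comparing them to
+        * the current configuration */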
+       rx_size = roundup_pow_of_two(param->rx_pending);
+       rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
+       rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
+       tx_size = roundup_pow_of_two(param->tx_pending);
+       tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
+       tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
+
+       if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
+                                       priv->rx_ring[0].size) &&
+           tx_size == priv->tx_ring[0].size)
+               return 0;
+
+       mutex_lock(&mdev->state_lock);
+       if (priv->port_up) {
+               port_up = 1;
+               mlx4_en_stop_port(dev);
+       }
+
+       mlx4_en_free_resources(priv, true);
+
+       priv->prof->tx_ring_size = tx_size;
+       priv->prof->rx_ring_size = rx_size;
+
+       err = mlx4_en_alloc_resources(priv);
+       if (err) {
+               en_err(priv, "Failed reallocating port resources\n");
+               goto out;
+       }
+       if (port_up) {
+               err = mlx4_en_start_port(dev);
+               if (err)
+                       en_err(priv, "Failed starting port\n");
+       }
+
+out:
+       mutex_unlock(&mdev->state_lock);
+       return err;
+}
+
+static void mlx4_en_get_ringparam(struct net_device *dev,
+                                 struct ethtool_ringparam *param)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+
+       memset(param, 0, sizeof(*param));
+       param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
+       param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
+       param->rx_pending = priv->port_up ?
+               priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
+       param->tx_pending = priv->tx_ring[0].size;
+}
+
+const struct ethtool_ops mlx4_en_ethtool_ops = {
+       .get_drvinfo = mlx4_en_get_drvinfo,
+       .get_settings = mlx4_en_get_settings,
+       .set_settings = mlx4_en_set_settings,
+       .get_link = ethtool_op_get_link,
+       .get_strings = mlx4_en_get_strings,
+       .get_sset_count = mlx4_en_get_sset_count,
+       .get_ethtool_stats = mlx4_en_get_ethtool_stats,
+       .self_test = mlx4_en_self_test,
+       .get_wol = mlx4_en_get_wol,
+       .set_wol = mlx4_en_set_wol,
+       .get_msglevel = mlx4_en_get_msglevel,
+       .set_msglevel = mlx4_en_set_msglevel,
+       .get_coalesce = mlx4_en_get_coalesce,
+       .set_coalesce = mlx4_en_set_coalesce,
+       .get_pauseparam = mlx4_en_get_pauseparam,
+       .set_pauseparam = mlx4_en_set_pauseparam,
+       .get_ringparam = mlx4_en_get_ringparam,
+       .set_ringparam = mlx4_en_set_ringparam,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
new file mode 100644 (file)
index 0000000..6bfea23
--- /dev/null
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_en.h"
+
+MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
+MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
+
+static const char mlx4_en_version[] =
+       DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
+       DRV_VERSION " (" DRV_RELDATE ")\n";
+
+#define MLX4_EN_PARM_INT(X, def_val, desc) \
+       static unsigned int X = def_val;\
+       module_param(X, uint, 0444); \
+       MODULE_PARM_DESC(X, desc);
+
+/*
+ * Device scope module parameters
+ */
+
+/* Enable RSS TCP traffic */
+MLX4_EN_PARM_INT(tcp_rss, 1,
+                "Enable RSS for incoming TCP traffic, or disable (0)");
+/* Enable RSS UDP traffic */
+MLX4_EN_PARM_INT(udp_rss, 1,
+                "Enable RSS for incoming UDP traffic, or disable (0)");
+
+/* Priority pausing */
+MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
+                          " Per priority bit mask");
+MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
+                          " Per priority bit mask");
+
+int en_print(const char *level, const struct mlx4_en_priv *priv,
+            const char *format, ...)
+{
+       va_list args;
+       struct va_format vaf;
+       int i;
+
+       va_start(args, format);
+
+       vaf.fmt = format;
+       vaf.va = &args;
+       if (priv->registered)
+               i = printk("%s%s: %s: %pV",
+                          level, DRV_NAME, priv->dev->name, &vaf);
+       else
+               i = printk("%s%s: %s: Port %d: %pV",
+                          level, DRV_NAME, dev_name(&priv->mdev->pdev->dev),
+                          priv->port, &vaf);
+       va_end(args);
+
+       return i;
+}
+
+static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
+{
+       struct mlx4_en_profile *params = &mdev->profile;
+       int i;
+
+       params->tcp_rss = tcp_rss;
+       params->udp_rss = udp_rss;
+       if (params->udp_rss && !(mdev->dev->caps.flags
+                                       & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
+               mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
+               params->udp_rss = 0;
+       }
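+       /* Every port starts from the same profile; PPP rings are added
+        * only when a per-priority pause mask (pfcrx) was supplied */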
+       for (i = 1; i <= MLX4_MAX_PORTS; i++) {
+               params->prof[i].rx_pause = 1;
+               params->prof[i].rx_ppp = pfcrx;
+               params->prof[i].tx_pause = 1;
+               params->prof[i].tx_ppp = pfctx;
+               params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+               params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+               params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
+                       (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
+       }
+
+       return 0;
+}
+
+static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
+{
+       struct mlx4_en_dev *endev = ctx;
+
+       return endev->pndev[port];
+}
+
+static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
+                         enum mlx4_dev_event event, int port)
+{
+       struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
+       struct mlx4_en_priv *priv;
+
+       if (!mdev->pndev[port])
+               return;
+
+       priv = netdev_priv(mdev->pndev[port]);
+       switch (event) {
+       case MLX4_DEV_EVENT_PORT_UP:
+       case MLX4_DEV_EVENT_PORT_DOWN:
+               /* To prevent races, we poll the link state in a separate
+                * task rather than changing it here */
+               priv->link_state = event;
+               queue_work(mdev->workqueue, &priv->linkstate_task);
+               break;
+
+       case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+               mlx4_err(mdev, "Internal error detected, restarting device\n");
+               break;
+
+       default:
+               mlx4_warn(mdev, "Unhandled event: %d\n", event);
+       }
+}
+
+static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
+{
+       struct mlx4_en_dev *mdev = endev_ptr;
+       int i;
+
+       mutex_lock(&mdev->state_lock);
+       mdev->device_up = false;
+       mutex_unlock(&mdev->state_lock);
+
+       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+               if (mdev->pndev[i])
+                       mlx4_en_destroy_netdev(mdev->pndev[i]);
+
+       flush_workqueue(mdev->workqueue);
+       destroy_workqueue(mdev->workqueue);
+       mlx4_mr_free(dev, &mdev->mr);
+       mlx4_uar_free(dev, &mdev->priv_uar);
+       mlx4_pd_free(dev, mdev->priv_pdn);
+       kfree(mdev);
+}
+
+static void *mlx4_en_add(struct mlx4_dev *dev)
+{
+       struct mlx4_en_dev *mdev;
+       int i;
+       int err;
+
+       printk_once(KERN_INFO "%s", mlx4_en_version);
+
+       mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+       if (!mdev) {
+               dev_err(&dev->pdev->dev, "Device struct alloc failed, "
+                       "aborting.\n");
+               err = -ENOMEM;
+               goto err_free_res;
+       }
+
+       if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
+               goto err_free_dev;
+
+       if (mlx4_uar_alloc(dev, &mdev->priv_uar))
+               goto err_pd;
+
+       mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
+                               PAGE_SIZE);
+       if (!mdev->uar_map)
+               goto err_uar;
+       spin_lock_init(&mdev->uar_lock);
+
+       mdev->dev = dev;
+       mdev->dma_device = &(dev->pdev->dev);
+       mdev->pdev = dev->pdev;
+       mdev->device_up = false;
+
+       mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
+       if (!mdev->LSO_support)
+               mlx4_warn(mdev, "LSO not supported, please upgrade to later "
+                               "FW version to enable LSO\n");
+
+       if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
+                        MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
+                        0, 0, &mdev->mr)) {
+               mlx4_err(mdev, "Failed allocating memory region\n");
+               goto err_uar;
+       }
+       if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
+               mlx4_err(mdev, "Failed enabling memory region\n");
+               goto err_mr;
+       }
+
+       /* Build device profile according to supplied module parameters */
+       err = mlx4_en_get_profile(mdev);
+       if (err) {
+               mlx4_err(mdev, "Bad module parameters, aborting.\n");
+               goto err_mr;
+       }
+
+       /* Configure which ports to start according to module parameters */
+       mdev->port_cnt = 0;
+       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+               mdev->port_cnt++;
+
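+       /* Budget RX rings from the available completion vectors: spread
+        * over the legacy vectors when no dedicated pool exists,
+        * otherwise divide the pool between the ports */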
+       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+               if (!dev->caps.comp_pool) {
+                       mdev->profile.prof[i].rx_ring_num =
+                               rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
+                                                          min_t(int,
+                                                                dev->caps.num_comp_vectors,
+                                                                MAX_RX_RINGS)));
+               } else {
+                       mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
+                               min_t(int, dev->caps.comp_pool /
+                                     dev->caps.num_ports - 1, MAX_MSIX_P_PORT - 1));
+               }
+       }
+
+       /* Create our own workqueue for reset/multicast tasks
+        * Note: we cannot use the shared workqueue because of deadlocks caused
+        *       by the rtnl lock */
+       mdev->workqueue = create_singlethread_workqueue("mlx4_en");
+       if (!mdev->workqueue) {
+               err = -ENOMEM;
+               goto err_mr;
+       }
+
+       /* At this stage all non-port specific tasks are complete:
+        * mark the card state as up */
+       mutex_init(&mdev->state_lock);
+       mdev->device_up = true;
+
+       /* Setup ports */
+
+       /* Create a netdev for each port */
+       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+               mlx4_info(mdev, "Activating port:%d\n", i);
+               if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
+                       mdev->pndev[i] = NULL;
+       }
+       return mdev;
+
+err_mr:
+       mlx4_mr_free(dev, &mdev->mr);
+err_uar:
+       mlx4_uar_free(dev, &mdev->priv_uar);
+err_pd:
+       mlx4_pd_free(dev, mdev->priv_pdn);
+err_free_dev:
+       kfree(mdev);
+err_free_res:
+       return NULL;
+}
+
+static struct mlx4_interface mlx4_en_interface = {
+       .add            = mlx4_en_add,
+       .remove         = mlx4_en_remove,
+       .event          = mlx4_en_event,
+       .get_dev        = mlx4_en_get_netdev,
+       .protocol       = MLX4_PROT_ETH,
+};
+
+static int __init mlx4_en_init(void)
+{
+       return mlx4_register_interface(&mlx4_en_interface);
+}
+
+static void __exit mlx4_en_cleanup(void)
+{
+       mlx4_unregister_interface(&mlx4_en_interface);
+}
+
+module_init(mlx4_en_init);
+module_exit(mlx4_en_cleanup);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
new file mode 100644 (file)
index 0000000..4b0f32e
--- /dev/null
@@ -0,0 +1,1166 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/cq.h>
+
+#include "mlx4_en.h"
+#include "en_port.h"
+
+static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err;
+       int idx;
+
+       en_dbg(HW, priv, "adding VLAN:%d\n", vid);
+
+       set_bit(vid, priv->active_vlans);
+
+       /* Add VID to port VLAN filter */
+       mutex_lock(&mdev->state_lock);
+       if (mdev->device_up && priv->port_up) {
+               err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+               if (err)
+                       en_err(priv, "Failed configuring VLAN filter\n");
+       }
+       if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
+               en_err(priv, "Failed adding vlan %d\n", vid);
+       mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err;
+       int idx;
+
+       en_dbg(HW, priv, "Killing VID:%d\n", vid);
+
+       clear_bit(vid, priv->active_vlans);
+
+       /* Remove VID from port VLAN filter */
+       mutex_lock(&mdev->state_lock);
+       if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
+               mlx4_unregister_vlan(mdev->dev, priv->port, idx);
+       else
+               en_err(priv, "Could not find vid %d in cache\n", vid);
+
+       if (mdev->device_up && priv->port_up) {
+               err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+               if (err)
+                       en_err(priv, "Failed configuring VLAN filter\n");
+       }
+       mutex_unlock(&mdev->state_lock);
+}
+
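+/* Pack a MAC address into a u64, most significant byte first */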
+u64 mlx4_en_mac_to_u64(u8 *addr)
+{
+       u64 mac = 0;
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++) {
+               mac <<= 8;
+               mac |= addr[i];
+       }
+       return mac;
+}
+
+static int mlx4_en_set_mac(struct net_device *dev, void *addr)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct sockaddr *saddr = addr;
+
+       if (!is_valid_ether_addr(saddr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+       priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
+       queue_work(mdev->workqueue, &priv->mac_task);
+       return 0;
+}
+
+static void mlx4_en_do_set_mac(struct work_struct *work)
+{
+       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+                                                mac_task);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err = 0;
+
+       mutex_lock(&mdev->state_lock);
+       if (priv->port_up) {
+               /* Remove old MAC and insert the new one */
+               err = mlx4_replace_mac(mdev->dev, priv->port,
+                                      priv->base_qpn, priv->mac, 0);
+               if (err)
+                       en_err(priv, "Failed changing HW MAC address\n");
+       } else
+               en_dbg(HW, priv, "Port is down while "
+                                "registering mac, exiting...\n");
+
+       mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_clear_list(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+
+       kfree(priv->mc_addrs);
+       priv->mc_addrs_cnt = 0;
+}
+
+static void mlx4_en_cache_mclist(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct netdev_hw_addr *ha;
+       char *mc_addrs;
+       int mc_addrs_cnt = netdev_mc_count(dev);
+       int i;
+
+       mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
+       if (!mc_addrs) {
+               en_err(priv, "failed to allocate multicast list\n");
+               return;
+       }
+       i = 0;
+       netdev_for_each_mc_addr(ha, dev)
+               memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+       priv->mc_addrs = mc_addrs;
+       priv->mc_addrs_cnt = mc_addrs_cnt;
+}
+
+static void mlx4_en_set_multicast(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+
+       if (!priv->port_up)
+               return;
+
+       queue_work(priv->mdev->workqueue, &priv->mcast_task);
+}
+
+static void mlx4_en_do_set_multicast(struct work_struct *work)
+{
+       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+                                                mcast_task);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct net_device *dev = priv->dev;
+       u64 mcast_addr = 0;
+       u8 mc_list[16] = {0};
+       int err;
+
+       mutex_lock(&mdev->state_lock);
+       if (!mdev->device_up) {
+               en_dbg(HW, priv, "Card is not up, "
+                                "ignoring multicast change.\n");
+               goto out;
+       }
+       if (!priv->port_up) {
+               en_dbg(HW, priv, "Port is down, "
+                                "ignoring multicast change.\n");
+               goto out;
+       }
+
+       /*
+        * Promiscuous mode: disable all filters
+        */
+
+       if (dev->flags & IFF_PROMISC) {
+               if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
+                       if (netif_msg_rx_status(priv))
+                               en_warn(priv, "Entering promiscuous mode\n");
+                       priv->flags |= MLX4_EN_FLAG_PROMISC;
+
+                       /* Enable promiscuous mode */
+                       if (!(mdev->dev->caps.flags &
+                                               MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+                               err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+                                                            priv->base_qpn, 1);
+                       else
+                               err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
+                                                              priv->port);
+                       if (err)
+                               en_err(priv, "Failed enabling "
+                                            "promiscuous mode\n");
+
+                       /* Disable port multicast filter (unconditionally) */
+                       err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+                                                 0, MLX4_MCAST_DISABLE);
+                       if (err)
+                               en_err(priv, "Failed disabling "
+                                            "multicast filter\n");
+
+                       /* Add the default qp number as multicast promisc */
+                       if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+                               err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+                                                                priv->port);
+                               if (err)
+                                       en_err(priv, "Failed entering multicast promisc mode\n");
+                               priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+                       }
+
+                       /* Disable port VLAN filter */
+                       err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+                       if (err)
+                               en_err(priv, "Failed disabling VLAN filter\n");
+               }
+               goto out;
+       }
+
+       /*
+        * Not in promiscuous mode
+        */
+
+       if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+               if (netif_msg_rx_status(priv))
+                       en_warn(priv, "Leaving promiscuous mode\n");
+               priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+               /* Disable promiscuous mode */
+               if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+                       err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+                                                    priv->base_qpn, 0);
+               else
+                       err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+                                                         priv->port);
+               if (err)
+                       en_err(priv, "Failed disabling promiscuous mode\n");
+
+               /* Disable Multicast promisc */
+               if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+                       err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+                                                           priv->port);
+                       if (err)
+                               en_err(priv, "Failed disabling multicast promiscuous mode\n");
+                       priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+               }
+
+               /* Enable port VLAN filter */
+               err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+               if (err)
+                       en_err(priv, "Failed enabling VLAN filter\n");
+       }
+
+       /* Enable/disable the multicast filter according to IFF_ALLMULTI */
+       if (dev->flags & IFF_ALLMULTI) {
+               err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+                                         0, MLX4_MCAST_DISABLE);
+               if (err)
+                       en_err(priv, "Failed disabling multicast filter\n");
+
+               /* Add the default qp number as multicast promisc */
+               if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+                       err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+                                                        priv->port);
+                       if (err)
+                               en_err(priv, "Failed entering multicast promisc mode\n");
+                       priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+               }
+       } else {
+               int i;
+               /* Disable Multicast promisc */
+               if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+                       err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+                                                           priv->port);
+                       if (err)
+                               en_err(priv, "Failed disabling multicast promiscuous mode\n");
+                       priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+               }
+
+               err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+                                         0, MLX4_MCAST_DISABLE);
+               if (err)
+                       en_err(priv, "Failed disabling multicast filter\n");
+
+               /* Detach our qp from all the multicast addresses */
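+               /* mc_list is laid out as a multicast GID: byte 5 holds
+                * the port number, bytes 10..15 the MAC address */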
+               for (i = 0; i < priv->mc_addrs_cnt; i++) {
+                       memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+                       mc_list[5] = priv->port;
+                       mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+                                             mc_list, MLX4_PROT_ETH);
+               }
+               /* Flush mcast filter and init it with broadcast address */
+               mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
+                                   1, MLX4_MCAST_CONFIG);
+
+               /* Update multicast list - we cache all addresses so they won't
+                * change while HW is updated holding the command semaphore */
+               netif_tx_lock_bh(dev);
+               mlx4_en_cache_mclist(dev);
+               netif_tx_unlock_bh(dev);
+               for (i = 0; i < priv->mc_addrs_cnt; i++) {
+                       mcast_addr =
+                             mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
+                       memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+                       mc_list[5] = priv->port;
+                       mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
+                                             mc_list, 0, MLX4_PROT_ETH);
+                       mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
+                                           mcast_addr, 0, MLX4_MCAST_CONFIG);
+               }
+               err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+                                         0, MLX4_MCAST_ENABLE);
+               if (err)
+                       en_err(priv, "Failed enabling multicast filter\n");
+       }
+out:
+       mutex_unlock(&mdev->state_lock);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mlx4_en_netpoll(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_cq *cq;
+       unsigned long flags;
+       int i;
+
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               cq = &priv->rx_cq[i];
+               spin_lock_irqsave(&cq->lock, flags);
+               napi_synchronize(&cq->napi);
+               mlx4_en_process_rx_cq(dev, cq, 0);
+               spin_unlock_irqrestore(&cq->lock, flags);
+       }
+}
+#endif
+
+static void mlx4_en_tx_timeout(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+
+       if (netif_msg_timer(priv))
+               en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
+
+       priv->port_stats.tx_timeout++;
+       en_dbg(DRV, priv, "Scheduling watchdog\n");
+       queue_work(mdev->workqueue, &priv->watchdog_task);
+}
+
+static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+
+       spin_lock_bh(&priv->stats_lock);
+       memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+       spin_unlock_bh(&priv->stats_lock);
+
+       return &priv->ret_stats;
+}
+
+static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
+{
+       struct mlx4_en_cq *cq;
+       int i;
+
+       /* If we haven't received a specific coalescing setting
+        * (module param), we set the moderation parameters as follows:
+        * - moder_cnt is set to the number of mtu sized packets to
+        *   satisfy our coalescing target.
+        * - moder_time is set to a fixed value.
+        */
+       priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
+       priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
+       en_dbg(INTR, priv, "Default coalescing params for mtu:%d - "
+                          "rx_frames:%d rx_usecs:%d\n",
+                priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
+
+       /* Setup cq moderation params */
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               cq = &priv->rx_cq[i];
+               cq->moder_cnt = priv->rx_frames;
+               cq->moder_time = priv->rx_usecs;
+       }
+
+       for (i = 0; i < priv->tx_ring_num; i++) {
+               cq = &priv->tx_cq[i];
+               cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
+               cq->moder_time = MLX4_EN_TX_COAL_TIME;
+       }
+
+       /* Reset auto-moderation params */
+       priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
+       priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
+       priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
+       priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
+       priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
+       priv->adaptive_rx_coal = 1;
+       priv->last_moder_time = MLX4_EN_AUTO_CONF;
+       priv->last_moder_jiffies = 0;
+       priv->last_moder_packets = 0;
+       priv->last_moder_tx_packets = 0;
+       priv->last_moder_bytes = 0;
+}
+
+static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
+{
+       unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
+       struct mlx4_en_cq *cq;
+       unsigned long packets;
+       unsigned long rate;
+       unsigned long avg_pkt_size;
+       unsigned long rx_packets;
+       unsigned long rx_bytes;
+       unsigned long tx_packets;
+       unsigned long tx_pkt_diff;
+       unsigned long rx_pkt_diff;
+       int moder_time;
+       int i, err;
+
+       if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
+               return;
+
+       spin_lock_bh(&priv->stats_lock);
+       rx_packets = priv->stats.rx_packets;
+       rx_bytes = priv->stats.rx_bytes;
+       tx_packets = priv->stats.tx_packets;
+       spin_unlock_bh(&priv->stats_lock);
+
+       if (!priv->last_moder_jiffies || !period)
+               goto out;
+
+       tx_pkt_diff = ((unsigned long) (tx_packets -
+                                       priv->last_moder_tx_packets));
+       rx_pkt_diff = ((unsigned long) (rx_packets -
+                                       priv->last_moder_packets));
+       packets = max(tx_pkt_diff, rx_pkt_diff);
+       rate = packets * HZ / period;
+       avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
+                                priv->last_moder_bytes)) / packets : 0;
+
+       /* Apply auto-moderation only when the packet rate is high enough
+        * for moderation to matter */
+       if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
+               /* If tx and rx packet rates are not balanced, assume that
+                * traffic is mainly BW bound and apply maximum moderation.
+                * Otherwise, moderate according to packet rate */
+               if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
+                   2 * rx_pkt_diff > 3 * tx_pkt_diff) {
+                       moder_time = priv->rx_usecs_high;
+               } else {
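+                       /* Interpolate the moderation time linearly
+                        * between the low and high limits across the
+                        * configured packet rate window */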
+                       if (rate < priv->pkt_rate_low)
+                               moder_time = priv->rx_usecs_low;
+                       else if (rate > priv->pkt_rate_high)
+                               moder_time = priv->rx_usecs_high;
+                       else
+                               moder_time = (rate - priv->pkt_rate_low) *
+                                       (priv->rx_usecs_high - priv->rx_usecs_low) /
+                                       (priv->pkt_rate_high - priv->pkt_rate_low) +
+                                       priv->rx_usecs_low;
+               }
+       } else {
+               moder_time = priv->rx_usecs_low;
+       }
+
+       en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
+              tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
+
+       en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
+              "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
+                priv->last_moder_time, moder_time, period, packets,
+                avg_pkt_size, rate);
+
+       if (moder_time != priv->last_moder_time) {
+               priv->last_moder_time = moder_time;
+               for (i = 0; i < priv->rx_ring_num; i++) {
+                       cq = &priv->rx_cq[i];
+                       cq->moder_time = moder_time;
+                       err = mlx4_en_set_cq_moder(priv, cq);
+                       if (err) {
+                               en_err(priv, "Failed modifying moderation for cq:%d\n", i);
+                               break;
+                       }
+               }
+       }
+
+out:
+       priv->last_moder_packets = rx_packets;
+       priv->last_moder_tx_packets = tx_packets;
+       priv->last_moder_bytes = rx_bytes;
+       priv->last_moder_jiffies = jiffies;
+}
+
+static void mlx4_en_do_get_stats(struct work_struct *work)
+{
+       struct delayed_work *delay = to_delayed_work(work);
+       struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
+                                                stats_task);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err;
+
+       err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
+       if (err)
+               en_dbg(HW, priv, "Could not update stats\n");
+
+       mutex_lock(&mdev->state_lock);
+       if (mdev->device_up) {
+               if (priv->port_up)
+                       mlx4_en_auto_moderation(priv);
+
+               queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+       }
+       if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
+               queue_work(mdev->workqueue, &priv->mac_task);
+               mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
+       }
+       mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_linkstate(struct work_struct *work)
+{
+       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+                                                linkstate_task);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int linkstate = priv->link_state;
+
+       mutex_lock(&mdev->state_lock);
+       /* If observable port state changed set carrier state and
+        * report to system log */
+       if (priv->last_link_state != linkstate) {
+               if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
+                       en_info(priv, "Link Down\n");
+                       netif_carrier_off(priv->dev);
+               } else {
+                       en_info(priv, "Link Up\n");
+                       netif_carrier_on(priv->dev);
+               }
+       }
+       priv->last_link_state = linkstate;
+       mutex_unlock(&mdev->state_lock);
+}
+
+int mlx4_en_start_port(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_cq *cq;
+       struct mlx4_en_tx_ring *tx_ring;
+       int rx_index = 0;
+       int tx_index = 0;
+       int err = 0;
+       int i;
+       int j;
+       u8 mc_list[16] = {0};
+       char name[32];
+
+       if (priv->port_up) {
+               en_dbg(DRV, priv, "start port called while port already up\n");
+               return 0;
+       }
+
+       /* Calculate Rx buf size */
+       dev->mtu = min(dev->mtu, priv->max_mtu);
+       mlx4_en_calc_rx_buf(dev);
+       en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
+
+       /* Configure rx cq's and rings */
+       err = mlx4_en_activate_rx_rings(priv);
+       if (err) {
+               en_err(priv, "Failed to activate RX rings\n");
+               return err;
+       }
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               cq = &priv->rx_cq[i];
+
+               err = mlx4_en_activate_cq(priv, cq);
+               if (err) {
+                       en_err(priv, "Failed activating Rx CQ\n");
+                       goto cq_err;
+               }
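+               /* Set the owner bit on all CQEs so software does not consume stale entries */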
+               for (j = 0; j < cq->size; j++)
+                       cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
+               err = mlx4_en_set_cq_moder(priv, cq);
+               if (err) {
+                       en_err(priv, "Failed setting cq moderation parameters");
+                       mlx4_en_deactivate_cq(priv, cq);
+                       goto cq_err;
+               }
+               mlx4_en_arm_cq(priv, cq);
+               priv->rx_ring[i].cqn = cq->mcq.cqn;
+               ++rx_index;
+       }
+
+       /* Set port mac number */
+       en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+       err = mlx4_register_mac(mdev->dev, priv->port,
+                               priv->mac, &priv->base_qpn, 0);
+       if (err) {
+               en_err(priv, "Failed setting port mac\n");
+               goto cq_err;
+       }
+       mdev->mac_removed[priv->port] = 0;
+
+       err = mlx4_en_config_rss_steer(priv);
+       if (err) {
+               en_err(priv, "Failed configuring rss steering\n");
+               goto mac_err;
+       }
+
+       if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
+               snprintf(name, sizeof(name), "%s-tx", priv->dev->name);
+               if (mlx4_assign_eq(mdev->dev, name, &priv->tx_vector)) {
+                       mlx4_warn(mdev, "Failed assigning an EQ to "
+                                       "%s-tx, falling back to legacy "
+                                       "EQs\n", priv->dev->name);
+               }
+       }
+       /* Configure tx cq's and rings */
+       for (i = 0; i < priv->tx_ring_num; i++) {
+               /* Configure cq */
+               cq = &priv->tx_cq[i];
+               cq->vector = priv->tx_vector;
+               err = mlx4_en_activate_cq(priv, cq);
+               if (err) {
+                       en_err(priv, "Failed allocating Tx CQ\n");
+                       goto tx_err;
+               }
+               err = mlx4_en_set_cq_moder(priv, cq);
+               if (err) {
+                       en_err(priv, "Failed setting cq moderation parameters");
+                       mlx4_en_deactivate_cq(priv, cq);
+                       goto tx_err;
+               }
+               en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
+               cq->buf->wqe_index = cpu_to_be16(0xffff);
+
+               /* Configure ring */
+               tx_ring = &priv->tx_ring[i];
+               err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
+               if (err) {
+                       en_err(priv, "Failed allocating Tx ring\n");
+                       mlx4_en_deactivate_cq(priv, cq);
+                       goto tx_err;
+               }
+               /* Set initial ownership of all Tx TXBBs to SW (1) */
+               for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
+                       *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
+               ++tx_index;
+       }
+
+       /* Configure port */
+       err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+                                   priv->rx_skb_size + ETH_FCS_LEN,
+                                   priv->prof->tx_pause,
+                                   priv->prof->tx_ppp,
+                                   priv->prof->rx_pause,
+                                   priv->prof->rx_ppp);
+       if (err) {
+               en_err(priv, "Failed setting port general configurations "
+                            "for port %d, with error %d\n", priv->port, err);
+               goto tx_err;
+       }
+       /* Set default qp number */
+       err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
+       if (err) {
+               en_err(priv, "Failed setting default qp numbers\n");
+               goto tx_err;
+       }
+
+       /* Init port */
+       en_dbg(HW, priv, "Initializing port\n");
+       err = mlx4_INIT_PORT(mdev->dev, priv->port);
+       if (err) {
+               en_err(priv, "Failed Initializing port\n");
+               goto tx_err;
+       }
+
+       /* Attach rx QP to broadcast address */
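+       /* Bytes 10..15 of mc_list hold the MAC (all ones = broadcast); byte 5 holds the port */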
+       memset(&mc_list[10], 0xff, ETH_ALEN);
+       mc_list[5] = priv->port;
+       if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+                                 0, MLX4_PROT_ETH))
+               mlx4_warn(mdev, "Failed Attaching Broadcast\n");
+
+       /* Must redo promiscuous mode setup. */
+       priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
+
+       /* Schedule multicast task to populate multicast list */
+       queue_work(mdev->workqueue, &priv->mcast_task);
+
+       priv->port_up = true;
+       netif_tx_start_all_queues(dev);
+       return 0;
+
+tx_err:
+       while (tx_index--) {
+               mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
+               mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+       }
+
+       mlx4_en_release_rss_steer(priv);
+mac_err:
+       mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+cq_err:
+       while (rx_index--)
+               mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+       for (i = 0; i < priv->rx_ring_num; i++)
+               mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+
+       return err; /* need to close devices */
+}
+
+
+void mlx4_en_stop_port(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int i;
+       u8 mc_list[16] = {0};
+
+       if (!priv->port_up) {
+               en_dbg(DRV, priv, "stop port called while port already down\n");
+               return;
+       }
+
+       /* Synchronize with tx routine */
+       netif_tx_lock_bh(dev);
+       netif_tx_stop_all_queues(dev);
+       netif_tx_unlock_bh(dev);
+
+       /* Set port as not active */
+       priv->port_up = false;
+
+       /* Detach All multicasts */
+       memset(&mc_list[10], 0xff, ETH_ALEN);
+       mc_list[5] = priv->port;
+       mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+                             MLX4_PROT_ETH);
+       for (i = 0; i < priv->mc_addrs_cnt; i++) {
+               memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+               mc_list[5] = priv->port;
+               mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+                                     mc_list, MLX4_PROT_ETH);
+       }
+       mlx4_en_clear_list(dev);
+       /* Flush multicast filter */
+       mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
+
+       /* Unregister Mac address for the port */
+       mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+       mdev->mac_removed[priv->port] = 1;
+
+       /* Free TX Rings */
+       for (i = 0; i < priv->tx_ring_num; i++) {
+               mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
+               mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+       }
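+       /* Brief delay, presumably to let in-flight Tx completions drain before freeing buffers */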
+       msleep(10);
+
+       for (i = 0; i < priv->tx_ring_num; i++)
+               mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+
+       /* Free RSS qps */
+       mlx4_en_release_rss_steer(priv);
+
+       /* Free RX Rings */
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
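+               /* Wait for any NAPI poll still scheduled on this CQ to complete */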
+               while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
+                       msleep(1);
+               mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
+       }
+
+       /* Close port */
+       mlx4_CLOSE_PORT(mdev->dev, priv->port);
+}
+
+static void mlx4_en_restart(struct work_struct *work)
+{
+       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+                                                watchdog_task);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct net_device *dev = priv->dev;
+
+       en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+
+       mutex_lock(&mdev->state_lock);
+       if (priv->port_up) {
+               mlx4_en_stop_port(dev);
+               if (mlx4_en_start_port(dev))
+                       en_err(priv, "Failed restarting port %d\n", priv->port);
+       }
+       mutex_unlock(&mdev->state_lock);
+}
+
+
+static int mlx4_en_open(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int i;
+       int err = 0;
+
+       mutex_lock(&mdev->state_lock);
+
+       if (!mdev->device_up) {
+               en_err(priv, "Cannot open - device down/disabled\n");
+               err = -EBUSY;
+               goto out;
+       }
+
+       /* Reset HW statistics and performance counters */
+       if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
+               en_dbg(HW, priv, "Failed dumping statistics\n");
+
+       memset(&priv->stats, 0, sizeof(priv->stats));
+       memset(&priv->pstats, 0, sizeof(priv->pstats));
+
+       for (i = 0; i < priv->tx_ring_num; i++) {
+               priv->tx_ring[i].bytes = 0;
+               priv->tx_ring[i].packets = 0;
+       }
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               priv->rx_ring[i].bytes = 0;
+               priv->rx_ring[i].packets = 0;
+       }
+
+       err = mlx4_en_start_port(dev);
+       if (err)
+               en_err(priv, "Failed starting port:%d\n", priv->port);
+
+out:
+       mutex_unlock(&mdev->state_lock);
+       return err;
+}
+
+
+static int mlx4_en_close(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+
+       en_dbg(IFDOWN, priv, "Close port called\n");
+
+       mutex_lock(&mdev->state_lock);
+
+       mlx4_en_stop_port(dev);
+       netif_carrier_off(dev);
+
+       mutex_unlock(&mdev->state_lock);
+       return 0;
+}
+
+void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
+{
+       int i;
+
+       for (i = 0; i < priv->tx_ring_num; i++) {
+               if (priv->tx_ring[i].tx_info)
+                       mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+               if (priv->tx_cq[i].buf)
+                       mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors);
+       }
+
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               if (priv->rx_ring[i].rx_info)
+                       mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
+               if (priv->rx_cq[i].buf)
+                       mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors);
+       }
+}
+
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+{
+       struct mlx4_en_port_profile *prof = priv->prof;
+       int i;
+       int base_tx_qpn, err;
+
+       err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+       if (err) {
+               en_err(priv, "failed reserving range for TX rings\n");
+               return err;
+       }
+
+       /* Create tx Rings */
+       for (i = 0; i < priv->tx_ring_num; i++) {
+               if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
+                                     prof->tx_ring_size, i, TX))
+                       goto err;
+
+               if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
+                                          prof->tx_ring_size, TXBB_SIZE))
+                       goto err;
+       }
+
+       /* Create rx Rings */
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
+                                     prof->rx_ring_size, i, RX))
+                       goto err;
+
+               if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
+                                          prof->rx_ring_size, priv->stride))
+                       goto err;
+       }
+
+       return 0;
+
+err:
+       en_err(priv, "Failed to allocate NIC resources\n");
+       mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
+       return -ENOMEM;
+}
+
+
+void mlx4_en_destroy_netdev(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+
+       en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
+
+       /* Unregister device - this will close the port if it was up */
+       if (priv->registered)
+               unregister_netdev(dev);
+
+       if (priv->allocated)
+               mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
+
+       cancel_delayed_work(&priv->stats_task);
+       /* flush any pending task for this netdev */
+       flush_workqueue(mdev->workqueue);
+
+       /* Detach the netdev so that tasks will not attempt to access it */
+       mutex_lock(&mdev->state_lock);
+       mdev->pndev[priv->port] = NULL;
+       mutex_unlock(&mdev->state_lock);
+
+       mlx4_en_free_resources(priv, false);
+       free_netdev(dev);
+}
+
+static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err = 0;
+
+       en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
+                dev->mtu, new_mtu);
+
+       if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
+               en_err(priv, "Bad MTU size:%d.\n", new_mtu);
+               return -EPERM;
+       }
+       dev->mtu = new_mtu;
+
+       if (netif_running(dev)) {
+               mutex_lock(&mdev->state_lock);
+               if (!mdev->device_up) {
+                       /* NIC is probably restarting - let the watchdog task
+                        * reset the port */
+                       en_dbg(DRV, priv, "Change MTU called with card down!?\n");
+               } else {
+                       mlx4_en_stop_port(dev);
+                       err = mlx4_en_start_port(dev);
+                       if (err) {
+                               en_err(priv, "Failed restarting port:%d\n",
+                                        priv->port);
+                               queue_work(mdev->workqueue, &priv->watchdog_task);
+                       }
+               }
+               mutex_unlock(&mdev->state_lock);
+       }
+       return 0;
+}
+
+static const struct net_device_ops mlx4_netdev_ops = {
+       .ndo_open               = mlx4_en_open,
+       .ndo_stop               = mlx4_en_close,
+       .ndo_start_xmit         = mlx4_en_xmit,
+       .ndo_select_queue       = mlx4_en_select_queue,
+       .ndo_get_stats          = mlx4_en_get_stats,
+       .ndo_set_multicast_list = mlx4_en_set_multicast,
+       .ndo_set_mac_address    = mlx4_en_set_mac,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_change_mtu         = mlx4_en_change_mtu,
+       .ndo_tx_timeout         = mlx4_en_tx_timeout,
+       .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = mlx4_en_netpoll,
+#endif
+};
+
+int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+                       struct mlx4_en_port_profile *prof)
+{
+       struct net_device *dev;
+       struct mlx4_en_priv *priv;
+       int i;
+       int err;
+
+       dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
+           prof->tx_ring_num, prof->rx_ring_num);
+       if (dev == NULL) {
+               mlx4_err(mdev, "Net device allocation failed\n");
+               return -ENOMEM;
+       }
+
+       SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
+       dev->dev_id =  port - 1;
+
+       /*
+        * Initialize driver private data
+        */
+
+       priv = netdev_priv(dev);
+       memset(priv, 0, sizeof(struct mlx4_en_priv));
+       priv->dev = dev;
+       priv->mdev = mdev;
+       priv->prof = prof;
+       priv->port = port;
+       priv->port_up = false;
+       priv->flags = prof->flags;
+       priv->tx_ring_num = prof->tx_ring_num;
+       priv->rx_ring_num = prof->rx_ring_num;
+       priv->mac_index = -1;
+       priv->msg_enable = MLX4_EN_MSG_LEVEL;
+       spin_lock_init(&priv->stats_lock);
+       INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
+       INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
+       INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+       INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+       INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+
+       /* Query for default mac and max mtu */
+       priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
+       priv->mac = mdev->dev->caps.def_mac[priv->port];
+       if (ILLEGAL_MAC(priv->mac)) {
+               en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
+                        priv->port, priv->mac);
+               err = -EINVAL;
+               goto out;
+       }
+
+       priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+                                         DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+       err = mlx4_en_alloc_resources(priv);
+       if (err)
+               goto out;
+
+       /* Allocate page for receive rings */
+       err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
+                               MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
+       if (err) {
+               en_err(priv, "Failed to allocate page for rx qps\n");
+               goto out;
+       }
+       priv->allocated = 1;
+
+       /*
+        * Initialize netdev entry points
+        */
+       dev->netdev_ops = &mlx4_netdev_ops;
+       dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
+       netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+       netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
+
+       SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
+
+       /* Set default MAC */
+       dev->addr_len = ETH_ALEN;
+       for (i = 0; i < ETH_ALEN; i++) {
+               dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
+               dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
+       }
+
+       /*
+        * Set driver features
+        */
+       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       if (mdev->LSO_support)
+               dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+       dev->vlan_features = dev->hw_features;
+
+       dev->hw_features |= NETIF_F_RXCSUM;
+       dev->features = dev->hw_features | NETIF_F_HIGHDMA |
+                       NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+                       NETIF_F_HW_VLAN_FILTER;
+
+       mdev->pndev[port] = dev;
+
+       netif_carrier_off(dev);
+       err = register_netdev(dev);
+       if (err) {
+               en_err(priv, "Netdev registration failed for port %d\n", port);
+               goto out;
+       }
+
+       en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+       en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+
+       /* Configure port */
+       err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+                                   MLX4_EN_MIN_MTU,
+                                   0, 0, 0, 0);
+       if (err) {
+               en_err(priv, "Failed setting port general configurations "
+                      "for port %d, with error %d\n", priv->port, err);
+               goto out;
+       }
+
+       /* Init port */
+       en_warn(priv, "Initializing port\n");
+       err = mlx4_INIT_PORT(mdev->dev, priv->port);
+       if (err) {
+               en_err(priv, "Failed Initializing port\n");
+               goto out;
+       }
+       priv->registered = 1;
+       mlx4_en_set_default_moderation(priv);
+       queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+       return 0;
+
+out:
+       mlx4_en_destroy_netdev(dev);
+       return err;
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
new file mode 100644 (file)
index 0000000..5ada5b4
--- /dev/null
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+
+#include <linux/if_vlan.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+
+#include "en_port.h"
+#include "mlx4_en.h"
+
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+                       u64 mac, u64 clear, u8 mode)
+{
+       return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+                       MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_vlan_fltr_mbox *filter;
+       int i;
+       int j;
+       int index = 0;
+       u32 entry;
+       int err = 0;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       filter = mailbox->buf;
+       memset(filter, 0, sizeof(*filter));
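+       /* Pack the 4096-bit VLAN bitmap into 128 big-endian words; VIDs 0-31 land in the last entry */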
+       for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
+               entry = 0;
+               for (j = 0; j < 32; j++)
+                       if (test_bit(index++, priv->active_vlans))
+                               entry |= 1 << j;
+               filter->entry[i] = cpu_to_be32(entry);
+       }
+       err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
+                      MLX4_CMD_TIME_CLASS_B);
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+                         u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_general_context *context;
+       int err;
+       u32 in_mod;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+       memset(context, 0, sizeof *context);
+
+       context->flags = SET_PORT_GEN_ALL_VALID;
+       context->mtu = cpu_to_be16(mtu);
+       context->pptx = (pptx * (!pfctx)) << 7;
+       context->pfctx = pfctx;
+       context->pprx = (pprx * (!pfcrx)) << 7;
+       context->pfcrx = pfcrx;
+
+       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+                          u8 promisc)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_rqp_calc_context *context;
+       int err;
+       u32 in_mod;
+       u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+                                               MCAST_DIRECT : MCAST_DEFAULT;
+
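+       /* With both VEP unicast and multicast steering, this setup is not required */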
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER  &&
+                       dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+               return 0;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+       memset(context, 0, sizeof *context);
+
+       context->base_qpn = cpu_to_be32(base_qpn);
+       context->n_mac = 0x2;
+       context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+                                      base_qpn);
+       context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+                                    base_qpn);
+       context->intra_no_vlan = 0;
+       context->no_vlan = MLX4_NO_VLAN_IDX;
+       context->intra_vlan_miss = 0;
+       context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+       in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
+{
+       struct mlx4_en_query_port_context *qport_context;
+       struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
+       struct mlx4_en_port_state *state = &priv->port_state;
+       struct mlx4_cmd_mailbox *mailbox;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       memset(mailbox->buf, 0, sizeof(*qport_context));
+       err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
+                          MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+       if (err)
+               goto out;
+       qport_context = mailbox->buf;
+
+       /* This command is always called from the ethtool context, which is
+        * already serialized, so no locking is needed */
+       state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
+       if ((qport_context->link_speed & MLX4_EN_SPEED_MASK) ==
+           MLX4_EN_1G_SPEED)
+               state->link_speed = 1000;
+       else
+               state->link_speed = 10000;
+       state->transciver = qport_context->transceiver;
+
+out:
+       mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+       return err;
+}
+
+int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
+{
+       struct mlx4_en_stat_out_mbox *mlx4_en_stats;
+       struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
+       struct net_device_stats *stats = &priv->stats;
+       struct mlx4_cmd_mailbox *mailbox;
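+       /* Input modifier: reset flag in bits 15:8, port number in bits 7:0 */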
+       u64 in_mod = reset << 8 | port;
+       int err;
+       int i;
+
+       mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
+       err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
+                          MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+       if (err)
+               goto out;
+
+       mlx4_en_stats = mailbox->buf;
+
+       spin_lock_bh(&priv->stats_lock);
+
+       stats->rx_packets = 0;
+       stats->rx_bytes = 0;
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               stats->rx_packets += priv->rx_ring[i].packets;
+               stats->rx_bytes += priv->rx_ring[i].bytes;
+       }
+       stats->tx_packets = 0;
+       stats->tx_bytes = 0;
+       for (i = 0; i < priv->tx_ring_num; i++) {
+               stats->tx_packets += priv->tx_ring[i].packets;
+               stats->tx_bytes += priv->tx_ring[i].bytes;
+       }
+
+       stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
+                          be32_to_cpu(mlx4_en_stats->RdropLength) +
+                          be32_to_cpu(mlx4_en_stats->RJBBR) +
+                          be32_to_cpu(mlx4_en_stats->RCRC) +
+                          be32_to_cpu(mlx4_en_stats->RRUNT);
+       stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
+       stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
+                          be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
+                          be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
+                          be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
+                          be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
+                          be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
+                          be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
+                          be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
+                          be64_to_cpu(mlx4_en_stats->MCAST_novlan);
+       stats->collisions = 0;
+       stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
+       stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+       stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
+       stats->rx_frame_errors = 0;
+       stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+       stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+       stats->tx_aborted_errors = 0;
+       stats->tx_carrier_errors = 0;
+       stats->tx_fifo_errors = 0;
+       stats->tx_heartbeat_errors = 0;
+       stats->tx_window_errors = 0;
+
+       priv->pkstats.broadcast =
+                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
+                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
+                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
+                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
+                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
+                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
+                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
+                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
+                               be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
+       priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
+       priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
+       priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
+       priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
+       priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
+       priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
+       priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
+       priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
+       priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
+       priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
+       priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
+       priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
+       priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
+       priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
+       priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
+       priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
+       spin_unlock_bh(&priv->stats_lock);
+
+out:
+       mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+       return err;
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
new file mode 100644 (file)
index 0000000..e3d73e4
--- /dev/null
@@ -0,0 +1,594 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _MLX4_EN_PORT_H_
+#define _MLX4_EN_PORT_H_
+
+
+#define SET_PORT_GEN_ALL_VALID 0x7
+#define SET_PORT_PROMISC_SHIFT 31
+#define SET_PORT_MC_PROMISC_SHIFT      30
+
+enum {
+       MLX4_CMD_SET_VLAN_FLTR  = 0x47,
+       MLX4_CMD_SET_MCAST_FLTR = 0x48,
+       MLX4_CMD_DUMP_ETH_STATS = 0x49,
+};
+
+enum {
+       MCAST_DIRECT_ONLY       = 0,
+       MCAST_DIRECT            = 1,
+       MCAST_DEFAULT           = 2
+};
+
+struct mlx4_set_port_general_context {
+       u8 reserved[3];
+       u8 flags;
+       u16 reserved2;
+       __be16 mtu;
+       u8 pptx;
+       u8 pfctx;
+       u16 reserved3;
+       u8 pprx;
+       u8 pfcrx;
+       u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+       __be32 base_qpn;
+       u8 reserved;
+       u8 n_mac;
+       u8 n_vlan;
+       u8 n_prio;
+       u8 reserved2[3];
+       u8 mac_miss;
+       u8 intra_no_vlan;
+       u8 no_vlan;
+       u8 intra_vlan_miss;
+       u8 vlan_miss;
+       u8 reserved3[3];
+       u8 no_vlan_prio;
+       __be32 promisc;
+       __be32 mcast;
+};
+
+#define VLAN_FLTR_SIZE 128
+struct mlx4_set_vlan_fltr_mbox {
+       __be32 entry[VLAN_FLTR_SIZE];
+};
+
+
+enum {
+       MLX4_MCAST_CONFIG       = 0,
+       MLX4_MCAST_DISABLE      = 1,
+       MLX4_MCAST_ENABLE       = 2,
+};
+
+struct mlx4_en_query_port_context {
+       u8 link_up;
+#define MLX4_EN_LINK_UP_MASK   0x80
+       u8 reserved;
+       __be16 mtu;
+       u8 reserved2;
+       u8 link_speed;
+#define MLX4_EN_SPEED_MASK     0x3
+#define MLX4_EN_1G_SPEED       0x2
+       u16 reserved3[5];
+       __be64 mac;
+       u8 transceiver;
+};
+
+
+struct mlx4_en_stat_out_mbox {
+       /* Received frames with a length of 64 octets */
+       __be64 R64_prio_0;
+       __be64 R64_prio_1;
+       __be64 R64_prio_2;
+       __be64 R64_prio_3;
+       __be64 R64_prio_4;
+       __be64 R64_prio_5;
+       __be64 R64_prio_6;
+       __be64 R64_prio_7;
+       __be64 R64_novlan;
+       /* Received frames with a length of 127 octets */
+       __be64 R127_prio_0;
+       __be64 R127_prio_1;
+       __be64 R127_prio_2;
+       __be64 R127_prio_3;
+       __be64 R127_prio_4;
+       __be64 R127_prio_5;
+       __be64 R127_prio_6;
+       __be64 R127_prio_7;
+       __be64 R127_novlan;
+       /* Received frames with a length of 255 octets */
+       __be64 R255_prio_0;
+       __be64 R255_prio_1;
+       __be64 R255_prio_2;
+       __be64 R255_prio_3;
+       __be64 R255_prio_4;
+       __be64 R255_prio_5;
+       __be64 R255_prio_6;
+       __be64 R255_prio_7;
+       __be64 R255_novlan;
+       /* Received frames with a length of 511 octets */
+       __be64 R511_prio_0;
+       __be64 R511_prio_1;
+       __be64 R511_prio_2;
+       __be64 R511_prio_3;
+       __be64 R511_prio_4;
+       __be64 R511_prio_5;
+       __be64 R511_prio_6;
+       __be64 R511_prio_7;
+       __be64 R511_novlan;
+       /* Received frames with a length of 1023 octets */
+       __be64 R1023_prio_0;
+       __be64 R1023_prio_1;
+       __be64 R1023_prio_2;
+       __be64 R1023_prio_3;
+       __be64 R1023_prio_4;
+       __be64 R1023_prio_5;
+       __be64 R1023_prio_6;
+       __be64 R1023_prio_7;
+       __be64 R1023_novlan;
+       /* Received frames with a length of 1518 octets */
+       __be64 R1518_prio_0;
+       __be64 R1518_prio_1;
+       __be64 R1518_prio_2;
+       __be64 R1518_prio_3;
+       __be64 R1518_prio_4;
+       __be64 R1518_prio_5;
+       __be64 R1518_prio_6;
+       __be64 R1518_prio_7;
+       __be64 R1518_novlan;
+       /* Received frames with a length of 1522 octets */
+       __be64 R1522_prio_0;
+       __be64 R1522_prio_1;
+       __be64 R1522_prio_2;
+       __be64 R1522_prio_3;
+       __be64 R1522_prio_4;
+       __be64 R1522_prio_5;
+       __be64 R1522_prio_6;
+       __be64 R1522_prio_7;
+       __be64 R1522_novlan;
+       /* Received frames with a length of 1548 octets */
+       __be64 R1548_prio_0;
+       __be64 R1548_prio_1;
+       __be64 R1548_prio_2;
+       __be64 R1548_prio_3;
+       __be64 R1548_prio_4;
+       __be64 R1548_prio_5;
+       __be64 R1548_prio_6;
+       __be64 R1548_prio_7;
+       __be64 R1548_novlan;
+       /* Received frames with a length of 1548 < octets < MTU */
+       __be64 R2MTU_prio_0;
+       __be64 R2MTU_prio_1;
+       __be64 R2MTU_prio_2;
+       __be64 R2MTU_prio_3;
+       __be64 R2MTU_prio_4;
+       __be64 R2MTU_prio_5;
+       __be64 R2MTU_prio_6;
+       __be64 R2MTU_prio_7;
+       __be64 R2MTU_novlan;
+       /* Received frames with a length of MTU< octets and good CRC */
+       __be64 RGIANT_prio_0;
+       __be64 RGIANT_prio_1;
+       __be64 RGIANT_prio_2;
+       __be64 RGIANT_prio_3;
+       __be64 RGIANT_prio_4;
+       __be64 RGIANT_prio_5;
+       __be64 RGIANT_prio_6;
+       __be64 RGIANT_prio_7;
+       __be64 RGIANT_novlan;
+       /* Received broadcast frames with good CRC */
+       __be64 RBCAST_prio_0;
+       __be64 RBCAST_prio_1;
+       __be64 RBCAST_prio_2;
+       __be64 RBCAST_prio_3;
+       __be64 RBCAST_prio_4;
+       __be64 RBCAST_prio_5;
+       __be64 RBCAST_prio_6;
+       __be64 RBCAST_prio_7;
+       __be64 RBCAST_novlan;
+       /* Received multicast frames with good CRC */
+       __be64 MCAST_prio_0;
+       __be64 MCAST_prio_1;
+       __be64 MCAST_prio_2;
+       __be64 MCAST_prio_3;
+       __be64 MCAST_prio_4;
+       __be64 MCAST_prio_5;
+       __be64 MCAST_prio_6;
+       __be64 MCAST_prio_7;
+       __be64 MCAST_novlan;
+       /* Received unicast not short or GIANT frames with good CRC */
+       __be64 RTOTG_prio_0;
+       __be64 RTOTG_prio_1;
+       __be64 RTOTG_prio_2;
+       __be64 RTOTG_prio_3;
+       __be64 RTOTG_prio_4;
+       __be64 RTOTG_prio_5;
+       __be64 RTOTG_prio_6;
+       __be64 RTOTG_prio_7;
+       __be64 RTOTG_novlan;
+
+       /* Count of total octets of received frames, includes framing characters */
+       __be64 RTTLOCT_prio_0;
+       /* Count of total octets of received frames, not including framing
+          characters */
+       __be64 RTTLOCT_NOFRM_prio_0;
+       /* Count of Total number of octets received
+          (only for frames without errors) */
+       __be64 ROCT_prio_0;
+
+       __be64 RTTLOCT_prio_1;
+       __be64 RTTLOCT_NOFRM_prio_1;
+       __be64 ROCT_prio_1;
+
+       __be64 RTTLOCT_prio_2;
+       __be64 RTTLOCT_NOFRM_prio_2;
+       __be64 ROCT_prio_2;
+
+       __be64 RTTLOCT_prio_3;
+       __be64 RTTLOCT_NOFRM_prio_3;
+       __be64 ROCT_prio_3;
+
+       __be64 RTTLOCT_prio_4;
+       __be64 RTTLOCT_NOFRM_prio_4;
+       __be64 ROCT_prio_4;
+
+       __be64 RTTLOCT_prio_5;
+       __be64 RTTLOCT_NOFRM_prio_5;
+       __be64 ROCT_prio_5;
+
+       __be64 RTTLOCT_prio_6;
+       __be64 RTTLOCT_NOFRM_prio_6;
+       __be64 ROCT_prio_6;
+
+       __be64 RTTLOCT_prio_7;
+       __be64 RTTLOCT_NOFRM_prio_7;
+       __be64 ROCT_prio_7;
+
+       __be64 RTTLOCT_novlan;
+       __be64 RTTLOCT_NOFRM_novlan;
+       __be64 ROCT_novlan;
+
+       /* Count of Total received frames including bad frames */
+       __be64 RTOT_prio_0;
+       /* Count of total number of received frames with 802.1Q encapsulation */
+       __be64 R1Q_prio_0;
+       __be64 reserved1;
+
+       __be64 RTOT_prio_1;
+       __be64 R1Q_prio_1;
+       __be64 reserved2;
+
+       __be64 RTOT_prio_2;
+       __be64 R1Q_prio_2;
+       __be64 reserved3;
+
+       __be64 RTOT_prio_3;
+       __be64 R1Q_prio_3;
+       __be64 reserved4;
+
+       __be64 RTOT_prio_4;
+       __be64 R1Q_prio_4;
+       __be64 reserved5;
+
+       __be64 RTOT_prio_5;
+       __be64 R1Q_prio_5;
+       __be64 reserved6;
+
+       __be64 RTOT_prio_6;
+       __be64 R1Q_prio_6;
+       __be64 reserved7;
+
+       __be64 RTOT_prio_7;
+       __be64 R1Q_prio_7;
+       __be64 reserved8;
+
+       __be64 RTOT_novlan;
+       __be64 R1Q_novlan;
+       __be64 reserved9;
+
+       /* Total number of Successfully Received Control Frames */
+       __be64 RCNTL;
+       __be64 reserved10;
+       __be64 reserved11;
+       __be64 reserved12;
+       /* Count of received frames with a length/type field value between 46
+          (42 for VLAN-tagged frames) and 1500 (also 1500 for VLAN-tagged
+          frames), inclusive */
+       __be64 RInRangeLengthErr;
+       /* Count of received frames with length/type field between 1501 and 1535
+          decimal, inclusive */
+       __be64 ROutRangeLengthErr;
+       /* Count of received frames that are longer than max allowed size for
+          802.3 frames (1518/1522) */
+       __be64 RFrmTooLong;
+       /* Count frames received with PCS error */
+       __be64 PCS;
+
+       /* Transmit frames with a length of 64 octets */
+       __be64 T64_prio_0;
+       __be64 T64_prio_1;
+       __be64 T64_prio_2;
+       __be64 T64_prio_3;
+       __be64 T64_prio_4;
+       __be64 T64_prio_5;
+       __be64 T64_prio_6;
+       __be64 T64_prio_7;
+       __be64 T64_novlan;
+       __be64 T64_loopbk;
+       /* Transmit frames with a length of 65 to 127 octets. */
+       __be64 T127_prio_0;
+       __be64 T127_prio_1;
+       __be64 T127_prio_2;
+       __be64 T127_prio_3;
+       __be64 T127_prio_4;
+       __be64 T127_prio_5;
+       __be64 T127_prio_6;
+       __be64 T127_prio_7;
+       __be64 T127_novlan;
+       __be64 T127_loopbk;
+       /* Transmit frames with a length of 128 to 255 octets */
+       __be64 T255_prio_0;
+       __be64 T255_prio_1;
+       __be64 T255_prio_2;
+       __be64 T255_prio_3;
+       __be64 T255_prio_4;
+       __be64 T255_prio_5;
+       __be64 T255_prio_6;
+       __be64 T255_prio_7;
+       __be64 T255_novlan;
+       __be64 T255_loopbk;
+       /* Transmit frames with a length of 256 to 511 octets */
+       __be64 T511_prio_0;
+       __be64 T511_prio_1;
+       __be64 T511_prio_2;
+       __be64 T511_prio_3;
+       __be64 T511_prio_4;
+       __be64 T511_prio_5;
+       __be64 T511_prio_6;
+       __be64 T511_prio_7;
+       __be64 T511_novlan;
+       __be64 T511_loopbk;
+       /* Transmit frames with a length of 512 to 1023 octets */
+       __be64 T1023_prio_0;
+       __be64 T1023_prio_1;
+       __be64 T1023_prio_2;
+       __be64 T1023_prio_3;
+       __be64 T1023_prio_4;
+       __be64 T1023_prio_5;
+       __be64 T1023_prio_6;
+       __be64 T1023_prio_7;
+       __be64 T1023_novlan;
+       __be64 T1023_loopbk;
+       /* Transmit frames with a length of 1024 to 1518 octets */
+       __be64 T1518_prio_0;
+       __be64 T1518_prio_1;
+       __be64 T1518_prio_2;
+       __be64 T1518_prio_3;
+       __be64 T1518_prio_4;
+       __be64 T1518_prio_5;
+       __be64 T1518_prio_6;
+       __be64 T1518_prio_7;
+       __be64 T1518_novlan;
+       __be64 T1518_loopbk;
+       /* Counts transmit frames with a length of 1519 to 1522 bytes */
+       __be64 T1522_prio_0;
+       __be64 T1522_prio_1;
+       __be64 T1522_prio_2;
+       __be64 T1522_prio_3;
+       __be64 T1522_prio_4;
+       __be64 T1522_prio_5;
+       __be64 T1522_prio_6;
+       __be64 T1522_prio_7;
+       __be64 T1522_novlan;
+       __be64 T1522_loopbk;
+       /* Transmit frames with a length of 1523 to 1548 octets */
+       __be64 T1548_prio_0;
+       __be64 T1548_prio_1;
+       __be64 T1548_prio_2;
+       __be64 T1548_prio_3;
+       __be64 T1548_prio_4;
+       __be64 T1548_prio_5;
+       __be64 T1548_prio_6;
+       __be64 T1548_prio_7;
+       __be64 T1548_novlan;
+       __be64 T1548_loopbk;
+       /* Counts transmit frames with a length of 1549 to MTU bytes */
+       __be64 T2MTU_prio_0;
+       __be64 T2MTU_prio_1;
+       __be64 T2MTU_prio_2;
+       __be64 T2MTU_prio_3;
+       __be64 T2MTU_prio_4;
+       __be64 T2MTU_prio_5;
+       __be64 T2MTU_prio_6;
+       __be64 T2MTU_prio_7;
+       __be64 T2MTU_novlan;
+       __be64 T2MTU_loopbk;
+       /* Transmit frames with a length greater than MTU octets and a good CRC. */
+       __be64 TGIANT_prio_0;
+       __be64 TGIANT_prio_1;
+       __be64 TGIANT_prio_2;
+       __be64 TGIANT_prio_3;
+       __be64 TGIANT_prio_4;
+       __be64 TGIANT_prio_5;
+       __be64 TGIANT_prio_6;
+       __be64 TGIANT_prio_7;
+       __be64 TGIANT_novlan;
+       __be64 TGIANT_loopbk;
+       /* Transmit broadcast frames with a good CRC */
+       __be64 TBCAST_prio_0;
+       __be64 TBCAST_prio_1;
+       __be64 TBCAST_prio_2;
+       __be64 TBCAST_prio_3;
+       __be64 TBCAST_prio_4;
+       __be64 TBCAST_prio_5;
+       __be64 TBCAST_prio_6;
+       __be64 TBCAST_prio_7;
+       __be64 TBCAST_novlan;
+       __be64 TBCAST_loopbk;
+       /* Transmit multicast frames with a good CRC */
+       __be64 TMCAST_prio_0;
+       __be64 TMCAST_prio_1;
+       __be64 TMCAST_prio_2;
+       __be64 TMCAST_prio_3;
+       __be64 TMCAST_prio_4;
+       __be64 TMCAST_prio_5;
+       __be64 TMCAST_prio_6;
+       __be64 TMCAST_prio_7;
+       __be64 TMCAST_novlan;
+       __be64 TMCAST_loopbk;
+       /* Transmit good frames that are neither broadcast nor multicast */
+       __be64 TTOTG_prio_0;
+       __be64 TTOTG_prio_1;
+       __be64 TTOTG_prio_2;
+       __be64 TTOTG_prio_3;
+       __be64 TTOTG_prio_4;
+       __be64 TTOTG_prio_5;
+       __be64 TTOTG_prio_6;
+       __be64 TTOTG_prio_7;
+       __be64 TTOTG_novlan;
+       __be64 TTOTG_loopbk;
+
+       /* total octets of transmitted frames, including framing characters */
+       __be64 TTTLOCT_prio_0;
+       /* total octets of transmitted frames, not including framing characters */
+       __be64 TTTLOCT_NOFRM_prio_0;
+       /* ifOutOctets */
+       __be64 TOCT_prio_0;
+
+       __be64 TTTLOCT_prio_1;
+       __be64 TTTLOCT_NOFRM_prio_1;
+       __be64 TOCT_prio_1;
+
+       __be64 TTTLOCT_prio_2;
+       __be64 TTTLOCT_NOFRM_prio_2;
+       __be64 TOCT_prio_2;
+
+       __be64 TTTLOCT_prio_3;
+       __be64 TTTLOCT_NOFRM_prio_3;
+       __be64 TOCT_prio_3;
+
+       __be64 TTTLOCT_prio_4;
+       __be64 TTTLOCT_NOFRM_prio_4;
+       __be64 TOCT_prio_4;
+
+       __be64 TTTLOCT_prio_5;
+       __be64 TTTLOCT_NOFRM_prio_5;
+       __be64 TOCT_prio_5;
+
+       __be64 TTTLOCT_prio_6;
+       __be64 TTTLOCT_NOFRM_prio_6;
+       __be64 TOCT_prio_6;
+
+       __be64 TTTLOCT_prio_7;
+       __be64 TTTLOCT_NOFRM_prio_7;
+       __be64 TOCT_prio_7;
+
+       __be64 TTTLOCT_novlan;
+       __be64 TTTLOCT_NOFRM_novlan;
+       __be64 TOCT_novlan;
+
+       __be64 TTTLOCT_loopbk;
+       __be64 TTTLOCT_NOFRM_loopbk;
+       __be64 TOCT_loopbk;
+
+       /* Total frames transmitted with a good CRC that are not aborted  */
+       __be64 TTOT_prio_0;
+       /* Total number of frames transmitted with 802.1Q encapsulation */
+       __be64 T1Q_prio_0;
+       __be64 reserved13;
+
+       __be64 TTOT_prio_1;
+       __be64 T1Q_prio_1;
+       __be64 reserved14;
+
+       __be64 TTOT_prio_2;
+       __be64 T1Q_prio_2;
+       __be64 reserved15;
+
+       __be64 TTOT_prio_3;
+       __be64 T1Q_prio_3;
+       __be64 reserved16;
+
+       __be64 TTOT_prio_4;
+       __be64 T1Q_prio_4;
+       __be64 reserved17;
+
+       __be64 TTOT_prio_5;
+       __be64 T1Q_prio_5;
+       __be64 reserved18;
+
+       __be64 TTOT_prio_6;
+       __be64 T1Q_prio_6;
+       __be64 reserved19;
+
+       __be64 TTOT_prio_7;
+       __be64 T1Q_prio_7;
+       __be64 reserved20;
+
+       __be64 TTOT_novlan;
+       __be64 T1Q_novlan;
+       __be64 reserved21;
+
+       __be64 TTOT_loopbk;
+       __be64 T1Q_loopbk;
+       __be64 reserved22;
+
+       /* Received frames with a length greater than MTU octets and a bad CRC */
+       __be32 RJBBR;
+       /* Received frames with a bad CRC that are not runts, jabbers,
+          or alignment errors */
+       __be32 RCRC;
+       /* Received frames with SFD with a length of less than 64 octets and a
+          bad CRC */
+       __be32 RRUNT;
+       /* Received frames with a length less than 64 octets and a good CRC */
+       __be32 RSHORT;
+       /* Total Number of Received Packets Dropped */
+       __be32 RDROP;
+       /* Drop due to overflow */
+       __be32 RdropOvflw;
+       /* Drop due to length error */
+       __be32 RdropLength;
+       /* Total of good frames. Does not include frames received with
+          frame-too-long, FCS, or length errors */
+       __be32 RTOTFRMS;
+       /* Total dropped transmitted packets */
+       __be32 TDROP;
+};
+
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
new file mode 100644 (file)
index 0000000..0dfb4ec
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4_en.h"
+
+void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
+                            int is_tx, int rss, int qpn, int cqn,
+                            struct mlx4_qp_context *context)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+
+       memset(context, 0, sizeof *context);
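+       /* flags: 0x7 in bits 18:16 selects the QP service type; bit 13 enables RSS */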
+       context->flags = cpu_to_be32(7 << 16 | rss << 13);
+       context->pd = cpu_to_be32(mdev->priv_pdn);
+       context->mtu_msgmax = 0xff;
+       if (!is_tx && !rss)
+               context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
+       if (is_tx)
+               context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
+       else
+               context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
+       context->usr_page = cpu_to_be32(mdev->priv_uar.index);
+       context->local_qpn = cpu_to_be32(qpn);
+       context->pri_path.ackto = 1 & 0x07;
+       context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
+       context->pri_path.counter_index = 0xff;
+       context->cqn_send = cpu_to_be32(cqn);
+       context->cqn_recv = cpu_to_be32(cqn);
+       context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
+}
+
+
+int mlx4_en_map_buffer(struct mlx4_buf *buf)
+{
+       struct page **pages;
+       int i;
+
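+       /* A direct mapping already exists on 64-bit systems or single-chunk buffers;
+        * only fragmented 32-bit allocations need vmap */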
+       if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+               return 0;
+
+       pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+       if (!pages)
+               return -ENOMEM;
+
+       for (i = 0; i < buf->nbufs; ++i)
+               pages[i] = virt_to_page(buf->page_list[i].buf);
+
+       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+       kfree(pages);
+       if (!buf->direct.buf)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
+{
+       if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+               return;
+
+       vunmap(buf->direct.buf);
+}
+
+void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
+{
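+       /* Async QP events are not expected on Ethernet QPs; nothing to do */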
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
new file mode 100644 (file)
index 0000000..37cc9e5
--- /dev/null
@@ -0,0 +1,918 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/cq.h>
+#include <linux/slab.h>
+#include <linux/mlx4/qp.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+
+#include "mlx4_en.h"
+
+
+static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
+                             struct mlx4_en_rx_desc *rx_desc,
+                             struct skb_frag_struct *skb_frags,
+                             struct mlx4_en_rx_alloc *ring_alloc,
+                             int i)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+       struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
+       struct page *page;
+       dma_addr_t dma;
+
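+       /* Fragments are carved from shared high-order pages; once the last offset
+        * is used, hand off this page to the descriptor and start a fresh one */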
+       if (page_alloc->offset == frag_info->last_offset) {
+               /* Allocate new page */
+               page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
+               if (!page)
+                       return -ENOMEM;
+
+               skb_frags[i].page = page_alloc->page;
+               skb_frags[i].page_offset = page_alloc->offset;
+               page_alloc->page = page;
+               page_alloc->offset = frag_info->frag_align;
+       } else {
+               page = page_alloc->page;
+               get_page(page);
+
+               skb_frags[i].page = page;
+               skb_frags[i].page_offset = page_alloc->offset;
+               page_alloc->offset += frag_info->frag_stride;
+       }
+       dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
+                            skb_frags[i].page_offset, frag_info->frag_size,
+                            PCI_DMA_FROMDEVICE);
+       rx_desc->data[i].addr = cpu_to_be64(dma);
+       return 0;
+}
+
+static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
+                                 struct mlx4_en_rx_ring *ring)
+{
+       struct mlx4_en_rx_alloc *page_alloc;
+       int i;
+
+       for (i = 0; i < priv->num_frags; i++) {
+               page_alloc = &ring->page_alloc[i];
+               page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
+                                              MLX4_EN_ALLOC_ORDER);
+               if (!page_alloc->page)
+                       goto out;
+
+               page_alloc->offset = priv->frag_info[i].frag_align;
+               en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
+                      i, page_alloc->page);
+       }
+       return 0;
+
+out:
+       while (i--) {
+               page_alloc = &ring->page_alloc[i];
+               put_page(page_alloc->page);
+               page_alloc->page = NULL;
+       }
+       return -ENOMEM;
+}
+
+static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
+                                     struct mlx4_en_rx_ring *ring)
+{
+       struct mlx4_en_rx_alloc *page_alloc;
+       int i;
+
+       for (i = 0; i < priv->num_frags; i++) {
+               page_alloc = &ring->page_alloc[i];
+               en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
+                      i, page_count(page_alloc->page));
+
+               put_page(page_alloc->page);
+               page_alloc->page = NULL;
+       }
+}
+
+
+static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
+                                struct mlx4_en_rx_ring *ring, int index)
+{
+       struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
+       struct skb_frag_struct *skb_frags = ring->rx_info +
+                                           (index << priv->log_rx_info);
+       int possible_frags;
+       int i;
+
+       /* Set size and memtype fields */
+       for (i = 0; i < priv->num_frags; i++) {
+               skb_frags[i].size = priv->frag_info[i].frag_size;
+               rx_desc->data[i].byte_count =
+                       cpu_to_be32(priv->frag_info[i].frag_size);
+               rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
+       }
+
+       /* If the number of used fragments does not fill up the ring stride,
+        * remaining (unused) fragments must be padded with null address/size
+        * and a special memory key */
+       possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
+       for (i = priv->num_frags; i < possible_frags; i++) {
+               rx_desc->data[i].byte_count = 0;
+               rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
+               rx_desc->data[i].addr = 0;
+       }
+}
+
+
+static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
+                                  struct mlx4_en_rx_ring *ring, int index)
+{
+       struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
+       struct skb_frag_struct *skb_frags = ring->rx_info +
+                                           (index << priv->log_rx_info);
+       int i;
+
+       for (i = 0; i < priv->num_frags; i++)
+               if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
+                       goto err;
+
+       return 0;
+
+err:
+       while (i--)
+               put_page(skb_frags[i].page);
+       return -ENOMEM;
+}
+
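+/* Publish the rx producer counter (low 16 bits) to the HW doorbell record */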
+static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
+{
+       *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
+}
+
+static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
+                                struct mlx4_en_rx_ring *ring,
+                                int index)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct skb_frag_struct *skb_frags;
+       struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
+       dma_addr_t dma;
+       int nr;
+
+       skb_frags = ring->rx_info + (index << priv->log_rx_info);
+       for (nr = 0; nr < priv->num_frags; nr++) {
+               en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+               dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+               en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
+               pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+                                PCI_DMA_FROMDEVICE);
+               put_page(skb_frags[nr].page);
+       }
+}
+
+static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
+{
+       struct mlx4_en_rx_ring *ring;
+       int ring_ind;
+       int buf_ind;
+       int new_size;
+
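+       /* Fill all rings in lockstep, one descriptor per ring per pass, so
+        * that on allocation failure every ring has the same fill level and
+        * can be shrunk to a common power-of-two size below. */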
+       for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
+               for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+                       ring = &priv->rx_ring[ring_ind];
+
+                       if (mlx4_en_prepare_rx_desc(priv, ring,
+                                                   ring->actual_size)) {
+                               if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
+                                       en_err(priv, "Failed to allocate "
+                                                    "enough rx buffers\n");
+                                       return -ENOMEM;
+                               } else {
+                                       new_size = rounddown_pow_of_two(ring->actual_size);
+                                       en_warn(priv, "Only %d buffers allocated "
+                                                     "reducing ring size to %d",
+                                               ring->actual_size, new_size);
+                                       goto reduce_rings;
+                               }
+                       }
+                       ring->actual_size++;
+                       ring->prod++;
+               }
+       }
+       return 0;
+
+reduce_rings:
+       for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+               ring = &priv->rx_ring[ring_ind];
+               while (ring->actual_size > new_size) {
+                       ring->actual_size--;
+                       ring->prod--;
+                       mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
+               }
+       }
+
+       return 0;
+}
+
+static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
+                               struct mlx4_en_rx_ring *ring)
+{
+       int index;
+
+       en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
+              ring->cons, ring->prod);
+
+       /* Unmap and free Rx buffers */
+       BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
+       while (ring->cons != ring->prod) {
+               index = ring->cons & ring->size_mask;
+               en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+               mlx4_en_free_rx_desc(priv, ring, index);
+               ++ring->cons;
+       }
+}
+
+int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+                          struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err;
+       int tmp;
+
+       ring->prod = 0;
+       ring->cons = 0;
+       ring->size = size;
+       ring->size_mask = size - 1;
+       ring->stride = stride;
+       ring->log_stride = ffs(ring->stride) - 1;
+       ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
+
+       tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
+                                       sizeof(struct skb_frag_struct));
+       ring->rx_info = vmalloc(tmp);
+       if (!ring->rx_info) {
+               en_err(priv, "Failed allocating rx_info ring\n");
+               return -ENOMEM;
+       }
+       en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
+                ring->rx_info, tmp);
+
+       err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
+                                ring->buf_size, 2 * PAGE_SIZE);
+       if (err)
+               goto err_ring;
+
+       err = mlx4_en_map_buffer(&ring->wqres.buf);
+       if (err) {
+               en_err(priv, "Failed to map RX buffer\n");
+               goto err_hwq;
+       }
+       ring->buf = ring->wqres.buf.direct.buf;
+
+       return 0;
+
+err_hwq:
+       mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+err_ring:
+       vfree(ring->rx_info);
+       ring->rx_info = NULL;
+       return err;
+}
+
+int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
+{
+       struct mlx4_en_rx_ring *ring;
+       int i;
+       int ring_ind;
+       int err;
+       int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+                                       DS_SIZE * priv->num_frags);
+
+       for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+               ring = &priv->rx_ring[ring_ind];
+
+               ring->prod = 0;
+               ring->cons = 0;
+               ring->actual_size = 0;
+               ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+
+               ring->stride = stride;
+               if (ring->stride <= TXBB_SIZE)
+                       ring->buf += TXBB_SIZE;
+
+               ring->log_stride = ffs(ring->stride) - 1;
+               ring->buf_size = ring->size * ring->stride;
+
+               memset(ring->buf, 0, ring->buf_size);
+               mlx4_en_update_rx_prod_db(ring);
+
+               /* Initialize all descriptors */
+               for (i = 0; i < ring->size; i++)
+                       mlx4_en_init_rx_desc(priv, ring, i);
+
+               /* Initialize page allocators */
+               err = mlx4_en_init_allocator(priv, ring);
+               if (err) {
+                       en_err(priv, "Failed initializing ring allocator\n");
+                       if (ring->stride <= TXBB_SIZE)
+                               ring->buf -= TXBB_SIZE;
+                       ring_ind--;
+                       goto err_allocator;
+               }
+       }
+       err = mlx4_en_fill_rx_buffers(priv);
+       if (err)
+               goto err_buffers;
+
+       for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+               ring = &priv->rx_ring[ring_ind];
+
+               ring->size_mask = ring->actual_size - 1;
+               mlx4_en_update_rx_prod_db(ring);
+       }
+
+       return 0;
+
+err_buffers:
+       for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
+               mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+
+       ring_ind = priv->rx_ring_num - 1;
+err_allocator:
+       while (ring_ind >= 0) {
+               if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
+                       priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
+               mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+               ring_ind--;
+       }
+       return err;
+}
+
+void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+                            struct mlx4_en_rx_ring *ring)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+
+       mlx4_en_unmap_buffer(&ring->wqres.buf);
+       mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
+       vfree(ring->rx_info);
+       ring->rx_info = NULL;
+}
+
+void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+                               struct mlx4_en_rx_ring *ring)
+{
+       mlx4_en_free_rx_buf(priv, ring);
+       if (ring->stride <= TXBB_SIZE)
+               ring->buf -= TXBB_SIZE;
+       mlx4_en_destroy_allocator(priv, ring);
+}
+
+
+/* Unmap a completed descriptor and free unused pages */
+static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+                                   struct mlx4_en_rx_desc *rx_desc,
+                                   struct skb_frag_struct *skb_frags,
+                                   struct skb_frag_struct *skb_frags_rx,
+                                   struct mlx4_en_rx_alloc *page_alloc,
+                                   int length)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_frag_info *frag_info;
+       int nr;
+       dma_addr_t dma;
+
+       /* Collect used fragments while replacing them in the HW descriptors */
+       for (nr = 0; nr < priv->num_frags; nr++) {
+               frag_info = &priv->frag_info[nr];
+               if (length <= frag_info->frag_prefix_size)
+                       break;
+
+               /* Save page reference in skb */
+               skb_frags_rx[nr].page = skb_frags[nr].page;
+               skb_frags_rx[nr].size = skb_frags[nr].size;
+               skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
+               dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+               /* Allocate a replacement page */
+               if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
+                       goto fail;
+
+               /* Unmap buffer */
+               pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size,
+                                PCI_DMA_FROMDEVICE);
+       }
+       /* Adjust size of last fragment to match actual length */
+       if (nr > 0)
+               skb_frags_rx[nr - 1].size = length -
+                       priv->frag_info[nr - 1].frag_prefix_size;
+       return nr;
+
+fail:
+       /* Drop all accumulated fragments (which have already been replaced in
+        * the descriptor) of this packet; remaining fragments are reused... */
+       while (nr > 0) {
+               nr--;
+               put_page(skb_frags_rx[nr].page);
+       }
+       return 0;
+}
+
+
+static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+                                     struct mlx4_en_rx_desc *rx_desc,
+                                     struct skb_frag_struct *skb_frags,
+                                     struct mlx4_en_rx_alloc *page_alloc,
+                                     unsigned int length)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct sk_buff *skb;
+       void *va;
+       int used_frags;
+       dma_addr_t dma;
+
+       skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
+       if (!skb) {
+               en_dbg(RX_ERR, priv, "Failed allocating skb\n");
+               return NULL;
+       }
+       skb->dev = priv->dev;
+       skb_reserve(skb, NET_IP_ALIGN);
+       skb->len = length;
+       skb->truesize = length + sizeof(struct sk_buff);
+
+       /* Get pointer to first fragment so we can copy the headers into the
+        * (linear part of the) skb */
+       va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+
+       if (length <= SMALL_PACKET_SIZE) {
+               /* We are copying all relevant data to the skb - temporarily
+                * sync buffers for the copy */
+               dma = be64_to_cpu(rx_desc->data[0].addr);
+               dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length,
+                                       DMA_FROM_DEVICE);
+               skb_copy_to_linear_data(skb, va, length);
+               dma_sync_single_for_device(&mdev->pdev->dev, dma, length,
+                                          DMA_FROM_DEVICE);
+               skb->tail += length;
+       } else {
+
+               /* Move relevant fragments to skb */
+               used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
+                                                     skb_shinfo(skb)->frags,
+                                                     page_alloc, length);
+               if (unlikely(!used_frags)) {
+                       kfree_skb(skb);
+                       return NULL;
+               }
+               skb_shinfo(skb)->nr_frags = used_frags;
+
+               /* Copy headers into the skb linear buffer */
+               memcpy(skb->data, va, HEADER_COPY_SIZE);
+               skb->tail += HEADER_COPY_SIZE;
+
+               /* Skip headers in first fragment */
+               skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
+
+               /* Adjust size of first fragment */
+               skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
+               skb->data_len = length - HEADER_COPY_SIZE;
+       }
+       return skb;
+}
+
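+/* Verify that a received self-test frame carries the incrementing byte
+ * pattern written by the loopback transmitter, and flag success. */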
+static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
+{
+       int i;
+       int offset = ETH_HLEN;
+
+       for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
+               if (*(skb->data + offset) != (unsigned char) (i & 0xff))
+                       goto out_loopback;
+       }
+       /* Loopback found */
+       priv->loopback_ok = 1;
+
+out_loopback:
+       dev_kfree_skb_any(skb);
+}
+
+int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_cqe *cqe;
+       struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+       struct skb_frag_struct *skb_frags;
+       struct mlx4_en_rx_desc *rx_desc;
+       struct sk_buff *skb;
+       int index;
+       int nr;
+       unsigned int length;
+       int polled = 0;
+       int ip_summed;
+
+       if (!priv->port_up)
+               return 0;
+
+       /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
+        * descriptor offset can be deduced from the CQE index instead of
+        * reading 'cqe->index' */
+       index = cq->mcq.cons_index & ring->size_mask;
+       cqe = &cq->buf[index];
+
+       /* Process all completed CQEs */
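+       /* A CQE is owned by SW when its ownership bit matches the parity of
+        * the consumer index wrap count (cons_index & size) - hence XNOR. */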
+       while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+                   cq->mcq.cons_index & cq->size)) {
+
+               skb_frags = ring->rx_info + (index << priv->log_rx_info);
+               rx_desc = ring->buf + (index << ring->log_stride);
+
+               /*
+                * make sure we read the CQE after we read the ownership bit
+                */
+               rmb();
+
+               /* Drop packet on bad receive or bad checksum */
+               if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+                                               MLX4_CQE_OPCODE_ERROR)) {
+                       en_err(priv, "CQE completed in error - vendor "
+                                 "syndrome:%d syndrome:%d\n",
+                                 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
+                                 ((struct mlx4_err_cqe *) cqe)->syndrome);
+                       goto next;
+               }
+               if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
+                       en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
+                       goto next;
+               }
+
+               /*
+                * Packet is OK - process it.
+                */
+               length = be32_to_cpu(cqe->byte_cnt);
+               ring->bytes += length;
+               ring->packets++;
+
+               if (likely(dev->features & NETIF_F_RXCSUM)) {
+                       if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+                           (cqe->checksum == cpu_to_be16(0xffff))) {
+                               priv->port_stats.rx_chksum_good++;
+                               /* This packet is eligible for GRO if it is:
+                                * - DIX Ethernet (type interpretation)
+                                * - TCP/IP (v4)
+                                * - without IP options
+                                * - not an IP fragment */
+                               if (dev->features & NETIF_F_GRO) {
+                                       struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
+                                       if (!gro_skb)
+                                               goto next;
+
+                                       nr = mlx4_en_complete_rx_desc(
+                                               priv, rx_desc,
+                                               skb_frags, skb_shinfo(gro_skb)->frags,
+                                               ring->page_alloc, length);
+                                       if (!nr)
+                                               goto next;
+
+                                       skb_shinfo(gro_skb)->nr_frags = nr;
+                                       gro_skb->len = length;
+                                       gro_skb->data_len = length;
+                                       gro_skb->truesize += length;
+                                       gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+                                       if (cqe->vlan_my_qpn &
+                                           cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) {
+                                               u16 vid = be16_to_cpu(cqe->sl_vid);
+
+                                               __vlan_hwaccel_put_tag(gro_skb, vid);
+                                       }
+
+                                       napi_gro_frags(&cq->napi);
+
+                                       goto next;
+                               }
+
+                               /* GRO not possible, complete processing here */
+                               ip_summed = CHECKSUM_UNNECESSARY;
+                       } else {
+                               ip_summed = CHECKSUM_NONE;
+                               priv->port_stats.rx_chksum_none++;
+                       }
+               } else {
+                       ip_summed = CHECKSUM_NONE;
+                       priv->port_stats.rx_chksum_none++;
+               }
+
+               skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
+                                    ring->page_alloc, length);
+               if (!skb) {
+                       priv->stats.rx_dropped++;
+                       goto next;
+               }
+
+               if (unlikely(priv->validate_loopback)) {
+                       validate_loopback(priv, skb);
+                       goto next;
+               }
+
+               skb->ip_summed = ip_summed;
+               skb->protocol = eth_type_trans(skb, dev);
+               skb_record_rx_queue(skb, cq->ring);
+
+               if (be32_to_cpu(cqe->vlan_my_qpn) &
+                   MLX4_CQE_VLAN_PRESENT_MASK)
+                       __vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));
+
+               /* Push it up the stack */
+               netif_receive_skb(skb);
+
+next:
+               ++cq->mcq.cons_index;
+               index = (cq->mcq.cons_index) & ring->size_mask;
+               cqe = &cq->buf[index];
+               if (++polled == budget) {
+                       /* We are here because we reached the NAPI budget -
+                        * stop processing; NAPI will poll us again */
+                       goto out;
+               }
+       }
+
+out:
+       AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
+       mlx4_cq_set_ci(&cq->mcq);
+       wmb(); /* ensure HW sees CQ consumer before we post new buffers */
+       ring->cons = cq->mcq.cons_index;
+       ring->prod += polled; /* Polled descriptors were reallocated in place */
+       mlx4_en_update_rx_prod_db(ring);
+       return polled;
+}
+
+
+void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+{
+       struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
+       struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+
+       if (priv->port_up)
+               napi_schedule(&cq->napi);
+       else
+               mlx4_en_arm_cq(priv, cq);
+}
+
+/* Rx CQ polling - called by NAPI */
+int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+{
+       struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+       struct net_device *dev = cq->dev;
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       int done;
+
+       done = mlx4_en_process_rx_cq(dev, cq, budget);
+
+       /* If we used up all the quota - we're probably not done yet... */
+       if (done == budget)
+               INC_PERF_COUNTER(priv->pstats.napi_quota);
+       else {
+               /* Done for now */
+               napi_complete(napi);
+               mlx4_en_arm_cq(priv, cq);
+       }
+       return done;
+}
+
+
+/* Calculate the last offset position that accommodates a full fragment
+ * (assuming fragment size = stride - align) */
+static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
+{
+       u16 res = MLX4_EN_ALLOC_SIZE % stride;
+       u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
+
+       en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
+                           "res:%d offset:%d\n", stride, align, res, offset);
+       return offset;
+}
+
+
+static int frag_sizes[] = {
+       FRAG_SZ0,
+       FRAG_SZ1,
+       FRAG_SZ2,
+       FRAG_SZ3
+};
+
+void mlx4_en_calc_rx_buf(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
+       int buf_size = 0;
+       int i = 0;
+
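+       /* Split the effective MTU over up to MLX4_EN_MAX_RX_FRAGS fragments
+        * of predefined sizes; the last fragment is trimmed to the remainder.
+        * Only the first fragment is offset by NET_IP_ALIGN so the IP header
+        * ends up aligned once the headers are pulled into the skb. */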
+       while (buf_size < eff_mtu) {
+               priv->frag_info[i].frag_size =
+                       (eff_mtu > buf_size + frag_sizes[i]) ?
+                               frag_sizes[i] : eff_mtu - buf_size;
+               priv->frag_info[i].frag_prefix_size = buf_size;
+               if (!i) {
+                       priv->frag_info[i].frag_align = NET_IP_ALIGN;
+                       priv->frag_info[i].frag_stride =
+                               ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
+               } else {
+                       priv->frag_info[i].frag_align = 0;
+                       priv->frag_info[i].frag_stride =
+                               ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
+               }
+               priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
+                                               priv, priv->frag_info[i].frag_stride,
+                                               priv->frag_info[i].frag_align);
+               buf_size += priv->frag_info[i].frag_size;
+               i++;
+       }
+
+       priv->num_frags = i;
+       priv->rx_skb_size = eff_mtu;
+       priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
+
+       en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
+                 "num_frags:%d):\n", eff_mtu, priv->num_frags);
+       for (i = 0; i < priv->num_frags; i++) {
+               en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
+                               "stride:%d last_offset:%d\n", i,
+                               priv->frag_info[i].frag_size,
+                               priv->frag_info[i].frag_prefix_size,
+                               priv->frag_info[i].frag_align,
+                               priv->frag_info[i].frag_stride,
+                               priv->frag_info[i].last_offset);
+       }
+}
+
+/* RSS related functions */
+
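+/* Allocate one rx QP, build its context to point at the ring's CQ and
+ * doorbell record, and bring it to the ready state. */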
+static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
+                                struct mlx4_en_rx_ring *ring,
+                                enum mlx4_qp_state *state,
+                                struct mlx4_qp *qp)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_qp_context *context;
+       int err = 0;
+
+       context = kmalloc(sizeof *context, GFP_KERNEL);
+       if (!context) {
+               en_err(priv, "Failed to allocate qp context\n");
+               return -ENOMEM;
+       }
+
+       err = mlx4_qp_alloc(mdev->dev, qpn, qp);
+       if (err) {
+               en_err(priv, "Failed to allocate qp #%x\n", qpn);
+               goto out;
+       }
+       qp->event = mlx4_en_sqp_event;
+
+       memset(context, 0, sizeof *context);
+       mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
+                               qpn, ring->cqn, context);
+       context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
+
+       err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
+       if (err) {
+               mlx4_qp_remove(mdev->dev, qp);
+               mlx4_qp_free(mdev->dev, qp);
+       }
+       mlx4_en_update_rx_prod_db(ring);
+out:
+       kfree(context);
+       return err;
+}
+
+/* Allocate rx qps and configure them according to rss map */
+int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+       struct mlx4_qp_context context;
+       struct mlx4_en_rss_context *rss_context;
+       void *ptr;
+       u8 rss_mask = 0x3f;
+       int i, qpn;
+       int err = 0;
+       int good_qps = 0;
+
+       en_dbg(DRV, priv, "Configuring rss steering\n");
+       err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
+                                   priv->rx_ring_num,
+                                   &rss_map->base_qpn);
+       if (err) {
+               en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
+               return err;
+       }
+
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               qpn = rss_map->base_qpn + i;
+               err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
+                                           &rss_map->state[i],
+                                           &rss_map->qps[i]);
+               if (err)
+                       goto rss_err;
+
+               ++good_qps;
+       }
+
+       /* Configure RSS indirection qp */
+       err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
+       if (err) {
+               en_err(priv, "Failed to allocate RSS indirection QP\n");
+               goto rss_err;
+       }
+       rss_map->indir_qp.event = mlx4_en_sqp_event;
+       mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
+                               priv->rx_ring[0].cqn, &context);
+
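+       /* The RSS context occupies a fixed offset (0x3c) within the
+        * indirection QP's context. */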
+       ptr = ((void *) &context) + 0x3c;
+       rss_context = ptr;
+       rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
+                                           (rss_map->base_qpn));
+       rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
+       rss_context->flags = rss_mask;
+
+       if (priv->mdev->profile.udp_rss)
+               rss_context->base_qpn_udp = rss_context->default_qpn;
+       err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
+                              &rss_map->indir_qp, &rss_map->indir_state);
+       if (err)
+               goto indir_err;
+
+       return 0;
+
+indir_err:
+       mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
+                      MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
+       mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
+       mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+rss_err:
+       for (i = 0; i < good_qps; i++) {
+               mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
+                              MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
+               mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
+               mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
+       }
+       mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
+       return err;
+}
+
+void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+       int i;
+
+       mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
+                      MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
+       mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
+       mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
+                              MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
+               mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
+               mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
+       }
+       mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
new file mode 100644 (file)
index 0000000..9fdbcec
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/mlx4/driver.h>
+
+#include "mlx4_en.h"
+
+
+static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
+{
+       return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
+                       MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
+{
+       struct sk_buff *skb;
+       struct ethhdr *ethh;
+       unsigned char *packet;
+       unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
+       unsigned int i;
+       int err;
+
+       /* build the pkt before xmit */
+       skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
+       if (!skb) {
+               en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
+               return -ENOMEM;
+       }
+       skb_reserve(skb, NET_IP_ALIGN);
+
+       ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
+       packet  = (unsigned char *)skb_put(skb, packet_size);
+       memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
+       memset(ethh->h_source, 0, ETH_ALEN);
+       ethh->h_proto = htons(ETH_P_ARP);
+       skb_set_mac_header(skb, 0);
+       for (i = 0; i < packet_size; ++i)       /* fill our packet */
+               packet[i] = (unsigned char)(i & 0xff);
+
+       /* xmit the pkt */
+       err = mlx4_en_xmit(skb, priv->dev);
+       return err;
+}
+
+static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
+{
+       u32 loopback_ok = 0;
+       int i;
+
+       priv->loopback_ok = 0;
+       priv->validate_loopback = 1;
+
+       /* xmit */
+       if (mlx4_en_test_loopback_xmit(priv)) {
+               en_err(priv, "Transmitting loopback packet failed\n");
+               goto mlx4_en_test_loopback_exit;
+       }
+
+       /* polling for result */
+       for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
+               msleep(MLX4_EN_LOOPBACK_TIMEOUT);
+               if (priv->loopback_ok) {
+                       loopback_ok = 1;
+                       break;
+               }
+       }
+       if (!loopback_ok)
+               en_err(priv, "Loopback packet didn't arrive\n");
+
+mlx4_en_test_loopback_exit:
+
+       priv->validate_loopback = 0;
+       return !loopback_ok;
+}
+
+
+static int mlx4_en_test_link(struct mlx4_en_priv *priv)
+{
+       if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+               return -ENOMEM;
+       if (priv->port_state.link_state == 1)
+               return 0;
+       else
+               return 1;
+}
+
+static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
+{
+       if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
+               return -ENOMEM;
+
+       /* The device currently only supports 10G speed */
+       if (priv->port_state.link_speed != SPEED_10000)
+               return priv->port_state.link_speed;
+       return 0;
+}
+
+
+void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_tx_ring *tx_ring;
+       int i, carrier_ok;
+
+       memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
+
+       if (*flags & ETH_TEST_FL_OFFLINE) {
+               /* disable the interface */
+               carrier_ok = netif_carrier_ok(dev);
+
+               netif_carrier_off(dev);
+retry_tx:
+               /* Wait until all tx queues are empty.
+                * There should not be any additional incoming traffic
+                * since we turned the carrier off */
+               msleep(200);
+               for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
+                       tx_ring = &priv->tx_ring[i];
+                       if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
+                               goto retry_tx;
+               }
+
+               if (priv->mdev->dev->caps.flags &
+                                       MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
+                       buf[3] = mlx4_en_test_registers(priv);
+                       buf[4] = mlx4_en_test_loopback(priv);
+               }
+
+               if (carrier_ok)
+                       netif_carrier_on(dev);
+       }
+       buf[0] = mlx4_test_interrupts(mdev->dev);
+       buf[1] = mlx4_en_test_link(priv);
+       buf[2] = mlx4_en_test_speed(priv);
+
+       for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
+               if (buf[i])
+                       *flags |= ETH_TEST_FL_FAILED;
+       }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
new file mode 100644 (file)
index 0000000..6e03de0
--- /dev/null
@@ -0,0 +1,828 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/mlx4/cq.h>
+#include <linux/slab.h>
+#include <linux/mlx4/qp.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+#include <linux/tcp.h>
+
+#include "mlx4_en.h"
+
+enum {
+       MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+       MAX_BF = 256,
+};
+
+static int inline_thold __read_mostly = MAX_INLINE;
+
+module_param_named(inline_thold, inline_thold, int, 0444);
+MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
+
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+                          struct mlx4_en_tx_ring *ring, int qpn, u32 size,
+                          u16 stride)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int tmp;
+       int err;
+
+       ring->size = size;
+       ring->size_mask = size - 1;
+       ring->stride = stride;
+
+       inline_thold = min(inline_thold, MAX_INLINE);
+
+       spin_lock_init(&ring->comp_lock);
+
+       tmp = size * sizeof(struct mlx4_en_tx_info);
+       ring->tx_info = vmalloc(tmp);
+       if (!ring->tx_info) {
+               en_err(priv, "Failed allocating tx_info ring\n");
+               return -ENOMEM;
+       }
+       en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+                ring->tx_info, tmp);
+
+       ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+       if (!ring->bounce_buf) {
+               en_err(priv, "Failed allocating bounce buffer\n");
+               err = -ENOMEM;
+               goto err_tx;
+       }
+       ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
+
+       err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
+                                2 * PAGE_SIZE);
+       if (err) {
+               en_err(priv, "Failed allocating hwq resources\n");
+               goto err_bounce;
+       }
+
+       err = mlx4_en_map_buffer(&ring->wqres.buf);
+       if (err) {
+               en_err(priv, "Failed to map TX buffer\n");
+               goto err_hwq_res;
+       }
+
+       ring->buf = ring->wqres.buf.direct.buf;
+
+       en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
+              "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
+              ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+
+       ring->qpn = qpn;
+       err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
+       if (err) {
+               en_err(priv, "Failed allocating qp %d\n", ring->qpn);
+               goto err_map;
+       }
+       ring->qp.event = mlx4_en_sqp_event;
+
+       err = mlx4_bf_alloc(mdev->dev, &ring->bf);
+       if (err) {
+               en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
+               ring->bf.uar = &mdev->priv_uar;
+               ring->bf.uar->map = mdev->uar_map;
+               ring->bf_enabled = false;
+       } else
+               ring->bf_enabled = true;
+
+       return 0;
+
+err_map:
+       mlx4_en_unmap_buffer(&ring->wqres.buf);
+err_hwq_res:
+       mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+err_bounce:
+       kfree(ring->bounce_buf);
+       ring->bounce_buf = NULL;
+err_tx:
+       vfree(ring->tx_info);
+       ring->tx_info = NULL;
+       return err;
+}
+
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+                            struct mlx4_en_tx_ring *ring)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
+
+       if (ring->bf_enabled)
+               mlx4_bf_free(mdev->dev, &ring->bf);
+       mlx4_qp_remove(mdev->dev, &ring->qp);
+       mlx4_qp_free(mdev->dev, &ring->qp);
+       mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
+       mlx4_en_unmap_buffer(&ring->wqres.buf);
+       mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+       kfree(ring->bounce_buf);
+       ring->bounce_buf = NULL;
+       vfree(ring->tx_info);
+       ring->tx_info = NULL;
+}
+
+int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+                            struct mlx4_en_tx_ring *ring,
+                            int cq)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err;
+
+       ring->cqn = cq;
+       ring->prod = 0;
+       ring->cons = 0xffffffff;
+       ring->last_nr_txbb = 1;
+       ring->poll_cnt = 0;
+       ring->blocked = 0;
+       memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
+       memset(ring->buf, 0, ring->buf_size);
+
+       ring->qp_state = MLX4_QP_STATE_RST;
+       ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
+
+       mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
+                               ring->cqn, &ring->context);
+       if (ring->bf_enabled)
+               ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
+
+       err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
+                              &ring->qp, &ring->qp_state);
+
+       return err;
+}
+
+void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+                               struct mlx4_en_tx_ring *ring)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+
+       mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
+                      MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
+}
+
+
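+/* Unmap the DMA buffers of a completed descriptor, free its skb, and
+ * "stamp" every TXBB it occupied with the current ownership value so that
+ * stale data is never mistaken for a valid descriptor; the slow path below
+ * handles descriptors that wrap past the end of the ring. */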
+static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+                               struct mlx4_en_tx_ring *ring,
+                               int index, u8 owner)
+{
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+       struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+       struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
+       struct sk_buff *skb = tx_info->skb;
+       struct skb_frag_struct *frag;
+       void *end = ring->buf + ring->buf_size;
+       int frags = skb_shinfo(skb)->nr_frags;
+       int i;
+       __be32 *ptr = (__be32 *)tx_desc;
+       __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+
+       /* Optimize the common case when there are no wraparounds */
+       if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+               if (!tx_info->inl) {
+                       if (tx_info->linear) {
+                               pci_unmap_single(mdev->pdev,
+                                       (dma_addr_t) be64_to_cpu(data->addr),
+                                        be32_to_cpu(data->byte_count),
+                                        PCI_DMA_TODEVICE);
+                               ++data;
+                       }
+
+                       for (i = 0; i < frags; i++) {
+                               frag = &skb_shinfo(skb)->frags[i];
+                               pci_unmap_page(mdev->pdev,
+                                       (dma_addr_t) be64_to_cpu(data[i].addr),
+                                       frag->size, PCI_DMA_TODEVICE);
+                       }
+               }
+               /* Stamp the freed descriptor */
+               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
+                       *ptr = stamp;
+                       ptr += STAMP_DWORDS;
+               }
+
+       } else {
+               if (!tx_info->inl) {
+                       if ((void *) data >= end)
+                               data = ring->buf + ((void *) data - end);
+
+                       if (tx_info->linear) {
+                               pci_unmap_single(mdev->pdev,
+                                       (dma_addr_t) be64_to_cpu(data->addr),
+                                        be32_to_cpu(data->byte_count),
+                                        PCI_DMA_TODEVICE);
+                               ++data;
+                       }
+
+                       for (i = 0; i < frags; i++) {
+                               /* Check for wraparound before unmapping */
+                               if ((void *) data >= end)
+                                       data = ring->buf;
+                               frag = &skb_shinfo(skb)->frags[i];
+                               pci_unmap_page(mdev->pdev,
+                                       (dma_addr_t) be64_to_cpu(data->addr),
+                                        frag->size, PCI_DMA_TODEVICE);
+                               ++data;
+                       }
+               }
+               /* Stamp the freed descriptor */
+               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
+                       *ptr = stamp;
+                       ptr += STAMP_DWORDS;
+                       if ((void *) ptr >= end) {
+                               ptr = ring->buf;
+                               stamp ^= cpu_to_be32(0x80000000);
+                       }
+               }
+
+       }
+       dev_kfree_skb_any(skb);
+       return tx_info->nr_txbb;
+}
+
+
+int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       int cnt = 0;
+
+       /* Skip last polled descriptor */
+       ring->cons += ring->last_nr_txbb;
+       en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
+                ring->cons, ring->prod);
+
+       if ((u32) (ring->prod - ring->cons) > ring->size) {
+               if (netif_msg_tx_err(priv))
+                       en_warn(priv, "Tx consumer passed producer!\n");
+               return 0;
+       }
+
+       while (ring->cons != ring->prod) {
+               ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
+                                               ring->cons & ring->size_mask,
+                                               !!(ring->cons & ring->size));
+               ring->cons += ring->last_nr_txbb;
+               cnt++;
+       }
+
+       if (cnt)
+               en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
+
+       return cnt;
+}
+
+
+static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_cq *mcq = &cq->mcq;
+       struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+       struct mlx4_cqe *cqe = cq->buf;
+       u16 index;
+       u16 new_index;
+       u32 txbbs_skipped = 0;
+       u32 cq_last_sav;
+
+       /* index always points to the first TXBB of the last polled descriptor */
+       index = ring->cons & ring->size_mask;
+       new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
+       if (index == new_index)
+               return;
+
+       if (!priv->port_up)
+               return;
+
+       /*
+        * We use a two-stage loop:
+        * - the first samples the HW-updated CQE
+        * - the second frees TXBBs until the last sample
+        * This lets us amortize CQE cache misses, while still polling the CQ
+        * until it is quiescent.
+        */
+       cq_last_sav = mcq->cons_index;
+       do {
+               do {
+                       /* Skip over last polled CQE */
+                       index = (index + ring->last_nr_txbb) & ring->size_mask;
+                       txbbs_skipped += ring->last_nr_txbb;
+
+                       /* Poll next CQE */
+                       ring->last_nr_txbb = mlx4_en_free_tx_desc(
+                                               priv, ring, index,
+                                               !!((ring->cons + txbbs_skipped) &
+                                                  ring->size));
+                       ++mcq->cons_index;
+
+               } while (index != new_index);
+
+               new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
+       } while (index != new_index);
+       AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
+                        (u32) (mcq->cons_index - cq_last_sav));
+
+       /*
+        * To prevent CQ overflow we first update CQ consumer and only then
+        * the ring consumer.
+        */
+       mlx4_cq_set_ci(mcq);
+       wmb();
+       ring->cons += txbbs_skipped;
+
+       /* Wakeup Tx queue if this ring stopped it */
+       if (unlikely(ring->blocked)) {
+               if ((u32) (ring->prod - ring->cons) <=
+                    ring->size - HEADROOM - MAX_DESC_TXBBS) {
+                       ring->blocked = 0;
+                       netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
+                       priv->port_stats.wake_queue++;
+               }
+       }
+}
+
+void mlx4_en_tx_irq(struct mlx4_cq *mcq)
+{
+       struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
+       struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+       struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+
+       if (!spin_trylock(&ring->comp_lock))
+               return;
+       mlx4_en_process_tx_cq(cq->dev, cq);
+       mod_timer(&cq->timer, jiffies + 1);
+       spin_unlock(&ring->comp_lock);
+}
+
+
+void mlx4_en_poll_tx_cq(unsigned long data)
+{
+       struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
+       struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+       struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+       u32 inflight;
+
+       INC_PERF_COUNTER(priv->pstats.tx_poll);
+
+       if (!spin_trylock_irq(&ring->comp_lock)) {
+               mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+               return;
+       }
+       mlx4_en_process_tx_cq(cq->dev, cq);
+       inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
+
+       /* If there are still packets in flight and the timer has not already
+        * been scheduled by the Tx routine then schedule it here to guarantee
+        * completion processing of these packets */
+       if (inflight && priv->port_up)
+               mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+
+       spin_unlock_irq(&ring->comp_lock);
+}
+
+static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
+                                                     struct mlx4_en_tx_ring *ring,
+                                                     u32 index,
+                                                     unsigned int desc_size)
+{
+       u32 copy = (ring->size - index) * TXBB_SIZE;
+       int i;
+
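+       /* The descriptor was built in the linear bounce buffer because it
+        * wraps past the end of the ring; copy it back in two pieces, the
+        * wrapped tail first, with a barrier at every TXBB boundary. The
+        * first dword (holding the ownership bit) is left for the caller. */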
+       for (i = desc_size - copy - 4; i >= 0; i -= 4) {
+               if ((i & (TXBB_SIZE - 1)) == 0)
+                       wmb();
+
+               *((u32 *) (ring->buf + i)) =
+                       *((u32 *) (ring->bounce_buf + copy + i));
+       }
+
+       for (i = copy - 4; i >= 4; i -= 4) {
+               if ((i & (TXBB_SIZE - 1)) == 0)
+                       wmb();
+
+               *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
+                       *((u32 *) (ring->bounce_buf + i));
+       }
+
+       /* Return real descriptor location */
+       return ring->buf + index * TXBB_SIZE;
+}
+
+static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
+{
+       struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
+       struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+       unsigned long flags;
+
+       /* If we don't have a pending timer, set one up to catch our recent
+        * post in case the interface becomes idle */
+       if (!timer_pending(&cq->timer))
+               mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+
+       /* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
+       if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
+               if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
+                       mlx4_en_process_tx_cq(priv->dev, cq);
+                       spin_unlock_irqrestore(&ring->comp_lock, flags);
+               }
+}
+
+static void *get_frag_ptr(struct sk_buff *skb)
+{
+       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+       struct page *page = frag->page;
+       void *ptr;
+
+       ptr = page_address(page);
+       if (unlikely(!ptr))
+               return NULL;
+
+       return ptr + frag->page_offset;
+}
+
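+/* A packet qualifies for inline transmission when inlining is enabled, it
+ * is not GSO, its total length is within inline_thold, and it carries at
+ * most one page fragment (whose address is returned via pfrag). */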
+static int is_inline(struct sk_buff *skb, void **pfrag)
+{
+       void *ptr;
+
+       if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
+               if (skb_shinfo(skb)->nr_frags == 1) {
+                       ptr = get_frag_ptr(skb);
+                       if (unlikely(!ptr))
+                               return 0;
+
+                       if (pfrag)
+                               *pfrag = ptr;
+
+                       return 1;
+               } else if (unlikely(skb_shinfo(skb)->nr_frags))
+                       return 0;
+               else
+                       return 1;
+       }
+
+       return 0;
+}
+
+static int inline_size(struct sk_buff *skb)
+{
+       if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
+           <= MLX4_INLINE_ALIGN)
+               return ALIGN(skb->len + CTRL_SIZE +
+                            sizeof(struct mlx4_wqe_inline_seg), 16);
+       else
+               return ALIGN(skb->len + CTRL_SIZE + 2 *
+                            sizeof(struct mlx4_wqe_inline_seg), 16);
+}
+
+static int get_real_size(struct sk_buff *skb, struct net_device *dev,
+                        int *lso_header_size)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       int real_size;
+
+       if (skb_is_gso(skb)) {
+               *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+               real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
+                       ALIGN(*lso_header_size + 4, DS_SIZE);
+               if (unlikely(*lso_header_size != skb_headlen(skb))) {
+                       /* We add a segment for the skb linear buffer only if
+                        * it contains data */
+                       if (*lso_header_size < skb_headlen(skb))
+                               real_size += DS_SIZE;
+                       else {
+                               if (netif_msg_tx_err(priv))
+                                       en_warn(priv, "Non-linear headers\n");
+                               return 0;
+                       }
+               }
+       } else {
+               *lso_header_size = 0;
+               if (!is_inline(skb, NULL))
+                       real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
+               else
+                       real_size = inline_size(skb);
+       }
+
+       return real_size;
+}
+
+static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
+                            int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
+{
+       struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
+       int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
+
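+       /* spc is the payload room left in the first inline segment; data
+        * beyond it goes into a second segment whose byte_count is made
+        * valid only after a barrier, once its payload is in place. */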
+       if (skb->len <= spc) {
+               inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+               skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
+               if (skb_shinfo(skb)->nr_frags)
+                       memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
+                              skb_shinfo(skb)->frags[0].size);
+
+       } else {
+               inl->byte_count = cpu_to_be32(1 << 31 | spc);
+               if (skb_headlen(skb) <= spc) {
+                       skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
+                       if (skb_headlen(skb) < spc) {
+                               memcpy(((void *)(inl + 1)) + skb_headlen(skb),
+                                       fragptr, spc - skb_headlen(skb));
+                               fragptr += spc - skb_headlen(skb);
+                       }
+                       inl = (void *) (inl + 1) + spc;
+                       memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
+               } else {
+                       skb_copy_from_linear_data(skb, inl + 1, spc);
+                       inl = (void *) (inl + 1) + spc;
+                       skb_copy_from_linear_data_offset(skb, spc, inl + 1,
+                                       skb_headlen(skb) - spc);
+                       if (skb_shinfo(skb)->nr_frags)
+                               memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
+                                       fragptr, skb_shinfo(skb)->frags[0].size);
+               }
+
+               wmb();
+               inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
+       }
+       tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
+       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+       tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+}
+
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       u16 vlan_tag = 0;
+
+       /* If we support per priority flow control and the packet contains
+        * a vlan tag, send the packet to the TX ring assigned to that priority
+        */
+       if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
+               vlan_tag = vlan_tx_tag_get(skb);
+               return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
+       }
+
+       return skb_tx_hash(dev, skb);
+}
+
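+/* BlueFlame: copy the whole descriptor through the dedicated register
+ * window in 64-bit chunks, which saves the separate doorbell write. */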
+static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
+{
+       __iowrite64_copy(dst, src, bytecnt / 8);
+}
+
+netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_tx_ring *ring;
+       struct mlx4_en_cq *cq;
+       struct mlx4_en_tx_desc *tx_desc;
+       struct mlx4_wqe_data_seg *data;
+       struct skb_frag_struct *frag;
+       struct mlx4_en_tx_info *tx_info;
+       struct ethhdr *ethh;
+       u64 mac;
+       u32 mac_l, mac_h;
+       int tx_ind = 0;
+       int nr_txbb;
+       int desc_size;
+       int real_size;
+       dma_addr_t dma;
+       u32 index, bf_index;
+       __be32 op_own;
+       u16 vlan_tag = 0;
+       int i;
+       int lso_header_size;
+       void *fragptr;
+       bool bounce = false;
+
+       if (!priv->port_up)
+               goto tx_drop;
+
+       real_size = get_real_size(skb, dev, &lso_header_size);
+       if (unlikely(!real_size))
+               goto tx_drop;
+
+       /* Align descriptor to TXBB size */
+       desc_size = ALIGN(real_size, TXBB_SIZE);
+       nr_txbb = desc_size / TXBB_SIZE;
+       if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
+               if (netif_msg_tx_err(priv))
+                       en_warn(priv, "Oversized header or SG list\n");
+               goto tx_drop;
+       }
+
+       tx_ind = skb->queue_mapping;
+       ring = &priv->tx_ring[tx_ind];
+       if (vlan_tx_tag_present(skb))
+               vlan_tag = vlan_tx_tag_get(skb);
+
+       /* Check available TXBBs and 2K spare for prefetch */
+       if (unlikely(((int)(ring->prod - ring->cons)) >
+                    ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+               /* Every full Tx ring stops its queue */
+               netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
+               ring->blocked = 1;
+               priv->port_stats.queue_stopped++;
+
+               /* Use interrupts to find out when queue opened */
+               cq = &priv->tx_cq[tx_ind];
+               mlx4_en_arm_cq(priv, cq);
+               return NETDEV_TX_BUSY;
+       }
+
+       /* Track current inflight packets for performance analysis */
+       AVG_PERF_COUNTER(priv->pstats.inflight_avg,
+                        (u32) (ring->prod - ring->cons - 1));
+
+       /* Packet is good - grab an index and transmit it */
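+       /* index is the masked ring slot; bf_index keeps the unmasked
+        * producer count that a BlueFlame send encodes into op_own. */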
+       index = ring->prod & ring->size_mask;
+       bf_index = ring->prod;
+
+       /* See if we have enough space for whole descriptor TXBB for setting
+        * SW ownership on next descriptor; if not, use a bounce buffer. */
+       if (likely(index + nr_txbb <= ring->size))
+               tx_desc = ring->buf + index * TXBB_SIZE;
+       else {
+               tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
+               bounce = true;
+       }
+
+       /* Save skb in tx_info ring */
+       tx_info = &ring->tx_info[index];
+       tx_info->skb = skb;
+       tx_info->nr_txbb = nr_txbb;
+
+       /* Prepare the ctrl segment apart from opcode+ownership, which depends
+        * on whether LSO is used */
+       tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
+       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+       tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+       tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+                                               MLX4_WQE_CTRL_SOLICITED);
+       if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+               tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+                                                        MLX4_WQE_CTRL_TCP_UDP_CSUM);
+               priv->port_stats.tx_chksum_offload++;
+       }
+
+       if (unlikely(priv->validate_loopback)) {
+               /* Copy dst mac address to wqe */
+               skb_reset_mac_header(skb);
+               ethh = eth_hdr(skb);
+               if (ethh && ethh->h_dest) {
+                       mac = mlx4_en_mac_to_u64(ethh->h_dest);
+                       mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+                       mac_l = (u32) (mac & 0xffffffff);
+                       tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+                       tx_desc->ctrl.imm = cpu_to_be32(mac_l);
+               }
+       }
+
+       /* Handle LSO (TSO) packets */
+       if (lso_header_size) {
+               /* Mark opcode as LSO */
+               op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
+                       ((ring->prod & ring->size) ?
+                               cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+
+               /* Fill in the LSO prefix */
+               tx_desc->lso.mss_hdr_size = cpu_to_be32(
+                       skb_shinfo(skb)->gso_size << 16 | lso_header_size);
+
+               /* Copy headers;
+                * note that we already verified that it is linear */
+               memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+               data = ((void *) &tx_desc->lso +
+                       ALIGN(lso_header_size + 4, DS_SIZE));
+
+               priv->port_stats.tso_packets++;
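+               /* Segment count: ceil((skb->len - header) / gso_size); the
+                * header is re-sent with every segment on the wire, which
+                * the byte count below accounts for. */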
+               i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
+                       !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
+               ring->bytes += skb->len + (i - 1) * lso_header_size;
+               ring->packets += i;
+       } else {
+               /* Normal (Non LSO) packet */
+               op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
+                       ((ring->prod & ring->size) ?
+                        cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+               data = &tx_desc->data;
+               ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
+               ring->packets++;
+       }
+       AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
+
+       /* Valid only for non-inline segments */
+       tx_info->data_offset = (void *) data - (void *) tx_desc;
+
+       tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
+       data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
+
+       if (!is_inline(skb, &fragptr)) {
+               /* Map fragments */
+               for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+                       frag = &skb_shinfo(skb)->frags[i];
+                       dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
+                                          frag->size, PCI_DMA_TODEVICE);
+                       data->addr = cpu_to_be64(dma);
+                       data->lkey = cpu_to_be32(mdev->mr.key);
+                       wmb();
+                       data->byte_count = cpu_to_be32(frag->size);
+                       --data;
+               }
+
+               /* Map linear part */
+               if (tx_info->linear) {
+                       dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
+                                            skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
+                       data->addr = cpu_to_be64(dma);
+                       data->lkey = cpu_to_be32(mdev->mr.key);
+                       wmb();
+                       data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
+               }
+               tx_info->inl = 0;
+       } else {
+               build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
+               tx_info->inl = 1;
+       }
+
+       ring->prod += nr_txbb;
+
+       /* If we used a bounce buffer then copy descriptor back into place */
+       if (bounce)
+               tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
+
+       /* Run destructor before passing skb to HW */
+       if (likely(!skb_shared(skb)))
+               skb_orphan(skb);
+
+       if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
+               *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
+               op_own |= htonl((bf_index & 0xffff) << 8);
+               /* Ensure the new descriptor hits memory
+                * before setting ownership of this descriptor to HW */
+               wmb();
+               tx_desc->ctrl.owner_opcode = op_own;
+
+               wmb();
+
+               mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
+                    desc_size);
+
+               wmb();
+
+               ring->bf.offset ^= ring->bf.buf_size;
+       } else {
+               /* Ensure the new descriptor hits memory
+                * before setting ownership of this descriptor to HW */
+               wmb();
+               tx_desc->ctrl.owner_opcode = op_own;
+               wmb();
+               writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
+       }
+
+       /* Poll CQ here */
+       mlx4_en_xmit_poll(priv, tx_ind);
+
+       return NETDEV_TX_OK;
+
+tx_drop:
+       dev_kfree_skb_any(skb);
+       priv->stats.tx_dropped++;
+       return NETDEV_TX_OK;
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
new file mode 100644 (file)
index 0000000..1ad1f60
--- /dev/null
@@ -0,0 +1,842 @@
+/*
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *     - Redistributions of source code must retain the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer.
+ *
+ *     - Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials
+ *       provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+enum {
+       MLX4_IRQNAME_SIZE       = 32
+};
+
+enum {
+       MLX4_NUM_ASYNC_EQE      = 0x100,
+       MLX4_NUM_SPARE_EQE      = 0x80,
+       MLX4_EQ_ENTRY_SIZE      = 0x20
+};
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+       __be32                  flags;
+       u16                     reserved1[3];
+       __be16                  page_offset;
+       u8                      log_eq_size;
+       u8                      reserved2[4];
+       u8                      eq_period;
+       u8                      reserved3;
+       u8                      eq_max_count;
+       u8                      reserved4[3];
+       u8                      intr;
+       u8                      log_page_size;
+       u8                      reserved5[2];
+       u8                      mtt_base_addr_h;
+       __be32                  mtt_base_addr_l;
+       u32                     reserved6[2];
+       __be32                  consumer_index;
+       __be32                  producer_index;
+       u32                     reserved7[4];
+};
+
+#define MLX4_EQ_STATUS_OK         ( 0 << 28)
+#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
+#define MLX4_EQ_OWNER_SW          ( 0 << 24)
+#define MLX4_EQ_OWNER_HW          ( 1 << 24)
+#define MLX4_EQ_FLAG_EC                   ( 1 << 18)
+#define MLX4_EQ_FLAG_OI                   ( 1 << 17)
+#define MLX4_EQ_STATE_ARMED       ( 9 <<  8)
+#define MLX4_EQ_STATE_FIRED       (10 <<  8)
+#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)
+
+#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)          | \
+                              (1ull << MLX4_EVENT_TYPE_COMM_EST)           | \
+                              (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)         | \
+                              (1ull << MLX4_EVENT_TYPE_CQ_ERROR)           | \
+                              (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)     | \
+                              (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
+                              (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
+                              (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
+                              (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
+                              (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)        | \
+                              (1ull << MLX4_EVENT_TYPE_ECC_DETECT)         | \
+                              (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
+                              (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
+                              (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
+                              (1ull << MLX4_EVENT_TYPE_CMD))
+
+struct mlx4_eqe {
+       u8                      reserved1;
+       u8                      type;
+       u8                      reserved2;
+       u8                      subtype;
+       union {
+               u32             raw[6];
+               struct {
+                       __be32  cqn;
+               } __packed comp;
+               struct {
+                       u16     reserved1;
+                       __be16  token;
+                       u32     reserved2;
+                       u8      reserved3[3];
+                       u8      status;
+                       __be64  out_param;
+               } __packed cmd;
+               struct {
+                       __be32  qpn;
+               } __packed qp;
+               struct {
+                       __be32  srqn;
+               } __packed srq;
+               struct {
+                       __be32  cqn;
+                       u32     reserved1;
+                       u8      reserved2[3];
+                       u8      syndrome;
+               } __packed cq_err;
+               struct {
+                       u32     reserved1[2];
+                       __be32  port;
+               } __packed port_change;
+       }                       event;
+       u8                      reserved3[3];
+       u8                      owner;
+} __packed;
+
+static void eq_set_ci(struct mlx4_eq *eq, int req_not)
+{
+       __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
+                                              req_not << 31),
+                    eq->doorbell);
+       /* We still want ordering, just not swabbing, so add a barrier */
+       mb();
+}
+
+static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
+{
+       unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
+       return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
+}
+
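+/* An EQE is software-owned when its ownership bit matches the parity of
+ * the consumer index's pass over the queue; the expected bit value flips
+ * each time the queue wraps. */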
+static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
+{
+       struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
+       return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
+}
+
+static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+{
+       struct mlx4_eqe *eqe;
+       int cqn;
+       int eqes_found = 0;
+       int set_ci = 0;
+       int port;
+
+       while ((eqe = next_eqe_sw(eq))) {
+               /*
+                * Make sure we read EQ entry contents after we've
+                * checked the ownership bit.
+                */
+               rmb();
+
+               switch (eqe->type) {
+               case MLX4_EVENT_TYPE_COMP:
+                       cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
+                       mlx4_cq_completion(dev, cqn);
+                       break;
+
+               case MLX4_EVENT_TYPE_PATH_MIG:
+               case MLX4_EVENT_TYPE_COMM_EST:
+               case MLX4_EVENT_TYPE_SQ_DRAINED:
+               case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
+               case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
+               case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
+               case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+               case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
+                       mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
+                                     eqe->type);
+                       break;
+
+               case MLX4_EVENT_TYPE_SRQ_LIMIT:
+               case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
+                       mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
+                                     eqe->type);
+                       break;
+
+               case MLX4_EVENT_TYPE_CMD:
+                       mlx4_cmd_event(dev,
+                                      be16_to_cpu(eqe->event.cmd.token),
+                                      eqe->event.cmd.status,
+                                      be64_to_cpu(eqe->event.cmd.out_param));
+                       break;
+
+               case MLX4_EVENT_TYPE_PORT_CHANGE:
+                       port = be32_to_cpu(eqe->event.port_change.port) >> 28;
+                       if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
+                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+                                                   port);
+                               mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+                       } else {
+                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+                                                   port);
+                               mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+                       }
+                       break;
+
+               case MLX4_EVENT_TYPE_CQ_ERROR:
+                       mlx4_warn(dev, "CQ %s on CQN %06x\n",
+                                 eqe->event.cq_err.syndrome == 1 ?
+                                 "overrun" : "access violation",
+                                 be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
+                       mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+                                     eqe->type);
+                       break;
+
+               case MLX4_EVENT_TYPE_EQ_OVERFLOW:
+                       mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
+                       break;
+
+               case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
+               case MLX4_EVENT_TYPE_ECC_DETECT:
+               default:
+                       mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
+                                 eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+                       break;
+               }
+
+               ++eq->cons_index;
+               eqes_found = 1;
+               ++set_ci;
+
+               /*
+                * The HCA will think the queue has overflowed if we
+                * don't tell it we've been processing events.  We
+                * create our EQs with MLX4_NUM_SPARE_EQE extra
+                * entries, so we must update our consumer index at
+                * least that often.
+                */
+               if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
+                       eq_set_ci(eq, 0);
+                       set_ci = 0;
+               }
+       }
+
+       eq_set_ci(eq, 1);
+
+       return eqes_found;
+}
+
+static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
+{
+       struct mlx4_dev *dev = dev_ptr;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int work = 0;
+       int i;
+
+       writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
+
+       for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
+               work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
+
+       return IRQ_RETVAL(work);
+}
+
+static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
+{
+       struct mlx4_eq  *eq  = eq_ptr;
+       struct mlx4_dev *dev = eq->dev;
+
+       mlx4_eq_int(dev, eq);
+
+       /* MSI-X vectors always belong to us */
+       return IRQ_HANDLED;
+}
+
+static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
+                       int eq_num)
+{
+       return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
+                       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
+}
+
+static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+                        int eq_num)
+{
+       return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
+                       MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+                        int eq_num)
+{
+       return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
+                           MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_num_eq_uar(struct mlx4_dev *dev)
+{
+       /*
+        * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
+        * we need to map, take the difference of highest index and
+        * the lowest index we'll use and add 1.
+        */
+       return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
+                dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
+}
+
+static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int index;
+
+       index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;
+
+       if (!priv->eq_table.uar_map[index]) {
+               priv->eq_table.uar_map[index] =
+                       ioremap(pci_resource_start(dev->pdev, 2) +
+                               ((eq->eqn / 4) << PAGE_SHIFT),
+                               PAGE_SIZE);
+               if (!priv->eq_table.uar_map[index]) {
+                       mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
+                                eq->eqn);
+                       return NULL;
+               }
+       }
+
+       return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
+}
+
+static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
+                         u8 intr, struct mlx4_eq *eq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_eq_context *eq_context;
+       int npages;
+       u64 *dma_list = NULL;
+       dma_addr_t t;
+       u64 mtt_addr;
+       int err = -ENOMEM;
+       int i;
+
+       eq->dev   = dev;
+       eq->nent  = roundup_pow_of_two(max(nent, 2));
+       npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
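+       /* The EQ is a power-of-two array of 32-byte entries, backed by
+        * page-sized coherent DMA buffers that are mapped through an MTT. */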
+
+       eq->page_list = kmalloc(npages * sizeof *eq->page_list,
+                               GFP_KERNEL);
+       if (!eq->page_list)
+               goto err_out;
+
+       for (i = 0; i < npages; ++i)
+               eq->page_list[i].buf = NULL;
+
+       dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+       if (!dma_list)
+               goto err_out_free;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               goto err_out_free;
+       eq_context = mailbox->buf;
+
+       for (i = 0; i < npages; ++i) {
+               eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
+                                                         PAGE_SIZE, &t, GFP_KERNEL);
+               if (!eq->page_list[i].buf)
+                       goto err_out_free_pages;
+
+               dma_list[i] = t;
+               eq->page_list[i].map = t;
+
+               memset(eq->page_list[i].buf, 0, PAGE_SIZE);
+       }
+
+       eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
+       if (eq->eqn == -1)
+               goto err_out_free_pages;
+
+       eq->doorbell = mlx4_get_eq_uar(dev, eq);
+       if (!eq->doorbell) {
+               err = -ENOMEM;
+               goto err_out_free_eq;
+       }
+
+       err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
+       if (err)
+               goto err_out_free_eq;
+
+       err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
+       if (err)
+               goto err_out_free_mtt;
+
+       memset(eq_context, 0, sizeof *eq_context);
+       eq_context->flags         = cpu_to_be32(MLX4_EQ_STATUS_OK   |
+                                               MLX4_EQ_STATE_ARMED);
+       eq_context->log_eq_size   = ilog2(eq->nent);
+       eq_context->intr          = intr;
+       eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;
+
+       mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
+       eq_context->mtt_base_addr_h = mtt_addr >> 32;
+       eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+
+       err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
+       if (err) {
+               mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
+               goto err_out_free_mtt;
+       }
+
+       kfree(dma_list);
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       eq->cons_index = 0;
+
+       return err;
+
+err_out_free_mtt:
+       mlx4_mtt_cleanup(dev, &eq->mtt);
+
+err_out_free_eq:
+       mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+
+err_out_free_pages:
+       for (i = 0; i < npages; ++i)
+               if (eq->page_list[i].buf)
+                       dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                         eq->page_list[i].buf,
+                                         eq->page_list[i].map);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+err_out_free:
+       kfree(eq->page_list);
+       kfree(dma_list);
+
+err_out:
+       return err;
+}
+
+static void mlx4_free_eq(struct mlx4_dev *dev,
+                        struct mlx4_eq *eq)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cmd_mailbox *mailbox;
+       int err;
+       int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
+       int i;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return;
+
+       err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
+       if (err)
+               mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
+
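+       /* Debug aid: dump the EQ context returned by HW2SW_EQ.
+        * Disabled by default. */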
+       if (0) {
+               mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
+               for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
+                       if (i % 4 == 0)
+                               pr_cont("[%02x] ", i * 4);
+                       pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
+                       if ((i + 1) % 4 == 0)
+                               pr_cont("\n");
+               }
+       }
+
+       mlx4_mtt_cleanup(dev, &eq->mtt);
+       for (i = 0; i < npages; ++i)
+               pci_free_consistent(dev->pdev, PAGE_SIZE,
+                                   eq->page_list[i].buf,
+                                   eq->page_list[i].map);
+
+       kfree(eq->page_list);
+       mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
+       mlx4_free_cmd_mailbox(dev, mailbox);
+}
+
+static void mlx4_free_irqs(struct mlx4_dev *dev)
+{
+       struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int     i, vec;
+
+       if (eq_table->have_irq)
+               free_irq(dev->pdev->irq, dev);
+
+       for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
+               if (eq_table->eq[i].have_irq) {
+                       free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+                       eq_table->eq[i].have_irq = 0;
+               }
+
+       for (i = 0; i < dev->caps.comp_pool; i++) {
+               /*
+                * Free the IRQs assigned from the pool; at this point all
+                * bits should be 0, but we validate to be safe.
+                */
+               if (priv->msix_ctl.pool_bm & 1ULL << i) {
+                       /* No need for locking here */
+                       vec = dev->caps.num_comp_vectors + 1 + i;
+                       free_irq(priv->eq_table.eq[vec].irq,
+                                &priv->eq_table.eq[vec]);
+               }
+       }
+
+       kfree(eq_table->irq_names);
+}
+
+static int mlx4_map_clr_int(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
+                                priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
+       if (!priv->clr_base) {
+               mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       iounmap(priv->clr_base);
+}
+
+int mlx4_alloc_eq_table(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
+                                   sizeof *priv->eq_table.eq, GFP_KERNEL);
+       if (!priv->eq_table.eq)
+               return -ENOMEM;
+
+       return 0;
+}
+
+void mlx4_free_eq_table(struct mlx4_dev *dev)
+{
+       kfree(mlx4_priv(dev)->eq_table.eq);
+}
+
+int mlx4_init_eq_table(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err;
+       int i;
+
+       priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
+                                        sizeof *priv->eq_table.uar_map,
+                                        GFP_KERNEL);
+       if (!priv->eq_table.uar_map) {
+               err = -ENOMEM;
+               goto err_out_free;
+       }
+
+       err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
+                              dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
+       if (err)
+               goto err_out_free;
+
+       for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
+               priv->eq_table.uar_map[i] = NULL;
+
+       err = mlx4_map_clr_int(dev);
+       if (err)
+               goto err_out_bitmap;
+
+       priv->eq_table.clr_mask =
+               swab32(1 << (priv->eq_table.inta_pin & 31));
+       priv->eq_table.clr_int  = priv->clr_base +
+               (priv->eq_table.inta_pin < 32 ? 4 : 0);
+
+       priv->eq_table.irq_names =
+               kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
+                                            dev->caps.comp_pool),
+                       GFP_KERNEL);
+       if (!priv->eq_table.irq_names) {
+               err = -ENOMEM;
+               goto err_out_bitmap;
+       }
+
+       for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
+               err = mlx4_create_eq(dev, dev->caps.num_cqs -
+                                         dev->caps.reserved_cqs +
+                                         MLX4_NUM_SPARE_EQE,
+                                    (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
+                                    &priv->eq_table.eq[i]);
+               if (err) {
+                       --i;
+                       goto err_out_unmap;
+               }
+       }
+
+       err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
+                            (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
+                            &priv->eq_table.eq[dev->caps.num_comp_vectors]);
+       if (err)
+               goto err_out_comp;
+
+       /* If the additional completion vector pool size is 0, this loop
+        * will not run */
+       for (i = dev->caps.num_comp_vectors + 1;
+             i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+
+               err = mlx4_create_eq(dev, dev->caps.num_cqs -
+                                         dev->caps.reserved_cqs +
+                                         MLX4_NUM_SPARE_EQE,
+                                    (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
+                                    &priv->eq_table.eq[i]);
+               if (err) {
+                       --i;
+                       goto err_out_unmap;
+               }
+       }
+
+       if (dev->flags & MLX4_FLAG_MSI_X) {
+               const char *eq_name;
+
+               for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+                       if (i < dev->caps.num_comp_vectors) {
+                               snprintf(priv->eq_table.irq_names +
+                                        i * MLX4_IRQNAME_SIZE,
+                                        MLX4_IRQNAME_SIZE,
+                                        "mlx4-comp-%d@pci:%s", i,
+                                        pci_name(dev->pdev));
+                       } else {
+                               snprintf(priv->eq_table.irq_names +
+                                        i * MLX4_IRQNAME_SIZE,
+                                        MLX4_IRQNAME_SIZE,
+                                        "mlx4-async@pci:%s",
+                                        pci_name(dev->pdev));
+                       }
+
+                       eq_name = priv->eq_table.irq_names +
+                                 i * MLX4_IRQNAME_SIZE;
+                       err = request_irq(priv->eq_table.eq[i].irq,
+                                         mlx4_msi_x_interrupt, 0, eq_name,
+                                         priv->eq_table.eq + i);
+                       if (err)
+                               goto err_out_async;
+
+                       priv->eq_table.eq[i].have_irq = 1;
+               }
+       } else {
+               snprintf(priv->eq_table.irq_names,
+                        MLX4_IRQNAME_SIZE,
+                        DRV_NAME "@pci:%s",
+                        pci_name(dev->pdev));
+               err = request_irq(dev->pdev->irq, mlx4_interrupt,
+                                 IRQF_SHARED, priv->eq_table.irq_names, dev);
+               if (err)
+                       goto err_out_async;
+
+               priv->eq_table.have_irq = 1;
+       }
+
+       err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+                         priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+       if (err)
+               mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
+                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
+
+       for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
+               eq_set_ci(&priv->eq_table.eq[i], 1);
+
+       return 0;
+
+err_out_async:
+       mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
+
+err_out_comp:
+       i = dev->caps.num_comp_vectors - 1;
+
+err_out_unmap:
+       while (i >= 0) {
+               mlx4_free_eq(dev, &priv->eq_table.eq[i]);
+               --i;
+       }
+       mlx4_unmap_clr_int(dev);
+       mlx4_free_irqs(dev);
+
+err_out_bitmap:
+       mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
+
+err_out_free:
+       kfree(priv->eq_table.uar_map);
+
+       return err;
+}
+
+void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i;
+
+       mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
+                   priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+
+       mlx4_free_irqs(dev);
+
+       for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
+               mlx4_free_eq(dev, &priv->eq_table.eq[i]);
+
+       mlx4_unmap_clr_int(dev);
+
+       for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
+               if (priv->eq_table.uar_map[i])
+                       iounmap(priv->eq_table.uar_map[i]);
+
+       mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
+
+       kfree(priv->eq_table.uar_map);
+}
+
+/* A test that verifies that we can accept interrupts on all
+ * the irq vectors of the device.
+ * Interrupts are checked using the NOP command.
+ */
+int mlx4_test_interrupts(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i;
+       int err;
+
+       err = mlx4_NOP(dev);
+       /* When not in MSI-X mode, there is only one IRQ to check */
+       if (!(dev->flags & MLX4_FLAG_MSI_X))
+               return err;
+
+       /* Loop over all completion vectors; for each vector, check whether
+        * it works by mapping command completions to that vector and
+        * performing a NOP command.
+        */
+       for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
+               /* Temporary use polling for command completions */
+               mlx4_cmd_use_polling(dev);
+
+               /* Map the new EQ to handle all asynchronous events */
+               err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+                                 priv->eq_table.eq[i].eqn);
+               if (err) {
+                       mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+                       mlx4_cmd_use_events(dev);
+                       break;
+               }
+
+               /* Go back to using events */
+               mlx4_cmd_use_events(dev);
+               err = mlx4_NOP(dev);
+       }
+
+       /* Return to default */
+       mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+                   priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_test_interrupts);
+
+int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int vec = 0, err = 0, i;
+
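+       /* Scan the pool bitmap under the lock for a free vector beyond the
+        * legacy completion vectors, claim it, name it and request its IRQ. */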
+       spin_lock(&priv->msix_ctl.pool_lock);
+       for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
+               if (~priv->msix_ctl.pool_bm & 1ULL << i) {
+                       priv->msix_ctl.pool_bm |= 1ULL << i;
+                       vec = dev->caps.num_comp_vectors + 1 + i;
+                       snprintf(priv->eq_table.irq_names +
+                                       vec * MLX4_IRQNAME_SIZE,
+                                       MLX4_IRQNAME_SIZE, "%s", name);
+                       err = request_irq(priv->eq_table.eq[vec].irq,
+                                         mlx4_msi_x_interrupt, 0,
+                                         &priv->eq_table.irq_names[vec<<5],
+                                         priv->eq_table.eq + vec);
+                       if (err) {
+                               /* Zero out the bit by flipping it */
+                               priv->msix_ctl.pool_bm ^= 1ULL << i;
+                               vec = 0;
+                               /* Keep scanning the pool; we don't want
+                                * to break here */
+                               continue;
+                       }
+                       eq_set_ci(&priv->eq_table.eq[vec], 1);
+               }
+       }
+       spin_unlock(&priv->msix_ctl.pool_lock);
+
+       if (vec) {
+               *vector = vec;
+       } else {
+               *vector = 0;
+               err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
+       }
+       return err;
+}
+EXPORT_SYMBOL(mlx4_assign_eq);
+
+void mlx4_release_eq(struct mlx4_dev *dev, int vec)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       /* Bitmap index */
+       int i = vec - dev->caps.num_comp_vectors - 1;
+
+       if (likely(i >= 0)) {
+               /* Sanity check: make sure we're not trying to free IRQs
+                * belonging to a legacy EQ */
+               spin_lock(&priv->msix_ctl.pool_lock);
+               if (priv->msix_ctl.pool_bm & 1ULL << i) {
+                       free_irq(priv->eq_table.eq[vec].irq,
+                                &priv->eq_table.eq[vec]);
+                       priv->msix_ctl.pool_bm &= ~(1ULL << i);
+               }
+               spin_unlock(&priv->msix_ctl.pool_lock);
+       }
+}
+EXPORT_SYMBOL(mlx4_release_eq);
+
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
new file mode 100644 (file)
index 0000000..7eb8ba8
--- /dev/null
@@ -0,0 +1,944 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx4/cmd.h>
+#include <linux/cache.h>
+
+#include "fw.h"
+#include "icm.h"
+
+enum {
+       MLX4_COMMAND_INTERFACE_MIN_REV          = 2,
+       MLX4_COMMAND_INTERFACE_MAX_REV          = 3,
+       MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS    = 3,
+};
+
+extern void __buggy_use_of_MLX4_GET(void);
+extern void __buggy_use_of_MLX4_PUT(void);
+
+static int enable_qos;
+module_param(enable_qos, bool, 0444);
+MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
+
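+/*
+ * MLX4_GET and MLX4_PUT copy a 1-, 2-, 4- or 8-byte big-endian field
+ * between a command mailbox and a host-order variable; any other size
+ * fails at link time via the __buggy_use_of_* stubs above.
+ */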
+#define MLX4_GET(dest, source, offset)                               \
+       do {                                                          \
+               void *__p = (char *) (source) + (offset);             \
+               switch (sizeof (dest)) {                              \
+               case 1: (dest) = *(u8 *) __p;       break;            \
+               case 2: (dest) = be16_to_cpup(__p); break;            \
+               case 4: (dest) = be32_to_cpup(__p); break;            \
+               case 8: (dest) = be64_to_cpup(__p); break;            \
+               default: __buggy_use_of_MLX4_GET();                   \
+               }                                                     \
+       } while (0)
+
+#define MLX4_PUT(dest, source, offset)                               \
+       do {                                                          \
+               void *__d = ((char *) (dest) + (offset));             \
+               switch (sizeof(source)) {                             \
+               case 1: *(u8 *) __d = (source);                break; \
+               case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
+               case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
+               case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
+               default: __buggy_use_of_MLX4_PUT();                   \
+               }                                                     \
+       } while (0)
+
+static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
+{
+       static const char *fname[] = {
+               [ 0] = "RC transport",
+               [ 1] = "UC transport",
+               [ 2] = "UD transport",
+               [ 3] = "XRC transport",
+               [ 4] = "reliable multicast",
+               [ 5] = "FCoIB support",
+               [ 6] = "SRQ support",
+               [ 7] = "IPoIB checksum offload",
+               [ 8] = "P_Key violation counter",
+               [ 9] = "Q_Key violation counter",
+               [10] = "VMM",
+               [12] = "DPDP",
+               [15] = "Big LSO headers",
+               [16] = "MW support",
+               [17] = "APM support",
+               [18] = "Atomic ops support",
+               [19] = "Raw multicast support",
+               [20] = "Address vector port checking support",
+               [21] = "UD multicast support",
+               [24] = "Demand paging support",
+               [25] = "Router support",
+               [30] = "IBoE support",
+               [32] = "Unicast loopback support",
+               [38] = "Wake On LAN support",
+               [40] = "UDP RSS support",
+               [41] = "Unicast VEP steering support",
+               [42] = "Multicast VEP steering support",
+               [48] = "Counters support",
+       };
+       int i;
+
+       mlx4_dbg(dev, "DEV_CAP flags:\n");
+       for (i = 0; i < ARRAY_SIZE(fname); ++i)
+               if (fname[i] && (flags & (1LL << i)))
+                       mlx4_dbg(dev, "    %s\n", fname[i]);
+}
+
+int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 *inbox;
+       int err = 0;
+
+#define MOD_STAT_CFG_IN_SIZE           0x100
+
+#define MOD_STAT_CFG_PG_SZ_M_OFFSET    0x002
+#define MOD_STAT_CFG_PG_SZ_OFFSET      0x003
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       inbox = mailbox->buf;
+
+       memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
+
+       MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
+       MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
+
+       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
+                       MLX4_CMD_TIME_CLASS_A);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 *outbox;
+       u8 field;
+       u32 field32, flags, ext_flags;
+       u16 size;
+       u16 stat_rate;
+       int err;
+       int i;
+
+#define QUERY_DEV_CAP_OUT_SIZE                0x100
+#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET                0x10
+#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET         0x11
+#define QUERY_DEV_CAP_RSVD_QP_OFFSET           0x12
+#define QUERY_DEV_CAP_MAX_QP_OFFSET            0x13
+#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET          0x14
+#define QUERY_DEV_CAP_MAX_SRQ_OFFSET           0x15
+#define QUERY_DEV_CAP_RSVD_EEC_OFFSET          0x16
+#define QUERY_DEV_CAP_MAX_EEC_OFFSET           0x17
+#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET         0x19
+#define QUERY_DEV_CAP_RSVD_CQ_OFFSET           0x1a
+#define QUERY_DEV_CAP_MAX_CQ_OFFSET            0x1b
+#define QUERY_DEV_CAP_MAX_MPT_OFFSET           0x1d
+#define QUERY_DEV_CAP_RSVD_EQ_OFFSET           0x1e
+#define QUERY_DEV_CAP_MAX_EQ_OFFSET            0x1f
+#define QUERY_DEV_CAP_RSVD_MTT_OFFSET          0x20
+#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET                0x21
+#define QUERY_DEV_CAP_RSVD_MRW_OFFSET          0x22
+#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET       0x23
+#define QUERY_DEV_CAP_MAX_AV_OFFSET            0x27
+#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET                0x29
+#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET                0x2b
+#define QUERY_DEV_CAP_MAX_GSO_OFFSET           0x2d
+#define QUERY_DEV_CAP_MAX_RDMA_OFFSET          0x2f
+#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET           0x33
+#define QUERY_DEV_CAP_ACK_DELAY_OFFSET         0x35
+#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET         0x36
+#define QUERY_DEV_CAP_VL_PORT_OFFSET           0x37
+#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET                0x38
+#define QUERY_DEV_CAP_MAX_GID_OFFSET           0x3b
+#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET      0x3c
+#define QUERY_DEV_CAP_MAX_PKEY_OFFSET          0x3f
+#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET         0x40
+#define QUERY_DEV_CAP_FLAGS_OFFSET             0x44
+#define QUERY_DEV_CAP_RSVD_UAR_OFFSET          0x48
+#define QUERY_DEV_CAP_UAR_SZ_OFFSET            0x49
+#define QUERY_DEV_CAP_PAGE_SZ_OFFSET           0x4b
+#define QUERY_DEV_CAP_BF_OFFSET                        0x4c
+#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET     0x4d
+#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET  0x4e
+#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET  0x4f
+#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET         0x51
+#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET    0x52
+#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET         0x55
+#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET    0x56
+#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET                0x61
+#define QUERY_DEV_CAP_RSVD_MCG_OFFSET          0x62
+#define QUERY_DEV_CAP_MAX_MCG_OFFSET           0x63
+#define QUERY_DEV_CAP_RSVD_PD_OFFSET           0x64
+#define QUERY_DEV_CAP_MAX_PD_OFFSET            0x65
+#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET      0x68
+#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET   0x80
+#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET      0x82
+#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET      0x84
+#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET     0x86
+#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET      0x88
+#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET      0x8a
+#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET      0x8c
+#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET    0x8e
+#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET      0x90
+#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET    0x92
+#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET                0x94
+#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET         0x98
+#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET                0xa0
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       outbox = mailbox->buf;
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
+                          MLX4_CMD_TIME_CLASS_A);
+       if (err)
+               goto out;
+
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
+       dev_cap->reserved_qps = 1 << (field & 0xf);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
+       dev_cap->max_qps = 1 << (field & 0x1f);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
+       dev_cap->reserved_srqs = 1 << (field >> 4);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
+       dev_cap->max_srqs = 1 << (field & 0x1f);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
+       dev_cap->max_cq_sz = 1 << field;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
+       dev_cap->reserved_cqs = 1 << (field & 0xf);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
+       dev_cap->max_cqs = 1 << (field & 0x1f);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
+       dev_cap->max_mpts = 1 << (field & 0x3f);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
+       dev_cap->reserved_eqs = field & 0xf;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
+       dev_cap->max_eqs = 1 << (field & 0xf);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
+       dev_cap->reserved_mtts = 1 << (field >> 4);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
+       dev_cap->max_mrw_sz = 1 << field;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
+       dev_cap->reserved_mrws = 1 << (field & 0xf);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
+       dev_cap->max_mtt_seg = 1 << (field & 0x3f);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
+       dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
+       dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
+       field &= 0x1f;
+       if (!field)
+               dev_cap->max_gso_sz = 0;
+       else
+               dev_cap->max_gso_sz = 1 << field;
+
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
+       dev_cap->max_rdma_global = 1 << (field & 0x3f);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
+       dev_cap->local_ca_ack_delay = field & 0x1f;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
+       dev_cap->num_ports = field & 0xf;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
+       dev_cap->max_msg_sz = 1 << (field & 0x1f);
+       MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
+       dev_cap->stat_rate_support = stat_rate;
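+       /* The capability flags are split across two dwords; the extended
+        * flags form the upper 32 bits of the 64-bit mask. */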
+       MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+       MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
+       dev_cap->flags = flags | (u64)ext_flags << 32;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
+       dev_cap->reserved_uars = field >> 4;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
+       dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
+       dev_cap->min_page_sz = 1 << field;
+
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
+       if (field & 0x80) {
+               MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
+               dev_cap->bf_reg_size = 1 << (field & 0x1f);
+               MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
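+               /*
+                * If the firmware claims more BlueFlame registers per page
+                * than fit in a system page, fall back to a conservative
+                * default of 2^3 = 8 registers per page.
+                */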
+               if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
+                       field = 3;
+               dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
+               mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
+                        dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
+       } else {
+               dev_cap->bf_reg_size = 0;
+               mlx4_dbg(dev, "BlueFlame not available\n");
+       }
+
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
+       dev_cap->max_sq_sg = field;
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
+       dev_cap->max_sq_desc_sz = size;
+
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
+       dev_cap->max_qp_per_mcg = 1 << field;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
+       dev_cap->reserved_mgms = field & 0xf;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
+       dev_cap->max_mcgs = 1 << field;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
+       dev_cap->reserved_pds = field >> 4;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
+       dev_cap->max_pds = 1 << (field & 0x3f);
+
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
+       dev_cap->rdmarc_entry_sz = size;
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
+       dev_cap->qpc_entry_sz = size;
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
+       dev_cap->aux_entry_sz = size;
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
+       dev_cap->altc_entry_sz = size;
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
+       dev_cap->eqc_entry_sz = size;
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
+       dev_cap->cqc_entry_sz = size;
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
+       dev_cap->srq_entry_sz = size;
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
+       dev_cap->cmpt_entry_sz = size;
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
+       dev_cap->mtt_entry_sz = size;
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
+       dev_cap->dmpt_entry_sz = size;
+
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
+       dev_cap->max_srq_sz = 1 << field;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
+       dev_cap->max_qp_sz = 1 << field;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
+       dev_cap->resize_srq = field & 1;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
+       dev_cap->max_rq_sg = field;
+       MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
+       dev_cap->max_rq_desc_sz = size;
+
+       MLX4_GET(dev_cap->bmme_flags, outbox,
+                QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+       MLX4_GET(dev_cap->reserved_lkey, outbox,
+                QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
+       MLX4_GET(dev_cap->max_icm_sz, outbox,
+                QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
+       if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
+               MLX4_GET(dev_cap->max_counters, outbox,
+                        QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
+
+       if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+               for (i = 1; i <= dev_cap->num_ports; ++i) {
+                       MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
+                       dev_cap->max_vl[i]         = field >> 4;
+                       MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
+                       dev_cap->ib_mtu[i]         = field >> 4;
+                       dev_cap->max_port_width[i] = field & 0xf;
+                       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
+                       dev_cap->max_gids[i]       = 1 << (field & 0xf);
+                       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
+                       dev_cap->max_pkeys[i]      = 1 << (field & 0xf);
+               }
+       } else {
+#define QUERY_PORT_SUPPORTED_TYPE_OFFSET       0x00
+#define QUERY_PORT_MTU_OFFSET                  0x01
+#define QUERY_PORT_ETH_MTU_OFFSET              0x02
+#define QUERY_PORT_WIDTH_OFFSET                        0x06
+#define QUERY_PORT_MAX_GID_PKEY_OFFSET         0x07
+#define QUERY_PORT_MAX_MACVLAN_OFFSET          0x0a
+#define QUERY_PORT_MAX_VL_OFFSET               0x0b
+#define QUERY_PORT_MAC_OFFSET                  0x10
+#define QUERY_PORT_TRANS_VENDOR_OFFSET         0x18
+#define QUERY_PORT_WAVELENGTH_OFFSET           0x1c
+#define QUERY_PORT_TRANS_CODE_OFFSET           0x20
+
+               for (i = 1; i <= dev_cap->num_ports; ++i) {
+                       err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
+                                          MLX4_CMD_TIME_CLASS_B);
+                       if (err)
+                               goto out;
+
+                       MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+                       dev_cap->supported_port_types[i] = field & 3;
+                       MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
+                       dev_cap->ib_mtu[i]         = field & 0xf;
+                       MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
+                       dev_cap->max_port_width[i] = field & 0xf;
+                       MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
+                       dev_cap->max_gids[i]       = 1 << (field >> 4);
+                       dev_cap->max_pkeys[i]      = 1 << (field & 0xf);
+                       MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
+                       dev_cap->max_vl[i]         = field & 0xf;
+                       MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
+                       dev_cap->log_max_macs[i]  = field & 0xf;
+                       dev_cap->log_max_vlans[i] = field >> 4;
+                       MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
+                       MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
+                       MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
+                       dev_cap->trans_type[i] = field32 >> 24;
+                       dev_cap->vendor_oui[i] = field32 & 0xffffff;
+                       MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
+                       MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
+               }
+       }
+
+       mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
+                dev_cap->bmme_flags, dev_cap->reserved_lkey);
+
+       /*
+        * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
+        * we can't use any EQs whose doorbell falls on that page,
+        * even if the EQ itself isn't reserved.
+        */
+       dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
+                                   dev_cap->reserved_eqs);
+
+       mlx4_dbg(dev, "Max ICM size %lld MB\n",
+                (unsigned long long) dev_cap->max_icm_sz >> 20);
+       mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
+                dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
+       mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
+                dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
+       mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
+                dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
+       mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
+                dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
+       mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
+                dev_cap->reserved_mrws, dev_cap->reserved_mtts);
+       mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
+                dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
+       mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
+                dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
+       mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
+                dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
+       mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
+                dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
+                dev_cap->max_port_width[1]);
+       mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
+                dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
+       mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
+                dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
+       mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
+       mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
+
+       dump_dev_cap_flags(dev, dev_cap->flags);
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_icm_iter iter;
+       __be64 *pages;
+       int lg;
+       int nent = 0;
+       int i;
+       int err = 0;
+       int ts = 0, tc = 0;
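+       /* ts accumulates the mapped size in KB and tc counts chunks;
+        * both are used only by the debug messages below. */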
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
+       pages = mailbox->buf;
+
+       for (mlx4_icm_first(icm, &iter);
+            !mlx4_icm_last(&iter);
+            mlx4_icm_next(&iter)) {
+               /*
+                * We have to pass pages that are aligned to their
+                * size, so find the least significant 1 in the
+                * address or size and use that as our log2 size.
+                */
+               lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
+               if (lg < MLX4_ICM_PAGE_SHIFT) {
+                       mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
+                                  MLX4_ICM_PAGE_SIZE,
+                                  (unsigned long long) mlx4_icm_addr(&iter),
+                                  mlx4_icm_size(&iter));
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
+                       if (virt != -1) {
+                               pages[nent * 2] = cpu_to_be64(virt);
+                               virt += 1 << lg;
+                       }
+
+                       pages[nent * 2 + 1] =
+                               cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
+                                           (lg - MLX4_ICM_PAGE_SHIFT));
+                       ts += 1 << (lg - 10);
+                       ++tc;
+
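+                       /*
+                        * Each mailbox entry is 16 bytes (a virtual address
+                        * and a physical address), so flush the mailbox to
+                        * the firmware whenever it fills up and continue.
+                        */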
+                       if (++nent == MLX4_MAILBOX_SIZE / 16) {
+                               err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
+                                               MLX4_CMD_TIME_CLASS_B);
+                               if (err)
+                                       goto out;
+                               nent = 0;
+                       }
+               }
+       }
+
+       if (nent)
+               err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
+       if (err)
+               goto out;
+
+       switch (op) {
+       case MLX4_CMD_MAP_FA:
+               mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
+               break;
+       case MLX4_CMD_MAP_ICM_AUX:
+               mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
+               break;
+       case MLX4_CMD_MAP_ICM:
+               mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
+                         tc, ts, (unsigned long long) virt - (ts << 10));
+               break;
+       }
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
+{
+       return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
+}
+
+int mlx4_UNMAP_FA(struct mlx4_dev *dev)
+{
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_RUN_FW(struct mlx4_dev *dev)
+{
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
+}
+
+int mlx4_QUERY_FW(struct mlx4_dev *dev)
+{
+       struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
+       struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 *outbox;
+       int err = 0;
+       u64 fw_ver;
+       u16 cmd_if_rev;
+       u8 lg;
+
+#define QUERY_FW_OUT_SIZE             0x100
+#define QUERY_FW_VER_OFFSET            0x00
+#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
+#define QUERY_FW_MAX_CMD_OFFSET        0x0f
+#define QUERY_FW_ERR_START_OFFSET      0x30
+#define QUERY_FW_ERR_SIZE_OFFSET       0x38
+#define QUERY_FW_ERR_BAR_OFFSET        0x3c
+
+#define QUERY_FW_SIZE_OFFSET           0x00
+#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
+#define QUERY_FW_CLR_INT_BAR_OFFSET    0x28
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       outbox = mailbox->buf;
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
+                           MLX4_CMD_TIME_CLASS_A);
+       if (err)
+               goto out;
+
+       MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
+       /*
+        * The firmware stores the sub-minor version in more significant
+        * bits than the minor version, so swap the two halves here.
+        */
+       dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
+               ((fw_ver & 0xffff0000ull) >> 16) |
+               ((fw_ver & 0x0000ffffull) << 16);
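+       /* For example, a raw value with major 2, subminor 500, minor 0
+        * (0x000201f40000) becomes 0x0002000001f4 and prints as 2.0.500. */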
+
+       MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
+       if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
+           cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
+               mlx4_err(dev, "Installed FW has unsupported "
+                        "command interface revision %d.\n",
+                        cmd_if_rev);
+               mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
+                        (int) (dev->caps.fw_ver >> 32),
+                        (int) (dev->caps.fw_ver >> 16) & 0xffff,
+                        (int) dev->caps.fw_ver & 0xffff);
+               mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
+                        MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
+               err = -ENODEV;
+               goto out;
+       }
+
+       if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
+               dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
+
+       MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
+       cmd->max_cmds = 1 << lg;
+
+       mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
+                (int) (dev->caps.fw_ver >> 32),
+                (int) (dev->caps.fw_ver >> 16) & 0xffff,
+                (int) dev->caps.fw_ver & 0xffff,
+                cmd_if_rev, cmd->max_cmds);
+
+       MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
+       MLX4_GET(fw->catas_size,   outbox, QUERY_FW_ERR_SIZE_OFFSET);
+       MLX4_GET(fw->catas_bar,    outbox, QUERY_FW_ERR_BAR_OFFSET);
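+       /* The BAR number is encoded above bit 6; doubling converts it into
+        * a Linux PCI resource index, since each 64-bit BAR spans two slots. */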
+       fw->catas_bar = (fw->catas_bar >> 6) * 2;
+
+       mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
+                (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
+
+       MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
+       MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
+       MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
+       fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
+
+       mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
+
+       /*
+        * Round up number of system pages needed in case
+        * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
+        */
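+       /* With 64 KB system pages, for example, sixteen 4 KB ICM pages fit
+        * in each system page: ALIGN(fw_pages, 16) >> 4. */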
+       fw->fw_pages =
+               ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
+               (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
+
+       mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
+                (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+static void get_board_id(void *vsd, char *board_id)
+{
+       int i;
+
+#define VSD_OFFSET_SIG1                0x00
+#define VSD_OFFSET_SIG2                0xde
+#define VSD_OFFSET_MLX_BOARD_ID        0xd0
+#define VSD_OFFSET_TS_BOARD_ID 0x20
+
+#define VSD_SIGNATURE_TOPSPIN  0x5ad
+
+       memset(board_id, 0, MLX4_BOARD_ID_LEN);
+
+       if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
+           be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
+               strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
+       } else {
+               /*
+                * The board ID is a string but the firmware byte
+                * swaps each 4-byte word before passing it back to
+                * us.  Therefore we need to swab it before printing.
+                */
+               for (i = 0; i < 4; ++i)
+                       ((u32 *) board_id)[i] =
+                               swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
+       }
+}
+
+int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 *outbox;
+       int err;
+
+#define QUERY_ADAPTER_OUT_SIZE             0x100
+#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
+#define QUERY_ADAPTER_VSD_OFFSET           0x20
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       outbox = mailbox->buf;
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
+                          MLX4_CMD_TIME_CLASS_A);
+       if (err)
+               goto out;
+
+       MLX4_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);
+
+       get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
+                    adapter->board_id);
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       __be32 *inbox;
+       int err;
+
+#define INIT_HCA_IN_SIZE                0x200
+#define INIT_HCA_VERSION_OFFSET                 0x000
+#define         INIT_HCA_VERSION                2
+#define INIT_HCA_CACHELINE_SZ_OFFSET    0x0e
+#define INIT_HCA_FLAGS_OFFSET           0x014
+#define INIT_HCA_QPC_OFFSET             0x020
+#define         INIT_HCA_QPC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x10)
+#define         INIT_HCA_LOG_QP_OFFSET          (INIT_HCA_QPC_OFFSET + 0x17)
+#define         INIT_HCA_SRQC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x28)
+#define         INIT_HCA_LOG_SRQ_OFFSET         (INIT_HCA_QPC_OFFSET + 0x2f)
+#define         INIT_HCA_CQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x30)
+#define         INIT_HCA_LOG_CQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x37)
+#define         INIT_HCA_ALTC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x40)
+#define         INIT_HCA_AUXC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x50)
+#define         INIT_HCA_EQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x60)
+#define         INIT_HCA_LOG_EQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x67)
+#define         INIT_HCA_RDMARC_BASE_OFFSET     (INIT_HCA_QPC_OFFSET + 0x70)
+#define         INIT_HCA_LOG_RD_OFFSET          (INIT_HCA_QPC_OFFSET + 0x77)
+#define INIT_HCA_MCAST_OFFSET           0x0c0
+#define         INIT_HCA_MC_BASE_OFFSET         (INIT_HCA_MCAST_OFFSET + 0x00)
+#define         INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
+#define         INIT_HCA_LOG_MC_HASH_SZ_OFFSET  (INIT_HCA_MCAST_OFFSET + 0x16)
+#define         INIT_HCA_UC_STEERING_OFFSET     (INIT_HCA_MCAST_OFFSET + 0x18)
+#define         INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
+#define INIT_HCA_TPT_OFFSET             0x0f0
+#define         INIT_HCA_DMPT_BASE_OFFSET       (INIT_HCA_TPT_OFFSET + 0x00)
+#define         INIT_HCA_LOG_MPT_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x0b)
+#define         INIT_HCA_MTT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x10)
+#define         INIT_HCA_CMPT_BASE_OFFSET       (INIT_HCA_TPT_OFFSET + 0x18)
+#define INIT_HCA_UAR_OFFSET             0x120
+#define         INIT_HCA_LOG_UAR_SZ_OFFSET      (INIT_HCA_UAR_OFFSET + 0x0a)
+#define         INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       inbox = mailbox->buf;
+
+       memset(inbox, 0, INIT_HCA_IN_SIZE);
+
+       *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
+
+       *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
+               (ilog2(cache_line_size()) - 4) << 5;
+
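+       /* Bit 1 of the flags word tells the HCA whether the host is
+        * big-endian; clear it on little-endian hosts. */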
+#if defined(__LITTLE_ENDIAN)
+       *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
+#elif defined(__BIG_ENDIAN)
+       *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
+#else
+#error Host endianness not defined
+#endif
+       /* Check port for UD address vector: */
+       *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
+
+       /* Enable IPoIB checksumming if we can: */
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
+               *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
+
+       /* Enable QoS support if module parameter set */
+       if (enable_qos)
+               *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
+
+       /* enable counters */
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
+               *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
+
+       /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+       MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
+       MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
+       MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
+       MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
+       MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
+       MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
+       MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
+       MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
+       MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
+       MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
+       MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
+       MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
+
+       /* multicast attributes */
+
+       MLX4_PUT(inbox, param->mc_base,         INIT_HCA_MC_BASE_OFFSET);
+       MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+       MLX4_PUT(inbox, param->log_mc_hash_sz,  INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+               MLX4_PUT(inbox, (u8) (1 << 3),  INIT_HCA_UC_STEERING_OFFSET);
+       MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+       /* TPT attributes */
+
+       MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
+       MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
+       MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
+       MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);
+
+       /* UAR attributes */
+
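+       /* The UAR page size is encoded as log2 of the size in 4 KB units,
+        * i.e. 0 for 4 KB system pages. */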
+       MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
+       MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);
+
+       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
+
+       if (err)
+               mlx4_err(dev, "INIT_HCA returns %d\n", err);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 *inbox;
+       int err;
+       u32 flags;
+       u16 field;
+
+       if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+#define INIT_PORT_IN_SIZE          256
+#define INIT_PORT_FLAGS_OFFSET     0x00
+#define INIT_PORT_FLAG_SIG         (1 << 18)
+#define INIT_PORT_FLAG_NG          (1 << 17)
+#define INIT_PORT_FLAG_G0          (1 << 16)
+#define INIT_PORT_VL_SHIFT         4
+#define INIT_PORT_PORT_WIDTH_SHIFT 8
+#define INIT_PORT_MTU_OFFSET       0x04
+#define INIT_PORT_MAX_GID_OFFSET   0x06
+#define INIT_PORT_MAX_PKEY_OFFSET  0x0a
+#define INIT_PORT_GUID0_OFFSET     0x10
+#define INIT_PORT_NODE_GUID_OFFSET 0x18
+#define INIT_PORT_SI_GUID_OFFSET   0x20
+
+               mailbox = mlx4_alloc_cmd_mailbox(dev);
+               if (IS_ERR(mailbox))
+                       return PTR_ERR(mailbox);
+               inbox = mailbox->buf;
+
+               memset(inbox, 0, INIT_PORT_IN_SIZE);
+
+               flags = 0;
+               flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
+               flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
+               MLX4_PUT(inbox, flags,            INIT_PORT_FLAGS_OFFSET);
+
+               field = 128 << dev->caps.ib_mtu_cap[port];
+               MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
+               field = dev->caps.gid_table_len[port];
+               MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
+               field = dev->caps.pkey_table_len[port];
+               MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
+
+               err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
+                              MLX4_CMD_TIME_CLASS_A);
+
+               mlx4_free_cmd_mailbox(dev, mailbox);
+       } else {
+               err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+                              MLX4_CMD_TIME_CLASS_A);
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
+
+int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
+{
+       return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
+}
+EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
+
+int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
+{
+       return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
+}
+
+int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
+{
+       int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
+                              MLX4_CMD_SET_ICM_SIZE,
+                              MLX4_CMD_TIME_CLASS_A);
+       if (ret)
+               return ret;
+
+       /*
+        * Round up number of system pages needed in case
+        * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
+        */
+       *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
+               (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
+
+       return 0;
+}
+
+int mlx4_NOP(struct mlx4_dev *dev)
+{
+       /* Input modifier of 0x1f means "finish as soon as possible." */
+       return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
+}
+
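+/*
+ * Wake-on-LAN configuration lives behind MOD_STAT_CFG: the input
+ * modifier selects setup mode 5 plus the port, and the op modifier
+ * distinguishes a read (0x3) from a write (0x1).
+ */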
+#define MLX4_WOL_SETUP_MODE (5 << 28)
+int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
+{
+       u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
+
+       return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
+                           MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
+}
+EXPORT_SYMBOL_GPL(mlx4_wol_read);
+
+int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
+{
+       u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
+
+       return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
+                                       MLX4_CMD_TIME_CLASS_A);
+}
+EXPORT_SYMBOL_GPL(mlx4_wol_write);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
new file mode 100644 (file)
index 0000000..1e8ecc3
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_FW_H
+#define MLX4_FW_H
+
+#include "mlx4.h"
+#include "icm.h"
+
+struct mlx4_mod_stat_cfg {
+       u8 log_pg_sz;
+       u8 log_pg_sz_m;
+};
+
+struct mlx4_dev_cap {
+       int max_srq_sz;
+       int max_qp_sz;
+       int reserved_qps;
+       int max_qps;
+       int reserved_srqs;
+       int max_srqs;
+       int max_cq_sz;
+       int reserved_cqs;
+       int max_cqs;
+       int max_mpts;
+       int reserved_eqs;
+       int max_eqs;
+       int reserved_mtts;
+       int max_mrw_sz;
+       int reserved_mrws;
+       int max_mtt_seg;
+       int max_requester_per_qp;
+       int max_responder_per_qp;
+       int max_rdma_global;
+       int local_ca_ack_delay;
+       int num_ports;
+       u32 max_msg_sz;
+       int ib_mtu[MLX4_MAX_PORTS + 1];
+       int max_port_width[MLX4_MAX_PORTS + 1];
+       int max_vl[MLX4_MAX_PORTS + 1];
+       int max_gids[MLX4_MAX_PORTS + 1];
+       int max_pkeys[MLX4_MAX_PORTS + 1];
+       u64 def_mac[MLX4_MAX_PORTS + 1];
+       u16 eth_mtu[MLX4_MAX_PORTS + 1];
+       int trans_type[MLX4_MAX_PORTS + 1];
+       int vendor_oui[MLX4_MAX_PORTS + 1];
+       u16 wavelength[MLX4_MAX_PORTS + 1];
+       u64 trans_code[MLX4_MAX_PORTS + 1];
+       u16 stat_rate_support;
+       u64 flags;
+       int reserved_uars;
+       int uar_size;
+       int min_page_sz;
+       int bf_reg_size;
+       int bf_regs_per_page;
+       int max_sq_sg;
+       int max_sq_desc_sz;
+       int max_rq_sg;
+       int max_rq_desc_sz;
+       int max_qp_per_mcg;
+       int reserved_mgms;
+       int max_mcgs;
+       int reserved_pds;
+       int max_pds;
+       int qpc_entry_sz;
+       int rdmarc_entry_sz;
+       int altc_entry_sz;
+       int aux_entry_sz;
+       int srq_entry_sz;
+       int cqc_entry_sz;
+       int eqc_entry_sz;
+       int dmpt_entry_sz;
+       int cmpt_entry_sz;
+       int mtt_entry_sz;
+       int resize_srq;
+       u32 bmme_flags;
+       u32 reserved_lkey;
+       u64 max_icm_sz;
+       int max_gso_sz;
+       u8  supported_port_types[MLX4_MAX_PORTS + 1];
+       u8  log_max_macs[MLX4_MAX_PORTS + 1];
+       u8  log_max_vlans[MLX4_MAX_PORTS + 1];
+       u32 max_counters;
+};
+
+struct mlx4_adapter {
+       char board_id[MLX4_BOARD_ID_LEN];
+       u8   inta_pin;
+};
+
+struct mlx4_init_hca_param {
+       u64 qpc_base;
+       u64 rdmarc_base;
+       u64 auxc_base;
+       u64 altc_base;
+       u64 srqc_base;
+       u64 cqc_base;
+       u64 eqc_base;
+       u64 mc_base;
+       u64 dmpt_base;
+       u64 cmpt_base;
+       u64 mtt_base;
+       u16 log_mc_entry_sz;
+       u16 log_mc_hash_sz;
+       u8  log_num_qps;
+       u8  log_num_srqs;
+       u8  log_num_cqs;
+       u8  log_num_eqs;
+       u8  log_rd_per_qp;
+       u8  log_mc_table_sz;
+       u8  log_mpt_sz;
+       u8  log_uar_sz;
+};
+
+struct mlx4_init_ib_param {
+       int port_width;
+       int vl_cap;
+       int mtu_cap;
+       u16 gid_cap;
+       u16 pkey_cap;
+       int set_guid0;
+       u64 guid0;
+       int set_node_guid;
+       u64 node_guid;
+       int set_si_guid;
+       u64 si_guid;
+};
+
+struct mlx4_set_ib_param {
+       int set_si_guid;
+       int reset_qkey_viol;
+       u64 si_guid;
+       u32 cap_mask;
+};
+
+int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
+int mlx4_UNMAP_FA(struct mlx4_dev *dev);
+int mlx4_RUN_FW(struct mlx4_dev *dev);
+int mlx4_QUERY_FW(struct mlx4_dev *dev);
+int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
+int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
+int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
+int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
+int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
+int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
+int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
+int mlx4_NOP(struct mlx4_dev *dev);
+int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
+
+#endif /* MLX4_FW_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
new file mode 100644 (file)
index 0000000..02393fd
--- /dev/null
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+#include "icm.h"
+#include "fw.h"
+
+/*
+ * We allocate ICM in chunks as large as we can get, up to a maximum of
+ * 256 KB per chunk.
+ */
+enum {
+       MLX4_ICM_ALLOC_SIZE     = 1 << 18,
+       MLX4_TABLE_CHUNK_SIZE   = 1 << 18
+};
+
+static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
+{
+       int i;
+
+       if (chunk->nsg > 0)
+               pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
+                            PCI_DMA_BIDIRECTIONAL);
+
+       for (i = 0; i < chunk->npages; ++i)
+               __free_pages(sg_page(&chunk->mem[i]),
+                            get_order(chunk->mem[i].length));
+}
+
+static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
+{
+       int i;
+
+       for (i = 0; i < chunk->npages; ++i)
+               dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
+                                 lowmem_page_address(sg_page(&chunk->mem[i])),
+                                 sg_dma_address(&chunk->mem[i]));
+}
+
+void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
+{
+       struct mlx4_icm_chunk *chunk, *tmp;
+
+       if (!icm)
+               return;
+
+       list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
+               if (coherent)
+                       mlx4_free_icm_coherent(dev, chunk);
+               else
+                       mlx4_free_icm_pages(dev, chunk);
+
+               kfree(chunk);
+       }
+
+       kfree(icm);
+}
+
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+{
+       struct page *page;
+
+       page = alloc_pages(gfp_mask, order);
+       if (!page)
+               return -ENOMEM;
+
+       sg_set_page(mem, page, PAGE_SIZE << order, 0);
+       return 0;
+}
+
+static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
+                                   int order, gfp_t gfp_mask)
+{
+       void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
+                                      &sg_dma_address(mem), gfp_mask);
+       if (!buf)
+               return -ENOMEM;
+
+       sg_set_buf(mem, buf, PAGE_SIZE << order);
+       BUG_ON(mem->offset);
+       sg_dma_len(mem) = PAGE_SIZE << order;
+       return 0;
+}
+
+struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
+                               gfp_t gfp_mask, int coherent)
+{
+       struct mlx4_icm *icm;
+       struct mlx4_icm_chunk *chunk = NULL;
+       int cur_order;
+       int ret;
+
+       /* We use sg_set_buf for coherent allocs, which assumes low memory */
+       BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
+
+       icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+       if (!icm)
+               return NULL;
+
+       icm->refcount = 0;
+       INIT_LIST_HEAD(&icm->chunk_list);
+
+       cur_order = get_order(MLX4_ICM_ALLOC_SIZE);
+
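+       /*
+        * Start with the largest allocation order and fall back to
+        * smaller orders whenever the allocator fails, so that large
+        * physically contiguous chunks are used when memory allows.
+        */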
+       while (npages > 0) {
+               if (!chunk) {
+                       chunk = kmalloc(sizeof *chunk,
+                                       gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+                       if (!chunk)
+                               goto fail;
+
+                       sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
+                       chunk->npages = 0;
+                       chunk->nsg    = 0;
+                       list_add_tail(&chunk->list, &icm->chunk_list);
+               }
+
+               while (1 << cur_order > npages)
+                       --cur_order;
+
+               if (coherent)
+                       ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
+                                                     &chunk->mem[chunk->npages],
+                                                     cur_order, gfp_mask);
+               else
+                       ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
+                                                  cur_order, gfp_mask);
+
+               if (ret) {
+                       if (--cur_order < 0)
+                               goto fail;
+                       else
+                               continue;
+               }
+
+               ++chunk->npages;
+
+               if (coherent)
+                       ++chunk->nsg;
+               else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
+                       chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+                                               chunk->npages,
+                                               PCI_DMA_BIDIRECTIONAL);
+
+                       if (chunk->nsg <= 0)
+                               goto fail;
+               }
+
+               if (chunk->npages == MLX4_ICM_CHUNK_LEN)
+                       chunk = NULL;
+
+               npages -= 1 << cur_order;
+       }
+
+       if (!coherent && chunk) {
+               chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+                                       chunk->npages,
+                                       PCI_DMA_BIDIRECTIONAL);
+
+               if (chunk->nsg <= 0)
+                       goto fail;
+       }
+
+       return icm;
+
+fail:
+       mlx4_free_icm(dev, icm, coherent);
+       return NULL;
+}
+
+static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
+{
+       return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
+}
+
+static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
+{
+       return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
+                       MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
+{
+       return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
+}
+
+int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
+{
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
+{
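+       /* Each ICM chunk covers MLX4_TABLE_CHUNK_SIZE / obj_size objects;
+        * the mask wraps obj into the table's range. */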
+       int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
+       int ret = 0;
+
+       mutex_lock(&table->mutex);
+
+       if (table->icm[i]) {
+               ++table->icm[i]->refcount;
+               goto out;
+       }
+
+       table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
+                                      (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
+                                      __GFP_NOWARN, table->coherent);
+       if (!table->icm[i]) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
+                        (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
+               mlx4_free_icm(dev, table->icm[i], table->coherent);
+               table->icm[i] = NULL;
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ++table->icm[i]->refcount;
+
+out:
+       mutex_unlock(&table->mutex);
+       return ret;
+}
+
+void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
+{
+       int i;
+
+       i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
+
+       mutex_lock(&table->mutex);
+
+       if (--table->icm[i]->refcount == 0) {
+               mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
+                              MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
+               mlx4_free_icm(dev, table->icm[i], table->coherent);
+               table->icm[i] = NULL;
+       }
+
+       mutex_unlock(&table->mutex);
+}
+
+void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
+{
+       int idx, offset, dma_offset, i;
+       struct mlx4_icm_chunk *chunk;
+       struct mlx4_icm *icm;
+       struct page *page = NULL;
+
+       if (!table->lowmem)
+               return NULL;
+
+       mutex_lock(&table->mutex);
+
+       idx = (obj & (table->num_obj - 1)) * table->obj_size;
+       icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
+       dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;
+
+       if (!icm)
+               goto out;
+
+       list_for_each_entry(chunk, &icm->chunk_list, list) {
+               for (i = 0; i < chunk->npages; ++i) {
+                       if (dma_handle && dma_offset >= 0) {
+                               if (sg_dma_len(&chunk->mem[i]) > dma_offset)
+                                       *dma_handle = sg_dma_address(&chunk->mem[i]) +
+                                               dma_offset;
+                               dma_offset -= sg_dma_len(&chunk->mem[i]);
+                       }
+                       /*
+                        * DMA mapping can merge pages but not split them,
+                        * so if we found the page, dma_handle has already
+                        * been assigned to.
+                        */
+                       if (chunk->mem[i].length > offset) {
+                               page = sg_page(&chunk->mem[i]);
+                               goto out;
+                       }
+                       offset -= chunk->mem[i].length;
+               }
+       }
+
+out:
+       mutex_unlock(&table->mutex);
+       return page ? lowmem_page_address(page) + offset : NULL;
+}
+
+int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+                        int start, int end)
+{
+       int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
+       int i, err;
+
+       for (i = start; i <= end; i += inc) {
+               err = mlx4_table_get(dev, table, i);
+               if (err)
+                       goto fail;
+       }
+
+       return 0;
+
+fail:
+       while (i > start) {
+               i -= inc;
+               mlx4_table_put(dev, table, i);
+       }
+
+       return err;
+}
+
+void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+                         int start, int end)
+{
+       int i;
+
+       for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
+               mlx4_table_put(dev, table, i);
+}
+
+int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+                       u64 virt, int obj_size, int nobj, int reserved,
+                       int use_lowmem, int use_coherent)
+{
+       int obj_per_chunk;
+       int num_icm;
+       unsigned chunk_size;
+       int i;
+
+       obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
+       num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+
+       table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
+       if (!table->icm)
+               return -ENOMEM;
+       table->virt     = virt;
+       table->num_icm  = num_icm;
+       table->num_obj  = nobj;
+       table->obj_size = obj_size;
+       table->lowmem   = use_lowmem;
+       table->coherent = use_coherent;
+       mutex_init(&table->mutex);
+
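+       /*
+        * Pre-map the chunks backing the reserved (firmware-owned)
+        * objects; the final chunk may be trimmed so that only whole
+        * pages covering nobj * obj_size are allocated.
+        */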
+       for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
+               chunk_size = MLX4_TABLE_CHUNK_SIZE;
+               if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
+                       chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);
+
+               table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
+                                              (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
+                                              __GFP_NOWARN, use_coherent);
+               if (!table->icm[i])
+                       goto err;
+               if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
+                       mlx4_free_icm(dev, table->icm[i], use_coherent);
+                       table->icm[i] = NULL;
+                       goto err;
+               }
+
+               /*
+                * Add a reference to this ICM chunk so that it never
+                * gets freed (since it contains reserved firmware objects).
+                */
+               ++table->icm[i]->refcount;
+       }
+
+       return 0;
+
+err:
+       for (i = 0; i < num_icm; ++i)
+               if (table->icm[i]) {
+                       mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
+                                      MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
+                       mlx4_free_icm(dev, table->icm[i], use_coherent);
+               }
+
+       return -ENOMEM;
+}
+
+void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
+{
+       int i;
+
+       for (i = 0; i < table->num_icm; ++i)
+               if (table->icm[i]) {
+                       mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
+                                      MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
+                       mlx4_free_icm(dev, table->icm[i], table->coherent);
+               }
+
+       kfree(table->icm);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
new file mode 100644 (file)
index 0000000..b10c07a
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_ICM_H
+#define MLX4_ICM_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/mutex.h>
+
+#define MLX4_ICM_CHUNK_LEN                                             \
+       ((256 - sizeof (struct list_head) - 2 * sizeof (int)) /         \
+        (sizeof (struct scatterlist)))
+
+enum {
+       MLX4_ICM_PAGE_SHIFT     = 12,
+       MLX4_ICM_PAGE_SIZE      = 1 << MLX4_ICM_PAGE_SHIFT,
+};
+
+struct mlx4_icm_chunk {
+       struct list_head        list;
+       int                     npages;
+       int                     nsg;
+       struct scatterlist      mem[MLX4_ICM_CHUNK_LEN];
+};
+
+struct mlx4_icm {
+       struct list_head        chunk_list;
+       int                     refcount;
+};
+
+struct mlx4_icm_iter {
+       struct mlx4_icm        *icm;
+       struct mlx4_icm_chunk  *chunk;
+       int                     page_idx;
+};
+
+struct mlx4_dev;
+
+struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
+                               gfp_t gfp_mask, int coherent);
+void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
+
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
+void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
+int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+                        int start, int end);
+void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+                         int start, int end);
+int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
+                       u64 virt, int obj_size, int nobj, int reserved,
+                       int use_lowmem, int use_coherent);
+void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
+void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
+
+static inline void mlx4_icm_first(struct mlx4_icm *icm,
+                                 struct mlx4_icm_iter *iter)
+{
+       iter->icm      = icm;
+       iter->chunk    = list_empty(&icm->chunk_list) ?
+               NULL : list_entry(icm->chunk_list.next,
+                                 struct mlx4_icm_chunk, list);
+       iter->page_idx = 0;
+}
+
+static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
+{
+       return !iter->chunk;
+}
+
+static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
+{
+       if (++iter->page_idx >= iter->chunk->nsg) {
+               if (iter->chunk->list.next == &iter->icm->chunk_list) {
+                       iter->chunk = NULL;
+                       return;
+               }
+
+               iter->chunk = list_entry(iter->chunk->list.next,
+                                        struct mlx4_icm_chunk, list);
+               iter->page_idx = 0;
+       }
+}
+
+static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
+{
+       return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
+}
+
+static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
+{
+       return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
+}
+
+int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
+int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
+
+#endif /* MLX4_ICM_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
new file mode 100644 (file)
index 0000000..73c94fc
--- /dev/null
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "mlx4.h"
+
+struct mlx4_device_context {
+       struct list_head        list;
+       struct mlx4_interface  *intf;
+       void                   *context;
+};
+
+static LIST_HEAD(intf_list);
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(intf_mutex);
+
+static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
+{
+       struct mlx4_device_context *dev_ctx;
+
+       dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL);
+       if (!dev_ctx)
+               return;
+
+       dev_ctx->intf    = intf;
+       dev_ctx->context = intf->add(&priv->dev);
+
+       if (dev_ctx->context) {
+               spin_lock_irq(&priv->ctx_lock);
+               list_add_tail(&dev_ctx->list, &priv->ctx_list);
+               spin_unlock_irq(&priv->ctx_lock);
+       } else
+               kfree(dev_ctx);
+}
+
+static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
+{
+       struct mlx4_device_context *dev_ctx;
+
+       list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+               if (dev_ctx->intf == intf) {
+                       spin_lock_irq(&priv->ctx_lock);
+                       list_del(&dev_ctx->list);
+                       spin_unlock_irq(&priv->ctx_lock);
+
+                       intf->remove(&priv->dev, dev_ctx->context);
+                       kfree(dev_ctx);
+                       return;
+               }
+}
+
+int mlx4_register_interface(struct mlx4_interface *intf)
+{
+       struct mlx4_priv *priv;
+
+       if (!intf->add || !intf->remove)
+               return -EINVAL;
+
+       mutex_lock(&intf_mutex);
+
+       list_add_tail(&intf->list, &intf_list);
+       list_for_each_entry(priv, &dev_list, dev_list)
+               mlx4_add_device(intf, priv);
+
+       mutex_unlock(&intf_mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_interface);
+
+void mlx4_unregister_interface(struct mlx4_interface *intf)
+{
+       struct mlx4_priv *priv;
+
+       mutex_lock(&intf_mutex);
+
+       list_for_each_entry(priv, &dev_list, dev_list)
+               mlx4_remove_device(intf, priv);
+
+       list_del(&intf->list);
+
+       mutex_unlock(&intf_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
+
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_device_context *dev_ctx;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->ctx_lock, flags);
+
+       list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+               if (dev_ctx->intf->event)
+                       dev_ctx->intf->event(dev, dev_ctx->context, type, port);
+
+       spin_unlock_irqrestore(&priv->ctx_lock, flags);
+}
+
+int mlx4_register_device(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_interface *intf;
+
+       mutex_lock(&intf_mutex);
+
+       list_add_tail(&priv->dev_list, &dev_list);
+       list_for_each_entry(intf, &intf_list, list)
+               mlx4_add_device(intf, priv);
+
+       mutex_unlock(&intf_mutex);
+       mlx4_start_catas_poll(dev);
+
+       return 0;
+}
+
+void mlx4_unregister_device(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_interface *intf;
+
+       mlx4_stop_catas_poll(dev);
+       mutex_lock(&intf_mutex);
+
+       list_for_each_entry(intf, &intf_list, list)
+               mlx4_remove_device(intf, priv);
+
+       list_del(&priv->dev_list);
+
+       mutex_unlock(&intf_mutex);
+}
+
+void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_device_context *dev_ctx;
+       unsigned long flags;
+       void *result = NULL;
+
+       spin_lock_irqsave(&priv->ctx_lock, flags);
+
+       list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+               if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
+                       result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
+                       break;
+               }
+
+       spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
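
intf.c is a small matchmaker: protocol drivers register a struct mlx4_interface carrying add/remove callbacks, core devices register on a parallel list, and mlx4_add_device() pairs every interface with every device under intf_mutex. A hedged sketch of a minimal client, using only the callback signatures visible above (my_add, my_remove and my_intf are illustrative names, not driver symbols):

    /* Illustrative consumer of the interface API above. */
    static void *my_add(struct mlx4_dev *dev)
    {
            /* A non-NULL return value becomes the per-device context
             * that intf.c stores and later hands back to remove(),
             * event() and get_dev(). */
            return kzalloc(64, GFP_KERNEL);
    }

    static void my_remove(struct mlx4_dev *dev, void *context)
    {
            kfree(context);
    }

    static struct mlx4_interface my_intf = {
            .add    = my_add,
            .remove = my_remove,
            /* .event and .get_dev are optional; they feed
             * mlx4_dispatch_event() and mlx4_get_protocol_dev(). */
    };

    static int __init my_init(void)
    {
            return mlx4_register_interface(&my_intf);
    }

The Ethernet driver moved by this patch follows this same pattern in en_main.c, registering with .protocol = MLX4_PROT_ETH so that mlx4_get_protocol_dev() can resolve its net devices.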
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
new file mode 100644 (file)
index 0000000..f0ee35d
--- /dev/null
@@ -0,0 +1,1529 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/io-mapping.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/doorbell.h>
+
+#include "mlx4.h"
+#include "fw.h"
+#include "icm.h"
+
+MODULE_AUTHOR("Roland Dreier");
+MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+struct workqueue_struct *mlx4_wq;
+
+#ifdef CONFIG_MLX4_DEBUG
+
+int mlx4_debug_level = 0;
+module_param_named(debug_level, mlx4_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
+
+#endif /* CONFIG_MLX4_DEBUG */
+
+#ifdef CONFIG_PCI_MSI
+
+static int msi_x = 1;
+module_param(msi_x, int, 0444);
+MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
+
+#else /* CONFIG_PCI_MSI */
+
+#define msi_x (0)
+
+#endif /* CONFIG_PCI_MSI */
+
+static char mlx4_version[] __devinitdata =
+       DRV_NAME ": Mellanox ConnectX core driver v"
+       DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static struct mlx4_profile default_profile = {
+       .num_qp         = 1 << 17,
+       .num_srq        = 1 << 16,
+       .rdmarc_per_qp  = 1 << 4,
+       .num_cq         = 1 << 16,
+       .num_mcg        = 1 << 13,
+       .num_mpt        = 1 << 17,
+       .num_mtt        = 1 << 20,
+};
+
+static int log_num_mac = 2;
+module_param_named(log_num_mac, log_num_mac, int, 0444);
+MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
+
+static int log_num_vlan;
+module_param_named(log_num_vlan, log_num_vlan, int, 0444);
+MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
+
+static int use_prio;
+module_param_named(use_prio, use_prio, bool, 0444);
+MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
+                 "(0/1, default 0)");
+
+static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
+
+int mlx4_check_port_params(struct mlx4_dev *dev,
+                          enum mlx4_port_type *port_type)
+{
+       int i;
+
+       for (i = 0; i < dev->caps.num_ports - 1; i++) {
+               if (port_type[i] != port_type[i + 1]) {
+                       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+                               mlx4_err(dev, "Only same port types supported "
+                                        "on this HCA, aborting.\n");
+                               return -EINVAL;
+                       }
+                       if (port_type[i] == MLX4_PORT_TYPE_ETH &&
+                           port_type[i + 1] == MLX4_PORT_TYPE_IB)
+                               return -EINVAL;
+               }
+       }
+
+       for (i = 0; i < dev->caps.num_ports; i++) {
+               if (!(port_type[i] & dev->caps.supported_type[i+1])) {
+                       mlx4_err(dev, "Requested port type for port %d is not "
+                                     "supported on this HCA\n", i + 1);
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
+
+static void mlx4_set_port_mask(struct mlx4_dev *dev)
+{
+       int i;
+
+       dev->caps.port_mask = 0;
+       for (i = 1; i <= dev->caps.num_ports; ++i)
+               if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
+                       dev->caps.port_mask |= 1 << (i - 1);
+}
+
+static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+{
+       int err;
+       int i;
+
+       err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
+       if (err) {
+               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+               return err;
+       }
+
+       if (dev_cap->min_page_sz > PAGE_SIZE) {
+               mlx4_err(dev, "HCA minimum page size of %d bigger than "
+                        "kernel PAGE_SIZE of %ld, aborting.\n",
+                        dev_cap->min_page_sz, PAGE_SIZE);
+               return -ENODEV;
+       }
+       if (dev_cap->num_ports > MLX4_MAX_PORTS) {
+               mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+                        "aborting.\n",
+                        dev_cap->num_ports, MLX4_MAX_PORTS);
+               return -ENODEV;
+       }
+
+       if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
+               mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
+                        "PCI resource 2 size of 0x%llx, aborting.\n",
+                        dev_cap->uar_size,
+                        (unsigned long long) pci_resource_len(dev->pdev, 2));
+               return -ENODEV;
+       }
+
+       dev->caps.num_ports          = dev_cap->num_ports;
+       for (i = 1; i <= dev->caps.num_ports; ++i) {
+               dev->caps.vl_cap[i]         = dev_cap->max_vl[i];
+               dev->caps.ib_mtu_cap[i]     = dev_cap->ib_mtu[i];
+               dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
+               dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
+               dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
+               dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
+               dev->caps.def_mac[i]        = dev_cap->def_mac[i];
+               dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
+               dev->caps.trans_type[i]     = dev_cap->trans_type[i];
+               dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
+               dev->caps.wavelength[i]     = dev_cap->wavelength[i];
+               dev->caps.trans_code[i]     = dev_cap->trans_code[i];
+       }
+
+       dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
+       dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
+       dev->caps.bf_reg_size        = dev_cap->bf_reg_size;
+       dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
+       dev->caps.max_sq_sg          = dev_cap->max_sq_sg;
+       dev->caps.max_rq_sg          = dev_cap->max_rq_sg;
+       dev->caps.max_wqes           = dev_cap->max_qp_sz;
+       dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
+       dev->caps.max_srq_wqes       = dev_cap->max_srq_sz;
+       dev->caps.max_srq_sge        = dev_cap->max_rq_sg - 1;
+       dev->caps.reserved_srqs      = dev_cap->reserved_srqs;
+       dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
+       dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
+       dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
+       /*
+        * Subtract 1 from the limit because we need to allocate a
+        * spare CQE so the HCA HW can tell the difference between an
+        * empty CQ and a full CQ.
+        */
+       dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
+       dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
+       dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
+       dev->caps.mtts_per_seg       = 1 << log_mtts_per_seg;
+       dev->caps.reserved_mtts      = DIV_ROUND_UP(dev_cap->reserved_mtts,
+                                                   dev->caps.mtts_per_seg);
+       dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
+       dev->caps.reserved_uars      = dev_cap->reserved_uars;
+       dev->caps.reserved_pds       = dev_cap->reserved_pds;
+       dev->caps.mtt_entry_sz       = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+       dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
+       dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
+       dev->caps.flags              = dev_cap->flags;
+       dev->caps.bmme_flags         = dev_cap->bmme_flags;
+       dev->caps.reserved_lkey      = dev_cap->reserved_lkey;
+       dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
+       dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
+
+       dev->caps.log_num_macs  = log_num_mac;
+       dev->caps.log_num_vlans = log_num_vlan;
+       dev->caps.log_num_prios = use_prio ? 3 : 0;
+
+       for (i = 1; i <= dev->caps.num_ports; ++i) {
+               if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
+                       dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
+               else
+                       dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+               dev->caps.possible_type[i] = dev->caps.port_type[i];
+               mlx4_priv(dev)->sense.sense_allowed[i] =
+                       dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
+
+               if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
+                       dev->caps.log_num_macs = dev_cap->log_max_macs[i];
+                       mlx4_warn(dev, "Requested number of MACs is too much "
+                                 "for port %d, reducing to %d.\n",
+                                 i, 1 << dev->caps.log_num_macs);
+               }
+               if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
+                       dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
+                       mlx4_warn(dev, "Requested number of VLANs is too much "
+                                 "for port %d, reducing to %d.\n",
+                                 i, 1 << dev->caps.log_num_vlans);
+               }
+       }
+
+       mlx4_set_port_mask(dev);
+
+       dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
+
+       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
+       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
+               dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
+               (1 << dev->caps.log_num_macs) *
+               (1 << dev->caps.log_num_vlans) *
+               (1 << dev->caps.log_num_prios) *
+               dev->caps.num_ports;
+       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
+
+       dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
+               dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
+               dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
+               dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
+
+       return 0;
+}
+
+/*
+ * Change the port configuration of the device.
+ * Every user of this function must hold the port mutex.
+ */
+int mlx4_change_port_types(struct mlx4_dev *dev,
+                          enum mlx4_port_type *port_types)
+{
+       int err = 0;
+       int change = 0;
+       int port;
+
+       for (port = 0; port < dev->caps.num_ports; port++) {
+               /* Change the port type only if the new type is different
+                * from the current, and not set to Auto */
+               if (port_types[port] != dev->caps.port_type[port + 1]) {
+                       change = 1;
+                       dev->caps.port_type[port + 1] = port_types[port];
+               }
+       }
+       if (change) {
+               mlx4_unregister_device(dev);
+               for (port = 1; port <= dev->caps.num_ports; port++) {
+                       mlx4_CLOSE_PORT(dev, port);
+                       err = mlx4_SET_PORT(dev, port);
+                       if (err) {
+                               mlx4_err(dev, "Failed to set port %d, "
+                                             "aborting\n", port);
+                               goto out;
+                       }
+               }
+               mlx4_set_port_mask(dev);
+               err = mlx4_register_device(dev);
+       }
+
+out:
+       return err;
+}
+
+static ssize_t show_port_type(struct device *dev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+                                                  port_attr);
+       struct mlx4_dev *mdev = info->dev;
+       char type[8];
+
+       sprintf(type, "%s",
+               (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
+               "ib" : "eth");
+       if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
+               sprintf(buf, "auto (%s)\n", type);
+       else
+               sprintf(buf, "%s\n", type);
+
+       return strlen(buf);
+}
+
+static ssize_t set_port_type(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t count)
+{
+       struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+                                                  port_attr);
+       struct mlx4_dev *mdev = info->dev;
+       struct mlx4_priv *priv = mlx4_priv(mdev);
+       enum mlx4_port_type types[MLX4_MAX_PORTS];
+       enum mlx4_port_type new_types[MLX4_MAX_PORTS];
+       int i;
+       int err = 0;
+
+       if (!strcmp(buf, "ib\n"))
+               info->tmp_type = MLX4_PORT_TYPE_IB;
+       else if (!strcmp(buf, "eth\n"))
+               info->tmp_type = MLX4_PORT_TYPE_ETH;
+       else if (!strcmp(buf, "auto\n"))
+               info->tmp_type = MLX4_PORT_TYPE_AUTO;
+       else {
+               mlx4_err(mdev, "%s is not supported port type\n", buf);
+               return -EINVAL;
+       }
+
+       mlx4_stop_sense(mdev);
+       mutex_lock(&priv->port_mutex);
+       /* Possible type is always the one that was delivered */
+       mdev->caps.possible_type[info->port] = info->tmp_type;
+
+       for (i = 0; i < mdev->caps.num_ports; i++) {
+               types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
+                                       mdev->caps.possible_type[i+1];
+               if (types[i] == MLX4_PORT_TYPE_AUTO)
+                       types[i] = mdev->caps.port_type[i+1];
+       }
+
+       if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+               for (i = 1; i <= mdev->caps.num_ports; i++) {
+                       if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
+                               mdev->caps.possible_type[i] = mdev->caps.port_type[i];
+                               err = -EINVAL;
+                       }
+               }
+       }
+       if (err) {
+               mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
+                              "Set only 'eth' or 'ib' for both ports "
+                              "(should be the same)\n");
+               goto out;
+       }
+
+       mlx4_do_sense_ports(mdev, new_types, types);
+
+       err = mlx4_check_port_params(mdev, new_types);
+       if (err)
+               goto out;
+
+       /* We are about to apply the changes after the configuration
+        * was verified, no need to remember the temporary types
+        * any more */
+       for (i = 0; i < mdev->caps.num_ports; i++)
+               priv->port[i + 1].tmp_type = 0;
+
+       err = mlx4_change_port_types(mdev, new_types);
+
+out:
+       mlx4_start_sense(mdev);
+       mutex_unlock(&priv->port_mutex);
+       return err ? err : count;
+}
+
+static int mlx4_load_fw(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err;
+
+       priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
+                                        GFP_HIGHUSER | __GFP_NOWARN, 0);
+       if (!priv->fw.fw_icm) {
+               mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
+               return -ENOMEM;
+       }
+
+       err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
+       if (err) {
+               mlx4_err(dev, "MAP_FA command failed, aborting.\n");
+               goto err_free;
+       }
+
+       err = mlx4_RUN_FW(dev);
+       if (err) {
+               mlx4_err(dev, "RUN_FW command failed, aborting.\n");
+               goto err_unmap_fa;
+       }
+
+       return 0;
+
+err_unmap_fa:
+       mlx4_UNMAP_FA(dev);
+
+err_free:
+       mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+       return err;
+}
+
+static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
+                               int cmpt_entry_sz)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err;
+
+       err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
+                                 cmpt_base +
+                                 ((u64) (MLX4_CMPT_TYPE_QP *
+                                         cmpt_entry_sz) << MLX4_CMPT_SHIFT),
+                                 cmpt_entry_sz, dev->caps.num_qps,
+                                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+                                 0, 0);
+       if (err)
+               goto err;
+
+       err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
+                                 cmpt_base +
+                                 ((u64) (MLX4_CMPT_TYPE_SRQ *
+                                         cmpt_entry_sz) << MLX4_CMPT_SHIFT),
+                                 cmpt_entry_sz, dev->caps.num_srqs,
+                                 dev->caps.reserved_srqs, 0, 0);
+       if (err)
+               goto err_qp;
+
+       err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
+                                 cmpt_base +
+                                 ((u64) (MLX4_CMPT_TYPE_CQ *
+                                         cmpt_entry_sz) << MLX4_CMPT_SHIFT),
+                                 cmpt_entry_sz, dev->caps.num_cqs,
+                                 dev->caps.reserved_cqs, 0, 0);
+       if (err)
+               goto err_srq;
+
+       err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
+                                 cmpt_base +
+                                 ((u64) (MLX4_CMPT_TYPE_EQ *
+                                         cmpt_entry_sz) << MLX4_CMPT_SHIFT),
+                                 cmpt_entry_sz,
+                                 dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
+       if (err)
+               goto err_cq;
+
+       return 0;
+
+err_cq:
+       mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
+
+err_srq:
+       mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
+
+err_qp:
+       mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
+
+err:
+       return err;
+}
+
+static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
+                        struct mlx4_init_hca_param *init_hca, u64 icm_size)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       u64 aux_pages;
+       int err;
+
+       err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
+       if (err) {
+               mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
+               return err;
+       }
+
+       mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
+                (unsigned long long) icm_size >> 10,
+                (unsigned long long) aux_pages << 2);
+
+       priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
+                                         GFP_HIGHUSER | __GFP_NOWARN, 0);
+       if (!priv->fw.aux_icm) {
+               mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
+               return -ENOMEM;
+       }
+
+       err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
+       if (err) {
+               mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
+               goto err_free_aux;
+       }
+
+       err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
+       if (err) {
+               mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
+               goto err_unmap_aux;
+       }
+
+       err = mlx4_init_icm_table(dev, &priv->eq_table.table,
+                                 init_hca->eqc_base, dev_cap->eqc_entry_sz,
+                                 dev->caps.num_eqs, dev->caps.num_eqs,
+                                 0, 0);
+       if (err) {
+               mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
+               goto err_unmap_cmpt;
+       }
+
+       /*
+        * Reserved MTT entries must be aligned up to a cacheline
+        * boundary, since the FW will write to them, while the driver
+        * writes to all other MTT entries. (The variable
+        * dev->caps.mtt_entry_sz below is really the MTT segment
+        * size, not the raw entry size)
+        */
+       dev->caps.reserved_mtts =
+               ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
+                     dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
+
+       err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
+                                 init_hca->mtt_base,
+                                 dev->caps.mtt_entry_sz,
+                                 dev->caps.num_mtt_segs,
+                                 dev->caps.reserved_mtts, 1, 0);
+       if (err) {
+               mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
+               goto err_unmap_eq;
+       }
+
+       err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
+                                 init_hca->dmpt_base,
+                                 dev_cap->dmpt_entry_sz,
+                                 dev->caps.num_mpts,
+                                 dev->caps.reserved_mrws, 1, 1);
+       if (err) {
+               mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
+               goto err_unmap_mtt;
+       }
+
+       err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
+                                 init_hca->qpc_base,
+                                 dev_cap->qpc_entry_sz,
+                                 dev->caps.num_qps,
+                                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+                                 0, 0);
+       if (err) {
+               mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
+               goto err_unmap_dmpt;
+       }
+
+       err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
+                                 init_hca->auxc_base,
+                                 dev_cap->aux_entry_sz,
+                                 dev->caps.num_qps,
+                                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+                                 0, 0);
+       if (err) {
+               mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
+               goto err_unmap_qp;
+       }
+
+       err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
+                                 init_hca->altc_base,
+                                 dev_cap->altc_entry_sz,
+                                 dev->caps.num_qps,
+                                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+                                 0, 0);
+       if (err) {
+               mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
+               goto err_unmap_auxc;
+       }
+
+       err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
+                                 init_hca->rdmarc_base,
+                                 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
+                                 dev->caps.num_qps,
+                                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+                                 0, 0);
+       if (err) {
+               mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
+               goto err_unmap_altc;
+       }
+
+       err = mlx4_init_icm_table(dev, &priv->cq_table.table,
+                                 init_hca->cqc_base,
+                                 dev_cap->cqc_entry_sz,
+                                 dev->caps.num_cqs,
+                                 dev->caps.reserved_cqs, 0, 0);
+       if (err) {
+               mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
+               goto err_unmap_rdmarc;
+       }
+
+       err = mlx4_init_icm_table(dev, &priv->srq_table.table,
+                                 init_hca->srqc_base,
+                                 dev_cap->srq_entry_sz,
+                                 dev->caps.num_srqs,
+                                 dev->caps.reserved_srqs, 0, 0);
+       if (err) {
+               mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
+               goto err_unmap_cq;
+       }
+
+       /*
+        * It's not strictly required, but for simplicity just map the
+        * whole multicast group table now.  The table isn't very big
+        * and it's a lot easier than trying to track ref counts.
+        */
+       err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
+                                 init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
+                                 dev->caps.num_mgms + dev->caps.num_amgms,
+                                 dev->caps.num_mgms + dev->caps.num_amgms,
+                                 0, 0);
+       if (err) {
+               mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
+               goto err_unmap_srq;
+       }
+
+       return 0;
+
+err_unmap_srq:
+       mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
+
+err_unmap_cq:
+       mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
+
+err_unmap_rdmarc:
+       mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
+
+err_unmap_altc:
+       mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
+
+err_unmap_auxc:
+       mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
+
+err_unmap_qp:
+       mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
+
+err_unmap_dmpt:
+       mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
+
+err_unmap_mtt:
+       mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+
+err_unmap_eq:
+       mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
+
+err_unmap_cmpt:
+       mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
+       mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
+       mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
+       mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
+
+err_unmap_aux:
+       mlx4_UNMAP_ICM_AUX(dev);
+
+err_free_aux:
+       mlx4_free_icm(dev, priv->fw.aux_icm, 0);
+
+       return err;
+}
+
+static void mlx4_free_icms(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
+       mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
+       mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
+       mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
+       mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
+       mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
+       mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
+       mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
+       mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+       mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
+       mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
+       mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
+       mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
+       mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
+
+       mlx4_UNMAP_ICM_AUX(dev);
+       mlx4_free_icm(dev, priv->fw.aux_icm, 0);
+}
+
+static int map_bf_area(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       resource_size_t bf_start;
+       resource_size_t bf_len;
+       int err = 0;
+
+       bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
+       bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+       priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+       if (!priv->bf_mapping)
+               err = -ENOMEM;
+
+       return err;
+}
+
+static void unmap_bf_area(struct mlx4_dev *dev)
+{
+       if (mlx4_priv(dev)->bf_mapping)
+               io_mapping_free(mlx4_priv(dev)->bf_mapping);
+}
+
+static void mlx4_close_hca(struct mlx4_dev *dev)
+{
+       unmap_bf_area(dev);
+       mlx4_CLOSE_HCA(dev, 0);
+       mlx4_free_icms(dev);
+       mlx4_UNMAP_FA(dev);
+       mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+}
+
+static int mlx4_init_hca(struct mlx4_dev *dev)
+{
+       struct mlx4_priv          *priv = mlx4_priv(dev);
+       struct mlx4_adapter        adapter;
+       struct mlx4_dev_cap        dev_cap;
+       struct mlx4_mod_stat_cfg   mlx4_cfg;
+       struct mlx4_profile        profile;
+       struct mlx4_init_hca_param init_hca;
+       u64 icm_size;
+       int err;
+
+       err = mlx4_QUERY_FW(dev);
+       if (err) {
+               if (err == -EACCES)
+                       mlx4_info(dev, "non-primary physical function, skipping.\n");
+               else
+                       mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+               return err;
+       }
+
+       err = mlx4_load_fw(dev);
+       if (err) {
+               mlx4_err(dev, "Failed to start FW, aborting.\n");
+               return err;
+       }
+
+       mlx4_cfg.log_pg_sz_m = 1;
+       mlx4_cfg.log_pg_sz = 0;
+       err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+       if (err)
+               mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+
+       err = mlx4_dev_cap(dev, &dev_cap);
+       if (err) {
+               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+               goto err_stop_fw;
+       }
+
+       profile = default_profile;
+
+       icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
+       if ((long long) icm_size < 0) {
+               err = icm_size;
+               goto err_stop_fw;
+       }
+
+       if (map_bf_area(dev))
+               mlx4_dbg(dev, "Failed to map blue flame area\n");
+
+       init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+
+       err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+       if (err)
+               goto err_stop_fw;
+
+       err = mlx4_INIT_HCA(dev, &init_hca);
+       if (err) {
+               mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+               goto err_free_icm;
+       }
+
+       err = mlx4_QUERY_ADAPTER(dev, &adapter);
+       if (err) {
+               mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
+               goto err_close;
+       }
+
+       priv->eq_table.inta_pin = adapter.inta_pin;
+       memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
+
+       return 0;
+
+err_close:
+       mlx4_CLOSE_HCA(dev, 0);
+
+err_free_icm:
+       mlx4_free_icms(dev);
+
+err_stop_fw:
+       unmap_bf_area(dev);
+       mlx4_UNMAP_FA(dev);
+       mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+
+       return err;
+}
+
+static int mlx4_init_counters_table(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int nent;
+
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+               return -ENOENT;
+
+       nent = dev->caps.max_counters;
+       return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
+}
+
+static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
+{
+       mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
+}
+
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+               return -ENOENT;
+
+       *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
+       if (*idx == -1)
+               return -ENOMEM;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
+
+void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
+{
+       mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
+}
+EXPORT_SYMBOL_GPL(mlx4_counter_free);
+
+static int mlx4_setup_hca(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err;
+       int port;
+       __be32 ib_port_default_caps;
+
+       err = mlx4_init_uar_table(dev);
+       if (err) {
+               mlx4_err(dev, "Failed to initialize "
+                        "user access region table, aborting.\n");
+               return err;
+       }
+
+       err = mlx4_uar_alloc(dev, &priv->driver_uar);
+       if (err) {
+               mlx4_err(dev, "Failed to allocate driver access region, "
+                        "aborting.\n");
+               goto err_uar_table_free;
+       }
+
+       priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+       if (!priv->kar) {
+               mlx4_err(dev, "Couldn't map kernel access region, "
+                        "aborting.\n");
+               err = -ENOMEM;
+               goto err_uar_free;
+       }
+
+       err = mlx4_init_pd_table(dev);
+       if (err) {
+               mlx4_err(dev, "Failed to initialize "
+                        "protection domain table, aborting.\n");
+               goto err_kar_unmap;
+       }
+
+       err = mlx4_init_mr_table(dev);
+       if (err) {
+               mlx4_err(dev, "Failed to initialize "
+                        "memory region table, aborting.\n");
+               goto err_pd_table_free;
+       }
+
+       err = mlx4_init_eq_table(dev);
+       if (err) {
+               mlx4_err(dev, "Failed to initialize "
+                        "event queue table, aborting.\n");
+               goto err_mr_table_free;
+       }
+
+       err = mlx4_cmd_use_events(dev);
+       if (err) {
+               mlx4_err(dev, "Failed to switch to event-driven "
+                        "firmware commands, aborting.\n");
+               goto err_eq_table_free;
+       }
+
+       err = mlx4_NOP(dev);
+       if (err) {
+               if (dev->flags & MLX4_FLAG_MSI_X) {
+                       mlx4_warn(dev, "NOP command failed to generate MSI-X "
+                                 "interrupt IRQ %d).\n",
+                                 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+                       mlx4_warn(dev, "Trying again without MSI-X.\n");
+               } else {
+                       mlx4_err(dev, "NOP command failed to generate interrupt "
+                                "(IRQ %d), aborting.\n",
+                                priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+                       mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
+               }
+
+               goto err_cmd_poll;
+       }
+
+       mlx4_dbg(dev, "NOP command IRQ test passed\n");
+
+       err = mlx4_init_cq_table(dev);
+       if (err) {
+               mlx4_err(dev, "Failed to initialize "
+                        "completion queue table, aborting.\n");
+               goto err_cmd_poll;
+       }
+
+       err = mlx4_init_srq_table(dev);
+       if (err) {
+               mlx4_err(dev, "Failed to initialize "
+                        "shared receive queue table, aborting.\n");
+               goto err_cq_table_free;
+       }
+
+       err = mlx4_init_qp_table(dev);
+       if (err) {
+               mlx4_err(dev, "Failed to initialize "
+                        "queue pair table, aborting.\n");
+               goto err_srq_table_free;
+       }
+
+       err = mlx4_init_mcg_table(dev);
+       if (err) {
+               mlx4_err(dev, "Failed to initialize "
+                        "multicast group table, aborting.\n");
+               goto err_qp_table_free;
+       }
+
+       err = mlx4_init_counters_table(dev);
+       if (err && err != -ENOENT) {
+               mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+               goto err_mcg_table_free;
+       }
+
+       for (port = 1; port <= dev->caps.num_ports; port++) {
+               enum mlx4_port_type port_type = 0;
+               mlx4_SENSE_PORT(dev, port, &port_type);
+               if (port_type)
+                       dev->caps.port_type[port] = port_type;
+               ib_port_default_caps = 0;
+               err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
+               if (err)
+                       mlx4_warn(dev, "failed to get port %d default "
+                                 "ib capabilities (%d). Continuing with "
+                                 "caps = 0\n", port, err);
+               dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+               err = mlx4_SET_PORT(dev, port);
+               if (err) {
+                       mlx4_err(dev, "Failed to set port %d, aborting\n",
+                               port);
+                       goto err_counters_table_free;
+               }
+       }
+       mlx4_set_port_mask(dev);
+
+       return 0;
+
+err_counters_table_free:
+       mlx4_cleanup_counters_table(dev);
+
+err_mcg_table_free:
+       mlx4_cleanup_mcg_table(dev);
+
+err_qp_table_free:
+       mlx4_cleanup_qp_table(dev);
+
+err_srq_table_free:
+       mlx4_cleanup_srq_table(dev);
+
+err_cq_table_free:
+       mlx4_cleanup_cq_table(dev);
+
+err_cmd_poll:
+       mlx4_cmd_use_polling(dev);
+
+err_eq_table_free:
+       mlx4_cleanup_eq_table(dev);
+
+err_mr_table_free:
+       mlx4_cleanup_mr_table(dev);
+
+err_pd_table_free:
+       mlx4_cleanup_pd_table(dev);
+
+err_kar_unmap:
+       iounmap(priv->kar);
+
+err_uar_free:
+       mlx4_uar_free(dev, &priv->driver_uar);
+
+err_uar_table_free:
+       mlx4_cleanup_uar_table(dev);
+       return err;
+}
+
+static void mlx4_enable_msi_x(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct msix_entry *entries;
+       int nreq = min_t(int, dev->caps.num_ports *
+                        min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
+                               + MSIX_LEGACY_SZ, MAX_MSIX);
+       int err;
+       int i;
+
+       if (msi_x) {
+               nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+                            nreq);
+               entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
+               if (!entries)
+                       goto no_msi;
+
+               for (i = 0; i < nreq; ++i)
+                       entries[i].entry = i;
+
+       retry:
+               err = pci_enable_msix(dev->pdev, entries, nreq);
+               if (err) {
+                       /* Try again if at least 2 vectors are available */
+                       if (err > 1) {
+                               mlx4_info(dev, "Requested %d vectors, "
+                                         "but only %d MSI-X vectors available, "
+                                         "trying again\n", nreq, err);
+                               nreq = err;
+                               goto retry;
+                       }
+                       kfree(entries);
+                       goto no_msi;
+               }
+
+               if (nreq <
+                   MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
+                       /* Working in legacy mode, all EQs shared */
+                       dev->caps.comp_pool           = 0;
+                       dev->caps.num_comp_vectors = nreq - 1;
+               } else {
+                       dev->caps.comp_pool           = nreq - MSIX_LEGACY_SZ;
+                       dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
+               }
+               for (i = 0; i < nreq; ++i)
+                       priv->eq_table.eq[i].irq = entries[i].vector;
+
+               dev->flags |= MLX4_FLAG_MSI_X;
+
+               kfree(entries);
+               return;
+       }
+
+no_msi:
+       dev->caps.num_comp_vectors = 1;
+       dev->caps.comp_pool        = 0;
+
+       for (i = 0; i < 2; ++i)
+               priv->eq_table.eq[i].irq = dev->pdev->irq;
+}
+
+static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+{
+       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+       int err = 0;
+
+       info->dev = dev;
+       info->port = port;
+       mlx4_init_mac_table(dev, &info->mac_table);
+       mlx4_init_vlan_table(dev, &info->vlan_table);
+       info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+                       (port - 1) * (1 << log_num_mac);
+
+       sprintf(info->dev_name, "mlx4_port%d", port);
+       info->port_attr.attr.name = info->dev_name;
+       info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+       info->port_attr.show      = show_port_type;
+       info->port_attr.store     = set_port_type;
+       sysfs_attr_init(&info->port_attr.attr);
+
+       err = device_create_file(&dev->pdev->dev, &info->port_attr);
+       if (err) {
+               mlx4_err(dev, "Failed to create file for port %d\n", port);
+               info->port = -1;
+       }
+
+       return err;
+}
+
+static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
+{
+       if (info->port < 0)
+               return;
+
+       device_remove_file(&info->dev->pdev->dev, &info->port_attr);
+}
+
+static int mlx4_init_steering(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int num_entries = dev->caps.num_ports;
+       int i, j;
+
+       priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
+       if (!priv->steer)
+               return -ENOMEM;
+
+       for (i = 0; i < num_entries; i++) {
+               for (j = 0; j < MLX4_NUM_STEERS; j++) {
+                       INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
+                       INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
+               }
+               INIT_LIST_HEAD(&priv->steer[i].high_prios);
+       }
+       return 0;
+}
+
+static void mlx4_clear_steering(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_steer_index *entry, *tmp_entry;
+       struct mlx4_promisc_qp *pqp, *tmp_pqp;
+       int num_entries = dev->caps.num_ports;
+       int i, j;
+
+       for (i = 0; i < num_entries; i++) {
+               for (j = 0; j < MLX4_NUM_STEERS; j++) {
+                       list_for_each_entry_safe(pqp, tmp_pqp,
+                                                &priv->steer[i].promisc_qps[j],
+                                                list) {
+                               list_del(&pqp->list);
+                               kfree(pqp);
+                       }
+                       list_for_each_entry_safe(entry, tmp_entry,
+                                                &priv->steer[i].steer_entries[j],
+                                                list) {
+                               list_del(&entry->list);
+                               list_for_each_entry_safe(pqp, tmp_pqp,
+                                                        &entry->duplicates,
+                                                        list) {
+                                       list_del(&pqp->list);
+                                       kfree(pqp);
+                               }
+                               kfree(entry);
+                       }
+               }
+       }
+       kfree(priv->steer);
+}
+
+static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct mlx4_priv *priv;
+       struct mlx4_dev *dev;
+       int err;
+       int port;
+
+       pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "Cannot enable PCI device, "
+                       "aborting.\n");
+               return err;
+       }
+
+       /*
+        * Check for BARs.  We expect 0: 1MB
+        */
+       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
+           pci_resource_len(pdev, 0) != 1 << 20) {
+               dev_err(&pdev->dev, "Missing DCS, aborting.\n");
+               err = -ENODEV;
+               goto err_disable_pdev;
+       }
+       if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+               dev_err(&pdev->dev, "Missing UAR, aborting.\n");
+               err = -ENODEV;
+               goto err_disable_pdev;
+       }
+
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err) {
+               dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
+               goto err_disable_pdev;
+       }
+
+       pci_set_master(pdev);
+
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (err) {
+               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err) {
+                       dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+                       goto err_release_regions;
+               }
+       }
+       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (err) {
+               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
+                        "consistent PCI DMA mask.\n");
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err) {
+                       dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
+                               "aborting.\n");
+                       goto err_release_regions;
+               }
+       }
+
+       /* Allow large DMA segments, up to the firmware limit of 1 GB */
+       dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
+
+       priv = kzalloc(sizeof *priv, GFP_KERNEL);
+       if (!priv) {
+               dev_err(&pdev->dev, "Device struct alloc failed, "
+                       "aborting.\n");
+               err = -ENOMEM;
+               goto err_release_regions;
+       }
+
+       dev       = &priv->dev;
+       dev->pdev = pdev;
+       INIT_LIST_HEAD(&priv->ctx_list);
+       spin_lock_init(&priv->ctx_lock);
+
+       mutex_init(&priv->port_mutex);
+
+       INIT_LIST_HEAD(&priv->pgdir_list);
+       mutex_init(&priv->pgdir_mutex);
+
+       INIT_LIST_HEAD(&priv->bf_list);
+       mutex_init(&priv->bf_mutex);
+
+       dev->rev_id = pdev->revision;
+
+       /*
+        * Now reset the HCA before we touch the PCI capabilities or
+        * attempt a firmware command, since a boot ROM may have left
+        * the HCA in an undefined state.
+        */
+       err = mlx4_reset(dev);
+       if (err) {
+               mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+               goto err_free_dev;
+       }
+
+       if (mlx4_cmd_init(dev)) {
+               mlx4_err(dev, "Failed to init command interface, aborting.\n");
+               goto err_free_dev;
+       }
+
+       err = mlx4_init_hca(dev);
+       if (err)
+               goto err_cmd;
+
+       err = mlx4_alloc_eq_table(dev);
+       if (err)
+               goto err_close;
+
+       priv->msix_ctl.pool_bm = 0;
+       spin_lock_init(&priv->msix_ctl.pool_lock);
+
+       mlx4_enable_msi_x(dev);
+
+       err = mlx4_init_steering(dev);
+       if (err)
+               goto err_free_eq;
+
+       err = mlx4_setup_hca(dev);
+       if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
+               dev->flags &= ~MLX4_FLAG_MSI_X;
+               pci_disable_msix(pdev);
+               err = mlx4_setup_hca(dev);
+       }
+
+       if (err)
+               goto err_steer;
+
+       for (port = 1; port <= dev->caps.num_ports; port++) {
+               err = mlx4_init_port_info(dev, port);
+               if (err)
+                       goto err_port;
+       }
+
+       err = mlx4_register_device(dev);
+       if (err)
+               goto err_port;
+
+       mlx4_sense_init(dev);
+       mlx4_start_sense(dev);
+
+       pci_set_drvdata(pdev, dev);
+
+       return 0;
+
+err_port:
+       for (--port; port >= 1; --port)
+               mlx4_cleanup_port_info(&priv->port[port]);
+
+       mlx4_cleanup_counters_table(dev);
+       mlx4_cleanup_mcg_table(dev);
+       mlx4_cleanup_qp_table(dev);
+       mlx4_cleanup_srq_table(dev);
+       mlx4_cleanup_cq_table(dev);
+       mlx4_cmd_use_polling(dev);
+       mlx4_cleanup_eq_table(dev);
+       mlx4_cleanup_mr_table(dev);
+       mlx4_cleanup_pd_table(dev);
+       mlx4_cleanup_uar_table(dev);
+
+err_steer:
+       mlx4_clear_steering(dev);
+
+err_free_eq:
+       mlx4_free_eq_table(dev);
+
+err_close:
+       if (dev->flags & MLX4_FLAG_MSI_X)
+               pci_disable_msix(pdev);
+
+       mlx4_close_hca(dev);
+
+err_cmd:
+       mlx4_cmd_cleanup(dev);
+
+err_free_dev:
+       kfree(priv);
+
+err_release_regions:
+       pci_release_regions(pdev);
+
+err_disable_pdev:
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+       return err;
+}
+
+static int __devinit mlx4_init_one(struct pci_dev *pdev,
+                                  const struct pci_device_id *id)
+{
+       printk_once(KERN_INFO "%s", mlx4_version);
+
+       return __mlx4_init_one(pdev, id);
+}
+
+static void mlx4_remove_one(struct pci_dev *pdev)
+{
+       struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int p;
+
+       if (dev) {
+               mlx4_stop_sense(dev);
+               mlx4_unregister_device(dev);
+
+               for (p = 1; p <= dev->caps.num_ports; p++) {
+                       mlx4_cleanup_port_info(&priv->port[p]);
+                       mlx4_CLOSE_PORT(dev, p);
+               }
+
+               mlx4_cleanup_counters_table(dev);
+               mlx4_cleanup_mcg_table(dev);
+               mlx4_cleanup_qp_table(dev);
+               mlx4_cleanup_srq_table(dev);
+               mlx4_cleanup_cq_table(dev);
+               mlx4_cmd_use_polling(dev);
+               mlx4_cleanup_eq_table(dev);
+               mlx4_cleanup_mr_table(dev);
+               mlx4_cleanup_pd_table(dev);
+
+               iounmap(priv->kar);
+               mlx4_uar_free(dev, &priv->driver_uar);
+               mlx4_cleanup_uar_table(dev);
+               mlx4_clear_steering(dev);
+               mlx4_free_eq_table(dev);
+               mlx4_close_hca(dev);
+               mlx4_cmd_cleanup(dev);
+
+               if (dev->flags & MLX4_FLAG_MSI_X)
+                       pci_disable_msix(pdev);
+
+               kfree(priv);
+               pci_release_regions(pdev);
+               pci_disable_device(pdev);
+               pci_set_drvdata(pdev, NULL);
+       }
+}
+
+int mlx4_restart_one(struct pci_dev *pdev)
+{
+       mlx4_remove_one(pdev);
+       return __mlx4_init_one(pdev, NULL);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
+       { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
+       { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
+       { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
+       { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
+       { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
+       { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
+       { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
+       { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
+       { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
+       { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
+       { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
+       { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
+       { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
+
+static struct pci_driver mlx4_driver = {
+       .name           = DRV_NAME,
+       .id_table       = mlx4_pci_table,
+       .probe          = mlx4_init_one,
+       .remove         = __devexit_p(mlx4_remove_one)
+};
+
+static int __init mlx4_verify_params(void)
+{
+       if ((log_num_mac < 0) || (log_num_mac > 7)) {
+               pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
+               return -1;
+       }
+
+       if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
+               pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan);
+               return -1;
+       }
+
+       if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
+               pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+               return -1;
+       }
+
+       return 0;
+}
+
+static int __init mlx4_init(void)
+{
+       int ret;
+
+       if (mlx4_verify_params())
+               return -EINVAL;
+
+       mlx4_catas_init();
+
+       mlx4_wq = create_singlethread_workqueue("mlx4");
+       if (!mlx4_wq)
+               return -ENOMEM;
+
+       ret = pci_register_driver(&mlx4_driver);
+       return ret < 0 ? ret : 0;
+}
+
+static void __exit mlx4_cleanup(void)
+{
+       pci_unregister_driver(&mlx4_driver);
+       destroy_workqueue(mlx4_wq);
+}
+
+module_init(mlx4_init);
+module_exit(mlx4_cleanup);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
new file mode 100644 (file)
index 0000000..cd17845
--- /dev/null
@@ -0,0 +1,928 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+#define MGM_QPN_MASK       0x00FFFFFF
+#define MGM_BLCK_LB_BIT    30
+
+static const u8 zero_gid[16];  /* automatically initialized to 0 */
+
+static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
+                          struct mlx4_cmd_mailbox *mailbox)
+{
+       return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
+                           MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
+                           struct mlx4_cmd_mailbox *mailbox)
+{
+       return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
+                       MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
+                             struct mlx4_cmd_mailbox *mailbox)
+{
+       u32 in_mod;
+
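+       /* Build the input modifier: VEP number in bits 31:24, port
+        * number in bits 23:16 and the steering type shifted to bit 1 */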
+       in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
+       return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
+                       MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+                        u16 *hash, u8 op_mod)
+{
+       u64 imm;
+       int err;
+
+       err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
+                          MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
+
+       if (!err)
+               *hash = imm;
+
+       return err;
+}
+
+static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
+                                             enum mlx4_steer_type steer,
+                                             u32 qpn)
+{
+       struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
+       struct mlx4_promisc_qp *pqp;
+
+       list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
+               if (pqp->qpn == qpn)
+                       return pqp;
+       }
+       /* not found */
+       return NULL;
+}
+
+/*
+ * Add a new entry to the steering data structure.
+ * All promisc QPs should be added to it as well.
+ */
+static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+                             enum mlx4_steer_type steer,
+                             unsigned int index, u32 qpn)
+{
+       struct mlx4_steer *s_steer;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_mgm *mgm;
+       u32 members_count;
+       struct mlx4_steer_index *new_entry;
+       struct mlx4_promisc_qp *pqp;
+       struct mlx4_promisc_qp *dqp = NULL;
+       u32 prot;
+       int err;
+       u8 pf_num;
+
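+       /* Steering state is kept per physical function: a single-port
+        * device is indexed by VEP number alone, while a dual-port device
+        * folds the port into the low bit of the index. */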
+       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
+       if (!new_entry)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&new_entry->duplicates);
+       new_entry->index = index;
+       list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
+
+       /* If the given qpn is also a promisc qp,
+        * it should be inserted into the duplicates list
+        */
+       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+       if (pqp) {
+               dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
+               if (!dqp) {
+                       err = -ENOMEM;
+                       goto out_alloc;
+               }
+               dqp->qpn = qpn;
+               list_add_tail(&dqp->list, &new_entry->duplicates);
+       }
+
+       /* if no promisc qps for this vep, we are done */
+       if (list_empty(&s_steer->promisc_qps[steer]))
+               return 0;
+
+       /* now need to add all the promisc qps to the new
+        * steering entry, as they should also receive the packets
+        * destined for this address */
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               err = -ENOMEM;
+               goto out_alloc;
+       }
+       mgm = mailbox->buf;
+
+       err = mlx4_READ_ENTRY(dev, index, mailbox);
+       if (err)
+               goto out_mailbox;
+
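+       /* members_count packs the protocol in bits 31:30 and the member
+        * count in the low 24 bits */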
+       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+       prot = be32_to_cpu(mgm->members_count) >> 30;
+       list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
+               /* don't add already existing qpn */
+               if (pqp->qpn == qpn)
+                       continue;
+               if (members_count == MLX4_QP_PER_MGM) {
+                       /* out of space */
+                       err = -ENOMEM;
+                       goto out_mailbox;
+               }
+
+               /* add the qpn */
+               mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
+       }
+       /* update the qp count and write the entry back with all the promisc qps */
+       mgm->members_count = cpu_to_be32(members_count | (prot << 30));
+       err = mlx4_WRITE_ENTRY(dev, index, mailbox);
+
+out_mailbox:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       if (!err)
+               return 0;
+out_alloc:
+       if (dqp) {
+               list_del(&dqp->list);
+               kfree(dqp);
+       }
+       list_del(&new_entry->list);
+       kfree(new_entry);
+       return err;
+}
+
+/* update the data structures with an existing steering entry */
+static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+                                  enum mlx4_steer_type steer,
+                                  unsigned int index, u32 qpn)
+{
+       struct mlx4_steer *s_steer;
+       struct mlx4_steer_index *tmp_entry, *entry = NULL;
+       struct mlx4_promisc_qp *pqp;
+       struct mlx4_promisc_qp *dqp;
+       u8 pf_num;
+
+       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+       s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+       if (!pqp)
+               return 0; /* nothing to do */
+
+       list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
+               if (tmp_entry->index == index) {
+                       entry = tmp_entry;
+                       break;
+               }
+       }
+       if (unlikely(!entry)) {
+               mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
+               return -EINVAL;
+       }
+
+       /* the given qpn is listed as a promisc qpn;
+        * add it as a duplicate to this entry
+        * for future reference */
+       list_for_each_entry(dqp, &entry->duplicates, list) {
+               if (qpn == dqp->qpn)
+                       return 0; /* qp is already duplicated */
+       }
+
+       /* add the qp as a duplicate on this index */
+       dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
+       if (!dqp)
+               return -ENOMEM;
+       dqp->qpn = qpn;
+       list_add_tail(&dqp->list, &entry->duplicates);
+
+       return 0;
+}
+
+/* Check whether a qpn is a duplicate on a steering entry.
+ * If so, it should not be removed from the mgm */
+static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+                                 enum mlx4_steer_type steer,
+                                 unsigned int index, u32 qpn)
+{
+       struct mlx4_steer *s_steer;
+       struct mlx4_steer_index *tmp_entry, *entry = NULL;
+       struct mlx4_promisc_qp *dqp, *tmp_dqp;
+       u8 pf_num;
+
+       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+       s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+       /* if qp is not promisc, it cannot be duplicated */
+       if (!get_promisc_qp(dev, pf_num, steer, qpn))
+               return false;
+
+       /* The qp is a promisc qp, so it is a duplicate on this index.
+        * Find the index entry and remove the duplicate */
+       list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
+               if (tmp_entry->index == index) {
+                       entry = tmp_entry;
+                       break;
+               }
+       }
+       if (unlikely(!entry)) {
+               mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
+               return false;
+       }
+       list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
+               if (dqp->qpn == qpn) {
+                       list_del(&dqp->list);
+                       kfree(dqp);
+               }
+       }
+       return true;
+}
+
+/* If a steering entry contains only promisc QPs, it can be removed. */
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+                                     enum mlx4_steer_type steer,
+                                     unsigned int index, u32 tqpn)
+{
+       struct mlx4_steer *s_steer;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_mgm *mgm;
+       struct mlx4_steer_index *entry = NULL, *tmp_entry;
+       u32 qpn;
+       u32 members_count;
+       bool ret = false;
+       int i;
+       u8 pf_num;
+
+       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+       s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return false;
+       mgm = mailbox->buf;
+
+       if (mlx4_READ_ENTRY(dev, index, mailbox))
+               goto out;
+       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+       for (i = 0;  i < members_count; i++) {
+               qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
+               if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
+                       /* the qp is not promisc, the entry can't be removed */
+                       goto out;
+               }
+       }
+       /* All the qps currently registered for this entry are promiscuous;
+        * check for duplicates */
+       ret = true;
+       list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
+               if (entry->index == index) {
+                       if (list_empty(&entry->duplicates)) {
+                               list_del(&entry->list);
+                               kfree(entry);
+                       } else {
+                               /* This entry contains duplicates so it shouldn't be removed */
+                               ret = false;
+                               goto out;
+                       }
+               }
+       }
+
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return ret;
+}
+
+static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+                         enum mlx4_steer_type steer, u32 qpn)
+{
+       struct mlx4_steer *s_steer;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_mgm *mgm;
+       struct mlx4_steer_index *entry;
+       struct mlx4_promisc_qp *pqp;
+       struct mlx4_promisc_qp *dqp;
+       u32 members_count;
+       u32 prot;
+       int i;
+       bool found;
+       int last_index;
+       int err;
+       u8 pf_num;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+       s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+       mutex_lock(&priv->mcg_table.mutex);
+
+       if (get_promisc_qp(dev, pf_num, steer, qpn)) {
+               err = 0;  /* Nothing to do, already exists */
+               goto out_mutex;
+       }
+
+       pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
+       if (!pqp) {
+               err = -ENOMEM;
+               goto out_mutex;
+       }
+       pqp->qpn = qpn;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               err = -ENOMEM;
+               goto out_alloc;
+       }
+       mgm = mailbox->buf;
+
+       /* the promisc qp needs to be added to each of the steering
+        * entries; if it already exists there, it needs to be added as a
+        * duplicate for that entry */
+       list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
+               err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
+               if (err)
+                       goto out_mailbox;
+
+               members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+               prot = be32_to_cpu(mgm->members_count) >> 30;
+               found = false;
+               for (i = 0; i < members_count; i++) {
+                       if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
+                               /* Entry already exists, add to duplicates */
+                               dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
+                               if (!dqp)
+                                       goto out_mailbox;
+                               dqp->qpn = qpn;
+                               list_add_tail(&dqp->list, &entry->duplicates);
+                               found = true;
+                       }
+               }
+               if (!found) {
+                       /* Need to add the qpn to mgm */
+                       if (members_count == MLX4_QP_PER_MGM) {
+                               /* entry is full */
+                               err = -ENOMEM;
+                               goto out_mailbox;
+                       }
+                       mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
+                       mgm->members_count = cpu_to_be32(members_count | (prot << 30));
+                       err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
+                       if (err)
+                               goto out_mailbox;
+               }
+               last_index = entry->index;
+       }
+
+       /* add the new qpn to list of promisc qps */
+       list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
+       /* now need to add all the promisc qps to default entry */
+       memset(mgm, 0, sizeof *mgm);
+       members_count = 0;
+       list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
+               mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
+       mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
+
+       err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+       if (err)
+               goto out_list;
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       mutex_unlock(&priv->mcg_table.mutex);
+       return 0;
+
+out_list:
+       list_del(&pqp->list);
+out_mailbox:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+out_alloc:
+       kfree(pqp);
+out_mutex:
+       mutex_unlock(&priv->mcg_table.mutex);
+       return err;
+}
+
+static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+                            enum mlx4_steer_type steer, u32 qpn)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_steer *s_steer;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_mgm *mgm;
+       struct mlx4_steer_index *entry;
+       struct mlx4_promisc_qp *pqp;
+       struct mlx4_promisc_qp *dqp;
+       u32 members_count;
+       bool found;
+       bool back_to_list = false;
+       int loc, i;
+       int err;
+       u8 pf_num;
+
+       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
+       s_steer = &mlx4_priv(dev)->steer[pf_num];
+       mutex_lock(&priv->mcg_table.mutex);
+
+       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+       if (unlikely(!pqp)) {
+               mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
+               /* nothing to do */
+               err = 0;
+               goto out_mutex;
+       }
+
+       /* remove from the list of promisc qps */
+       list_del(&pqp->list);
+
+       /* set the default entry not to include the removed one */
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               err = -ENOMEM;
+               back_to_list = true;
+               goto out_list;
+       }
+       mgm = mailbox->buf;
+       members_count = 0;
+       list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
+               mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
+       mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
+
+       err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+       if (err)
+               goto out_mailbox;
+
+       /* remove the qp from all the steering entries */
+       list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
+               found = false;
+               list_for_each_entry(dqp, &entry->duplicates, list) {
+                       if (dqp->qpn == qpn) {
+                               found = true;
+                               break;
+                       }
+               }
+               if (found) {
+                       /* a duplicate, no need to change the mgm,
+                        * only update the duplicates list */
+                       list_del(&dqp->list);
+                       kfree(dqp);
+               } else {
+                       err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
+                       if (err)
+                               goto out_mailbox;
+                       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+                       for (loc = -1, i = 0; i < members_count; ++i)
+                               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
+                                       loc = i;
+
+                       mgm->members_count = cpu_to_be32(--members_count |
+                                                        (MLX4_PROT_ETH << 30));
+                       mgm->qp[loc] = mgm->qp[i - 1];
+                       mgm->qp[i - 1] = 0;
+
+                       err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
+                       if (err)
+                               goto out_mailbox;
+               }
+
+       }
+
+out_mailbox:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+out_list:
+       if (back_to_list)
+               list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
+       else
+               kfree(pqp);
+out_mutex:
+       mutex_unlock(&priv->mcg_table.mutex);
+       return err;
+}
+
+/*
+ * Caller must hold the MCG table mutex.  gid and mgm parameters must
+ * be properly aligned for command interface.
+ *
+ * Returns 0 unless a firmware command error occurs.
+ *
+ * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
+ * and *mgm holds MGM entry.
+ *
+ * If GID is found in AMGM, *index = index in AMGM, *prev = index of
+ * previous entry in hash chain and *mgm holds AMGM entry.
+ *
+ * If no AMGM exists for given gid, *index = -1, *prev = index of last
+ * entry in hash chain and *mgm holds end of hash chain.
+ */
+static int find_entry(struct mlx4_dev *dev, u8 port,
+                     u8 *gid, enum mlx4_protocol prot,
+                     enum mlx4_steer_type steer,
+                     struct mlx4_cmd_mailbox *mgm_mailbox,
+                     u16 *hash, int *prev, int *index)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_mgm *mgm = mgm_mailbox->buf;
+       u8 *mgid;
+       int err;
+       u8 op_mod = (prot == MLX4_PROT_ETH) ?
+               !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return -ENOMEM;
+       mgid = mailbox->buf;
+
+       memcpy(mgid, gid, 16);
+
+       err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       if (err)
+               return err;
+
+       if (0)
+               mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash);
+
+       *index = *hash;
+       *prev  = -1;
+
+       do {
+               err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
+               if (err)
+                       return err;
+
+               if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
+                       if (*index != *hash) {
+                               mlx4_err(dev, "Found zero MGID in AMGM.\n");
+                               err = -EINVAL;
+                       }
+                       return err;
+               }
+
+               if (!memcmp(mgm->gid, gid, 16) &&
+                   be32_to_cpu(mgm->members_count) >> 30 == prot)
+                       return err;
+
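+               /* follow the AMGM hash chain; the next entry's index is
+                * kept in bits 31:6 of next_gid_index */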
+               *prev = *index;
+               *index = be32_to_cpu(mgm->next_gid_index) >> 6;
+       } while (*index);
+
+       *index = -1;
+       return err;
+}
+
+int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                         int block_mcast_loopback, enum mlx4_protocol prot,
+                         enum mlx4_steer_type steer)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_mgm *mgm;
+       u32 members_count;
+       u16 hash;
+       int index, prev;
+       int link = 0;
+       int i;
+       int err;
+       u8 port = gid[5];
+       u8 new_entry = 0;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       mgm = mailbox->buf;
+
+       mutex_lock(&priv->mcg_table.mutex);
+       err = find_entry(dev, port, gid, prot, steer,
+                        mailbox, &hash, &prev, &index);
+       if (err)
+               goto out;
+
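+       /* find_entry() returns index == -1 when the GID is absent and the
+        * hash chain is exhausted; a new AMGM entry is linked in below */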
+       if (index != -1) {
+               if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
+                       new_entry = 1;
+                       memcpy(mgm->gid, gid, 16);
+               }
+       } else {
+               link = 1;
+
+               index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
+               if (index == -1) {
+                       mlx4_err(dev, "No AMGM entries left\n");
+                       err = -ENOMEM;
+                       goto out;
+               }
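+               /* the AMGM region starts right after the num_mgms hash
+                * entries, so bias the allocated bitmap offset */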
+               index += dev->caps.num_mgms;
+
+               memset(mgm, 0, sizeof *mgm);
+               memcpy(mgm->gid, gid, 16);
+       }
+
+       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+       if (members_count == MLX4_QP_PER_MGM) {
+               mlx4_err(dev, "MGM at index %x is full.\n", index);
+               err = -ENOMEM;
+               goto out;
+       }
+
+       for (i = 0; i < members_count; ++i)
+               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
+                       mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
+                       err = 0;
+                       goto out;
+               }
+
+       if (block_mcast_loopback)
+               mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
+                                                      (1U << MGM_BLCK_LB_BIT));
+       else
+               mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
+
+       mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
+
+       err = mlx4_WRITE_ENTRY(dev, index, mailbox);
+       if (err)
+               goto out;
+
+       if (!link)
+               goto out;
+
+       err = mlx4_READ_ENTRY(dev, prev, mailbox);
+       if (err)
+               goto out;
+
+       mgm->next_gid_index = cpu_to_be32(index << 6);
+
+       err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
+       if (err)
+               goto out;
+
+out:
+       if (prot == MLX4_PROT_ETH) {
+               /* manage the steering entry for promisc mode */
+               if (new_entry)
+                       new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+               else
+                       existing_steering_entry(dev, 0, port, steer,
+                                               index, qp->qpn);
+       }
+       if (err && link && index != -1) {
+               if (index < dev->caps.num_mgms)
+                       mlx4_warn(dev, "Got AMGM index %d < %d",
+                                 index, dev->caps.num_mgms);
+               else
+                       mlx4_bitmap_free(&priv->mcg_table.bitmap,
+                                        index - dev->caps.num_mgms);
+       }
+       mutex_unlock(&priv->mcg_table.mutex);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                         enum mlx4_protocol prot, enum mlx4_steer_type steer)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_mgm *mgm;
+       u32 members_count;
+       u16 hash;
+       int prev, index;
+       int i, loc;
+       int err;
+       u8 port = gid[5];
+       bool removed_entry = false;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       mgm = mailbox->buf;
+
+       mutex_lock(&priv->mcg_table.mutex);
+
+       err = find_entry(dev, port, gid, prot, steer,
+                        mailbox, &hash, &prev, &index);
+       if (err)
+               goto out;
+
+       if (index == -1) {
+               mlx4_err(dev, "MGID %pI6 not found\n", gid);
+               err = -EINVAL;
+               goto out;
+       }
+
+       /* if this qp is also a promisc qp, it shouldn't be removed */
+       if (prot == MLX4_PROT_ETH &&
+           check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+               goto out;
+
+       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+       for (loc = -1, i = 0; i < members_count; ++i)
+               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
+                       loc = i;
+
+       if (loc == -1) {
+               mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
+               err = -EINVAL;
+               goto out;
+       }
+
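+       /* delete by copying the last member into the freed slot and
+        * clearing the vacated tail entry */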
+       mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
+       mgm->qp[loc]       = mgm->qp[i - 1];
+       mgm->qp[i - 1]     = 0;
+
+       if (prot == MLX4_PROT_ETH)
+               removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
+       if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
+               err = mlx4_WRITE_ENTRY(dev, index, mailbox);
+               goto out;
+       }
+
+       /* We are going to delete the entry, members count should be 0 */
+       mgm->members_count = cpu_to_be32((u32) prot << 30);
+
+       if (prev == -1) {
+               /* Remove entry from MGM */
+               int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
+               if (amgm_index) {
+                       err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
+                       if (err)
+                               goto out;
+               } else
+                       memset(mgm->gid, 0, 16);
+
+               err = mlx4_WRITE_ENTRY(dev, index, mailbox);
+               if (err)
+                       goto out;
+
+               if (amgm_index) {
+                       if (amgm_index < dev->caps.num_mgms)
+                               mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
+                                         index, amgm_index, dev->caps.num_mgms);
+                       else
+                               mlx4_bitmap_free(&priv->mcg_table.bitmap,
+                                                amgm_index - dev->caps.num_mgms);
+               }
+       } else {
+               /* Remove entry from AMGM */
+               int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
+               err = mlx4_READ_ENTRY(dev, prev, mailbox);
+               if (err)
+                       goto out;
+
+               mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
+
+               err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
+               if (err)
+                       goto out;
+
+               if (index < dev->caps.num_mgms)
+                       mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
+                                 prev, index, dev->caps.num_mgms);
+               else
+                       mlx4_bitmap_free(&priv->mcg_table.bitmap,
+                                        index - dev->caps.num_mgms);
+       }
+
+out:
+       mutex_unlock(&priv->mcg_table.mutex);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                         int block_mcast_loopback, enum mlx4_protocol prot)
+{
+       enum mlx4_steer_type steer;
+
+       steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
+
+       if (prot == MLX4_PROT_ETH &&
+                       !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+               return 0;
+
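+       /* for Ethernet, fold the steering type into byte 7 of the GID so
+        * unicast and multicast entries hash to distinct MGM entries */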
+       if (prot == MLX4_PROT_ETH)
+               gid[7] |= (steer << 1);
+
+       return mlx4_qp_attach_common(dev, qp, gid,
+                                    block_mcast_loopback, prot,
+                                    steer);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
+
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                         enum mlx4_protocol prot)
+{
+       enum mlx4_steer_type steer;
+
+       steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
+
+       if (prot == MLX4_PROT_ETH &&
+                       !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+               return 0;
+
+       if (prot == MLX4_PROT_ETH)
+               gid[7] |= (steer << 1);
+
+       return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+
+int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+               return 0;
+
+       return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
+
+int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+               return 0;
+
+       return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
+
+int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+               return 0;
+
+       return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
+
+int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+               return 0;
+
+       return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
+
+int mlx4_init_mcg_table(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err;
+
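+       /* the bitmap tracks only the AMGM (hash-overflow) entries; the
+        * first num_mgms entries form the fixed hash table itself */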
+       err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
+                              dev->caps.num_amgms - 1, 0, 0);
+       if (err)
+               return err;
+
+       mutex_init(&priv->mcg_table.mutex);
+
+       return 0;
+}
+
+void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
+{
+       mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
new file mode 100644 (file)
index 0000000..a2fcd84
--- /dev/null
@@ -0,0 +1,459 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_H
+#define MLX4_H
+
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/timer.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/doorbell.h>
+
+#define DRV_NAME       "mlx4_core"
+#define DRV_VERSION    "1.0"
+#define DRV_RELDATE    "July 14, 2011"
+
+enum {
+       MLX4_HCR_BASE           = 0x80680,
+       MLX4_HCR_SIZE           = 0x0001c,
+       MLX4_CLR_INT_SIZE       = 0x00008
+};
+
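+/*
+ * An MGM entry is 256 bytes: a 32-byte header (next_gid_index,
+ * members_count, two reserved words and the 16-byte GID) followed by
+ * 4-byte QPNs, hence 4 * (256 / 16 - 2) = 56 QPs per entry.
+ */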
+enum {
+       MLX4_MGM_ENTRY_SIZE     =  0x100,
+       MLX4_QP_PER_MGM         = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
+       MLX4_MTT_ENTRY_PER_SEG  = 8
+};
+
+enum {
+       MLX4_NUM_PDS            = 1 << 15
+};
+
+enum {
+       MLX4_CMPT_TYPE_QP       = 0,
+       MLX4_CMPT_TYPE_SRQ      = 1,
+       MLX4_CMPT_TYPE_CQ       = 2,
+       MLX4_CMPT_TYPE_EQ       = 3,
+       MLX4_CMPT_NUM_TYPE
+};
+
+enum {
+       MLX4_CMPT_SHIFT         = 24,
+       MLX4_NUM_CMPTS          = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
+};
+
+#ifdef CONFIG_MLX4_DEBUG
+extern int mlx4_debug_level;
+#else /* CONFIG_MLX4_DEBUG */
+#define mlx4_debug_level       (0)
+#endif /* CONFIG_MLX4_DEBUG */
+
+#define mlx4_dbg(mdev, format, arg...)                                 \
+do {                                                                   \
+       if (mlx4_debug_level)                                           \
+               dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
+} while (0)
+
+#define mlx4_err(mdev, format, arg...) \
+       dev_err(&mdev->pdev->dev, format, ##arg)
+#define mlx4_info(mdev, format, arg...) \
+       dev_info(&mdev->pdev->dev, format, ##arg)
+#define mlx4_warn(mdev, format, arg...) \
+       dev_warn(&mdev->pdev->dev, format, ##arg)
+
+struct mlx4_bitmap {
+       u32                     last;
+       u32                     top;
+       u32                     max;
+       u32                     reserved_top;
+       u32                     mask;
+       u32                     avail;
+       spinlock_t              lock;
+       unsigned long          *table;
+};
+
+struct mlx4_buddy {
+       unsigned long         **bits;
+       unsigned int           *num_free;
+       int                     max_order;
+       spinlock_t              lock;
+};
+
+struct mlx4_icm;
+
+struct mlx4_icm_table {
+       u64                     virt;
+       int                     num_icm;
+       int                     num_obj;
+       int                     obj_size;
+       int                     lowmem;
+       int                     coherent;
+       struct mutex            mutex;
+       struct mlx4_icm       **icm;
+};
+
+struct mlx4_eq {
+       struct mlx4_dev        *dev;
+       void __iomem           *doorbell;
+       int                     eqn;
+       u32                     cons_index;
+       u16                     irq;
+       u16                     have_irq;
+       int                     nent;
+       struct mlx4_buf_list   *page_list;
+       struct mlx4_mtt         mtt;
+};
+
+struct mlx4_profile {
+       int                     num_qp;
+       int                     rdmarc_per_qp;
+       int                     num_srq;
+       int                     num_cq;
+       int                     num_mcg;
+       int                     num_mpt;
+       int                     num_mtt;
+};
+
+struct mlx4_fw {
+       u64                     clr_int_base;
+       u64                     catas_offset;
+       struct mlx4_icm        *fw_icm;
+       struct mlx4_icm        *aux_icm;
+       u32                     catas_size;
+       u16                     fw_pages;
+       u8                      clr_int_bar;
+       u8                      catas_bar;
+};
+
+#define MGM_QPN_MASK       0x00FFFFFF
+#define MGM_BLCK_LB_BIT    30
+
+struct mlx4_promisc_qp {
+       struct list_head list;
+       u32 qpn;
+};
+
+struct mlx4_steer_index {
+       struct list_head list;
+       unsigned int index;
+       struct list_head duplicates;
+};
+
+struct mlx4_mgm {
+       __be32                  next_gid_index;
+       __be32                  members_count;
+       u32                     reserved[2];
+       u8                      gid[16];
+       __be32                  qp[MLX4_QP_PER_MGM];
+};
+
+struct mlx4_cmd {
+       struct pci_pool        *pool;
+       void __iomem           *hcr;
+       struct mutex            hcr_mutex;
+       struct semaphore        poll_sem;
+       struct semaphore        event_sem;
+       int                     max_cmds;
+       spinlock_t              context_lock;
+       int                     free_head;
+       struct mlx4_cmd_context *context;
+       u16                     token_mask;
+       u8                      use_events;
+       u8                      toggle;
+};
+
+struct mlx4_uar_table {
+       struct mlx4_bitmap      bitmap;
+};
+
+struct mlx4_mr_table {
+       struct mlx4_bitmap      mpt_bitmap;
+       struct mlx4_buddy       mtt_buddy;
+       u64                     mtt_base;
+       u64                     mpt_base;
+       struct mlx4_icm_table   mtt_table;
+       struct mlx4_icm_table   dmpt_table;
+};
+
+struct mlx4_cq_table {
+       struct mlx4_bitmap      bitmap;
+       spinlock_t              lock;
+       struct radix_tree_root  tree;
+       struct mlx4_icm_table   table;
+       struct mlx4_icm_table   cmpt_table;
+};
+
+struct mlx4_eq_table {
+       struct mlx4_bitmap      bitmap;
+       char                   *irq_names;
+       void __iomem           *clr_int;
+       void __iomem          **uar_map;
+       u32                     clr_mask;
+       struct mlx4_eq         *eq;
+       struct mlx4_icm_table   table;
+       struct mlx4_icm_table   cmpt_table;
+       int                     have_irq;
+       u8                      inta_pin;
+};
+
+struct mlx4_srq_table {
+       struct mlx4_bitmap      bitmap;
+       spinlock_t              lock;
+       struct radix_tree_root  tree;
+       struct mlx4_icm_table   table;
+       struct mlx4_icm_table   cmpt_table;
+};
+
+struct mlx4_qp_table {
+       struct mlx4_bitmap      bitmap;
+       u32                     rdmarc_base;
+       int                     rdmarc_shift;
+       spinlock_t              lock;
+       struct mlx4_icm_table   qp_table;
+       struct mlx4_icm_table   auxc_table;
+       struct mlx4_icm_table   altc_table;
+       struct mlx4_icm_table   rdmarc_table;
+       struct mlx4_icm_table   cmpt_table;
+};
+
+struct mlx4_mcg_table {
+       struct mutex            mutex;
+       struct mlx4_bitmap      bitmap;
+       struct mlx4_icm_table   table;
+};
+
+struct mlx4_catas_err {
+       u32 __iomem            *map;
+       struct timer_list       timer;
+       struct list_head        list;
+};
+
+#define MLX4_MAX_MAC_NUM       128
+#define MLX4_MAC_TABLE_SIZE    (MLX4_MAX_MAC_NUM << 3)
+
+struct mlx4_mac_table {
+       __be64                  entries[MLX4_MAX_MAC_NUM];
+       int                     refs[MLX4_MAX_MAC_NUM];
+       struct mutex            mutex;
+       int                     total;
+       int                     max;
+};
+
+#define MLX4_MAX_VLAN_NUM      128
+#define MLX4_VLAN_TABLE_SIZE   (MLX4_MAX_VLAN_NUM << 2)
+
+struct mlx4_vlan_table {
+       __be32                  entries[MLX4_MAX_VLAN_NUM];
+       int                     refs[MLX4_MAX_VLAN_NUM];
+       struct mutex            mutex;
+       int                     total;
+       int                     max;
+};
+
+struct mlx4_mac_entry {
+       u64 mac;
+};
+
+struct mlx4_port_info {
+       struct mlx4_dev        *dev;
+       int                     port;
+       char                    dev_name[16];
+       struct device_attribute port_attr;
+       enum mlx4_port_type     tmp_type;
+       struct mlx4_mac_table   mac_table;
+       struct radix_tree_root  mac_tree;
+       struct mlx4_vlan_table  vlan_table;
+       int                     base_qpn;
+};
+
+struct mlx4_sense {
+       struct mlx4_dev         *dev;
+       u8                      do_sense_port[MLX4_MAX_PORTS + 1];
+       u8                      sense_allowed[MLX4_MAX_PORTS + 1];
+       struct delayed_work     sense_poll;
+};
+
+struct mlx4_msix_ctl {
+       u64             pool_bm;
+       spinlock_t      pool_lock;
+};
+
+struct mlx4_steer {
+       struct list_head promisc_qps[MLX4_NUM_STEERS];
+       struct list_head steer_entries[MLX4_NUM_STEERS];
+       struct list_head high_prios;
+};
+
+struct mlx4_priv {
+       struct mlx4_dev         dev;
+
+       struct list_head        dev_list;
+       struct list_head        ctx_list;
+       spinlock_t              ctx_lock;
+
+       struct list_head        pgdir_list;
+       struct mutex            pgdir_mutex;
+
+       struct mlx4_fw          fw;
+       struct mlx4_cmd         cmd;
+
+       struct mlx4_bitmap      pd_bitmap;
+       struct mlx4_uar_table   uar_table;
+       struct mlx4_mr_table    mr_table;
+       struct mlx4_cq_table    cq_table;
+       struct mlx4_eq_table    eq_table;
+       struct mlx4_srq_table   srq_table;
+       struct mlx4_qp_table    qp_table;
+       struct mlx4_mcg_table   mcg_table;
+       struct mlx4_bitmap      counters_bitmap;
+
+       struct mlx4_catas_err   catas_err;
+
+       void __iomem           *clr_base;
+
+       struct mlx4_uar         driver_uar;
+       void __iomem           *kar;
+       struct mlx4_port_info   port[MLX4_MAX_PORTS + 1];
+       struct mlx4_sense       sense;
+       struct mutex            port_mutex;
+       struct mlx4_msix_ctl    msix_ctl;
+       struct mlx4_steer       *steer;
+       struct list_head        bf_list;
+       struct mutex            bf_mutex;
+       struct io_mapping       *bf_mapping;
+};
+
+static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
+{
+       return container_of(dev, struct mlx4_priv, dev);
+}
+
+#define MLX4_SENSE_RANGE       (HZ * 3)
+
+extern struct workqueue_struct *mlx4_wq;
+
+u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
+void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+                    u32 reserved_bot, u32 reserved_top);
+void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
+
+int mlx4_reset(struct mlx4_dev *dev);
+
+int mlx4_alloc_eq_table(struct mlx4_dev *dev);
+void mlx4_free_eq_table(struct mlx4_dev *dev);
+
+int mlx4_init_pd_table(struct mlx4_dev *dev);
+int mlx4_init_uar_table(struct mlx4_dev *dev);
+int mlx4_init_mr_table(struct mlx4_dev *dev);
+int mlx4_init_eq_table(struct mlx4_dev *dev);
+int mlx4_init_cq_table(struct mlx4_dev *dev);
+int mlx4_init_qp_table(struct mlx4_dev *dev);
+int mlx4_init_srq_table(struct mlx4_dev *dev);
+int mlx4_init_mcg_table(struct mlx4_dev *dev);
+
+void mlx4_cleanup_pd_table(struct mlx4_dev *dev);
+void mlx4_cleanup_uar_table(struct mlx4_dev *dev);
+void mlx4_cleanup_mr_table(struct mlx4_dev *dev);
+void mlx4_cleanup_eq_table(struct mlx4_dev *dev);
+void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
+void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
+void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
+void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+
+void mlx4_start_catas_poll(struct mlx4_dev *dev);
+void mlx4_stop_catas_poll(struct mlx4_dev *dev);
+void mlx4_catas_init(void);
+int mlx4_restart_one(struct pci_dev *pdev);
+int mlx4_register_device(struct mlx4_dev *dev);
+void mlx4_unregister_device(struct mlx4_dev *dev);
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port);
+
+struct mlx4_dev_cap;
+struct mlx4_init_hca_param;
+
+u64 mlx4_make_profile(struct mlx4_dev *dev,
+                     struct mlx4_profile *request,
+                     struct mlx4_dev_cap *dev_cap,
+                     struct mlx4_init_hca_param *init_hca);
+
+int mlx4_cmd_init(struct mlx4_dev *dev);
+void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
+int mlx4_cmd_use_events(struct mlx4_dev *dev);
+void mlx4_cmd_use_polling(struct mlx4_dev *dev);
+
+void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
+void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
+
+void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
+
+void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
+
+void mlx4_handle_catas_err(struct mlx4_dev *dev);
+
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+                   enum mlx4_port_type *type);
+void mlx4_do_sense_ports(struct mlx4_dev *dev,
+                        enum mlx4_port_type *stype,
+                        enum mlx4_port_type *defaults);
+void mlx4_start_sense(struct mlx4_dev *dev);
+void mlx4_stop_sense(struct mlx4_dev *dev);
+void mlx4_sense_init(struct mlx4_dev *dev);
+int mlx4_check_port_params(struct mlx4_dev *dev,
+                          enum mlx4_port_type *port_type);
+int mlx4_change_port_types(struct mlx4_dev *dev,
+                          enum mlx4_port_type *port_types);
+
+void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
+void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
+
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
+
+int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                         enum mlx4_protocol prot, enum mlx4_steer_type steer);
+int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+                         int block_mcast_loopback, enum mlx4_protocol prot,
+                         enum mlx4_steer_type steer);
+#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
new file mode 100644 (file)
index 0000000..ed84811
--- /dev/null
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _MLX4_EN_H_
+#define _MLX4_EN_H_
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/srq.h>
+#include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
+
+#include "en_port.h"
+
+#define DRV_NAME       "mlx4_en"
+#define DRV_VERSION    "1.5.4.1"
+#define DRV_RELDATE    "March 2011"
+
+#define MLX4_EN_MSG_LEVEL      (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
+
+/*
+ * Device constants
+ */
+
+#define MLX4_EN_PAGE_SHIFT     12
+#define MLX4_EN_PAGE_SIZE      (1 << MLX4_EN_PAGE_SHIFT)
+#define MAX_RX_RINGS           16
+#define MIN_RX_RINGS           4
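+/* Tx rings are carved into 64-byte Tx Basic Blocks (TXBBs); a single
+ * descriptor may span several of them (see MAX_DESC_TXBBS below) */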
+#define TXBB_SIZE              64
+#define HEADROOM               (2048 / TXBB_SIZE + 1)
+#define STAMP_STRIDE           64
+#define STAMP_DWORDS           (STAMP_STRIDE / 4)
+#define STAMP_SHIFT            31
+#define STAMP_VAL              0x7fffffff
+#define STATS_DELAY            (HZ / 4)
+
+/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
+#define MAX_DESC_SIZE          512
+#define MAX_DESC_TXBBS         (MAX_DESC_SIZE / TXBB_SIZE)
+
+/*
+ * OS related constants and tunables
+ */
+
+#define MLX4_EN_WATCHDOG_TIMEOUT       (15 * HZ)
+
+#define MLX4_EN_ALLOC_ORDER    2
+#define MLX4_EN_ALLOC_SIZE     (PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
+
+#define MLX4_EN_MAX_LRO_DESCRIPTORS    32
+
+/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
+ * and 4K allocations) */
+enum {
+       FRAG_SZ0 = 512 - NET_IP_ALIGN,
+       FRAG_SZ1 = 1024,
+       FRAG_SZ2 = 4096,
+       FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
+};
+#define MLX4_EN_MAX_RX_FRAGS   4
+
+/* Maximum ring sizes */
+#define MLX4_EN_MAX_TX_SIZE    8192
+#define MLX4_EN_MAX_RX_SIZE    8192
+
+/* Minimum ring size for our page-allocation scheme to work */
+#define MLX4_EN_MIN_RX_SIZE    (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
+#define MLX4_EN_MIN_TX_SIZE    (4096 / TXBB_SIZE)
+
+#define MLX4_EN_SMALL_PKT_SIZE         64
+#define MLX4_EN_NUM_TX_RINGS           8
+#define MLX4_EN_NUM_PPP_RINGS          8
+#define MAX_TX_RINGS                   (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
+#define MLX4_EN_DEF_TX_RING_SIZE       512
+#define MLX4_EN_DEF_RX_RING_SIZE       1024
+
+/* Target number of packets to coalesce with interrupt moderation */
+#define MLX4_EN_RX_COAL_TARGET 44
+#define MLX4_EN_RX_COAL_TIME   0x10
+
+#define MLX4_EN_TX_COAL_PKTS   5
+#define MLX4_EN_TX_COAL_TIME   0x80
+
+#define MLX4_EN_RX_RATE_LOW            400000
+#define MLX4_EN_RX_COAL_TIME_LOW       0
+#define MLX4_EN_RX_RATE_HIGH           450000
+#define MLX4_EN_RX_COAL_TIME_HIGH      128
+#define MLX4_EN_RX_SIZE_THRESH         1024
+#define MLX4_EN_RX_RATE_THRESH         (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
+#define MLX4_EN_SAMPLE_INTERVAL                0
+#define MLX4_EN_AVG_PKT_SMALL          256
+
+#define MLX4_EN_AUTO_CONF      0xffff
+
+#define MLX4_EN_DEF_RX_PAUSE   1
+#define MLX4_EN_DEF_TX_PAUSE   1
+
+/* Interval between successive polls in the Tx routine when polling is used
+   instead of interrupts (in per-core Tx rings) - should be power of 2 */
+#define MLX4_EN_TX_POLL_MODER  16
+#define MLX4_EN_TX_POLL_TIMEOUT        (HZ / 4)
+
+#define ETH_LLC_SNAP_SIZE      8
+
+#define SMALL_PACKET_SIZE      (256 - NET_IP_ALIGN)
+#define HEADER_COPY_SIZE       (128 - NET_IP_ALIGN)
+#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
+
+#define MLX4_EN_MIN_MTU                46
+#define ETH_BCAST              0xffffffffffffULL
+
+#define MLX4_EN_LOOPBACK_RETRIES       5
+#define MLX4_EN_LOOPBACK_TIMEOUT       100
+
+#ifdef MLX4_EN_PERF_STAT
+/* Number of samples to 'average' */
+#define AVG_SIZE                       128
+#define AVG_FACTOR                     1024
+#define NUM_PERF_STATS                 NUM_PERF_COUNTERS
+
+#define INC_PERF_COUNTER(cnt)          (++(cnt))
+#define ADD_PERF_COUNTER(cnt, add)     ((cnt) += (add))
+#define AVG_PERF_COUNTER(cnt, sample) \
+       ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
+#define GET_PERF_COUNTER(cnt)          (cnt)
+#define GET_AVG_PERF_COUNTER(cnt)      ((cnt) / AVG_FACTOR)
+
+#else
+
+#define NUM_PERF_STATS                 0
+#define INC_PERF_COUNTER(cnt)          do {} while (0)
+#define ADD_PERF_COUNTER(cnt, add)     do {} while (0)
+#define AVG_PERF_COUNTER(cnt, sample)  do {} while (0)
+#define GET_PERF_COUNTER(cnt)          (0)
+#define GET_AVG_PERF_COUNTER(cnt)      (0)
+#endif /* MLX4_EN_PERF_STAT */
+
+/*
+ * Configurables
+ */
+
+enum cq_type {
+       RX = 0,
+       TX = 1,
+};
+
+
+/*
+ * Useful macros
+ */
+#define ROUNDUP_LOG2(x)                ilog2(roundup_pow_of_two(x))
+#define XNOR(x, y)             (!(x) == !(y))
+#define ILLEGAL_MAC(addr)      ((addr) == 0xffffffffffffULL || (addr) == 0x0)
+
+
+struct mlx4_en_tx_info {
+       struct sk_buff *skb;
+       u32 nr_txbb;
+       u8 linear;
+       u8 data_offset;
+       u8 inl;
+};
+
+
+#define MLX4_EN_BIT_DESC_OWN   0x80000000
+#define CTRL_SIZE      sizeof(struct mlx4_wqe_ctrl_seg)
+#define MLX4_EN_MEMTYPE_PAD    0x100
+#define DS_SIZE                sizeof(struct mlx4_wqe_data_seg)
+
+
+struct mlx4_en_tx_desc {
+       struct mlx4_wqe_ctrl_seg ctrl;
+       union {
+               struct mlx4_wqe_data_seg data; /* at least one data segment */
+               struct mlx4_wqe_lso_seg lso;
+               struct mlx4_wqe_inline_seg inl;
+       };
+};
+
+#define MLX4_EN_USE_SRQ                0x01000000
+
+#define MLX4_EN_CX3_LOW_ID     0x1000
+#define MLX4_EN_CX3_HIGH_ID    0x1005
+
+struct mlx4_en_rx_alloc {
+       struct page *page;
+       u16 offset;
+};
+
+struct mlx4_en_tx_ring {
+       struct mlx4_hwq_resources wqres;
+       u32 size; /* number of TXBBs */
+       u32 size_mask;
+       u16 stride;
+       u16 cqn;        /* index of port CQ associated with this ring */
+       u32 prod;
+       u32 cons;
+       u32 buf_size;
+       u32 doorbell_qpn;
+       void *buf;
+       u16 poll_cnt;
+       int blocked;
+       struct mlx4_en_tx_info *tx_info;
+       u8 *bounce_buf;
+       u32 last_nr_txbb;
+       struct mlx4_qp qp;
+       struct mlx4_qp_context context;
+       int qpn;
+       enum mlx4_qp_state qp_state;
+       struct mlx4_srq dummy;
+       unsigned long bytes;
+       unsigned long packets;
+       spinlock_t comp_lock;
+       struct mlx4_bf bf;
+       bool bf_enabled;
+};
+
+struct mlx4_en_rx_desc {
+       /* actual number of entries depends on rx ring stride */
+       struct mlx4_wqe_data_seg data[0];
+};
+
+struct mlx4_en_rx_ring {
+       struct mlx4_hwq_resources wqres;
+       struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
+       u32 size;       /* number of Rx descs */
+       u32 actual_size;
+       u32 size_mask;
+       u16 stride;
+       u16 log_stride;
+       u16 cqn;        /* index of port CQ associated with this ring */
+       u32 prod;
+       u32 cons;
+       u32 buf_size;
+       void *buf;
+       void *rx_info;
+       unsigned long bytes;
+       unsigned long packets;
+};
+
+
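+/* A CQE is LRO-eligible only when the masked status bits match exactly
+ * IPv4 + TCP with a good IP checksum; any fragment, IP-option, IPv6 or
+ * UDP bit set in the mask makes the comparison fail. */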
+static inline int mlx4_en_can_lro(__be16 status)
+{
+       return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4       |
+                                    MLX4_CQE_STATUS_IPV4F      |
+                                    MLX4_CQE_STATUS_IPV6       |
+                                    MLX4_CQE_STATUS_IPV4OPT    |
+                                    MLX4_CQE_STATUS_TCP        |
+                                    MLX4_CQE_STATUS_UDP        |
+                                    MLX4_CQE_STATUS_IPOK)) ==
+               cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+                           MLX4_CQE_STATUS_IPOK |
+                           MLX4_CQE_STATUS_TCP);
+}
+
+struct mlx4_en_cq {
+       struct mlx4_cq          mcq;
+       struct mlx4_hwq_resources wqres;
+       int                     ring;
+       spinlock_t              lock;
+       struct net_device      *dev;
+       struct napi_struct      napi;
+       /* Per-core Tx cq processing support */
+       struct timer_list timer;
+       int size;
+       int buf_size;
+       unsigned vector;
+       enum cq_type is_tx;
+       u16 moder_time;
+       u16 moder_cnt;
+       struct mlx4_cqe *buf;
+#define MLX4_EN_OPCODE_ERROR   0x1e
+};
+
+struct mlx4_en_port_profile {
+       u32 flags;
+       u32 tx_ring_num;
+       u32 rx_ring_num;
+       u32 tx_ring_size;
+       u32 rx_ring_size;
+       u8 rx_pause;
+       u8 rx_ppp;
+       u8 tx_pause;
+       u8 tx_ppp;
+};
+
+struct mlx4_en_profile {
+       int rss_xor;
+       int tcp_rss;
+       int udp_rss;
+       u8 rss_mask;
+       u32 active_ports;
+       u32 small_pkt_int;
+       u8 no_reset;
+       struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
+};
+
+struct mlx4_en_dev {
+       struct mlx4_dev         *dev;
+       struct pci_dev          *pdev;
+       struct mutex            state_lock;
+       struct net_device       *pndev[MLX4_MAX_PORTS + 1];
+       u32                     port_cnt;
+       bool                    device_up;
+       struct mlx4_en_profile  profile;
+       u32                     LSO_support;
+       struct workqueue_struct *workqueue;
+       struct device           *dma_device;
+       void __iomem            *uar_map;
+       struct mlx4_uar         priv_uar;
+       struct mlx4_mr          mr;
+       u32                     priv_pdn;
+       spinlock_t              uar_lock;
+       u8                      mac_removed[MLX4_MAX_PORTS + 1];
+};
+
+
+struct mlx4_en_rss_map {
+       int base_qpn;
+       struct mlx4_qp qps[MAX_RX_RINGS];
+       enum mlx4_qp_state state[MAX_RX_RINGS];
+       struct mlx4_qp indir_qp;
+       enum mlx4_qp_state indir_state;
+};
+
+struct mlx4_en_rss_context {
+       __be32 base_qpn;
+       __be32 default_qpn;
+       u16 reserved;
+       u8 hash_fn;
+       u8 flags;
+       __be32 rss_key[10];
+       __be32 base_qpn_udp;
+};
+
+struct mlx4_en_port_state {
+       int link_state;
+       int link_speed;
+       int transciver;
+};
+
+struct mlx4_en_pkt_stats {
+       unsigned long broadcast;
+       unsigned long rx_prio[8];
+       unsigned long tx_prio[8];
+#define NUM_PKT_STATS          17
+};
+
+struct mlx4_en_port_stats {
+       unsigned long tso_packets;
+       unsigned long queue_stopped;
+       unsigned long wake_queue;
+       unsigned long tx_timeout;
+       unsigned long rx_alloc_failed;
+       unsigned long rx_chksum_good;
+       unsigned long rx_chksum_none;
+       unsigned long tx_chksum_offload;
+#define NUM_PORT_STATS         8
+};
+
+struct mlx4_en_perf_stats {
+       u32 tx_poll;
+       u64 tx_pktsz_avg;
+       u32 inflight_avg;
+       u16 tx_coal_avg;
+       u16 rx_coal_avg;
+       u32 napi_quota;
+#define NUM_PERF_COUNTERS              6
+};
+
+struct mlx4_en_frag_info {
+       u16 frag_size;
+       u16 frag_prefix_size;
+       u16 frag_stride;
+       u16 frag_align;
+       u16 last_offset;
+
+};
+
+struct mlx4_en_priv {
+       struct mlx4_en_dev *mdev;
+       struct mlx4_en_port_profile *prof;
+       struct net_device *dev;
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+       struct net_device_stats stats;
+       struct net_device_stats ret_stats;
+       struct mlx4_en_port_state port_state;
+       spinlock_t stats_lock;
+
+       unsigned long last_moder_packets;
+       unsigned long last_moder_tx_packets;
+       unsigned long last_moder_bytes;
+       unsigned long last_moder_jiffies;
+       int last_moder_time;
+       u16 rx_usecs;
+       u16 rx_frames;
+       u16 tx_usecs;
+       u16 tx_frames;
+       u32 pkt_rate_low;
+       u16 rx_usecs_low;
+       u32 pkt_rate_high;
+       u16 rx_usecs_high;
+       u16 sample_interval;
+       u16 adaptive_rx_coal;
+       u32 msg_enable;
+       u32 loopback_ok;
+       u32 validate_loopback;
+
+       struct mlx4_hwq_resources res;
+       int link_state;
+       int last_link_state;
+       bool port_up;
+       int port;
+       int registered;
+       int allocated;
+       int stride;
+       u64 mac;
+       int mac_index;
+       unsigned max_mtu;
+       int base_qpn;
+
+       struct mlx4_en_rss_map rss_map;
+       u32 flags;
+#define MLX4_EN_FLAG_PROMISC   0x1
+#define MLX4_EN_FLAG_MC_PROMISC        0x2
+       u32 tx_ring_num;
+       u32 rx_ring_num;
+       u32 rx_skb_size;
+       struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
+       u16 num_frags;
+       u16 log_rx_info;
+
+       struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
+       int tx_vector;
+       struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
+       struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
+       struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+       struct work_struct mcast_task;
+       struct work_struct mac_task;
+       struct work_struct watchdog_task;
+       struct work_struct linkstate_task;
+       struct delayed_work stats_task;
+       struct mlx4_en_perf_stats pstats;
+       struct mlx4_en_pkt_stats pkstats;
+       struct mlx4_en_port_stats port_stats;
+       char *mc_addrs;
+       int mc_addrs_cnt;
+       struct mlx4_en_stat_out_mbox hw_stats;
+       int vids[128];
+       bool wol;
+};
+
+enum mlx4_en_wol {
+       MLX4_EN_WOL_MAGIC = (1ULL << 61),
+       MLX4_EN_WOL_ENABLED = (1ULL << 62),
+       MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
+};
+
+
+void mlx4_en_destroy_netdev(struct net_device *dev);
+int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+                       struct mlx4_en_port_profile *prof);
+
+int mlx4_en_start_port(struct net_device *dev);
+void mlx4_en_stop_port(struct net_device *dev);
+
+void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors);
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+                     int entries, int ring, enum cq_type mode);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+                       bool reserve_vectors);
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+
+void mlx4_en_poll_tx_cq(unsigned long data);
+void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
+netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+                          int qpn, u32 size, u16 stride);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+                            struct mlx4_en_tx_ring *ring,
+                            int cq);
+void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+                               struct mlx4_en_tx_ring *ring);
+
+int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+                          struct mlx4_en_rx_ring *ring,
+                          u32 size, u16 stride);
+void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+                            struct mlx4_en_rx_ring *ring);
+int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
+void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+                               struct mlx4_en_rx_ring *ring);
+int mlx4_en_process_rx_cq(struct net_device *dev,
+                         struct mlx4_en_cq *cq,
+                         int budget);
+int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
+                            int is_tx, int rss, int qpn, int cqn,
+                            struct mlx4_qp_context *context);
+void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
+int mlx4_en_map_buffer(struct mlx4_buf *buf);
+void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
+
+void mlx4_en_calc_rx_buf(struct net_device *dev);
+int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
+void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
+int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
+void mlx4_en_rx_irq(struct mlx4_cq *mcq);
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+                         u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+                          u8 promisc);
+
+int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
+int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
+
+#define MLX4_EN_NUM_SELF_TEST  5
+void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
+u64 mlx4_en_mac_to_u64(u8 *addr);
+
+/*
+ * Globals
+ */
+extern const struct ethtool_ops mlx4_en_ethtool_ops;
+
+
+
+/*
+ * printk / logging functions
+ */
+
+int en_print(const char *level, const struct mlx4_en_priv *priv,
+            const char *format, ...) __attribute__ ((format (printf, 3, 4)));
+
+#define en_dbg(mlevel, priv, format, arg...)                   \
+do {                                                           \
+       if (NETIF_MSG_##mlevel & (priv)->msg_enable)            \
+               en_print(KERN_DEBUG, priv, format, ##arg);      \
+} while (0)
+#define en_warn(priv, format, arg...)                  \
+       en_print(KERN_WARNING, priv, format, ##arg)
+#define en_err(priv, format, arg...)                   \
+       en_print(KERN_ERR, priv, format, ##arg)
+#define en_info(priv, format, arg...)                  \
+       en_print(KERN_INFO, priv, format, ## arg)
+
+#define mlx4_err(mdev, format, arg...)                 \
+       pr_err("%s %s: " format, DRV_NAME,              \
+              dev_name(&mdev->pdev->dev), ##arg)
+#define mlx4_info(mdev, format, arg...)                        \
+       pr_info("%s %s: " format, DRV_NAME,             \
+               dev_name(&mdev->pdev->dev), ##arg)
+#define mlx4_warn(mdev, format, arg...)                        \
+       pr_warning("%s %s: " format, DRV_NAME,          \
+                  dev_name(&mdev->pdev->dev), ##arg)
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
new file mode 100644 (file)
index 0000000..9c188bd
--- /dev/null
@@ -0,0 +1,667 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+/*
+ * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+       __be32 flags;
+       __be32 qpn;
+       __be32 key;
+       __be32 pd_flags;
+       __be64 start;
+       __be64 length;
+       __be32 lkey;
+       __be32 win_cnt;
+       u8      reserved1[3];
+       u8      mtt_rep;
+       __be64 mtt_seg;
+       __be32 mtt_sz;
+       __be32 entity_size;
+       __be32 first_byte_offset;
+} __packed;
+
+#define MLX4_MPT_FLAG_SW_OWNS      (0xfUL << 28)
+#define MLX4_MPT_FLAG_FREE         (0x3UL << 28)
+#define MLX4_MPT_FLAG_MIO          (1 << 17)
+#define MLX4_MPT_FLAG_BIND_ENABLE   (1 << 15)
+#define MLX4_MPT_FLAG_PHYSICAL     (1 <<  9)
+#define MLX4_MPT_FLAG_REGION       (1 <<  8)
+
+#define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 27)
+#define MLX4_MPT_PD_FLAG_RAE       (1 << 28)
+#define MLX4_MPT_PD_FLAG_EN_INV            (3 << 24)
+
+#define MLX4_MPT_STATUS_SW             0xF0
+#define MLX4_MPT_STATUS_HW             0x00
+
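+/*
+ * Buddy allocator for MTT segments: bits[o] marks free blocks of order o
+ * (2^o segments).  Allocation takes the smallest free block of at least
+ * the requested order and splits it, freeing the split-off buddy one
+ * order down at each step.
+ */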
+static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
+{
+       int o;
+       int m;
+       u32 seg;
+
+       spin_lock(&buddy->lock);
+
+       for (o = order; o <= buddy->max_order; ++o)
+               if (buddy->num_free[o]) {
+                       m = 1 << (buddy->max_order - o);
+                       seg = find_first_bit(buddy->bits[o], m);
+                       if (seg < m)
+                               goto found;
+               }
+
+       spin_unlock(&buddy->lock);
+       return -1;
+
+ found:
+       clear_bit(seg, buddy->bits[o]);
+       --buddy->num_free[o];
+
+       while (o > order) {
+               --o;
+               seg <<= 1;
+               set_bit(seg ^ 1, buddy->bits[o]);
+               ++buddy->num_free[o];
+       }
+
+       spin_unlock(&buddy->lock);
+
+       seg <<= order;
+
+       return seg;
+}
+
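+/* Free a block and coalesce upward: while the buddy block (seg ^ 1) is
+ * also free, merge the pair into one block of the next higher order. */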
+static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
+{
+       seg >>= order;
+
+       spin_lock(&buddy->lock);
+
+       while (test_bit(seg ^ 1, buddy->bits[order])) {
+               clear_bit(seg ^ 1, buddy->bits[order]);
+               --buddy->num_free[order];
+               seg >>= 1;
+               ++order;
+       }
+
+       set_bit(seg, buddy->bits[order]);
+       ++buddy->num_free[order];
+
+       spin_unlock(&buddy->lock);
+}
+
+static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
+{
+       int i, s;
+
+       buddy->max_order = max_order;
+       spin_lock_init(&buddy->lock);
+
+       buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
+                             GFP_KERNEL);
+       buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int),
+                                 GFP_KERNEL);
+       if (!buddy->bits || !buddy->num_free)
+               goto err_out;
+
+       for (i = 0; i <= buddy->max_order; ++i) {
+               s = BITS_TO_LONGS(1 << (buddy->max_order - i));
+               buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
+               if (!buddy->bits[i])
+                       goto err_out_free;
+               bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+       }
+
+       set_bit(0, buddy->bits[buddy->max_order]);
+       buddy->num_free[buddy->max_order] = 1;
+
+       return 0;
+
+err_out_free:
+       for (i = 0; i <= buddy->max_order; ++i)
+               kfree(buddy->bits[i]);
+
+err_out:
+       kfree(buddy->bits);
+       kfree(buddy->num_free);
+
+       return -ENOMEM;
+}
+
+static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
+{
+       int i;
+
+       for (i = 0; i <= buddy->max_order; ++i)
+               kfree(buddy->bits[i]);
+
+       kfree(buddy->bits);
+       kfree(buddy->num_free);
+}
+
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+       u32 seg;
+
+       seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
+       if (seg == -1)
+               return -1;
+
+       if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
+                                seg + (1 << order) - 1)) {
+               mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
+               return -1;
+       }
+
+       return seg;
+}
+
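+/* Compute the buddy order covering npages MTT entries: order 0 is one
+ * segment of mtts_per_seg entries, and each higher order doubles the
+ * number of segments. */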
+int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
+                 struct mlx4_mtt *mtt)
+{
+       int i;
+
+       if (!npages) {
+               mtt->order      = -1;
+               mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
+               return 0;
+       } else
+               mtt->page_shift = page_shift;
+
+       for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
+               ++mtt->order;
+
+       mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
+       if (mtt->first_seg == -1)
+               return -ENOMEM;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_mtt_init);
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
+       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+       if (mtt->order < 0)
+               return;
+
+       mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
+       mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
+                            mtt->first_seg + (1 << mtt->order) - 1);
+}
+EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
+
+u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
+       return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
+}
+EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
+
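+/* A memory key is the 32-bit MPT index rotated left by 8 bits (and the
+ * inverse rotation recovers the index).  FMR remapping bumps the index
+ * by num_mpts, changing the key while (index & (num_mpts - 1)) still
+ * selects the same MPT entry. */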
+static u32 hw_index_to_key(u32 ind)
+{
+       return (ind >> 24) | (ind << 8);
+}
+
+static u32 key_to_hw_index(u32 key)
+{
+       return (key << 24) | (key >> 8);
+}
+
+static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+                         int mpt_index)
+{
+       return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
+                       MLX4_CMD_TIME_CLASS_B);
+}
+
+static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+                         int mpt_index)
+{
+       return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
+                           !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+                 int npages, int page_shift, struct mlx4_mr *mr)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       u32 index;
+       int err;
+
+       index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+       if (index == -1)
+               return -ENOMEM;
+
+       mr->iova       = iova;
+       mr->size       = size;
+       mr->pd         = pd;
+       mr->access     = access;
+       mr->enabled    = 0;
+       mr->key        = hw_index_to_key(index);
+
+       err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+       if (err)
+               mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
+
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err;
+
+       if (mr->enabled) {
+               err = mlx4_HW2SW_MPT(dev, NULL,
+                                    key_to_hw_index(mr->key) &
+                                    (dev->caps.num_mpts - 1));
+               if (err)
+                       mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
+       }
+
+       mlx4_mtt_cleanup(dev, &mr->mtt);
+       mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free);
+
+int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_mpt_entry *mpt_entry;
+       int err;
+
+       err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+       if (err)
+               return err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               goto err_table;
+       }
+       mpt_entry = mailbox->buf;
+
+       memset(mpt_entry, 0, sizeof *mpt_entry);
+
+       mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO         |
+                                      MLX4_MPT_FLAG_REGION      |
+                                      mr->access);
+
+       mpt_entry->key         = cpu_to_be32(key_to_hw_index(mr->key));
+       mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
+       mpt_entry->start       = cpu_to_be64(mr->iova);
+       mpt_entry->length      = cpu_to_be64(mr->size);
+       mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
+       if (mr->mtt.order < 0) {
+               mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
+               mpt_entry->mtt_seg = 0;
+       } else {
+               mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+       }
+
+       if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+               /* fast register MR in free state */
+               mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+               mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+                                                  MLX4_MPT_PD_FLAG_RAE);
+               mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
+                                                  dev->caps.mtts_per_seg);
+       } else {
+               mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+       }
+
+       err = mlx4_SW2HW_MPT(dev, mailbox,
+                            key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
+       if (err) {
+               mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
+               goto err_cmd;
+       }
+
+       mr->enabled = 1;
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return 0;
+
+err_cmd:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+err_table:
+       mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_enable);
+
+static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                               int start_index, int npages, u64 *page_list)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       __be64 *mtts;
+       dma_addr_t dma_handle;
+       int i;
+       int s = start_index * sizeof (u64);
+
+       /* All MTTs must fit in the same page */
+       if (start_index / (PAGE_SIZE / sizeof (u64)) !=
+           (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
+               return -EINVAL;
+
+       if (start_index & (dev->caps.mtts_per_seg - 1))
+               return -EINVAL;
+
+       mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
+                               s / dev->caps.mtt_entry_sz, &dma_handle);
+       if (!mtts)
+               return -ENOMEM;
+
+       dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+                               npages * sizeof (u64), DMA_TO_DEVICE);
+
+       for (i = 0; i < npages; ++i)
+               mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
+
+       dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+                                  npages * sizeof (u64), DMA_TO_DEVICE);
+
+       return 0;
+}
+
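+/* Write MTT entries in chunks of at most one page's worth of entries,
+ * since mlx4_write_mtt_chunk() maps and syncs a single page of the MTT
+ * table at a time. */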
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                  int start_index, int npages, u64 *page_list)
+{
+       int chunk;
+       int err;
+
+       if (mtt->order < 0)
+               return -EINVAL;
+
+       while (npages > 0) {
+               chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
+               err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
+               if (err)
+                       return err;
+
+               npages      -= chunk;
+               start_index += chunk;
+               page_list   += chunk;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_write_mtt);
+
+int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                      struct mlx4_buf *buf)
+{
+       u64 *page_list;
+       int err;
+       int i;
+
+       page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
+       if (!page_list)
+               return -ENOMEM;
+
+       for (i = 0; i < buf->npages; ++i)
+               if (buf->nbufs == 1)
+                       page_list[i] = buf->direct.map + (i << buf->page_shift);
+               else
+                       page_list[i] = buf->page_list[i].map;
+
+       err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
+
+       kfree(page_list);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
+
+int mlx4_init_mr_table(struct mlx4_dev *dev)
+{
+       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+       int err;
+
+       err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
+                              ~0, dev->caps.reserved_mrws, 0);
+       if (err)
+               return err;
+
+       err = mlx4_buddy_init(&mr_table->mtt_buddy,
+                             ilog2(dev->caps.num_mtt_segs));
+       if (err)
+               goto err_buddy;
+
+       if (dev->caps.reserved_mtts) {
+               if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
+                       mlx4_warn(dev, "MTT table of order %d is too small.\n",
+                                 mr_table->mtt_buddy.max_order);
+                       err = -ENOMEM;
+                       goto err_reserve_mtts;
+               }
+       }
+
+       return 0;
+
+err_reserve_mtts:
+       mlx4_buddy_cleanup(&mr_table->mtt_buddy);
+
+err_buddy:
+       mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
+
+       return err;
+}
+
+void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
+{
+       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+       mlx4_buddy_cleanup(&mr_table->mtt_buddy);
+       mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
+}
+
+static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
+                                 int npages, u64 iova)
+{
+       int i, page_mask;
+
+       if (npages > fmr->max_pages)
+               return -EINVAL;
+
+       page_mask = (1 << fmr->page_shift) - 1;
+
+       /* We are getting page lists, so va must be page aligned. */
+       if (iova & page_mask)
+               return -EINVAL;
+
+       /* Trust the user not to pass misaligned data in page_list */
+       if (0)
+               for (i = 0; i < npages; ++i) {
+                       if (page_list[i] & ~page_mask)
+                               return -EINVAL;
+               }
+
+       if (fmr->maps >= fmr->max_maps)
+               return -EINVAL;
+
+       return 0;
+}
+
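+/* FMR remap protocol: move MPT ownership back to software, rewrite the
+ * MTT entries and the MPT key/length/start fields, then hand ownership
+ * to hardware again; the write barriers order each step's DMA-visible
+ * effects. */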
+int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
+                     int npages, u64 iova, u32 *lkey, u32 *rkey)
+{
+       u32 key;
+       int i, err;
+
+       err = mlx4_check_fmr(fmr, page_list, npages, iova);
+       if (err)
+               return err;
+
+       ++fmr->maps;
+
+       key = key_to_hw_index(fmr->mr.key);
+       key += dev->caps.num_mpts;
+       *lkey = *rkey = fmr->mr.key = hw_index_to_key(key);
+
+       *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+
+       /* Make sure MPT status is visible before writing MTT entries */
+       wmb();
+
+       dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+                               npages * sizeof(u64), DMA_TO_DEVICE);
+
+       for (i = 0; i < npages; ++i)
+               fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
+
+       dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
+                                  npages * sizeof(u64), DMA_TO_DEVICE);
+
+       fmr->mpt->key    = cpu_to_be32(key);
+       fmr->mpt->lkey   = cpu_to_be32(key);
+       fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
+       fmr->mpt->start  = cpu_to_be64(iova);
+
+       /* Make sure MTT entries are visible before setting MPT status */
+       wmb();
+
+       *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;
+
+       /* Make sure MPT status is visible before consumer can use FMR */
+       wmb();
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
+
+int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
+                  int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err = -ENOMEM;
+
+       if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+               return -EINVAL;
+
+       /* All MTTs must fit in the same page */
+       if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+               return -EINVAL;
+
+       fmr->page_shift = page_shift;
+       fmr->max_pages  = max_pages;
+       fmr->max_maps   = max_maps;
+       fmr->maps = 0;
+
+       err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
+                           page_shift, &fmr->mr);
+       if (err)
+               return err;
+
+       fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+                                   fmr->mr.mtt.first_seg,
+                                   &fmr->dma_handle);
+       if (!fmr->mtts) {
+               err = -ENOMEM;
+               goto err_free;
+       }
+
+       return 0;
+
+err_free:
+       mlx4_mr_free(dev, &fmr->mr);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
+
+int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err;
+
+       err = mlx4_mr_enable(dev, &fmr->mr);
+       if (err)
+               return err;
+
+       fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
+                                   key_to_hw_index(fmr->mr.key), NULL);
+       if (!fmr->mpt)
+               return -ENOMEM;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
+
+void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
+                   u32 *lkey, u32 *rkey)
+{
+       if (!fmr->maps)
+               return;
+
+       fmr->maps = 0;
+
+       *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
+
+int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+       if (fmr->maps)
+               return -EBUSY;
+
+       fmr->mr.enabled = 0;
+       mlx4_mr_free(dev, &fmr->mr);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free);
+
+int mlx4_SYNC_TPT(struct mlx4_dev *dev)
+{
+       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+}
+EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
new file mode 100644 (file)
index 0000000..1286b88
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/io-mapping.h>
+
+#include <asm/page.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+enum {
+       MLX4_NUM_RESERVED_UARS = 8
+};
+
+int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
+       if (*pdn == -1)
+               return -ENOMEM;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
+
+void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
+{
+       mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
+}
+EXPORT_SYMBOL_GPL(mlx4_pd_free);
+
+int mlx4_init_pd_table(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
+                               (1 << 24) - 1, dev->caps.reserved_pds, 0);
+}
+
+void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
+{
+       mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
+}
+
+
+int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
+{
+       uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
+       if (uar->index == -1)
+               return -ENOMEM;
+
+       uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
+       uar->map = NULL;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
+
+void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
+{
+       mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
+}
+EXPORT_SYMBOL_GPL(mlx4_uar_free);
+
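+/* BlueFlame registers live in write-combining UAR pages: free_bf_bmap
+ * tracks which registers of a page are taken, and UARs with spare
+ * registers stay on priv->bf_list for reuse by later allocations. */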
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_uar *uar;
+       int err = 0;
+       int idx;
+
+       if (!priv->bf_mapping)
+               return -ENOMEM;
+
+       mutex_lock(&priv->bf_mutex);
+       if (!list_empty(&priv->bf_list))
+               uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list);
+       else {
+               if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+               uar = kmalloc(sizeof *uar, GFP_KERNEL);
+               if (!uar) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+               err = mlx4_uar_alloc(dev, uar);
+               if (err)
+                       goto free_kmalloc;
+
+               uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
+               if (!uar->map) {
+                       err = -ENOMEM;
+                       goto free_uar;
+               }
+
+               uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
+               if (!uar->bf_map) {
+                       err = -ENOMEM;
+                       goto unmap_uar;
+               }
+               uar->free_bf_bmap = 0;
+               list_add(&uar->bf_list, &priv->bf_list);
+       }
+
+       idx = ffz(uar->free_bf_bmap);
+       uar->free_bf_bmap |= 1 << idx;
+       bf->uar = uar;
+       bf->offset = 0;
+       bf->buf_size = dev->caps.bf_reg_size / 2;
+       bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size;
+       if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1)
+               list_del_init(&uar->bf_list);
+
+       goto out;
+
+unmap_uar:
+       bf->uar = NULL;
+       iounmap(uar->map);
+
+free_uar:
+       mlx4_uar_free(dev, uar);
+
+free_kmalloc:
+       kfree(uar);
+
+out:
+       mutex_unlock(&priv->bf_mutex);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_bf_alloc);
+
+void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int idx;
+
+       if (!bf->uar || !bf->uar->bf_map)
+               return;
+
+       mutex_lock(&priv->bf_mutex);
+       idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size;
+       bf->uar->free_bf_bmap &= ~(1 << idx);
+       if (!bf->uar->free_bf_bmap) {
+               if (!list_empty(&bf->uar->bf_list))
+                       list_del(&bf->uar->bf_list);
+
+               io_mapping_unmap(bf->uar->bf_map);
+               iounmap(bf->uar->map);
+               mlx4_uar_free(dev, bf->uar);
+               kfree(bf->uar);
+       } else if (list_empty(&bf->uar->bf_list))
+               list_add(&bf->uar->bf_list, &priv->bf_list);
+
+       mutex_unlock(&priv->bf_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_bf_free);
+
+int mlx4_init_uar_table(struct mlx4_dev *dev)
+{
+       if (dev->caps.num_uars <= 128) {
+               mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
+                        dev->caps.num_uars);
+               mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
+               return -ENODEV;
+       }
+
+       return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
+                               dev->caps.num_uars, dev->caps.num_uars - 1,
+                               max(128, dev->caps.reserved_uars), 0);
+}
+
+void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
+{
+       mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
new file mode 100644 (file)
index 0000000..609e0ec
--- /dev/null
@@ -0,0 +1,487 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+#define MLX4_MAC_VALID         (1ull << 63)
+#define MLX4_MAC_MASK          0xffffffffffffULL
+
+#define MLX4_VLAN_VALID                (1u << 31)
+#define MLX4_VLAN_MASK         0xfff
+
+void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
+{
+       int i;
+
+       mutex_init(&table->mutex);
+       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+               table->entries[i] = 0;
+               table->refs[i]   = 0;
+       }
+       table->max   = 1 << dev->caps.log_num_macs;
+       table->total = 0;
+}
+
+void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
+{
+       int i;
+
+       mutex_init(&table->mutex);
+       for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
+               table->entries[i] = 0;
+               table->refs[i]   = 0;
+       }
+       table->max   = 1 << dev->caps.log_num_vlans;
+       table->total = 0;
+}
+
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+                                  __be64 *entries)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 in_mod;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+       in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
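+/* Unicast steering attaches a dedicated QP to a synthetic GID encoding
+ * the MAC in bytes 10..15, the port number in byte 5 and the steering
+ * type in byte 7. */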
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
+                            u64 mac, int *qpn, u8 reserve)
+{
+       struct mlx4_qp qp;
+       u8 gid[16] = {0};
+       int err;
+
+       if (reserve) {
+               err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+               if (err) {
+                       mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+                       return err;
+               }
+       }
+       qp.qpn = *qpn;
+
+       mac &= 0xffffffffffffULL;
+       mac = cpu_to_be64(mac << 16);
+       memcpy(&gid[10], &mac, ETH_ALEN);
+       gid[5] = port;
+       gid[7] = MLX4_UC_STEER << 1;
+
+       err = mlx4_qp_attach_common(dev, &qp, gid, 0,
+                                   MLX4_PROT_ETH, MLX4_UC_STEER);
+       if (err && reserve)
+               mlx4_qp_release_range(dev, *qpn, 1);
+
+       return err;
+}
+
+static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
+                                 u64 mac, int qpn, u8 free)
+{
+       struct mlx4_qp qp;
+       u8 gid[16] = {0};
+
+       qp.qpn = qpn;
+       mac &= 0xffffffffffffULL;
+       mac = cpu_to_be64(mac << 16);
+       memcpy(&gid[10], &mac, ETH_ALEN);
+       gid[5] = port;
+       gid[7] = MLX4_UC_STEER << 1;
+
+       mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
+       if (free)
+               mlx4_qp_release_range(dev, qpn, 1);
+}
+
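+/* Port MAC entries are reference counted: registering an existing MAC
+ * only bumps its count, otherwise the first free slot is programmed
+ * into the port table with a SET_PORT command. */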
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
+{
+       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+       struct mlx4_mac_table *table = &info->mac_table;
+       struct mlx4_mac_entry *entry;
+       int i, err = 0;
+       int free = -1;
+
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+               err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
+               if (!err) {
+                       entry = kmalloc(sizeof *entry, GFP_KERNEL);
+                       if (!entry) {
+                               mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
+                               return -ENOMEM;
+                       }
+                       entry->mac = mac;
+                       err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+                       if (err) {
+                               mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
+                               return err;
+                       }
+               } else
+                       return err;
+       }
+       mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+       mutex_lock(&table->mutex);
+       for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
+               if (free < 0 && !table->refs[i]) {
+                       free = i;
+                       continue;
+               }
+
+               if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+                       /* MAC already registered, increase reference count */
+                       ++table->refs[i];
+                       goto out;
+               }
+       }
+
+       if (free < 0) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       mlx4_dbg(dev, "Free MAC index is %d\n", free);
+
+       if (table->total == table->max) {
+               /* No free mac entries */
+               err = -ENOSPC;
+               goto out;
+       }
+
+       /* Register new MAC */
+       table->refs[free] = 1;
+       table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
+
+       err = mlx4_set_port_mac_table(dev, port, table->entries);
+       if (unlikely(err)) {
+               mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
+               table->refs[free] = 0;
+               table->entries[free] = 0;
+               goto out;
+       }
+
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+               *qpn = info->base_qpn + free;
+       ++table->total;
+out:
+       mutex_unlock(&table->mutex);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
+
+static int validate_index(struct mlx4_dev *dev,
+                         struct mlx4_mac_table *table, int index)
+{
+       int err = 0;
+
+       if (index < 0 || index >= table->max || !table->entries[index]) {
+               mlx4_warn(dev, "No valid Mac entry for the given index\n");
+               err = -EINVAL;
+       }
+       return err;
+}
+
+static int find_index(struct mlx4_dev *dev,
+                     struct mlx4_mac_table *table, u64 mac)
+{
+       int i;
+       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+               if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+                       return i;
+       }
+       /* MAC not found */
+       return -EINVAL;
+}
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+{
+       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+       struct mlx4_mac_table *table = &info->mac_table;
+       int index = qpn - info->base_qpn;
+       struct mlx4_mac_entry *entry;
+
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+               entry = radix_tree_lookup(&info->mac_tree, qpn);
+               if (entry) {
+                       mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
+                       radix_tree_delete(&info->mac_tree, qpn);
+                       index = find_index(dev, table, entry->mac);
+                       kfree(entry);
+               }
+       }
+
+       mutex_lock(&table->mutex);
+
+       if (validate_index(dev, table, index))
+               goto out;
+
+       /* Drop one reference; clear the entry once no references remain */
+       if (!(--table->refs[index])) {
+               table->entries[index] = 0;
+               mlx4_set_port_mac_table(dev, port, table->entries);
+               --table->total;
+       }
+out:
+       mutex_unlock(&table->mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
+
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+{
+       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+       struct mlx4_mac_table *table = &info->mac_table;
+       int index = qpn - info->base_qpn;
+       struct mlx4_mac_entry *entry;
+       int err;
+
+       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+               entry = radix_tree_lookup(&info->mac_tree, qpn);
+               if (!entry)
+                       return -EINVAL;
+               index = find_index(dev, table, entry->mac);
+               mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+               entry->mac = new_mac;
+               err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
+               if (err || index < 0)
+                       return err;
+       }
+
+       mutex_lock(&table->mutex);
+
+       err = validate_index(dev, table, index);
+       if (err)
+               goto out;
+
+       table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
+
+       err = mlx4_set_port_mac_table(dev, port, table->entries);
+       if (unlikely(err)) {
+               mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+               table->entries[index] = 0;
+       }
+out:
+       mutex_unlock(&table->mutex);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+
+static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
+                                   __be32 *entries)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 in_mod;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
+       in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+
+       return err;
+}
+
+int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
+{
+       struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+       int i;
+
+       for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
+               if (table->refs[i] &&
+                   (vid == (MLX4_VLAN_MASK &
+                             be32_to_cpu(table->entries[i])))) {
+                       /* VLAN found in cache; return its index */
+                       *idx = i;
+                       return 0;
+               }
+       }
+
+       return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+       struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+       int i, err = 0;
+       int free = -1;
+
+       mutex_lock(&table->mutex);
+       for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
+               if (free < 0 && (table->refs[i] == 0)) {
+                       free = i;
+                       continue;
+               }
+
+               if (table->refs[i] &&
+                   (vlan == (MLX4_VLAN_MASK &
+                             be32_to_cpu(table->entries[i])))) {
+                       /* VLAN already registered, increase reference count */
+                       *index = i;
+                       ++table->refs[i];
+                       goto out;
+               }
+       }
+
+       if (free < 0) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       if (table->total == table->max) {
+               /* No free vlan entries */
+               err = -ENOSPC;
+               goto out;
+       }
+
+       /* Register new VLAN */
+       table->refs[free] = 1;
+       table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
+
+       err = mlx4_set_port_vlan_table(dev, port, table->entries);
+       if (unlikely(err)) {
+               mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
+               table->refs[free] = 0;
+               table->entries[free] = 0;
+               goto out;
+       }
+
+       *index = free;
+       ++table->total;
+out:
+       mutex_unlock(&table->mutex);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_vlan);
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+       struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+
+       if (index < MLX4_VLAN_REGULAR) {
+               mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
+               return;
+       }
+
+       mutex_lock(&table->mutex);
+       if (!table->refs[index]) {
+               mlx4_warn(dev, "No vlan entry for index %d\n", index);
+               goto out;
+       }
+       if (--table->refs[index]) {
+               mlx4_dbg(dev, "Have more references for index %d,"
+                        "no need to modify vlan table\n", index);
+               goto out;
+       }
+       table->entries[index] = 0;
+       mlx4_set_port_vlan_table(dev, port, table->entries);
+       --table->total;
+out:
+       mutex_unlock(&table->mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
+
+int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
+{
+       struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
+       u8 *inbuf, *outbuf;
+       int err;
+
+       inmailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(inmailbox))
+               return PTR_ERR(inmailbox);
+
+       outmailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(outmailbox)) {
+               mlx4_free_cmd_mailbox(dev, inmailbox);
+               return PTR_ERR(outmailbox);
+       }
+
+       inbuf = inmailbox->buf;
+       outbuf = outmailbox->buf;
+       memset(inbuf, 0, 256);
+       memset(outbuf, 0, 256);
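+       /* Build a SubnGet(PortInfo) MAD by hand: base version 1, mgmt
+        * class SUBN_LID_ROUTED (1), class version 1, method GET (1);
+        * attribute ID 0x0015 selects PortInfo and the attribute
+        * modifier selects the port.  The capability mask comes back at
+        * byte offset 84 of the response.
+        */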
+       inbuf[0] = 1;
+       inbuf[1] = 1;
+       inbuf[2] = 1;
+       inbuf[3] = 1;
+       *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
+       *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
+
+       err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
+                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+       if (!err)
+               *caps = *(__be32 *) (outbuf + 84);
+       mlx4_free_cmd_mailbox(dev, inmailbox);
+       mlx4_free_cmd_mailbox(dev, outmailbox);
+       return err;
+}
+
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       int err;
+
+       if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+               return 0;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       memset(mailbox->buf, 0, 256);
+
+       ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
+       err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
+                      MLX4_CMD_TIME_CLASS_B);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
new file mode 100644 (file)
index 0000000..b967647
--- /dev/null
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+enum {
+       MLX4_RES_QP,
+       MLX4_RES_RDMARC,
+       MLX4_RES_ALTC,
+       MLX4_RES_AUXC,
+       MLX4_RES_SRQ,
+       MLX4_RES_CQ,
+       MLX4_RES_EQ,
+       MLX4_RES_DMPT,
+       MLX4_RES_CMPT,
+       MLX4_RES_MTT,
+       MLX4_RES_MCG,
+       MLX4_RES_NUM
+};
+
+static const char *res_name[] = {
+       [MLX4_RES_QP]           = "QP",
+       [MLX4_RES_RDMARC]       = "RDMARC",
+       [MLX4_RES_ALTC]         = "ALTC",
+       [MLX4_RES_AUXC]         = "AUXC",
+       [MLX4_RES_SRQ]          = "SRQ",
+       [MLX4_RES_CQ]           = "CQ",
+       [MLX4_RES_EQ]           = "EQ",
+       [MLX4_RES_DMPT]         = "DMPT",
+       [MLX4_RES_CMPT]         = "CMPT",
+       [MLX4_RES_MTT]          = "MTT",
+       [MLX4_RES_MCG]          = "MCG",
+};
+
+u64 mlx4_make_profile(struct mlx4_dev *dev,
+                     struct mlx4_profile *request,
+                     struct mlx4_dev_cap *dev_cap,
+                     struct mlx4_init_hca_param *init_hca)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource {
+               u64 size;
+               u64 start;
+               int type;
+               int num;
+               int log_num;
+       };
+
+       u64 total_size = 0;
+       struct mlx4_resource *profile;
+       struct mlx4_resource tmp;
+       int i, j;
+
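+       /* On failure a negative errno is returned cast to u64; the
+        * caller detects this with a signed comparison.
+        */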
+       profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
+       if (!profile)
+               return -ENOMEM;
+
+       profile[MLX4_RES_QP].size     = dev_cap->qpc_entry_sz;
+       profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
+       profile[MLX4_RES_ALTC].size   = dev_cap->altc_entry_sz;
+       profile[MLX4_RES_AUXC].size   = dev_cap->aux_entry_sz;
+       profile[MLX4_RES_SRQ].size    = dev_cap->srq_entry_sz;
+       profile[MLX4_RES_CQ].size     = dev_cap->cqc_entry_sz;
+       profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
+       profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
+       profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
+       profile[MLX4_RES_MTT].size    = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+       profile[MLX4_RES_MCG].size    = MLX4_MGM_ENTRY_SIZE;
+
+       profile[MLX4_RES_QP].num      = request->num_qp;
+       profile[MLX4_RES_RDMARC].num  = request->num_qp * request->rdmarc_per_qp;
+       profile[MLX4_RES_ALTC].num    = request->num_qp;
+       profile[MLX4_RES_AUXC].num    = request->num_qp;
+       profile[MLX4_RES_SRQ].num     = request->num_srq;
+       profile[MLX4_RES_CQ].num      = request->num_cq;
+       profile[MLX4_RES_EQ].num      = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
+       profile[MLX4_RES_DMPT].num    = request->num_mpt;
+       profile[MLX4_RES_CMPT].num    = MLX4_NUM_CMPTS;
+       profile[MLX4_RES_MTT].num     = request->num_mtt;
+       profile[MLX4_RES_MCG].num     = request->num_mcg;
+
+       for (i = 0; i < MLX4_RES_NUM; ++i) {
+               profile[i].type     = i;
+               profile[i].num      = roundup_pow_of_two(profile[i].num);
+               profile[i].log_num  = ilog2(profile[i].num);
+               profile[i].size    *= profile[i].num;
+               profile[i].size     = max(profile[i].size, (u64) PAGE_SIZE);
+       }
+
+       /*
+        * Sort the resources in decreasing order of size.  Since they
+        * all have sizes that are powers of 2, we'll be able to keep
+        * resources aligned to their size and pack them without gaps
+        * using the sorted order.
+        */
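+       /*
+        * For example, entries of 16K, 8K and 4K land at offsets 0, 16K
+        * and 24K: each start offset is a multiple of the (power-of-2)
+        * size placed there, with no padding in between.
+        */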
+       for (i = MLX4_RES_NUM; i > 0; --i)
+               for (j = 1; j < i; ++j) {
+                       if (profile[j].size > profile[j - 1].size) {
+                               tmp            = profile[j];
+                               profile[j]     = profile[j - 1];
+                               profile[j - 1] = tmp;
+                       }
+               }
+
+       for (i = 0; i < MLX4_RES_NUM; ++i) {
+               if (profile[i].size) {
+                       profile[i].start = total_size;
+                       total_size      += profile[i].size;
+               }
+
+               if (total_size > dev_cap->max_icm_sz) {
+                       mlx4_err(dev, "Profile requires 0x%llx bytes; "
+                                 "won't fit in 0x%llx bytes of context memory.\n",
+                                 (unsigned long long) total_size,
+                                 (unsigned long long) dev_cap->max_icm_sz);
+                       kfree(profile);
+                       return -ENOMEM;
+               }
+
+               if (profile[i].size)
+                       mlx4_dbg(dev, "  profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
+                                 "size 0x%10llx\n",
+                                i, res_name[profile[i].type], profile[i].log_num,
+                                (unsigned long long) profile[i].start,
+                                (unsigned long long) profile[i].size);
+       }
+
+       mlx4_dbg(dev, "HCA context memory: reserving %d KB\n",
+                (int) (total_size >> 10));
+
+       for (i = 0; i < MLX4_RES_NUM; ++i) {
+               switch (profile[i].type) {
+               case MLX4_RES_QP:
+                       dev->caps.num_qps     = profile[i].num;
+                       init_hca->qpc_base    = profile[i].start;
+                       init_hca->log_num_qps = profile[i].log_num;
+                       break;
+               case MLX4_RES_RDMARC:
+                       for (priv->qp_table.rdmarc_shift = 0;
+                            request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num;
+                            ++priv->qp_table.rdmarc_shift)
+                               ; /* nothing */
+                       dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift;
+                       priv->qp_table.rdmarc_base   = (u32) profile[i].start;
+                       init_hca->rdmarc_base        = profile[i].start;
+                       init_hca->log_rd_per_qp      = priv->qp_table.rdmarc_shift;
+                       break;
+               case MLX4_RES_ALTC:
+                       init_hca->altc_base = profile[i].start;
+                       break;
+               case MLX4_RES_AUXC:
+                       init_hca->auxc_base = profile[i].start;
+                       break;
+               case MLX4_RES_SRQ:
+                       dev->caps.num_srqs     = profile[i].num;
+                       init_hca->srqc_base    = profile[i].start;
+                       init_hca->log_num_srqs = profile[i].log_num;
+                       break;
+               case MLX4_RES_CQ:
+                       dev->caps.num_cqs     = profile[i].num;
+                       init_hca->cqc_base    = profile[i].start;
+                       init_hca->log_num_cqs = profile[i].log_num;
+                       break;
+               case MLX4_RES_EQ:
+                       dev->caps.num_eqs     = profile[i].num;
+                       init_hca->eqc_base    = profile[i].start;
+                       init_hca->log_num_eqs = profile[i].log_num;
+                       break;
+               case MLX4_RES_DMPT:
+                       dev->caps.num_mpts      = profile[i].num;
+                       priv->mr_table.mpt_base = profile[i].start;
+                       init_hca->dmpt_base     = profile[i].start;
+                       init_hca->log_mpt_sz    = profile[i].log_num;
+                       break;
+               case MLX4_RES_CMPT:
+                       init_hca->cmpt_base      = profile[i].start;
+                       break;
+               case MLX4_RES_MTT:
+                       dev->caps.num_mtt_segs   = profile[i].num;
+                       priv->mr_table.mtt_base  = profile[i].start;
+                       init_hca->mtt_base       = profile[i].start;
+                       break;
+               case MLX4_RES_MCG:
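+                       /* Half the MCG entries form the multicast hash
+                        * table (MGMs); the other half are auxiliary
+                        * entries for hash collisions (AMGMs).
+                        */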
+                       dev->caps.num_mgms        = profile[i].num >> 1;
+                       dev->caps.num_amgms       = profile[i].num >> 1;
+                       init_hca->mc_base         = profile[i].start;
+                       init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
+                       init_hca->log_mc_table_sz = profile[i].log_num;
+                       init_hca->log_mc_hash_sz  = profile[i].log_num - 1;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       /*
+        * PDs don't take any HCA memory, but we assign them as part
+        * of the HCA profile anyway.
+        */
+       dev->caps.num_pds = MLX4_NUM_PDS;
+
+       kfree(profile);
+       return total_size;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
new file mode 100644 (file)
index 0000000..ec9350e
--- /dev/null
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/gfp.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
+{
+       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+       struct mlx4_qp *qp;
+
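+       /* Take a reference under the lock so the QP cannot be freed
+        * while its event handler runs; the matching decrement below
+        * wakes anyone waiting in mlx4_qp_free().
+        */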
+       spin_lock(&qp_table->lock);
+
+       qp = __mlx4_qp_lookup(dev, qpn);
+       if (qp)
+               atomic_inc(&qp->refcount);
+
+       spin_unlock(&qp_table->lock);
+
+       if (!qp) {
+               mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+               return;
+       }
+
+       qp->event(qp, event_type);
+
+       if (atomic_dec_and_test(&qp->refcount))
+               complete(&qp->free);
+}
+
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                  enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+                  struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
+                  int sqd_event, struct mlx4_qp *qp)
+{
+       static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
+               [MLX4_QP_STATE_RST] = {
+                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
+                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
+                       [MLX4_QP_STATE_INIT]    = MLX4_CMD_RST2INIT_QP,
+               },
+               [MLX4_QP_STATE_INIT]  = {
+                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
+                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
+                       [MLX4_QP_STATE_INIT]    = MLX4_CMD_INIT2INIT_QP,
+                       [MLX4_QP_STATE_RTR]     = MLX4_CMD_INIT2RTR_QP,
+               },
+               [MLX4_QP_STATE_RTR]   = {
+                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
+                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
+                       [MLX4_QP_STATE_RTS]     = MLX4_CMD_RTR2RTS_QP,
+               },
+               [MLX4_QP_STATE_RTS]   = {
+                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
+                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
+                       [MLX4_QP_STATE_RTS]     = MLX4_CMD_RTS2RTS_QP,
+                       [MLX4_QP_STATE_SQD]     = MLX4_CMD_RTS2SQD_QP,
+               },
+               [MLX4_QP_STATE_SQD] = {
+                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
+                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
+                       [MLX4_QP_STATE_RTS]     = MLX4_CMD_SQD2RTS_QP,
+                       [MLX4_QP_STATE_SQD]     = MLX4_CMD_SQD2SQD_QP,
+               },
+               [MLX4_QP_STATE_SQER] = {
+                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
+                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
+                       [MLX4_QP_STATE_RTS]     = MLX4_CMD_SQERR2RTS_QP,
+               },
+               [MLX4_QP_STATE_ERR] = {
+                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
+                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
+               }
+       };
+
+       struct mlx4_cmd_mailbox *mailbox;
+       int ret = 0;
+
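+       /* Zero entries in op[][] mark transitions the firmware does not
+        * support.
+        */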
+       if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
+           !op[cur_state][new_state])
+               return -EINVAL;
+
+       if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
+               return mlx4_cmd(dev, 0, qp->qpn, 2,
+                               MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
+               u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
+               context->mtt_base_addr_h = mtt_addr >> 32;
+               context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
+               context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
+       }
+
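+       /* Mailbox layout: the optional-parameter mask occupies the first
+        * dword, with the QP context following at byte offset 8.
+        */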
+       *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
+       memcpy(mailbox->buf + 8, context, sizeof *context);
+
+       ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
+               cpu_to_be32(qp->qpn);
+
+       ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+                      new_state == MLX4_QP_STATE_RST ? 2 : 0,
+                      op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_modify);
+
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_qp_table *qp_table = &priv->qp_table;
+       int qpn;
+
+       qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+       if (qpn == -1)
+               return -ENOMEM;
+
+       *base = qpn;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_qp_table *qp_table = &priv->qp_table;
+       if (base_qpn < dev->caps.sqp_start + 8)
+               return;
+
+       mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_qp_table *qp_table = &priv->qp_table;
+       int err;
+
+       if (!qpn)
+               return -EINVAL;
+
+       qp->qpn = qpn;
+
+       err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+       if (err)
+               goto err_out;
+
+       err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+       if (err)
+               goto err_put_qp;
+
+       err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+       if (err)
+               goto err_put_auxc;
+
+       err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+       if (err)
+               goto err_put_altc;
+
+       err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+       if (err)
+               goto err_put_rdmarc;
+
+       spin_lock_irq(&qp_table->lock);
+       err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
+       spin_unlock_irq(&qp_table->lock);
+       if (err)
+               goto err_put_cmpt;
+
+       atomic_set(&qp->refcount, 1);
+       init_completion(&qp->free);
+
+       return 0;
+
+err_put_cmpt:
+       mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
+
+err_put_rdmarc:
+       mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+
+err_put_altc:
+       mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+
+err_put_auxc:
+       mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+
+err_put_qp:
+       mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+
+err_out:
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
+
+void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+       unsigned long flags;
+
+       spin_lock_irqsave(&qp_table->lock, flags);
+       radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
+       spin_unlock_irqrestore(&qp_table->lock, flags);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_remove);
+
+void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+
+       if (atomic_dec_and_test(&qp->refcount))
+               complete(&qp->free);
+       wait_for_completion(&qp->free);
+
+       mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+       mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_free);
+
+static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
+{
+       return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
+                       MLX4_CMD_TIME_CLASS_B);
+}
+
+int mlx4_init_qp_table(struct mlx4_dev *dev)
+{
+       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+       int err;
+       int reserved_from_top = 0;
+
+       spin_lock_init(&qp_table->lock);
+       INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+
+       /*
+        * We reserve 2 extra QPs per port for the special QPs.  The
+        * block of special QPs must be aligned to a multiple of 8, so
+        * round up.
+        */
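+       /*
+        * For example, with 64 QPs reserved for firmware, sqp_start is
+        * 64 and the block 64..71 holds the special QPs; this is why
+        * sqp_start + 8 is reserved from the bitmap below.
+        */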
+       dev->caps.sqp_start =
+               ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
+
+       {
+               int sort[MLX4_NUM_QP_REGION];
+               int i, j, tmp;
+               int last_base = dev->caps.num_qps;
+
+               for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
+                       sort[i] = i;
+
+               for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
+                       for (j = 2; j < i; ++j) {
+                               if (dev->caps.reserved_qps_cnt[sort[j]] >
+                                   dev->caps.reserved_qps_cnt[sort[j - 1]]) {
+                                       tmp             = sort[j];
+                                       sort[j]         = sort[j - 1];
+                                       sort[j - 1]     = tmp;
+                               }
+                       }
+               }
+
+               for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
+                       last_base -= dev->caps.reserved_qps_cnt[sort[i]];
+                       dev->caps.reserved_qps_base[sort[i]] = last_base;
+                       reserved_from_top +=
+                               dev->caps.reserved_qps_cnt[sort[i]];
+               }
+       }
+
+       err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
+                              (1 << 23) - 1, dev->caps.sqp_start + 8,
+                              reserved_from_top);
+       if (err)
+               return err;
+
+       return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
+}
+
+void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
+{
+       mlx4_CONF_SPECIAL_QP(dev, 0);
+       mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
+}
+
+int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                 struct mlx4_qp_context *context)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
+                          MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+       if (!err)
+               memcpy(context, mailbox->buf + 8, sizeof *context);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_query);
+
+int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                    struct mlx4_qp_context *context,
+                    struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
+{
+       int err;
+       int i;
+       enum mlx4_qp_state states[] = {
+               MLX4_QP_STATE_RST,
+               MLX4_QP_STATE_INIT,
+               MLX4_QP_STATE_RTR,
+               MLX4_QP_STATE_RTS
+       };
+
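+       /* Walk the QP through RST -> INIT -> RTR -> RTS, updating the
+        * state field (bits 31:28 of context->flags) before each step.
+        */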
+       for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
+               context->flags &= cpu_to_be32(~(0xf << 28));
+               context->flags |= cpu_to_be32(states[i + 1] << 28);
+               err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
+                                    context, 0, 0, qp);
+               if (err) {
+                       mlx4_err(dev, "Failed to bring QP to state: "
+                                "%d with error: %d\n",
+                                states[i + 1], err);
+                       return err;
+               }
+
+               *qp_state = states[i + 1];
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
new file mode 100644 (file)
index 0000000..11e7c1c
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+
+#include "mlx4.h"
+
+int mlx4_reset(struct mlx4_dev *dev)
+{
+       void __iomem *reset;
+       u32 *hca_header = NULL;
+       int pcie_cap;
+       u16 devctl;
+       u16 linkctl;
+       u16 vendor = 0xffff;
+       unsigned long end;
+       u32 sem;
+       int i;
+       int err = 0;
+
+#define MLX4_RESET_BASE                0xf0000
+#define MLX4_RESET_SIZE                  0x400
+#define MLX4_SEM_OFFSET                  0x3fc
+#define MLX4_RESET_OFFSET         0x10
+#define MLX4_RESET_VALUE       swab32(1)
+
+#define MLX4_SEM_TIMEOUT_JIFFIES       (10 * HZ)
+#define MLX4_RESET_TIMEOUT_JIFFIES     (2 * HZ)
+
+       /*
+        * Reset the chip.  This is somewhat ugly because we have to
+        * save off the PCI header before reset and then restore it
+        * after the chip reboots.  We skip config space offsets 22
+        * and 23 since those have a special meaning.
+        */
+
+       /* Do we need to save off the full 4K PCI Express header?? */
+       hca_header = kmalloc(256, GFP_KERNEL);
+       if (!hca_header) {
+               err = -ENOMEM;
+               mlx4_err(dev, "Couldn't allocate memory to save HCA "
+                         "PCI header, aborting.\n");
+               goto out;
+       }
+
+       pcie_cap = pci_pcie_cap(dev->pdev);
+
+       for (i = 0; i < 64; ++i) {
+               if (i == 22 || i == 23)
+                       continue;
+               if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
+                       err = -ENODEV;
+                       mlx4_err(dev, "Couldn't save HCA "
+                                 "PCI header, aborting.\n");
+                       goto out;
+               }
+       }
+
+       reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
+                       MLX4_RESET_SIZE);
+       if (!reset) {
+               err = -ENOMEM;
+               mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
+               goto out;
+       }
+
+       /* grab HW semaphore to lock out flash updates */
+       end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES;
+       do {
+               sem = readl(reset + MLX4_SEM_OFFSET);
+               if (!sem)
+                       break;
+
+               msleep(1);
+       } while (time_before(jiffies, end));
+
+       if (sem) {
+               mlx4_err(dev, "Failed to obtain HW semaphore, aborting\n");
+               err = -EAGAIN;
+               iounmap(reset);
+               goto out;
+       }
+
+       /* actually hit reset */
+       writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
+       iounmap(reset);
+
+       /* Docs say to wait one second before accessing device */
+       msleep(1000);
+
+       end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
+       do {
+               if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
+                   vendor != 0xffff)
+                       break;
+
+               msleep(1);
+       } while (time_before(jiffies, end));
+
+       if (vendor == 0xffff) {
+               err = -ENODEV;
+               mlx4_err(dev, "PCI device did not come back after reset, "
+                         "aborting.\n");
+               goto out;
+       }
+
+       /* Now restore the PCI headers */
+       if (pcie_cap) {
+               devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
+               if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL,
+                                          devctl)) {
+                       err = -ENODEV;
+                       mlx4_err(dev, "Couldn't restore HCA PCI Express "
+                                "Device Control register, aborting.\n");
+                       goto out;
+               }
+               linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
+               if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL,
+                                          linkctl)) {
+                       err = -ENODEV;
+                       mlx4_err(dev, "Couldn't restore HCA PCI Express "
+                                "Link control register, aborting.\n");
+                       goto out;
+               }
+       }
+
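+       /* Restore the rest of config space, saving PCI_COMMAND for last
+        * so the device is re-enabled only after everything else is back
+        * in place.
+        */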
+       for (i = 0; i < 16; ++i) {
+               if (i * 4 == PCI_COMMAND)
+                       continue;
+
+               if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
+                       err = -ENODEV;
+                       mlx4_err(dev, "Couldn't restore HCA reg %x, "
+                                 "aborting.\n", i);
+                       goto out;
+               }
+       }
+
+       if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
+                                  hca_header[PCI_COMMAND / 4])) {
+               err = -ENODEV;
+               mlx4_err(dev, "Couldn't restore HCA COMMAND, "
+                         "aborting.\n");
+               goto out;
+       }
+
+out:
+       kfree(hca_header);
+
+       return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
new file mode 100644 (file)
index 0000000..e2337a7
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+                   enum mlx4_port_type *type)
+{
+       u64 out_param;
+       int err = 0;
+
+       err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
+                          MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+       if (err) {
+               mlx4_err(dev, "Sense command failed for port: %d\n", port);
+               return err;
+       }
+
+       if (out_param > 2) {
+               mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
+               return -EINVAL;
+       }
+
+       *type = out_param;
+       return 0;
+}
+
+void mlx4_do_sense_ports(struct mlx4_dev *dev,
+                        enum mlx4_port_type *stype,
+                        enum mlx4_port_type *defaults)
+{
+       struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
+       int err;
+       int i;
+
+       for (i = 1; i <= dev->caps.num_ports; i++) {
+               stype[i - 1] = 0;
+               if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
+                   dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
+                       err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]);
+                       if (err)
+                               stype[i - 1] = defaults[i - 1];
+               } else {
+                       stype[i - 1] = defaults[i - 1];
+               }
+       }
+
+       /*
+        * Adjust port configuration:
+        * If port 1 sensed nothing and port 2 is IB, set both as IB
+        * If port 2 sensed nothing and port 1 is Eth, set both as Eth
+        */
+       if (stype[0] == MLX4_PORT_TYPE_ETH) {
+               for (i = 1; i < dev->caps.num_ports; i++)
+                       stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
+       }
+       if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
+               for (i = 0; i < dev->caps.num_ports - 1; i++)
+                       stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
+       }
+
+       /*
+        * If sensed nothing, remain in current configuration.
+        */
+       for (i = 0; i < dev->caps.num_ports; i++)
+               stype[i] = stype[i] ? stype[i] : defaults[i];
+}
+
+static void mlx4_sense_port(struct work_struct *work)
+{
+       struct delayed_work *delay = to_delayed_work(work);
+       struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
+                                               sense_poll);
+       struct mlx4_dev *dev = sense->dev;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       enum mlx4_port_type stype[MLX4_MAX_PORTS];
+
+       mutex_lock(&priv->port_mutex);
+       mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);
+
+       if (mlx4_check_port_params(dev, stype))
+               goto sense_again;
+
+       if (mlx4_change_port_types(dev, stype))
+               mlx4_err(dev, "Failed to change port_types\n");
+
+sense_again:
+       mutex_unlock(&priv->port_mutex);
+       queue_delayed_work(mlx4_wq, &sense->sense_poll,
+                          round_jiffies_relative(MLX4_SENSE_RANGE));
+}
+
+void mlx4_start_sense(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_sense *sense = &priv->sense;
+
+       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
+               return;
+
+       queue_delayed_work(mlx4_wq, &sense->sense_poll,
+                          round_jiffies_relative(MLX4_SENSE_RANGE));
+}
+
+void mlx4_stop_sense(struct mlx4_dev *dev)
+{
+       cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
+}
+
+void mlx4_sense_init(struct mlx4_dev *dev)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_sense *sense = &priv->sense;
+       int port;
+
+       sense->dev = dev;
+       for (port = 1; port <= dev->caps.num_ports; port++)
+               sense->do_sense_port[port] = 1;
+
+       INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
new file mode 100644 (file)
index 0000000..3b07b80
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx4/cmd.h>
+#include <linux/gfp.h>
+
+#include "mlx4.h"
+#include "icm.h"
+
+struct mlx4_srq_context {
+       __be32                  state_logsize_srqn;
+       u8                      logstride;
+       u8                      reserved1[3];
+       u8                      pg_offset;
+       u8                      reserved2[3];
+       u32                     reserved3;
+       u8                      log_page_size;
+       u8                      reserved4[2];
+       u8                      mtt_base_addr_h;
+       __be32                  mtt_base_addr_l;
+       __be32                  pd;
+       __be16                  limit_watermark;
+       __be16                  wqe_cnt;
+       u16                     reserved5;
+       __be16                  wqe_counter;
+       u32                     reserved6;
+       __be64                  db_rec_addr;
+};
+
+void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
+{
+       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+       struct mlx4_srq *srq;
+
+       spin_lock(&srq_table->lock);
+
+       srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
+       if (srq)
+               atomic_inc(&srq->refcount);
+
+       spin_unlock(&srq_table->lock);
+
+       if (!srq) {
+               mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
+               return;
+       }
+
+       srq->event(srq, event_type);
+
+       if (atomic_dec_and_test(&srq->refcount))
+               complete(&srq->free);
+}
+
+static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+                         int srq_num)
+{
+       return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
+                       MLX4_CMD_TIME_CLASS_A);
+}
+
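+/* A NULL mailbox sets the op_modifier to 1, which tells the firmware not
+ * to copy the SRQ context back out (the SRQ is simply being destroyed).
+ */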
+static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+                         int srq_num)
+{
+       return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
+                           mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
+                           MLX4_CMD_TIME_CLASS_A);
+}
+
+static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
+{
+       return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
+                       MLX4_CMD_TIME_CLASS_B);
+}
+
+static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+                         int srq_num)
+{
+       return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
+                           MLX4_CMD_TIME_CLASS_A);
+}
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
+                  u64 db_rec, struct mlx4_srq *srq)
+{
+       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_srq_context *srq_context;
+       u64 mtt_addr;
+       int err;
+
+       srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+       if (srq->srqn == -1)
+               return -ENOMEM;
+
+       err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+       if (err)
+               goto err_out;
+
+       err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
+       if (err)
+               goto err_put;
+
+       spin_lock_irq(&srq_table->lock);
+       err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
+       spin_unlock_irq(&srq_table->lock);
+       if (err)
+               goto err_cmpt_put;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               goto err_radix;
+       }
+
+       srq_context = mailbox->buf;
+       memset(srq_context, 0, sizeof *srq_context);
+
+       srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
+                                                     srq->srqn);
+       srq_context->logstride          = srq->wqe_shift - 4;
+       srq_context->log_page_size      = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
+
+       mtt_addr = mlx4_mtt_addr(dev, mtt);
+       srq_context->mtt_base_addr_h    = mtt_addr >> 32;
+       srq_context->mtt_base_addr_l    = cpu_to_be32(mtt_addr & 0xffffffff);
+       srq_context->pd                 = cpu_to_be32(pdn);
+       srq_context->db_rec_addr        = cpu_to_be64(db_rec);
+
+       err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       if (err)
+               goto err_radix;
+
+       atomic_set(&srq->refcount, 1);
+       init_completion(&srq->free);
+
+       return 0;
+
+err_radix:
+       spin_lock_irq(&srq_table->lock);
+       radix_tree_delete(&srq_table->tree, srq->srqn);
+       spin_unlock_irq(&srq_table->lock);
+
+err_cmpt_put:
+       mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
+
+err_put:
+       mlx4_table_put(dev, &srq_table->table, srq->srqn);
+
+err_out:
+       mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
+
+void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
+{
+       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+       int err;
+
+       err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
+       if (err)
+               mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);
+
+       spin_lock_irq(&srq_table->lock);
+       radix_tree_delete(&srq_table->tree, srq->srqn);
+       spin_unlock_irq(&srq_table->lock);
+
+       if (atomic_dec_and_test(&srq->refcount))
+               complete(&srq->free);
+       wait_for_completion(&srq->free);
+
+       mlx4_table_put(dev, &srq_table->table, srq->srqn);
+       mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_free);
+
+int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark)
+{
+       return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_arm);
+
+int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_srq_context *srq_context;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       srq_context = mailbox->buf;
+
+       err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
+       if (err)
+               goto err_out;
+       *limit_watermark = be16_to_cpu(srq_context->limit_watermark);
+
+err_out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_query);
+
+int mlx4_init_srq_table(struct mlx4_dev *dev)
+{
+       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+       int err;
+
+       spin_lock_init(&srq_table->lock);
+       INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
+
+       err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
+                              dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
+{
+       mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
+}
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
deleted file mode 100644 (file)
index d1aa45a..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-obj-$(CONFIG_MLX4_CORE)                += mlx4_core.o
-
-mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
-               mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
-
-obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
-
-mlx4_en-y :=   en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
-               en_resources.o en_netdev.o en_selftest.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
deleted file mode 100644 (file)
index 116cae3..0000000
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
- * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-
-#include "mlx4.h"
-
-u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
-{
-       u32 obj;
-
-       spin_lock(&bitmap->lock);
-
-       obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
-       if (obj >= bitmap->max) {
-               bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
-                               & bitmap->mask;
-               obj = find_first_zero_bit(bitmap->table, bitmap->max);
-       }
-
-       if (obj < bitmap->max) {
-               set_bit(obj, bitmap->table);
-               bitmap->last = (obj + 1);
-               if (bitmap->last == bitmap->max)
-                       bitmap->last = 0;
-               obj |= bitmap->top;
-       } else
-               obj = -1;
-
-       if (obj != -1)
-               --bitmap->avail;
-
-       spin_unlock(&bitmap->lock);
-
-       return obj;
-}
-
-void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
-{
-       mlx4_bitmap_free_range(bitmap, obj, 1);
-}
-
-u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
-{
-       u32 obj;
-
-       if (likely(cnt == 1 && align == 1))
-               return mlx4_bitmap_alloc(bitmap);
-
-       spin_lock(&bitmap->lock);
-
-       obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
-                               bitmap->last, cnt, align - 1);
-       if (obj >= bitmap->max) {
-               bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
-                               & bitmap->mask;
-               obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
-                                               0, cnt, align - 1);
-       }
-
-       if (obj < bitmap->max) {
-               bitmap_set(bitmap->table, obj, cnt);
-               if (obj == bitmap->last) {
-                       bitmap->last = (obj + cnt);
-                       if (bitmap->last >= bitmap->max)
-                               bitmap->last = 0;
-               }
-               obj |= bitmap->top;
-       } else
-               obj = -1;
-
-       if (obj != -1)
-               bitmap->avail -= cnt;
-
-       spin_unlock(&bitmap->lock);
-
-       return obj;
-}
-
-u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
-{
-       return bitmap->avail;
-}
-
-void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
-{
-       obj &= bitmap->max + bitmap->reserved_top - 1;
-
-       spin_lock(&bitmap->lock);
-       bitmap_clear(bitmap->table, obj, cnt);
-       bitmap->last = min(bitmap->last, obj);
-       bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
-                       & bitmap->mask;
-       bitmap->avail += cnt;
-       spin_unlock(&bitmap->lock);
-}
-
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
-                    u32 reserved_bot, u32 reserved_top)
-{
-       /* num must be a power of 2 */
-       if (num != roundup_pow_of_two(num))
-               return -EINVAL;
-
-       bitmap->last = 0;
-       bitmap->top  = 0;
-       bitmap->max  = num - reserved_top;
-       bitmap->mask = mask;
-       bitmap->reserved_top = reserved_top;
-       bitmap->avail = num - reserved_top - reserved_bot;
-       spin_lock_init(&bitmap->lock);
-       bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
-                               sizeof (long), GFP_KERNEL);
-       if (!bitmap->table)
-               return -ENOMEM;
-
-       bitmap_set(bitmap->table, 0, reserved_bot);
-
-       return 0;
-}
-
-void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
-{
-       kfree(bitmap->table);
-}
-
-/*
- * Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0.  If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
- */
-
-int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
-                  struct mlx4_buf *buf)
-{
-       dma_addr_t t;
-
-       if (size <= max_direct) {
-               buf->nbufs        = 1;
-               buf->npages       = 1;
-               buf->page_shift   = get_order(size) + PAGE_SHIFT;
-               buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
-                                                      size, &t, GFP_KERNEL);
-               if (!buf->direct.buf)
-                       return -ENOMEM;
-
-               buf->direct.map = t;
-
-               while (t & ((1 << buf->page_shift) - 1)) {
-                       --buf->page_shift;
-                       buf->npages *= 2;
-               }
-
-               memset(buf->direct.buf, 0, size);
-       } else {
-               int i;
-
-               buf->direct.buf  = NULL;
-               buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-               buf->npages      = buf->nbufs;
-               buf->page_shift  = PAGE_SHIFT;
-               buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
-                                          GFP_KERNEL);
-               if (!buf->page_list)
-                       return -ENOMEM;
-
-               for (i = 0; i < buf->nbufs; ++i) {
-                       buf->page_list[i].buf =
-                               dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                  &t, GFP_KERNEL);
-                       if (!buf->page_list[i].buf)
-                               goto err_free;
-
-                       buf->page_list[i].map = t;
-
-                       memset(buf->page_list[i].buf, 0, PAGE_SIZE);
-               }
-
-               if (BITS_PER_LONG == 64) {
-                       struct page **pages;
-                       pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
-                       if (!pages)
-                               goto err_free;
-                       for (i = 0; i < buf->nbufs; ++i)
-                               pages[i] = virt_to_page(buf->page_list[i].buf);
-                       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
-                       kfree(pages);
-                       if (!buf->direct.buf)
-                               goto err_free;
-               }
-       }
-
-       return 0;
-
-err_free:
-       mlx4_buf_free(dev, size, buf);
-
-       return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
-
-void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
-{
-       int i;
-
-       if (buf->nbufs == 1)
-               dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
-                                 buf->direct.map);
-       else {
-               if (BITS_PER_LONG == 64 && buf->direct.buf)
-                       vunmap(buf->direct.buf);
-
-               for (i = 0; i < buf->nbufs; ++i)
-                       if (buf->page_list[i].buf)
-                               dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                 buf->page_list[i].buf,
-                                                 buf->page_list[i].map);
-               kfree(buf->page_list);
-       }
-}
-EXPORT_SYMBOL_GPL(mlx4_buf_free);
-
-static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
-{
-       struct mlx4_db_pgdir *pgdir;
-
-       pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
-       if (!pgdir)
-               return NULL;
-
-       bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
-       pgdir->bits[0] = pgdir->order0;
-       pgdir->bits[1] = pgdir->order1;
-       pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
-                                           &pgdir->db_dma, GFP_KERNEL);
-       if (!pgdir->db_page) {
-               kfree(pgdir);
-               return NULL;
-       }
-
-       return pgdir;
-}
-
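-/*
- * A minimal note on the scheme below: doorbell records are carved out
- * of per-page bitmaps with a tiny two-level buddy allocator, where
- * bits[1] tracks free adjacent pairs of records and bits[0] tracks
- * free single records.
- */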
-static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
-                                   struct mlx4_db *db, int order)
-{
-       int o;
-       int i;
-
-       for (o = order; o <= 1; ++o) {
-               i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
-               if (i < MLX4_DB_PER_PAGE >> o)
-                       goto found;
-       }
-
-       return -ENOMEM;
-
-found:
-       clear_bit(i, pgdir->bits[o]);
-
-       i <<= o;
-
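-       /*
-        * If we split a higher-order slot to satisfy this request,
-        * return the unused buddy to the requested order's bitmap.
-        */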
-       if (o > order)
-               set_bit(i ^ 1, pgdir->bits[order]);
-
-       db->u.pgdir = pgdir;
-       db->index   = i;
-       db->db      = pgdir->db_page + db->index;
-       db->dma     = pgdir->db_dma  + db->index * 4;
-       db->order   = order;
-
-       return 0;
-}
-
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_db_pgdir *pgdir;
-       int ret = 0;
-
-       mutex_lock(&priv->pgdir_mutex);
-
-       list_for_each_entry(pgdir, &priv->pgdir_list, list)
-               if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
-                       goto out;
-
-       pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
-       if (!pgdir) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       list_add(&pgdir->list, &priv->pgdir_list);
-
-       /* This should never fail -- we just allocated an empty page: */
-       WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
-
-out:
-       mutex_unlock(&priv->pgdir_mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(mlx4_db_alloc);
-
-void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int o;
-       int i;
-
-       mutex_lock(&priv->pgdir_mutex);
-
-       o = db->order;
-       i = db->index;
-
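-       /*
-        * If the buddy record is also free, merge the pair back into a
-        * free order-1 slot before releasing the page.
-        */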
-       if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
-               clear_bit(i ^ 1, db->u.pgdir->order0);
-               ++o;
-       }
-       i >>= o;
-       set_bit(i, db->u.pgdir->bits[o]);
-
-       if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
-               dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
-                                 db->u.pgdir->db_page, db->u.pgdir->db_dma);
-               list_del(&db->u.pgdir->list);
-               kfree(db->u.pgdir);
-       }
-
-       mutex_unlock(&priv->pgdir_mutex);
-}
-EXPORT_SYMBOL_GPL(mlx4_db_free);
-
-int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
-                      int size, int max_direct)
-{
-       int err;
-
-       err = mlx4_db_alloc(dev, &wqres->db, 1);
-       if (err)
-               return err;
-
-       *wqres->db.db = 0;
-
-       err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
-       if (err)
-               goto err_db;
-
-       err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
-                           &wqres->mtt);
-       if (err)
-               goto err_buf;
-
-       err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
-       if (err)
-               goto err_mtt;
-
-       return 0;
-
-err_mtt:
-       mlx4_mtt_cleanup(dev, &wqres->mtt);
-err_buf:
-       mlx4_buf_free(dev, size, &wqres->buf);
-err_db:
-       mlx4_db_free(dev, &wqres->db);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
-
-void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
-                      int size)
-{
-       mlx4_mtt_cleanup(dev, &wqres->mtt);
-       mlx4_buf_free(dev, size, &wqres->buf);
-       mlx4_db_free(dev, &wqres->db);
-}
-EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
deleted file mode 100644 (file)
index 32f9471..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/workqueue.h>
-
-#include "mlx4.h"
-
-enum {
-       MLX4_CATAS_POLL_INTERVAL        = 5 * HZ,
-};
-
-static DEFINE_SPINLOCK(catas_lock);
-
-static LIST_HEAD(catas_list);
-static struct work_struct catas_work;
-
-static int internal_err_reset = 1;
-module_param(internal_err_reset, int, 0644);
-MODULE_PARM_DESC(internal_err_reset,
-                "Reset device on internal errors if non-zero (default 1)");
-
-static void dump_err_buf(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       int i;
-
-       mlx4_err(dev, "Internal error detected:\n");
-       for (i = 0; i < priv->fw.catas_size; ++i)
-               mlx4_err(dev, "  buf[%02x]: %08x\n",
-                        i, swab32(readl(priv->catas_err.map + i)));
-}
-
-static void poll_catas(unsigned long dev_ptr)
-{
-       struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       if (readl(priv->catas_err.map)) {
-               dump_err_buf(dev);
-
-               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
-
-               if (internal_err_reset) {
-                       spin_lock(&catas_lock);
-                       list_add(&priv->catas_err.list, &catas_list);
-                       spin_unlock(&catas_lock);
-
-                       queue_work(mlx4_wq, &catas_work);
-               }
-       } else
-               mod_timer(&priv->catas_err.timer,
-                         round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
-}
-
-static void catas_reset(struct work_struct *work)
-{
-       struct mlx4_priv *priv, *tmppriv;
-       struct mlx4_dev *dev;
-
-       LIST_HEAD(tlist);
-       int ret;
-
-       spin_lock_irq(&catas_lock);
-       list_splice_init(&catas_list, &tlist);
-       spin_unlock_irq(&catas_lock);
-
-       list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
-               struct pci_dev *pdev = priv->dev.pdev;
-
-               ret = mlx4_restart_one(priv->dev.pdev);
-               /* 'priv' is no longer valid */

-               if (ret)
-                       pr_err("mlx4 %s: Reset failed (%d)\n",
-                              pci_name(pdev), ret);
-               else {
-                       dev  = pci_get_drvdata(pdev);
-                       mlx4_dbg(dev, "Reset succeeded\n");
-               }
-       }
-}
-
-void mlx4_start_catas_poll(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       phys_addr_t addr;
-
-       INIT_LIST_HEAD(&priv->catas_err.list);
-       init_timer(&priv->catas_err.timer);
-       priv->catas_err.map = NULL;
-
-       addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
-               priv->fw.catas_offset;
-
-       priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
-       if (!priv->catas_err.map) {
-               mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
-                         (unsigned long long) addr);
-               return;
-       }
-
-       priv->catas_err.timer.data     = (unsigned long) dev;
-       priv->catas_err.timer.function = poll_catas;
-       priv->catas_err.timer.expires  =
-               round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
-       add_timer(&priv->catas_err.timer);
-}
-
-void mlx4_stop_catas_poll(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       del_timer_sync(&priv->catas_err.timer);
-
-       if (priv->catas_err.map)
-               iounmap(priv->catas_err.map);
-
-       spin_lock_irq(&catas_lock);
-       list_del(&priv->catas_err.list);
-       spin_unlock_irq(&catas_lock);
-}
-
-void  __init mlx4_catas_init(void)
-{
-       INIT_WORK(&catas_work, catas_reset);
-}
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
deleted file mode 100644 (file)
index 23cee7b..0000000
+++ /dev/null
@@ -1,443 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-
-#include <linux/mlx4/cmd.h>
-
-#include <asm/io.h>
-
-#include "mlx4.h"
-
-#define CMD_POLL_TOKEN 0xffff
-
-enum {
-       /* command completed successfully: */
-       CMD_STAT_OK             = 0x00,
-       /* Internal error (such as a bus error) occurred while processing command: */
-       CMD_STAT_INTERNAL_ERR   = 0x01,
-       /* Operation/command not supported or opcode modifier not supported: */
-       CMD_STAT_BAD_OP         = 0x02,
-       /* Parameter not supported or parameter out of range: */
-       CMD_STAT_BAD_PARAM      = 0x03,
-       /* System not enabled or bad system state: */
-       CMD_STAT_BAD_SYS_STATE  = 0x04,
-       /* Attempt to access reserved or unallocated resource: */
-       CMD_STAT_BAD_RESOURCE   = 0x05,
-       /* Requested resource is currently executing a command, or is otherwise busy: */
-       CMD_STAT_RESOURCE_BUSY  = 0x06,
-       /* Required capability exceeds device limits: */
-       CMD_STAT_EXCEED_LIM     = 0x08,
-       /* Resource is not in the appropriate state or ownership: */
-       CMD_STAT_BAD_RES_STATE  = 0x09,
-       /* Index out of range: */
-       CMD_STAT_BAD_INDEX      = 0x0a,
-       /* FW image corrupted: */
-       CMD_STAT_BAD_NVMEM      = 0x0b,
-       /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
-       CMD_STAT_ICM_ERROR      = 0x0c,
-       /* Attempt to modify a QP/EE which is not in the presumed state: */
-       CMD_STAT_BAD_QP_STATE   = 0x10,
-       /* Bad segment parameters (Address/Size): */
-       CMD_STAT_BAD_SEG_PARAM  = 0x20,
-       /* Memory Region has Memory Windows bound to it: */
-       CMD_STAT_REG_BOUND      = 0x21,
-       /* HCA local attached memory not present: */
-       CMD_STAT_LAM_NOT_PRE    = 0x22,
-       /* Bad management packet (silently discarded): */
-       CMD_STAT_BAD_PKT        = 0x30,
-       /* More outstanding CQEs in CQ than new CQ size: */
-       CMD_STAT_BAD_SIZE       = 0x40,
-       /* Multi Function device support required: */
-       CMD_STAT_MULTI_FUNC_REQ = 0x50,
-};
-
-enum {
-       HCR_IN_PARAM_OFFSET     = 0x00,
-       HCR_IN_MODIFIER_OFFSET  = 0x08,
-       HCR_OUT_PARAM_OFFSET    = 0x0c,
-       HCR_TOKEN_OFFSET        = 0x14,
-       HCR_STATUS_OFFSET       = 0x18,
-
-       HCR_OPMOD_SHIFT         = 12,
-       HCR_T_BIT               = 21,
-       HCR_E_BIT               = 22,
-       HCR_GO_BIT              = 23
-};
-
-enum {
-       GO_BIT_TIMEOUT_MSECS    = 10000
-};
-
-struct mlx4_cmd_context {
-       struct completion       done;
-       int                     result;
-       int                     next;
-       u64                     out_param;
-       u16                     token;
-};
-
-static int mlx4_status_to_errno(u8 status)
-{
-       static const int trans_table[] = {
-               [CMD_STAT_INTERNAL_ERR]   = -EIO,
-               [CMD_STAT_BAD_OP]         = -EPERM,
-               [CMD_STAT_BAD_PARAM]      = -EINVAL,
-               [CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
-               [CMD_STAT_BAD_RESOURCE]   = -EBADF,
-               [CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
-               [CMD_STAT_EXCEED_LIM]     = -ENOMEM,
-               [CMD_STAT_BAD_RES_STATE]  = -EBADF,
-               [CMD_STAT_BAD_INDEX]      = -EBADF,
-               [CMD_STAT_BAD_NVMEM]      = -EFAULT,
-               [CMD_STAT_ICM_ERROR]      = -ENFILE,
-               [CMD_STAT_BAD_QP_STATE]   = -EINVAL,
-               [CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
-               [CMD_STAT_REG_BOUND]      = -EBUSY,
-               [CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
-               [CMD_STAT_BAD_PKT]        = -EINVAL,
-               [CMD_STAT_BAD_SIZE]       = -ENOMEM,
-               [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
-       };
-
-       if (status >= ARRAY_SIZE(trans_table) ||
-           (status != CMD_STAT_OK && trans_table[status] == 0))
-               return -EIO;
-
-       return trans_table[status];
-}
-
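-/*
- * The HCR is busy while its GO bit is still set; the toggle bit is
- * checked as well to avoid trusting a stale status word left over
- * from an earlier command.
- */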
-static int cmd_pending(struct mlx4_dev *dev)
-{
-       u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
-
-       return (status & swab32(1 << HCR_GO_BIT)) ||
-               (mlx4_priv(dev)->cmd.toggle ==
-                !!(status & swab32(1 << HCR_T_BIT)));
-}
-
-static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
-                        u32 in_modifier, u8 op_modifier, u16 op, u16 token,
-                        int event)
-{
-       struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
-       u32 __iomem *hcr = cmd->hcr;
-       int ret = -EAGAIN;
-       unsigned long end;
-
-       mutex_lock(&cmd->hcr_mutex);
-
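-       /*
-        * Only event-driven posts wait (up to GO_BIT_TIMEOUT_MSECS) for
-        * the HCR to become free; polled posts bail out immediately if
-        * it is still busy.
-        */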
-       end = jiffies;
-       if (event)
-               end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
-
-       while (cmd_pending(dev)) {
-               if (time_after_eq(jiffies, end))
-                       goto out;
-               cond_resched();
-       }
-
-       /*
-        * We use writel (instead of something like memcpy_toio)
-        * because writes of less than 32 bits to the HCR don't work
-        * (and some architectures such as ia64 implement memcpy_toio
-        * in terms of writeb).
-        */
-       __raw_writel((__force u32) cpu_to_be32(in_param >> 32),           hcr + 0);
-       __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
-       __raw_writel((__force u32) cpu_to_be32(in_modifier),              hcr + 2);
-       __raw_writel((__force u32) cpu_to_be32(out_param >> 32),          hcr + 3);
-       __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
-       __raw_writel((__force u32) cpu_to_be32(token << 16),              hcr + 5);
-
-       /* __raw_writel may not order writes. */
-       wmb();
-
-       __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
-                                              (cmd->toggle << HCR_T_BIT)       |
-                                              (event ? (1 << HCR_E_BIT) : 0)   |
-                                              (op_modifier << HCR_OPMOD_SHIFT) |
-                                              op),                       hcr + 6);
-
-       /*
-        * Make sure that our HCR writes don't get mixed in with
-        * writes from another CPU starting a FW command.
-        */
-       mmiowb();
-
-       cmd->toggle = cmd->toggle ^ 1;
-
-       ret = 0;
-
-out:
-       mutex_unlock(&cmd->hcr_mutex);
-       return ret;
-}
-
-static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
-                        int out_is_imm, u32 in_modifier, u8 op_modifier,
-                        u16 op, unsigned long timeout)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       void __iomem *hcr = priv->cmd.hcr;
-       int err = 0;
-       unsigned long end;
-
-       down(&priv->cmd.poll_sem);
-
-       err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
-                           in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
-       if (err)
-               goto out;
-
-       end = msecs_to_jiffies(timeout) + jiffies;
-       while (cmd_pending(dev) && time_before(jiffies, end))
-               cond_resched();
-
-       if (cmd_pending(dev)) {
-               err = -ETIMEDOUT;
-               goto out;
-       }
-
-       if (out_is_imm)
-               *out_param =
-                       (u64) be32_to_cpu((__force __be32)
-                                         __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
-                       (u64) be32_to_cpu((__force __be32)
-                                         __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
-
-       err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
-                                              __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
-
-out:
-       up(&priv->cmd.poll_sem);
-       return err;
-}
-
-void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_cmd_context *context =
-               &priv->cmd.context[token & priv->cmd.token_mask];
-
-       /* previously timed out command completing at long last */
-       if (token != context->token)
-               return;
-
-       context->result    = mlx4_status_to_errno(status);
-       context->out_param = out_param;
-
-       complete(&context->done);
-}
-
-static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
-                        int out_is_imm, u32 in_modifier, u8 op_modifier,
-                        u16 op, unsigned long timeout)
-{
-       struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
-       struct mlx4_cmd_context *context;
-       int err = 0;
-
-       down(&cmd->event_sem);
-
-       spin_lock(&cmd->context_lock);
-       BUG_ON(cmd->free_head < 0);
-       context = &cmd->context[cmd->free_head];
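-       /*
-        * Advance the high bits of the token each time a context slot
-        * is reused, so a late completion of a timed-out command can be
-        * told apart from the slot's current user (see mlx4_cmd_event).
-        */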
-       context->token += cmd->token_mask + 1;
-       cmd->free_head = context->next;
-       spin_unlock(&cmd->context_lock);
-
-       init_completion(&context->done);
-
-       mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
-                     in_modifier, op_modifier, op, context->token, 1);
-
-       if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
-               err = -EBUSY;
-               goto out;
-       }
-
-       err = context->result;
-       if (err)
-               goto out;
-
-       if (out_is_imm)
-               *out_param = context->out_param;
-
-out:
-       spin_lock(&cmd->context_lock);
-       context->next = cmd->free_head;
-       cmd->free_head = context - cmd->context;
-       spin_unlock(&cmd->context_lock);
-
-       up(&cmd->event_sem);
-       return err;
-}
-
-int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
-              int out_is_imm, u32 in_modifier, u8 op_modifier,
-              u16 op, unsigned long timeout)
-{
-       if (mlx4_priv(dev)->cmd.use_events)
-               return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
-                                    in_modifier, op_modifier, op, timeout);
-       else
-               return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
-                                    in_modifier, op_modifier, op, timeout);
-}
-EXPORT_SYMBOL_GPL(__mlx4_cmd);
-
-int mlx4_cmd_init(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       mutex_init(&priv->cmd.hcr_mutex);
-       sema_init(&priv->cmd.poll_sem, 1);
-       priv->cmd.use_events = 0;
-       priv->cmd.toggle     = 1;
-
-       priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
-                               MLX4_HCR_SIZE);
-       if (!priv->cmd.hcr) {
-               mlx4_err(dev, "Couldn't map command register\n");
-               return -ENOMEM;
-       }
-
-       priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
-                                        MLX4_MAILBOX_SIZE,
-                                        MLX4_MAILBOX_SIZE, 0);
-       if (!priv->cmd.pool) {
-               iounmap(priv->cmd.hcr);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-void mlx4_cmd_cleanup(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       pci_pool_destroy(priv->cmd.pool);
-       iounmap(priv->cmd.hcr);
-}
-
-/*
- * Switch to using events to issue FW commands (can only be called
- * after event queue for command events has been initialized).
- */
-int mlx4_cmd_use_events(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int i;
-
-       priv->cmd.context = kmalloc(priv->cmd.max_cmds *
-                                  sizeof (struct mlx4_cmd_context),
-                                  GFP_KERNEL);
-       if (!priv->cmd.context)
-               return -ENOMEM;
-
-       for (i = 0; i < priv->cmd.max_cmds; ++i) {
-               priv->cmd.context[i].token = i;
-               priv->cmd.context[i].next  = i + 1;
-       }
-
-       priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
-       priv->cmd.free_head = 0;
-
-       sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
-       spin_lock_init(&priv->cmd.context_lock);
-
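-       /*
-        * Round max_cmds up to the next power of two and use one less
-        * than that as the mask, so the low bits of a token always
-        * recover the context array index.
-        */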
-       for (priv->cmd.token_mask = 1;
-            priv->cmd.token_mask < priv->cmd.max_cmds;
-            priv->cmd.token_mask <<= 1)
-               ; /* nothing */
-       --priv->cmd.token_mask;
-
-       priv->cmd.use_events = 1;
-
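-       /* Hold poll_sem so no polled command can run while events are in use. */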
-       down(&priv->cmd.poll_sem);
-
-       return 0;
-}
-
-/*
- * Switch back to polling (used when shutting down the device)
- */
-void mlx4_cmd_use_polling(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int i;
-
-       priv->cmd.use_events = 0;
-
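-       /* Acquire every event slot to wait out commands still using events. */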
-       for (i = 0; i < priv->cmd.max_cmds; ++i)
-               down(&priv->cmd.event_sem);
-
-       kfree(priv->cmd.context);
-
-       up(&priv->cmd.poll_sem);
-}
-
-struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-
-       mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
-       if (!mailbox)
-               return ERR_PTR(-ENOMEM);
-
-       mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
-                                     &mailbox->dma);
-       if (!mailbox->buf) {
-               kfree(mailbox);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       return mailbox;
-}
-EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
-
-void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
-{
-       if (!mailbox)
-               return;
-
-       pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
-       kfree(mailbox);
-}
-EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
deleted file mode 100644 (file)
index bd8ef9f..0000000
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/hardirq.h>
-#include <linux/gfp.h>
-
-#include <linux/mlx4/cmd.h>
-#include <linux/mlx4/cq.h>
-
-#include "mlx4.h"
-#include "icm.h"
-
-struct mlx4_cq_context {
-       __be32                  flags;
-       u16                     reserved1[3];
-       __be16                  page_offset;
-       __be32                  logsize_usrpage;
-       __be16                  cq_period;
-       __be16                  cq_max_count;
-       u8                      reserved2[3];
-       u8                      comp_eqn;
-       u8                      log_page_size;
-       u8                      reserved3[2];
-       u8                      mtt_base_addr_h;
-       __be32                  mtt_base_addr_l;
-       __be32                  last_notified_index;
-       __be32                  solicit_producer_index;
-       __be32                  consumer_index;
-       __be32                  producer_index;
-       u32                     reserved4[2];
-       __be64                  db_rec_addr;
-};
-
-#define MLX4_CQ_STATUS_OK              ( 0 << 28)
-#define MLX4_CQ_STATUS_OVERFLOW                ( 9 << 28)
-#define MLX4_CQ_STATUS_WRITE_FAIL      (10 << 28)
-#define MLX4_CQ_FLAG_CC                        ( 1 << 18)
-#define MLX4_CQ_FLAG_OI                        ( 1 << 17)
-#define MLX4_CQ_STATE_ARMED            ( 9 <<  8)
-#define MLX4_CQ_STATE_ARMED_SOL                ( 6 <<  8)
-#define MLX4_EQ_STATE_FIRED            (10 <<  8)
-
-void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
-{
-       struct mlx4_cq *cq;
-
-       cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
-                              cqn & (dev->caps.num_cqs - 1));
-       if (!cq) {
-               mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
-               return;
-       }
-
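-       /* Completions advance the arm sequence number, which the consumer
-        * uses when re-arming the CQ. */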
-       ++cq->arm_sn;
-
-       cq->comp(cq);
-}
-
-void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
-{
-       struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
-       struct mlx4_cq *cq;
-
-       spin_lock(&cq_table->lock);
-
-       cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
-       if (cq)
-               atomic_inc(&cq->refcount);
-
-       spin_unlock(&cq_table->lock);
-
-       if (!cq) {
-               mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
-               return;
-       }
-
-       cq->event(cq, event_type);
-
-       if (atomic_dec_and_test(&cq->refcount))
-               complete(&cq->free);
-}
-
-static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-                        int cq_num)
-{
-       return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
-                       MLX4_CMD_TIME_CLASS_A);
-}
-
-static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-                        int cq_num, u32 opmod)
-{
-       return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
-                       MLX4_CMD_TIME_CLASS_A);
-}
-
-static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-                        int cq_num)
-{
-       return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
-                           mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
-                           MLX4_CMD_TIME_CLASS_A);
-}
-
-int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
-                  u16 count, u16 period)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_cq_context *cq_context;
-       int err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-
-       cq_context = mailbox->buf;
-       memset(cq_context, 0, sizeof *cq_context);
-
-       cq_context->cq_max_count = cpu_to_be16(count);
-       cq_context->cq_period    = cpu_to_be16(period);
-
-       err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
-
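-               /*
-                * Reduce the page size until the DMA address is
-                * naturally aligned to it, doubling the page count
-                * at each step.
-                */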
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_cq_modify);
-
-int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
-                  int entries, struct mlx4_mtt *mtt)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_cq_context *cq_context;
-       u64 mtt_addr;
-       int err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-
-       cq_context = mailbox->buf;
-       memset(cq_context, 0, sizeof *cq_context);
-
-       cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
-       cq_context->log_page_size   = mtt->page_shift - 12;
-       mtt_addr = mlx4_mtt_addr(dev, mtt);
-       cq_context->mtt_base_addr_h = mtt_addr >> 32;
-       cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
-
-       err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
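-               /*
-                * On 64-bit, also vmap() the pages so the buffer can be
-                * reached through a single virtually contiguous mapping.
-                */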
-}
-EXPORT_SYMBOL_GPL(mlx4_cq_resize);
-
-int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
-                 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
-                 unsigned vector, int collapsed)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_cq_table *cq_table = &priv->cq_table;
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_cq_context *cq_context;
-       u64 mtt_addr;
-       int err;
-
-       if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
-               return -EINVAL;
-
-       cq->vector = vector;
-
-       cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
-       if (cq->cqn == -1)
-               return -ENOMEM;
-
-       err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
-       if (err)
-               goto err_out;
-
-       err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
-       if (err)
-               goto err_put;
-
-       spin_lock_irq(&cq_table->lock);
-       err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
-       spin_unlock_irq(&cq_table->lock);
-       if (err)
-               goto err_cmpt_put;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox)) {
-               err = PTR_ERR(mailbox);
-               goto err_radix;
-       }
-
-       cq_context = mailbox->buf;
-       memset(cq_context, 0, sizeof *cq_context);
-
-       cq_context->flags           = cpu_to_be32(!!collapsed << 18);
-       cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
-       cq_context->comp_eqn        = priv->eq_table.eq[vector].eqn;
-       cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
-
-       mtt_addr = mlx4_mtt_addr(dev, mtt);
-       cq_context->mtt_base_addr_h = mtt_addr >> 32;
-       cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
-       cq_context->db_rec_addr     = cpu_to_be64(db_rec);
-
-       err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       if (err)
-               goto err_radix;
-
-       cq->cons_index = 0;
-       cq->arm_sn     = 1;
-       cq->uar        = uar;
-       atomic_set(&cq->refcount, 1);
-       init_completion(&cq->free);
-
-       return 0;
-
-err_radix:
-       spin_lock_irq(&cq_table->lock);
-       radix_tree_delete(&cq_table->tree, cq->cqn);
-       spin_unlock_irq(&cq_table->lock);
-
-err_cmpt_put:
-       mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
-
-err_put:
-       mlx4_table_put(dev, &cq_table->table, cq->cqn);
-
-err_out:
-       mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_cq_alloc);
-
-void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_cq_table *cq_table = &priv->cq_table;
-       int err;
-
-       err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
-       if (err)
-               mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
-
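-       /* Wait for any completion handler still running on this CQ's vector. */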
-       synchronize_irq(priv->eq_table.eq[cq->vector].irq);
-
-       spin_lock_irq(&cq_table->lock);
-       radix_tree_delete(&cq_table->tree, cq->cqn);
-       spin_unlock_irq(&cq_table->lock);
-
-       if (atomic_dec_and_test(&cq->refcount))
-               complete(&cq->free);
-       wait_for_completion(&cq->free);
-
-       mlx4_table_put(dev, &cq_table->table, cq->cqn);
-       mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
-}
-EXPORT_SYMBOL_GPL(mlx4_cq_free);
-
-int mlx4_init_cq_table(struct mlx4_dev *dev)
-{
-       struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
-       int err;
-
-       spin_lock_init(&cq_table->lock);
-       INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
-
-       err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
-                              dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
-       if (err)
-               return err;
-
-       return 0;
-}
-
-void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
-{
-       /* Nothing to do to clean up radix_tree */
-       mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
-}
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
deleted file mode 100644 (file)
index ec4b6d0..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx4/cq.h>
-#include <linux/mlx4/qp.h>
-#include <linux/mlx4/cmd.h>
-
-#include "mlx4_en.h"
-
-static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
-{
-       return;
-}
-
-
-int mlx4_en_create_cq(struct mlx4_en_priv *priv,
-                     struct mlx4_en_cq *cq,
-                     int entries, int ring, enum cq_type mode)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int err;
-
-       cq->size = entries;
-       if (mode == RX)
-               cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
-       else
-               cq->buf_size = sizeof(struct mlx4_cqe);
-
-       cq->ring = ring;
-       cq->is_tx = mode;
-       spin_lock_init(&cq->lock);
-
-       err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
-                               cq->buf_size, 2 * PAGE_SIZE);
-       if (err)
-               return err;
-
-       err = mlx4_en_map_buffer(&cq->wqres.buf);
-       if (err)
-               mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-       else
-               cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
-
-       return err;
-}
-
-int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int err = 0;
-       char name[25];
-
-       cq->dev = mdev->pndev[priv->port];
-       cq->mcq.set_ci_db  = cq->wqres.db.db;
-       cq->mcq.arm_db     = cq->wqres.db.db + 1;
-       *cq->mcq.set_ci_db = 0;
-       *cq->mcq.arm_db    = 0;
-       memset(cq->buf, 0, cq->buf_size);
-
-       if (cq->is_tx == RX) {
-               if (mdev->dev->caps.comp_pool) {
-                       if (!cq->vector) {
-                               sprintf(name, "%s-rx-%d", priv->dev->name, cq->ring);
-                               if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
-                                       cq->vector = (cq->ring + 1 + priv->port) %
-                                               mdev->dev->caps.num_comp_vectors;
-                                       mlx4_warn(mdev, "Failed assigning an EQ to "
-                                                 "%s-rx-%d, falling back to legacy EQs\n",
-                                                 priv->dev->name, cq->ring);
-                               }
-                       }
-               } else {
-                       cq->vector = (cq->ring + 1 + priv->port) %
-                               mdev->dev->caps.num_comp_vectors;
-               }
-       } else {
-               if (!cq->vector || !mdev->dev->caps.comp_pool) {
-                       /* Fall back to the legacy pool in case of error */
-                       cq->vector   = 0;
-               }
-       }
-
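-       /* The RX ring's actual size is only known after activation; size the CQ to match. */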
-       if (!cq->is_tx)
-               cq->size = priv->rx_ring[cq->ring].actual_size;
-
-       err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
-                           cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
-       if (err)
-               return err;
-
-       cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
-       cq->mcq.event = mlx4_en_cq_event;
-
-       if (cq->is_tx) {
-               init_timer(&cq->timer);
-               cq->timer.function = mlx4_en_poll_tx_cq;
-               cq->timer.data = (unsigned long) cq;
-       } else {
-               netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
-               napi_enable(&cq->napi);
-       }
-
-       return 0;
-}
-
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
-                       bool reserve_vectors)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-
-       mlx4_en_unmap_buffer(&cq->wqres.buf);
-       mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-       if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors)
-               mlx4_release_eq(priv->mdev->dev, cq->vector);
-       cq->buf_size = 0;
-       cq->buf = NULL;
-}
-
-void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-
-       if (cq->is_tx)
-               del_timer(&cq->timer);
-       else {
-               napi_disable(&cq->napi);
-               netif_napi_del(&cq->napi);
-       }
-
-       mlx4_cq_free(mdev->dev, &cq->mcq);
-}
-
-/* Set rx cq moderation parameters */
-int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
-{
-       return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
-                             cq->moder_cnt, cq->moder_time);
-}
-
-int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
-{
-       mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
-                   &priv->mdev->uar_lock);
-
-       return 0;
-}
-
-
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
deleted file mode 100644 (file)
index eb09625..0000000
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/ethtool.h>
-#include <linux/netdevice.h>
-
-#include "mlx4_en.h"
-#include "en_port.h"
-
-
-static void
-mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-
-       strncpy(drvinfo->driver, DRV_NAME, 32);
-       strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
-       sprintf(drvinfo->fw_version, "%d.%d.%d",
-               (u16) (mdev->dev->caps.fw_ver >> 32),
-               (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
-               (u16) (mdev->dev->caps.fw_ver & 0xffff));
-       strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
-       drvinfo->n_stats = 0;
-       drvinfo->regdump_len = 0;
-       drvinfo->eedump_len = 0;
-}
-
-static const char main_strings[][ETH_GSTRING_LEN] = {
-       "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
-       "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
-       "rx_length_errors", "rx_over_errors", "rx_crc_errors",
-       "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
-       "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
-       "tx_heartbeat_errors", "tx_window_errors",
-
-       /* port statistics */
-       "tso_packets",
-       "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
-       "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
-
-       /* packet statistics */
-       "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
-       "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
-       "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
-       "tx_prio_6", "tx_prio_7",
-};
-#define NUM_MAIN_STATS 21
-#define NUM_ALL_STATS  (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
-
-static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
-       "Interupt Test",
-       "Link Test",
-       "Speed Test",
-       "Register Test",
-       "Loopback Test",
-};
-
-static u32 mlx4_en_get_msglevel(struct net_device *dev)
-{
-       return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
-}
-
-static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
-{
-       ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
-}
-
-static void mlx4_en_get_wol(struct net_device *netdev,
-                           struct ethtool_wolinfo *wol)
-{
-       struct mlx4_en_priv *priv = netdev_priv(netdev);
-       int err = 0;
-       u64 config = 0;
-
-       if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
-               wol->supported = 0;
-               wol->wolopts = 0;
-               return;
-       }
-
-       err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
-       if (err) {
-               en_err(priv, "Failed to get WoL information\n");
-               return;
-       }
-
-       if (config & MLX4_EN_WOL_MAGIC)
-               wol->supported = WAKE_MAGIC;
-       else
-               wol->supported = 0;
-
-       if (config & MLX4_EN_WOL_ENABLED)
-               wol->wolopts = WAKE_MAGIC;
-       else
-               wol->wolopts = 0;
-}
-
-static int mlx4_en_set_wol(struct net_device *netdev,
-                           struct ethtool_wolinfo *wol)
-{
-       struct mlx4_en_priv *priv = netdev_priv(netdev);
-       u64 config = 0;
-       int err = 0;
-
-       if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
-               return -EOPNOTSUPP;
-
-       if (wol->wolopts & ~WAKE_MAGIC)
-               return -EINVAL;
-
-       err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
-       if (err) {
-               en_err(priv, "Failed to get WoL info, unable to modify\n");
-               return err;
-       }
-
-       if (wol->wolopts & WAKE_MAGIC) {
-               config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
-                               MLX4_EN_WOL_MAGIC;
-       } else {
-               config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
-               config |= MLX4_EN_WOL_DO_MODIFY;
-       }
-
-       err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
-       if (err)
-               en_err(priv, "Failed to set WoL information\n");
-
-       return err;
-}
-
-static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-
-       switch (sset) {
-       case ETH_SS_STATS:
-               return NUM_ALL_STATS +
-                       (priv->tx_ring_num + priv->rx_ring_num) * 2;
-       case ETH_SS_TEST:
-               return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
-                                       & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static void mlx4_en_get_ethtool_stats(struct net_device *dev,
-               struct ethtool_stats *stats, uint64_t *data)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       int index = 0;
-       int i;
-
-       spin_lock_bh(&priv->stats_lock);
-
-       for (i = 0; i < NUM_MAIN_STATS; i++)
-               data[index++] = ((unsigned long *) &priv->stats)[i];
-       for (i = 0; i < NUM_PORT_STATS; i++)
-               data[index++] = ((unsigned long *) &priv->port_stats)[i];
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               data[index++] = priv->tx_ring[i].packets;
-               data[index++] = priv->tx_ring[i].bytes;
-       }
-       for (i = 0; i < priv->rx_ring_num; i++) {
-               data[index++] = priv->rx_ring[i].packets;
-               data[index++] = priv->rx_ring[i].bytes;
-       }
-       for (i = 0; i < NUM_PKT_STATS; i++)
-               data[index++] = ((unsigned long *) &priv->pkstats)[i];
-       spin_unlock_bh(&priv->stats_lock);
-
-}
-
-static void mlx4_en_self_test(struct net_device *dev,
-                             struct ethtool_test *etest, u64 *buf)
-{
-       mlx4_en_ex_selftest(dev, &etest->flags, buf);
-}
-
-static void mlx4_en_get_strings(struct net_device *dev,
-                               uint32_t stringset, uint8_t *data)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       int index = 0;
-       int i;
-
-       switch (stringset) {
-       case ETH_SS_TEST:
-               for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
-                       strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
-               if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
-                       for (; i < MLX4_EN_NUM_SELF_TEST; i++)
-                               strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
-               break;
-
-       case ETH_SS_STATS:
-               /* Add main counters */
-               for (i = 0; i < NUM_MAIN_STATS; i++)
-                       strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
-               for (i = 0; i < NUM_PORT_STATS; i++)
-                       strcpy(data + (index++) * ETH_GSTRING_LEN,
-                              main_strings[i + NUM_MAIN_STATS]);
-               for (i = 0; i < priv->tx_ring_num; i++) {
-                       sprintf(data + (index++) * ETH_GSTRING_LEN,
-                               "tx%d_packets", i);
-                       sprintf(data + (index++) * ETH_GSTRING_LEN,
-                               "tx%d_bytes", i);
-               }
-               for (i = 0; i < priv->rx_ring_num; i++) {
-                       sprintf(data + (index++) * ETH_GSTRING_LEN,
-                               "rx%d_packets", i);
-                       sprintf(data + (index++) * ETH_GSTRING_LEN,
-                               "rx%d_bytes", i);
-               }
-               for (i = 0; i < NUM_PKT_STATS; i++)
-                       strcpy(data + (index++) * ETH_GSTRING_LEN,
-                              main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
-               break;
-       }
-}
-
-static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       int trans_type;
-
-       cmd->autoneg = AUTONEG_DISABLE;
-       cmd->supported = SUPPORTED_10000baseT_Full;
-       cmd->advertising = ADVERTISED_10000baseT_Full;
-
-       if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
-               return -ENOMEM;
-
-       trans_type = priv->port_state.transciver;
-       if (netif_carrier_ok(dev)) {
-               ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
-               cmd->duplex = DUPLEX_FULL;
-       } else {
-               ethtool_cmd_speed_set(cmd, -1);
-               cmd->duplex = -1;
-       }
-
-       if (trans_type > 0 && trans_type <= 0xC) {
-               cmd->port = PORT_FIBRE;
-               cmd->transceiver = XCVR_EXTERNAL;
-               cmd->supported |= SUPPORTED_FIBRE;
-               cmd->advertising |= ADVERTISED_FIBRE;
-       } else if (trans_type == 0x80 || trans_type == 0) {
-               cmd->port = PORT_TP;
-               cmd->transceiver = XCVR_INTERNAL;
-               cmd->supported |= SUPPORTED_TP;
-               cmd->advertising |= ADVERTISED_TP;
-       } else {
-               cmd->port = -1;
-               cmd->transceiver = -1;
-       }
-       return 0;
-}
-
-static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-       if ((cmd->autoneg == AUTONEG_ENABLE) ||
-           (ethtool_cmd_speed(cmd) != SPEED_10000) ||
-           (cmd->duplex != DUPLEX_FULL))
-               return -EINVAL;
-
-       /* Nothing to change */
-       return 0;
-}
-
-static int mlx4_en_get_coalesce(struct net_device *dev,
-                             struct ethtool_coalesce *coal)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-
-       coal->tx_coalesce_usecs = 0;
-       coal->tx_max_coalesced_frames = 0;
-       coal->rx_coalesce_usecs = priv->rx_usecs;
-       coal->rx_max_coalesced_frames = priv->rx_frames;
-
-       coal->pkt_rate_low = priv->pkt_rate_low;
-       coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
-       coal->pkt_rate_high = priv->pkt_rate_high;
-       coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
-       coal->rate_sample_interval = priv->sample_interval;
-       coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
-       return 0;
-}
-
-static int mlx4_en_set_coalesce(struct net_device *dev,
-                             struct ethtool_coalesce *coal)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       int err, i;
-
-       priv->rx_frames = (coal->rx_max_coalesced_frames ==
-                          MLX4_EN_AUTO_CONF) ?
-                               MLX4_EN_RX_COAL_TARGET :
-                               coal->rx_max_coalesced_frames;
-       priv->rx_usecs = (coal->rx_coalesce_usecs ==
-                         MLX4_EN_AUTO_CONF) ?
-                               MLX4_EN_RX_COAL_TIME :
-                               coal->rx_coalesce_usecs;
-
-       /* Set adaptive coalescing params */
-       priv->pkt_rate_low = coal->pkt_rate_low;
-       priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
-       priv->pkt_rate_high = coal->pkt_rate_high;
-       priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
-       priv->sample_interval = coal->rate_sample_interval;
-       priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
-       priv->last_moder_time = MLX4_EN_AUTO_CONF;
-       if (priv->adaptive_rx_coal)
-               return 0;
-
-       for (i = 0; i < priv->rx_ring_num; i++) {
-               priv->rx_cq[i].moder_cnt = priv->rx_frames;
-               priv->rx_cq[i].moder_time = priv->rx_usecs;
-               err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
-               if (err)
-                       return err;
-       }
-       return 0;
-}
-
-static int mlx4_en_set_pauseparam(struct net_device *dev,
-                               struct ethtool_pauseparam *pause)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int err;
-
-       priv->prof->tx_pause = pause->tx_pause != 0;
-       priv->prof->rx_pause = pause->rx_pause != 0;
-       err = mlx4_SET_PORT_general(mdev->dev, priv->port,
-                                   priv->rx_skb_size + ETH_FCS_LEN,
-                                   priv->prof->tx_pause,
-                                   priv->prof->tx_ppp,
-                                   priv->prof->rx_pause,
-                                   priv->prof->rx_ppp);
-       if (err)
-               en_err(priv, "Failed setting pause params\n");
-
-       return err;
-}
-
-static void mlx4_en_get_pauseparam(struct net_device *dev,
-                                struct ethtool_pauseparam *pause)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-
-       pause->tx_pause = priv->prof->tx_pause;
-       pause->rx_pause = priv->prof->rx_pause;
-}
-
-static int mlx4_en_set_ringparam(struct net_device *dev,
-                                struct ethtool_ringparam *param)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       u32 rx_size, tx_size;
-       int port_up = 0;
-       int err = 0;
-
-       if (param->rx_jumbo_pending || param->rx_mini_pending)
-               return -EINVAL;
-
-       rx_size = roundup_pow_of_two(param->rx_pending);
-       rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
-       rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
-       tx_size = roundup_pow_of_two(param->tx_pending);
-       tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
-       tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
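-       /* e.g. a requested ring size of 1000 is rounded up to 1024 (the next
-        * power of two) before being clamped to the supported range */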
-
-       if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
-                                       priv->rx_ring[0].size) &&
-           tx_size == priv->tx_ring[0].size)
-               return 0;
-
-       mutex_lock(&mdev->state_lock);
-       if (priv->port_up) {
-               port_up = 1;
-               mlx4_en_stop_port(dev);
-       }
-
-       mlx4_en_free_resources(priv, true);
-
-       priv->prof->tx_ring_size = tx_size;
-       priv->prof->rx_ring_size = rx_size;
-
-       err = mlx4_en_alloc_resources(priv);
-       if (err) {
-               en_err(priv, "Failed reallocating port resources\n");
-               goto out;
-       }
-       if (port_up) {
-               err = mlx4_en_start_port(dev);
-               if (err)
-                       en_err(priv, "Failed starting port\n");
-       }
-
-out:
-       mutex_unlock(&mdev->state_lock);
-       return err;
-}
-
-static void mlx4_en_get_ringparam(struct net_device *dev,
-                                 struct ethtool_ringparam *param)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-
-       memset(param, 0, sizeof(*param));
-       param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
-       param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
-       param->rx_pending = priv->port_up ?
-               priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
-       param->tx_pending = priv->tx_ring[0].size;
-}
-
-const struct ethtool_ops mlx4_en_ethtool_ops = {
-       .get_drvinfo = mlx4_en_get_drvinfo,
-       .get_settings = mlx4_en_get_settings,
-       .set_settings = mlx4_en_set_settings,
-       .get_link = ethtool_op_get_link,
-       .get_strings = mlx4_en_get_strings,
-       .get_sset_count = mlx4_en_get_sset_count,
-       .get_ethtool_stats = mlx4_en_get_ethtool_stats,
-       .self_test = mlx4_en_self_test,
-       .get_wol = mlx4_en_get_wol,
-       .set_wol = mlx4_en_set_wol,
-       .get_msglevel = mlx4_en_get_msglevel,
-       .set_msglevel = mlx4_en_set_msglevel,
-       .get_coalesce = mlx4_en_get_coalesce,
-       .set_coalesce = mlx4_en_set_coalesce,
-       .get_pauseparam = mlx4_en_get_pauseparam,
-       .set_pauseparam = mlx4_en_set_pauseparam,
-       .get_ringparam = mlx4_en_get_ringparam,
-       .set_ringparam = mlx4_en_set_ringparam,
-};
-
-
-
-
-
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
deleted file mode 100644 (file)
index 6bfea23..0000000
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/cpumask.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-
-#include <linux/mlx4/driver.h>
-#include <linux/mlx4/device.h>
-#include <linux/mlx4/cmd.h>
-
-#include "mlx4_en.h"
-
-MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
-MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
-
-static const char mlx4_en_version[] =
-       DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
-       DRV_VERSION " (" DRV_RELDATE ")\n";
-
-#define MLX4_EN_PARM_INT(X, def_val, desc) \
-       static unsigned int X = def_val;\
-       module_param(X, uint, 0444); \
-       MODULE_PARM_DESC(X, desc);
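-
-/* For illustration, MLX4_EN_PARM_INT(tcp_rss, 1, "...") expands to:
- *     static unsigned int tcp_rss = 1;
- *     module_param(tcp_rss, uint, 0444);
- *     MODULE_PARM_DESC(tcp_rss, "...");
- * i.e. a read-only (mode 0444) unsigned module parameter with a default.
- */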
-
-
-/*
- * Device scope module parameters
- */
-
-
-/* Enable RSS TCP traffic */
-MLX4_EN_PARM_INT(tcp_rss, 1,
-                "Enable RSS for incomming TCP traffic or disabled (0)");
-/* Enable RSS UDP traffic */
-MLX4_EN_PARM_INT(udp_rss, 1,
-                "Enable RSS for incomming UDP traffic or disabled (0)");
-
-/* Priority pausing */
-MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
-                          " Per priority bit mask");
-MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
-                          " Per priority bit mask");
-
-int en_print(const char *level, const struct mlx4_en_priv *priv,
-            const char *format, ...)
-{
-       va_list args;
-       struct va_format vaf;
-       int i;
-
-       va_start(args, format);
-
-       vaf.fmt = format;
-       vaf.va = &args;
-       if (priv->registered)
-               i = printk("%s%s: %s: %pV",
-                          level, DRV_NAME, priv->dev->name, &vaf);
-       else
-               i = printk("%s%s: %s: Port %d: %pV",
-                          level, DRV_NAME, dev_name(&priv->mdev->pdev->dev),
-                          priv->port, &vaf);
-       va_end(args);
-
-       return i;
-}
-
-static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-{
-       struct mlx4_en_profile *params = &mdev->profile;
-       int i;
-
-       params->tcp_rss = tcp_rss;
-       params->udp_rss = udp_rss;
-       if (params->udp_rss && !(mdev->dev->caps.flags
-                                       & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
-               mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
-               params->udp_rss = 0;
-       }
-       for (i = 1; i <= MLX4_MAX_PORTS; i++) {
-               params->prof[i].rx_pause = 1;
-               params->prof[i].rx_ppp = pfcrx;
-               params->prof[i].tx_pause = 1;
-               params->prof[i].tx_ppp = pfctx;
-               params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
-               params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
-               params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
-                       (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
-       }
-
-       return 0;
-}
-
-static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
-{
-       struct mlx4_en_dev *endev = ctx;
-
-       return endev->pndev[port];
-}
-
-static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
-                         enum mlx4_dev_event event, int port)
-{
-       struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
-       struct mlx4_en_priv *priv;
-
-       if (!mdev->pndev[port])
-               return;
-
-       priv = netdev_priv(mdev->pndev[port]);
-       switch (event) {
-       case MLX4_DEV_EVENT_PORT_UP:
-       case MLX4_DEV_EVENT_PORT_DOWN:
-               /* To prevent races, we poll the link state in a separate
-                * task rather than changing it here */
-               priv->link_state = event;
-               queue_work(mdev->workqueue, &priv->linkstate_task);
-               break;
-
-       case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
-               mlx4_err(mdev, "Internal error detected, restarting device\n");
-               break;
-
-       default:
-               mlx4_warn(mdev, "Unhandled event: %d\n", event);
-       }
-}
-
-static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
-{
-       struct mlx4_en_dev *mdev = endev_ptr;
-       int i;
-
-       mutex_lock(&mdev->state_lock);
-       mdev->device_up = false;
-       mutex_unlock(&mdev->state_lock);
-
-       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
-               if (mdev->pndev[i])
-                       mlx4_en_destroy_netdev(mdev->pndev[i]);
-
-       flush_workqueue(mdev->workqueue);
-       destroy_workqueue(mdev->workqueue);
-       mlx4_mr_free(dev, &mdev->mr);
-       mlx4_uar_free(dev, &mdev->priv_uar);
-       mlx4_pd_free(dev, mdev->priv_pdn);
-       kfree(mdev);
-}
-
-static void *mlx4_en_add(struct mlx4_dev *dev)
-{
-       struct mlx4_en_dev *mdev;
-       int i;
-       int err;
-
-       printk_once(KERN_INFO "%s", mlx4_en_version);
-
-       mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
-       if (!mdev) {
-               dev_err(&dev->pdev->dev, "Device struct alloc failed, "
-                       "aborting.\n");
-               err = -ENOMEM;
-               goto err_free_res;
-       }
-
-       if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
-               goto err_free_dev;
-
-       if (mlx4_uar_alloc(dev, &mdev->priv_uar))
-               goto err_pd;
-
-       mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
-                               PAGE_SIZE);
-       if (!mdev->uar_map)
-               goto err_uar;
-       spin_lock_init(&mdev->uar_lock);
-
-       mdev->dev = dev;
-       mdev->dma_device = &(dev->pdev->dev);
-       mdev->pdev = dev->pdev;
-       mdev->device_up = false;
-
-       mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
-       if (!mdev->LSO_support)
-               mlx4_warn(mdev, "LSO not supported, please upgrade to later "
-                               "FW version to enable LSO\n");
-
-       if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
-                        MLX4_PERM_LOCAL_WRITE |  MLX4_PERM_LOCAL_READ,
-                        0, 0, &mdev->mr)) {
-               mlx4_err(mdev, "Failed allocating memory region\n");
-               goto err_uar;
-       }
-       if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
-               mlx4_err(mdev, "Failed enabling memory region\n");
-               goto err_mr;
-       }
-
-       /* Build device profile according to supplied module parameters */
-       err = mlx4_en_get_profile(mdev);
-       if (err) {
-               mlx4_err(mdev, "Bad module parameters, aborting.\n");
-               goto err_mr;
-       }
-
-       /* Configure which ports to start according to module parameters */
-       mdev->port_cnt = 0;
-       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
-               mdev->port_cnt++;
-
-       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-               if (!dev->caps.comp_pool) {
-                       mdev->profile.prof[i].rx_ring_num =
-                               rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
-                                                          min_t(int,
-                                                                dev->caps.num_comp_vectors,
-                                                                MAX_RX_RINGS)));
-               } else {
-                       mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
-                               min_t(int, dev->caps.comp_pool /
-                                     dev->caps.num_ports - 1, MAX_MSIX_P_PORT - 1));
-               }
-       }
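-       /* That is: with a dedicated MSI-X completion pool each port gets
-        * comp_pool / num_ports - 1 vectors (capped at MAX_MSIX_P_PORT - 1);
-        * otherwise the ring count follows the legacy completion vectors,
-        * clamped to [MIN_RX_RINGS, MAX_RX_RINGS].  Either way the result
-        * is rounded down to a power of two. */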
-
-       /* Create our own workqueue for reset/multicast tasks
-        * Note: we cannot use the shared workqueue because of deadlocks caused
-        *       by the rtnl lock */
-       mdev->workqueue = create_singlethread_workqueue("mlx4_en");
-       if (!mdev->workqueue) {
-               err = -ENOMEM;
-               goto err_mr;
-       }
-
-       /* At this stage all non-port specific tasks are complete:
-        * mark the card state as up */
-       mutex_init(&mdev->state_lock);
-       mdev->device_up = true;
-
-       /* Setup ports */
-
-       /* Create a netdev for each port */
-       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-               mlx4_info(mdev, "Activating port:%d\n", i);
-               if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
-                       mdev->pndev[i] = NULL;
-       }
-       return mdev;
-
-err_mr:
-       mlx4_mr_free(dev, &mdev->mr);
-err_uar:
-       mlx4_uar_free(dev, &mdev->priv_uar);
-err_pd:
-       mlx4_pd_free(dev, mdev->priv_pdn);
-err_free_dev:
-       kfree(mdev);
-err_free_res:
-       return NULL;
-}
-
-static struct mlx4_interface mlx4_en_interface = {
-       .add            = mlx4_en_add,
-       .remove         = mlx4_en_remove,
-       .event          = mlx4_en_event,
-       .get_dev        = mlx4_en_get_netdev,
-       .protocol       = MLX4_PROT_ETH,
-};
-
-static int __init mlx4_en_init(void)
-{
-       return mlx4_register_interface(&mlx4_en_interface);
-}
-
-static void __exit mlx4_en_cleanup(void)
-{
-       mlx4_unregister_interface(&mlx4_en_interface);
-}
-
-module_init(mlx4_en_init);
-module_exit(mlx4_en_cleanup);
-
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
deleted file mode 100644 (file)
index 4b0f32e..0000000
+++ /dev/null
@@ -1,1166 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/etherdevice.h>
-#include <linux/tcp.h>
-#include <linux/if_vlan.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-
-#include <linux/mlx4/driver.h>
-#include <linux/mlx4/device.h>
-#include <linux/mlx4/cmd.h>
-#include <linux/mlx4/cq.h>
-
-#include "mlx4_en.h"
-#include "en_port.h"
-
-static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int err;
-       int idx;
-
-       en_dbg(HW, priv, "adding VLAN:%d\n", vid);
-
-       set_bit(vid, priv->active_vlans);
-
-       /* Add VID to port VLAN filter */
-       mutex_lock(&mdev->state_lock);
-       if (mdev->device_up && priv->port_up) {
-               err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-               if (err)
-                       en_err(priv, "Failed configuring VLAN filter\n");
-       }
-       if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
-               en_err(priv, "failed adding vlan %d\n", vid);
-       mutex_unlock(&mdev->state_lock);
-}
-
-static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int err;
-       int idx;
-
-       en_dbg(HW, priv, "Killing VID:%d\n", vid);
-
-       clear_bit(vid, priv->active_vlans);
-
-       /* Remove VID from port VLAN filter */
-       mutex_lock(&mdev->state_lock);
-       if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
-               mlx4_unregister_vlan(mdev->dev, priv->port, idx);
-       else
-               en_err(priv, "could not find vid %d in cache\n", vid);
-
-       if (mdev->device_up && priv->port_up) {
-               err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-               if (err)
-                       en_err(priv, "Failed configuring VLAN filter\n");
-       }
-       mutex_unlock(&mdev->state_lock);
-}
-
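-/* Packs the six MAC bytes big-endian into the low 48 bits of a u64;
- * e.g. 00:02:c9:ab:cd:ef becomes 0x000002c9abcdef. */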
-u64 mlx4_en_mac_to_u64(u8 *addr)
-{
-       u64 mac = 0;
-       int i;
-
-       for (i = 0; i < ETH_ALEN; i++) {
-               mac <<= 8;
-               mac |= addr[i];
-       }
-       return mac;
-}
-
-static int mlx4_en_set_mac(struct net_device *dev, void *addr)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct sockaddr *saddr = addr;
-
-       if (!is_valid_ether_addr(saddr->sa_data))
-               return -EADDRNOTAVAIL;
-
-       memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
-       priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
-       queue_work(mdev->workqueue, &priv->mac_task);
-       return 0;
-}
-
-static void mlx4_en_do_set_mac(struct work_struct *work)
-{
-       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
-                                                mac_task);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int err = 0;
-
-       mutex_lock(&mdev->state_lock);
-       if (priv->port_up) {
-               /* Remove old MAC and insert the new one */
-               err = mlx4_replace_mac(mdev->dev, priv->port,
-                                      priv->base_qpn, priv->mac, 0);
-               if (err)
-                       en_err(priv, "Failed changing HW MAC address\n");
-       } else
-               en_dbg(HW, priv, "Port is down while "
-                                "registering mac, exiting...\n");
-
-       mutex_unlock(&mdev->state_lock);
-}
-
-static void mlx4_en_clear_list(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-
-       kfree(priv->mc_addrs);
-       priv->mc_addrs_cnt = 0;
-}
-
-static void mlx4_en_cache_mclist(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct netdev_hw_addr *ha;
-       char *mc_addrs;
-       int mc_addrs_cnt = netdev_mc_count(dev);
-       int i;
-
-       mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
-       if (!mc_addrs) {
-               en_err(priv, "failed to allocate multicast list\n");
-               return;
-       }
-       i = 0;
-       netdev_for_each_mc_addr(ha, dev)
-               memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
-       priv->mc_addrs = mc_addrs;
-       priv->mc_addrs_cnt = mc_addrs_cnt;
-}
-
-
-static void mlx4_en_set_multicast(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-
-       if (!priv->port_up)
-               return;
-
-       queue_work(priv->mdev->workqueue, &priv->mcast_task);
-}
-
-static void mlx4_en_do_set_multicast(struct work_struct *work)
-{
-       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
-                                                mcast_task);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct net_device *dev = priv->dev;
-       u64 mcast_addr = 0;
-       u8 mc_list[16] = {0};
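-       /* mc_list: 16-byte address buffer used by the multicast attach and
-        * detach calls below; byte 5 carries the port number, bytes 10..15
-        * the multicast MAC */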
-       int err;
-
-       mutex_lock(&mdev->state_lock);
-       if (!mdev->device_up) {
-               en_dbg(HW, priv, "Card is not up, "
-                                "ignoring multicast change.\n");
-               goto out;
-       }
-       if (!priv->port_up) {
-               en_dbg(HW, priv, "Port is down, "
-                                "ignoring  multicast change.\n");
-               goto out;
-       }
-
-       /*
-        * Promiscuous mode: disable all filters
-        */
-
-       if (dev->flags & IFF_PROMISC) {
-               if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
-                       if (netif_msg_rx_status(priv))
-                               en_warn(priv, "Entering promiscuous mode\n");
-                       priv->flags |= MLX4_EN_FLAG_PROMISC;
-
-                       /* Enable promiscuous mode */
-                       if (!(mdev->dev->caps.flags &
-                                               MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
-                               err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
-                                                            priv->base_qpn, 1);
-                       else
-                               err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
-                                                              priv->port);
-                       if (err)
-                               en_err(priv, "Failed enabling "
-                                            "promiscuous mode\n");
-
-                       /* Disable port multicast filter (unconditionally) */
-                       err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
-                                                 0, MLX4_MCAST_DISABLE);
-                       if (err)
-                               en_err(priv, "Failed disabling "
-                                            "multicast filter\n");
-
-                       /* Add the default qp number as multicast promisc */
-                       if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
-                               err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
-                                                                priv->port);
-                               if (err)
-                                       en_err(priv, "Failed entering multicast promisc mode\n");
-                               priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
-                       }
-
-                       /* Disable port VLAN filter */
-                       err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-                       if (err)
-                               en_err(priv, "Failed disabling VLAN filter\n");
-               }
-               goto out;
-       }
-
-       /*
-        * Not in promiscuous mode
-        */
-
-       if (priv->flags & MLX4_EN_FLAG_PROMISC) {
-               if (netif_msg_rx_status(priv))
-                       en_warn(priv, "Leaving promiscuous mode\n");
-               priv->flags &= ~MLX4_EN_FLAG_PROMISC;
-
-               /* Disable promiscuous mode */
-               if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
-                       err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
-                                                    priv->base_qpn, 0);
-               else
-                       err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
-                                                         priv->port);
-               if (err)
-                       en_err(priv, "Failed disabling promiscuous mode\n");
-
-               /* Disable Multicast promisc */
-               if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
-                       err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
-                                                           priv->port);
-                       if (err)
-                               en_err(priv, "Failed disabling multicast promiscuous mode\n");
-                       priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
-               }
-
-               /* Enable port VLAN filter */
-               err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-               if (err)
-                       en_err(priv, "Failed enabling VLAN filter\n");
-       }
-
-       /* Enable/disable the multicast filter according to IFF_ALLMULTI */
-       if (dev->flags & IFF_ALLMULTI) {
-               err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
-                                         0, MLX4_MCAST_DISABLE);
-               if (err)
-                       en_err(priv, "Failed disabling multicast filter\n");
-
-               /* Add the default qp number as multicast promisc */
-               if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
-                       err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
-                                                        priv->port);
-                       if (err)
-                               en_err(priv, "Failed entering multicast promisc mode\n");
-                       priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
-               }
-       } else {
-               int i;
-               /* Disable Multicast promisc */
-               if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
-                       err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
-                                                           priv->port);
-                       if (err)
-                               en_err(priv, "Failed disabling multicast promiscuous mode\n");
-                       priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
-               }
-
-               err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
-                                         0, MLX4_MCAST_DISABLE);
-               if (err)
-                       en_err(priv, "Failed disabling multicast filter\n");
-
-               /* Detach our qp from all the multicast addresses */
-               for (i = 0; i < priv->mc_addrs_cnt; i++) {
-                       memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
-                       mc_list[5] = priv->port;
-                       mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
-                                             mc_list, MLX4_PROT_ETH);
-               }
-               /* Flush mcast filter and init it with broadcast address */
-               mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
-                                   1, MLX4_MCAST_CONFIG);
-
-               /* Update multicast list - we cache all addresses so they won't
-                * change while HW is updated holding the command semaphore */
-               netif_tx_lock_bh(dev);
-               mlx4_en_cache_mclist(dev);
-               netif_tx_unlock_bh(dev);
-               for (i = 0; i < priv->mc_addrs_cnt; i++) {
-                       mcast_addr =
-                             mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
-                       memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
-                       mc_list[5] = priv->port;
-                       mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
-                                             mc_list, 0, MLX4_PROT_ETH);
-                       mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
-                                           mcast_addr, 0, MLX4_MCAST_CONFIG);
-               }
-               err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
-                                         0, MLX4_MCAST_ENABLE);
-               if (err)
-                       en_err(priv, "Failed enabling multicast filter\n");
-       }
-out:
-       mutex_unlock(&mdev->state_lock);
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mlx4_en_netpoll(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_cq *cq;
-       unsigned long flags;
-       int i;
-
-       for (i = 0; i < priv->rx_ring_num; i++) {
-               cq = &priv->rx_cq[i];
-               spin_lock_irqsave(&cq->lock, flags);
-               napi_synchronize(&cq->napi);
-               mlx4_en_process_rx_cq(dev, cq, 0);
-               spin_unlock_irqrestore(&cq->lock, flags);
-       }
-}
-#endif
-
-static void mlx4_en_tx_timeout(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-
-       if (netif_msg_timer(priv))
-               en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
-
-       priv->port_stats.tx_timeout++;
-       en_dbg(DRV, priv, "Scheduling watchdog\n");
-       queue_work(mdev->workqueue, &priv->watchdog_task);
-}
-
-
-static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-
-       spin_lock_bh(&priv->stats_lock);
-       memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
-       spin_unlock_bh(&priv->stats_lock);
-
-       return &priv->ret_stats;
-}
-
-static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
-{
-       struct mlx4_en_cq *cq;
-       int i;
-
-       /* If we haven't received a specific coalescing setting
-        * (module param), we set the moderation parameters as follows:
-        * - moder_cnt is set to the number of mtu sized packets to
-        *   satisfy our coalescing target.
-        * - moder_time is set to a fixed value.
-        */
-       priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
-       priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
-       en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
-                          "rx_frames:%d rx_usecs:%d\n",
-                priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
-
-       /* Setup cq moderation params */
-       for (i = 0; i < priv->rx_ring_num; i++) {
-               cq = &priv->rx_cq[i];
-               cq->moder_cnt = priv->rx_frames;
-               cq->moder_time = priv->rx_usecs;
-       }
-
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               cq = &priv->tx_cq[i];
-               cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
-               cq->moder_time = MLX4_EN_TX_COAL_TIME;
-       }
-
-       /* Reset auto-moderation params */
-       priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
-       priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
-       priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
-       priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
-       priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
-       priv->adaptive_rx_coal = 1;
-       priv->last_moder_time = MLX4_EN_AUTO_CONF;
-       priv->last_moder_jiffies = 0;
-       priv->last_moder_packets = 0;
-       priv->last_moder_tx_packets = 0;
-       priv->last_moder_bytes = 0;
-}
-
-static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
-{
-       unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
-       struct mlx4_en_cq *cq;
-       unsigned long packets;
-       unsigned long rate;
-       unsigned long avg_pkt_size;
-       unsigned long rx_packets;
-       unsigned long rx_bytes;
-       unsigned long tx_packets;
-       unsigned long tx_pkt_diff;
-       unsigned long rx_pkt_diff;
-       int moder_time;
-       int i, err;
-
-       if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
-               return;
-
-       spin_lock_bh(&priv->stats_lock);
-       rx_packets = priv->stats.rx_packets;
-       rx_bytes = priv->stats.rx_bytes;
-       tx_packets = priv->stats.tx_packets;
-       spin_unlock_bh(&priv->stats_lock);
-
-       if (!priv->last_moder_jiffies || !period)
-               goto out;
-
-       tx_pkt_diff = ((unsigned long) (tx_packets -
-                                       priv->last_moder_tx_packets));
-       rx_pkt_diff = ((unsigned long) (rx_packets -
-                                       priv->last_moder_packets));
-       packets = max(tx_pkt_diff, rx_pkt_diff);
-       rate = packets * HZ / period;
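-       /* period is measured in jiffies, so packets * HZ / period gives
-        * packets per second */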
-       avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
-                                priv->last_moder_bytes)) / packets : 0;
-
-       /* Apply auto-moderation only when the packet rate is high enough
-        * for it to matter */
-       if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
-               /* If tx and rx packet rates are not balanced, assume that
-                * traffic is mainly BW bound and apply maximum moderation.
-                * Otherwise, moderate according to packet rate */
-               if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
-                   2 * rx_pkt_diff > 3 * tx_pkt_diff) {
-                       moder_time = priv->rx_usecs_high;
-               } else {
-                       if (rate < priv->pkt_rate_low)
-                               moder_time = priv->rx_usecs_low;
-                       else if (rate > priv->pkt_rate_high)
-                               moder_time = priv->rx_usecs_high;
-                       else
-                               moder_time = (rate - priv->pkt_rate_low) *
-                                       (priv->rx_usecs_high - priv->rx_usecs_low) /
-                                       (priv->pkt_rate_high - priv->pkt_rate_low) +
-                                       priv->rx_usecs_low;
-               }
-       } else {
-               moder_time = priv->rx_usecs_low;
-       }
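-       /*
-        * The middle branch above interpolates moder_time linearly between
-        * rx_usecs_low and rx_usecs_high.  With (hypothetical) limits
-        * pkt_rate_low = 10000, pkt_rate_high = 100000, rx_usecs_low = 16,
-        * rx_usecs_high = 128 and a measured rate of 55000 p/s:
-        *      (55000 - 10000) * (128 - 16) / (100000 - 10000) + 16 = 72 usec
-        */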
-
-       en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
-              tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
-
-       en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
-              "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
-                priv->last_moder_time, moder_time, period, packets,
-                avg_pkt_size, rate);
-
-       if (moder_time != priv->last_moder_time) {
-               priv->last_moder_time = moder_time;
-               for (i = 0; i < priv->rx_ring_num; i++) {
-                       cq = &priv->rx_cq[i];
-                       cq->moder_time = moder_time;
-                       err = mlx4_en_set_cq_moder(priv, cq);
-                       if (err) {
-                               en_err(priv, "Failed modifying moderation for cq:%d\n", i);
-                               break;
-                       }
-               }
-       }
-
-out:
-       priv->last_moder_packets = rx_packets;
-       priv->last_moder_tx_packets = tx_packets;
-       priv->last_moder_bytes = rx_bytes;
-       priv->last_moder_jiffies = jiffies;
-}
-
-static void mlx4_en_do_get_stats(struct work_struct *work)
-{
-       struct delayed_work *delay = to_delayed_work(work);
-       struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
-                                                stats_task);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int err;
-
-       err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
-       if (err)
-               en_dbg(HW, priv, "Could not update stats\n");
-
-       mutex_lock(&mdev->state_lock);
-       if (mdev->device_up) {
-               if (priv->port_up)
-                       mlx4_en_auto_moderation(priv);
-
-               queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
-       }
-       if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
-               queue_work(mdev->workqueue, &priv->mac_task);
-               mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
-       }
-       mutex_unlock(&mdev->state_lock);
-}
-
-static void mlx4_en_linkstate(struct work_struct *work)
-{
-       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
-                                                linkstate_task);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int linkstate = priv->link_state;
-
-       mutex_lock(&mdev->state_lock);
-       /* If the observable port state changed, set the carrier state and
-        * report it to the system log */
-       if (priv->last_link_state != linkstate) {
-               if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
-                       en_info(priv, "Link Down\n");
-                       netif_carrier_off(priv->dev);
-               } else {
-                       en_info(priv, "Link Up\n");
-                       netif_carrier_on(priv->dev);
-               }
-       }
-       priv->last_link_state = linkstate;
-       mutex_unlock(&mdev->state_lock);
-}
-
-
-int mlx4_en_start_port(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct mlx4_en_cq *cq;
-       struct mlx4_en_tx_ring *tx_ring;
-       int rx_index = 0;
-       int tx_index = 0;
-       int err = 0;
-       int i;
-       int j;
-       u8 mc_list[16] = {0};
-       char name[32];
-
-       if (priv->port_up) {
-               en_dbg(DRV, priv, "start port called while port already up\n");
-               return 0;
-       }
-
-       /* Calculate Rx buf size */
-       dev->mtu = min(dev->mtu, priv->max_mtu);
-       mlx4_en_calc_rx_buf(dev);
-       en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
-
-       /* Configure rx cq's and rings */
-       err = mlx4_en_activate_rx_rings(priv);
-       if (err) {
-               en_err(priv, "Failed to activate RX rings\n");
-               return err;
-       }
-       for (i = 0; i < priv->rx_ring_num; i++) {
-               cq = &priv->rx_cq[i];
-
-               err = mlx4_en_activate_cq(priv, cq);
-               if (err) {
-                       en_err(priv, "Failed activating Rx CQ\n");
-                       goto cq_err;
-               }
-               for (j = 0; j < cq->size; j++)
-                       cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
-               err = mlx4_en_set_cq_moder(priv, cq);
-               if (err) {
-                       en_err(priv, "Failed setting cq moderation parameters");
-                       mlx4_en_deactivate_cq(priv, cq);
-                       goto cq_err;
-               }
-               mlx4_en_arm_cq(priv, cq);
-               priv->rx_ring[i].cqn = cq->mcq.cqn;
-               ++rx_index;
-       }
-
-       /* Set port mac number */
-       en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
-       err = mlx4_register_mac(mdev->dev, priv->port,
-                               priv->mac, &priv->base_qpn, 0);
-       if (err) {
-               en_err(priv, "Failed setting port mac\n");
-               goto cq_err;
-       }
-       mdev->mac_removed[priv->port] = 0;
-
-       err = mlx4_en_config_rss_steer(priv);
-       if (err) {
-               en_err(priv, "Failed configuring rss steering\n");
-               goto mac_err;
-       }
-
-       if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
-               sprintf(name , "%s-tx", priv->dev->name);
-               if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) {
-                       mlx4_warn(mdev, "Failed Assigning an EQ to "
-                                       "%s_tx ,Falling back to legacy "
-                                       "EQ's\n", priv->dev->name);
-               }
-       }
-       /* Configure tx cq's and rings */
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               /* Configure cq */
-               cq = &priv->tx_cq[i];
-               cq->vector = priv->tx_vector;
-               err = mlx4_en_activate_cq(priv, cq);
-               if (err) {
-                       en_err(priv, "Failed allocating Tx CQ\n");
-                       goto tx_err;
-               }
-               err = mlx4_en_set_cq_moder(priv, cq);
-               if (err) {
-                       en_err(priv, "Failed setting cq moderation parameters");
-                       mlx4_en_deactivate_cq(priv, cq);
-                       goto tx_err;
-               }
-               en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
-               cq->buf->wqe_index = cpu_to_be16(0xffff);
-
-               /* Configure ring */
-               tx_ring = &priv->tx_ring[i];
-               err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
-               if (err) {
-                       en_err(priv, "Failed allocating Tx ring\n");
-                       mlx4_en_deactivate_cq(priv, cq);
-                       goto tx_err;
-               }
-               /* Set initial ownership of all Tx TXBBs to SW (1) */
-               for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
-                       *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
-               ++tx_index;
-       }
-
-       /* Configure port */
-       err = mlx4_SET_PORT_general(mdev->dev, priv->port,
-                                   priv->rx_skb_size + ETH_FCS_LEN,
-                                   priv->prof->tx_pause,
-                                   priv->prof->tx_ppp,
-                                   priv->prof->rx_pause,
-                                   priv->prof->rx_ppp);
-       if (err) {
-               en_err(priv, "Failed setting port general configurations "
-                            "for port %d, with error %d\n", priv->port, err);
-               goto tx_err;
-       }
-       /* Set default qp number */
-       err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
-       if (err) {
-               en_err(priv, "Failed setting default qp numbers\n");
-               goto tx_err;
-       }
-
-       /* Init port */
-       en_dbg(HW, priv, "Initializing port\n");
-       err = mlx4_INIT_PORT(mdev->dev, priv->port);
-       if (err) {
-               en_err(priv, "Failed Initializing port\n");
-               goto tx_err;
-       }
-
-       /* Attach rx QP to broadcast address */
-       memset(&mc_list[10], 0xff, ETH_ALEN);
-       mc_list[5] = priv->port;
-       if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
-                                 0, MLX4_PROT_ETH))
-               mlx4_warn(mdev, "Failed Attaching Broadcast\n");
-
-       /* Must redo promiscuous mode setup. */
-       priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
-
-       /* Schedule multicast task to populate multicast list */
-       queue_work(mdev->workqueue, &priv->mcast_task);
-
-       priv->port_up = true;
-       netif_tx_start_all_queues(dev);
-       return 0;
-
-tx_err:
-       while (tx_index--) {
-               mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
-               mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
-       }
-
-       mlx4_en_release_rss_steer(priv);
-mac_err:
-       mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
-cq_err:
-       while (rx_index--)
-               mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
-       for (i = 0; i < priv->rx_ring_num; i++)
-               mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
-
-       return err; /* need to close devices */
-}
-
-
-void mlx4_en_stop_port(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int i;
-       u8 mc_list[16] = {0};
-
-       if (!priv->port_up) {
-               en_dbg(DRV, priv, "stop port called while port already down\n");
-               return;
-       }
-
-       /* Synchronize with tx routine */
-       netif_tx_lock_bh(dev);
-       netif_tx_stop_all_queues(dev);
-       netif_tx_unlock_bh(dev);
-
-       /* Set port as not active */
-       priv->port_up = false;
-
-       /* Detach All multicasts */
-       memset(&mc_list[10], 0xff, ETH_ALEN);
-       mc_list[5] = priv->port;
-       mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
-                             MLX4_PROT_ETH);
-       for (i = 0; i < priv->mc_addrs_cnt; i++) {
-               memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
-               mc_list[5] = priv->port;
-               mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
-                                     mc_list, MLX4_PROT_ETH);
-       }
-       mlx4_en_clear_list(dev);
-       /* Flush multicast filter */
-       mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
-
-       /* Unregister MAC address for the port */
-       mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
-       mdev->mac_removed[priv->port] = 1;
-
-       /* Free TX Rings */
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
-               mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
-       }
-       msleep(10);
-
-       for (i = 0; i < priv->tx_ring_num; i++)
-               mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
-
-       /* Free RSS qps */
-       mlx4_en_release_rss_steer(priv);
-
-       /* Free RX Rings */
-       for (i = 0; i < priv->rx_ring_num; i++) {
-               mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
-               while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
-                       msleep(1);
-               mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
-       }
-
-       /* close port */
-       mlx4_CLOSE_PORT(mdev->dev, priv->port);
-}
-
-static void mlx4_en_restart(struct work_struct *work)
-{
-       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
-                                                watchdog_task);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct net_device *dev = priv->dev;
-
-       en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
-
-       mutex_lock(&mdev->state_lock);
-       if (priv->port_up) {
-               mlx4_en_stop_port(dev);
-               if (mlx4_en_start_port(dev))
-                       en_err(priv, "Failed restarting port %d\n", priv->port);
-       }
-       mutex_unlock(&mdev->state_lock);
-}
-
-
-static int mlx4_en_open(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int i;
-       int err = 0;
-
-       mutex_lock(&mdev->state_lock);
-
-       if (!mdev->device_up) {
-               en_err(priv, "Cannot open - device down/disabled\n");
-               err = -EBUSY;
-               goto out;
-       }
-
-       /* Reset HW statistics and performance counters */
-       if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
-               en_dbg(HW, priv, "Failed dumping statistics\n");
-
-       memset(&priv->stats, 0, sizeof(priv->stats));
-       memset(&priv->pstats, 0, sizeof(priv->pstats));
-
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               priv->tx_ring[i].bytes = 0;
-               priv->tx_ring[i].packets = 0;
-       }
-       for (i = 0; i < priv->rx_ring_num; i++) {
-               priv->rx_ring[i].bytes = 0;
-               priv->rx_ring[i].packets = 0;
-       }
-
-       err = mlx4_en_start_port(dev);
-       if (err)
-               en_err(priv, "Failed starting port:%d\n", priv->port);
-
-out:
-       mutex_unlock(&mdev->state_lock);
-       return err;
-}
-
-
-static int mlx4_en_close(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-
-       en_dbg(IFDOWN, priv, "Close port called\n");
-
-       mutex_lock(&mdev->state_lock);
-
-       mlx4_en_stop_port(dev);
-       netif_carrier_off(dev);
-
-       mutex_unlock(&mdev->state_lock);
-       return 0;
-}
-
-void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
-{
-       int i;
-
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               if (priv->tx_ring[i].tx_info)
-                       mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-               if (priv->tx_cq[i].buf)
-                       mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors);
-       }
-
-       for (i = 0; i < priv->rx_ring_num; i++) {
-               if (priv->rx_ring[i].rx_info)
-                       mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
-               if (priv->rx_cq[i].buf)
-                       mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors);
-       }
-}
-
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
-{
-       struct mlx4_en_port_profile *prof = priv->prof;
-       int i;
-       int base_tx_qpn, err;
-
-       err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
-       if (err) {
-               en_err(priv, "failed reserving range for TX rings\n");
-               return err;
-       }
-
-       /* Create tx Rings */
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
-                                     prof->tx_ring_size, i, TX))
-                       goto err;
-
-               if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
-                                          prof->tx_ring_size, TXBB_SIZE))
-                       goto err;
-       }
-
-       /* Create rx Rings */
-       for (i = 0; i < priv->rx_ring_num; i++) {
-               if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
-                                     prof->rx_ring_size, i, RX))
-                       goto err;
-
-               if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
-                                          prof->rx_ring_size, priv->stride))
-                       goto err;
-       }
-
-       return 0;
-
-err:
-       en_err(priv, "Failed to allocate NIC resources\n");
-       mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
-       return -ENOMEM;
-}
-
-
-void mlx4_en_destroy_netdev(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-
-       en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
-
-       /* Unregister device - this will close the port if it was up */
-       if (priv->registered)
-               unregister_netdev(dev);
-
-       if (priv->allocated)
-               mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
-
-       cancel_delayed_work(&priv->stats_task);
-       /* flush any pending task for this netdev */
-       flush_workqueue(mdev->workqueue);
-
-       /* Detach the netdev so that tasks will not attempt to access it */
-       mutex_lock(&mdev->state_lock);
-       mdev->pndev[priv->port] = NULL;
-       mutex_unlock(&mdev->state_lock);
-
-       mlx4_en_free_resources(priv, false);
-       free_netdev(dev);
-}
-
-static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int err = 0;
-
-       en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
-                dev->mtu, new_mtu);
-
-       if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
-               en_err(priv, "Bad MTU size:%d.\n", new_mtu);
-               return -EPERM;
-       }
-       dev->mtu = new_mtu;
-
-       if (netif_running(dev)) {
-               mutex_lock(&mdev->state_lock);
-               if (!mdev->device_up) {
-                       /* NIC is probably restarting - let watchdog task reset
-                        * the port */
-                       en_dbg(DRV, priv, "Change MTU called with card down!?\n");
-               } else {
-                       mlx4_en_stop_port(dev);
-                       err = mlx4_en_start_port(dev);
-                       if (err) {
-                               en_err(priv, "Failed restarting port:%d\n",
-                                        priv->port);
-                               queue_work(mdev->workqueue, &priv->watchdog_task);
-                       }
-               }
-               mutex_unlock(&mdev->state_lock);
-       }
-       return 0;
-}
-
-static const struct net_device_ops mlx4_netdev_ops = {
-       .ndo_open               = mlx4_en_open,
-       .ndo_stop               = mlx4_en_close,
-       .ndo_start_xmit         = mlx4_en_xmit,
-       .ndo_select_queue       = mlx4_en_select_queue,
-       .ndo_get_stats          = mlx4_en_get_stats,
-       .ndo_set_multicast_list = mlx4_en_set_multicast,
-       .ndo_set_mac_address    = mlx4_en_set_mac,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_change_mtu         = mlx4_en_change_mtu,
-       .ndo_tx_timeout         = mlx4_en_tx_timeout,
-       .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
-       .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = mlx4_en_netpoll,
-#endif
-};
-
-int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
-                       struct mlx4_en_port_profile *prof)
-{
-       struct net_device *dev;
-       struct mlx4_en_priv *priv;
-       int i;
-       int err;
-
-       dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
-           prof->tx_ring_num, prof->rx_ring_num);
-       if (dev == NULL) {
-               mlx4_err(mdev, "Net device allocation failed\n");
-               return -ENOMEM;
-       }
-
-       SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
-       dev->dev_id = port - 1;
-
-       /*
-        * Initialize driver private data
-        */
-
-       priv = netdev_priv(dev);
-       memset(priv, 0, sizeof(struct mlx4_en_priv));
-       priv->dev = dev;
-       priv->mdev = mdev;
-       priv->prof = prof;
-       priv->port = port;
-       priv->port_up = false;
-       priv->flags = prof->flags;
-       priv->tx_ring_num = prof->tx_ring_num;
-       priv->rx_ring_num = prof->rx_ring_num;
-       priv->mac_index = -1;
-       priv->msg_enable = MLX4_EN_MSG_LEVEL;
-       spin_lock_init(&priv->stats_lock);
-       INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
-       INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
-       INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
-       INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
-       INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
-
-       /* Query for default mac and max mtu */
-       priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
-       priv->mac = mdev->dev->caps.def_mac[priv->port];
-       if (ILLEGAL_MAC(priv->mac)) {
-               en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
-                        priv->port, priv->mac);
-               err = -EINVAL;
-               goto out;
-       }
-
-       priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
-                                         DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
-       err = mlx4_en_alloc_resources(priv);
-       if (err)
-               goto out;
-
-       /* Allocate page for receive rings */
-       err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
-                               MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
-       if (err) {
-               en_err(priv, "Failed to allocate page for rx qps\n");
-               goto out;
-       }
-       priv->allocated = 1;
-
-       /*
-        * Initialize netdev entry points
-        */
-       dev->netdev_ops = &mlx4_netdev_ops;
-       dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
-       netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
-       netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
-
-       SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
-
-       /* Set default MAC */
-       dev->addr_len = ETH_ALEN;
-       for (i = 0; i < ETH_ALEN; i++) {
-               dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
-               dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
-       }
-
-       /*
-        * Set driver features
-        */
-       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-       if (mdev->LSO_support)
-               dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
-
-       dev->vlan_features = dev->hw_features;
-
-       dev->hw_features |= NETIF_F_RXCSUM;
-       dev->features = dev->hw_features | NETIF_F_HIGHDMA |
-                       NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
-                       NETIF_F_HW_VLAN_FILTER;
-
-       mdev->pndev[port] = dev;
-
-       netif_carrier_off(dev);
-       err = register_netdev(dev);
-       if (err) {
-               en_err(priv, "Netdev registration failed for port %d\n", port);
-               goto out;
-       }
-
-       en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
-       en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
-
-       /* Configure port */
-       err = mlx4_SET_PORT_general(mdev->dev, priv->port,
-                                   MLX4_EN_MIN_MTU,
-                                   0, 0, 0, 0);
-       if (err) {
-               en_err(priv, "Failed setting port general configurations "
-                      "for port %d, with error %d\n", priv->port, err);
-               goto out;
-       }
-
-       /* Init port */
-       en_warn(priv, "Initializing port\n");
-       err = mlx4_INIT_PORT(mdev->dev, priv->port);
-       if (err) {
-               en_err(priv, "Failed Initializing port\n");
-               goto out;
-       }
-       priv->registered = 1;
-       mlx4_en_set_default_moderation(priv);
-       queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
-       return 0;
-
-out:
-       mlx4_en_destroy_netdev(dev);
-       return err;
-}
-
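The dev_addr loop in mlx4_en_init_netdev() above unpacks the 64-bit burned-in MAC from the least-significant byte upward, so the lowest byte of priv->mac lands in dev_addr[5]. A minimal standalone sketch of the same unpacking (hypothetical helper, not part of the driver):

    /* Illustration of the dev_addr fill above: the u64 MAC
     * 0x0002c9000001 becomes the byte sequence 00:02:c9:00:00:01. */
    static void mac_u64_to_bytes(u64 mac, u8 addr[ETH_ALEN])
    {
            int i;

            for (i = 0; i < ETH_ALEN; i++)
                    addr[ETH_ALEN - 1 - i] = (u8)(mac >> (8 * i));
    }
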
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
deleted file mode 100644 (file)
index 5ada5b4..0000000
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-
-#include <linux/if_vlan.h>
-
-#include <linux/mlx4/device.h>
-#include <linux/mlx4/cmd.h>
-
-#include "en_port.h"
-#include "mlx4_en.h"
-
-
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
-                       u64 mac, u64 clear, u8 mode)
-{
-       return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
-                       MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
-}
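
mlx4_SET_MCAST_FLTR() folds two values into the command's 64-bit immediate parameter: the MAC address in the low bits and the clear flag in bit 63. A sketch of that packing, assuming only those fields are meaningful to the firmware:

    /* Assumed layout of the SET_MCAST_FLTR immediate parameter used
     * above: bit 63 = clear-filter flag, low bits = MAC address. */
    static inline u64 mcast_fltr_param(u64 mac, u64 clear)
    {
            return mac | (clear << 63);
    }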
-
-int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_vlan_fltr_mbox *filter;
-       int i;
-       int j;
-       int index = 0;
-       u32 entry;
-       int err = 0;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-
-       filter = mailbox->buf;
-       memset(filter, 0, sizeof(*filter));
-       for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
-               entry = 0;
-               for (j = 0; j < 32; j++)
-                       if (test_bit(index++, priv->active_vlans))
-                               entry |= 1 << j;
-               filter->entry[i] = cpu_to_be32(entry);
-       }
-       err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
-                      MLX4_CMD_TIME_CLASS_B);
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
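
The mailbox filled here packs all 4096 VLAN IDs into 128 big-endian 32-bit words. Because the loop fills entries from the highest index down while 'index' counts up, VLAN ID v lands in entry[VLAN_FLTR_SIZE - 1 - v / 32], bit (v % 32). A hypothetical helper expressing that mapping, for illustration only:

    /* Position of a given VLAN ID inside struct mlx4_set_vlan_fltr_mbox,
     * matching the fill loop in mlx4_SET_VLAN_FLTR() above. */
    static void vlan_fltr_pos(u16 vid, int *entry, int *bit)
    {
            *entry = VLAN_FLTR_SIZE - 1 - vid / 32;
            *bit = vid % 32;
    }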
-
-
-int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
-                         u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_port_general_context *context;
-       int err;
-       u32 in_mod;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       context = mailbox->buf;
-       memset(context, 0, sizeof *context);
-
-       context->flags = SET_PORT_GEN_ALL_VALID;
-       context->mtu = cpu_to_be16(mtu);
-       context->pptx = (pptx * (!pfctx)) << 7;
-       context->pfctx = pfctx;
-       context->pprx = (pprx * (!pfcrx)) << 7;
-       context->pfcrx = pfcrx;
-
-       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
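
Both SET_PORT variants in this file build the input modifier the same way: the sub-command goes in bits 15:8 and the port number in bits 7:0. A minimal sketch of that convention, assuming the layout shown above:

    /* Input modifier layout used with MLX4_CMD_SET_PORT here:
     * bits 15:8 = sub-opcode (e.g. MLX4_SET_PORT_GENERAL),
     * bits 7:0  = physical port number. */
    static inline u32 set_port_in_mod(u8 opcode, u8 port)
    {
            return (u32)opcode << 8 | port;
    }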
-
-int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
-                          u8 promisc)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_set_port_rqp_calc_context *context;
-       int err;
-       u32 in_mod;
-       u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
-                                               MCAST_DIRECT : MCAST_DEFAULT;
-
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER  &&
-                       dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
-               return 0;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       context = mailbox->buf;
-       memset(context, 0, sizeof *context);
-
-       context->base_qpn = cpu_to_be32(base_qpn);
-       context->n_mac = 0x2;
-       context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
-                                      base_qpn);
-       context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
-                                    base_qpn);
-       context->intra_no_vlan = 0;
-       context->no_vlan = MLX4_NO_VLAN_IDX;
-       context->intra_vlan_miss = 0;
-       context->vlan_miss = MLX4_VLAN_MISS_IDX;
-
-       in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
-{
-       struct mlx4_en_query_port_context *qport_context;
-       struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
-       struct mlx4_en_port_state *state = &priv->port_state;
-       struct mlx4_cmd_mailbox *mailbox;
-       int err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       memset(mailbox->buf, 0, sizeof(*qport_context));
-       err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
-                          MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
-       if (err)
-               goto out;
-       qport_context = mailbox->buf;
-
-       /* This command is always accessed from ethtool context,
-        * which is already synchronized, so no locking is needed */
-       state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
-       if ((qport_context->link_speed & MLX4_EN_SPEED_MASK) ==
-           MLX4_EN_1G_SPEED)
-               state->link_speed = 1000;
-       else
-               state->link_speed = 10000;
-       state->transciver = qport_context->transceiver;
-
-out:
-       mlx4_free_cmd_mailbox(mdev->dev, mailbox);
-       return err;
-}
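
The speed decode above relies on the masks defined in en_port.h: a value of MLX4_EN_1G_SPEED under MLX4_EN_SPEED_MASK reports 1 GbE, and anything else is treated as 10 GbE. Restated as a hypothetical helper:

    /* Assumed decode of the QUERY_PORT link_speed field, per the
     * MLX4_EN_SPEED_MASK / MLX4_EN_1G_SPEED definitions in en_port.h. */
    static int mlx4_en_decode_speed(u8 link_speed)
    {
            return ((link_speed & MLX4_EN_SPEED_MASK) == MLX4_EN_1G_SPEED) ?
                    1000 : 10000;
    }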
-
-int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
-{
-       struct mlx4_en_stat_out_mbox *mlx4_en_stats;
-       struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
-       struct net_device_stats *stats = &priv->stats;
-       struct mlx4_cmd_mailbox *mailbox;
-       u64 in_mod = reset << 8 | port;
-       int err;
-       int i;
-
-       mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
-       err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
-                          MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
-       if (err)
-               goto out;
-
-       mlx4_en_stats = mailbox->buf;
-
-       spin_lock_bh(&priv->stats_lock);
-
-       stats->rx_packets = 0;
-       stats->rx_bytes = 0;
-       for (i = 0; i < priv->rx_ring_num; i++) {
-               stats->rx_packets += priv->rx_ring[i].packets;
-               stats->rx_bytes += priv->rx_ring[i].bytes;
-       }
-       stats->tx_packets = 0;
-       stats->tx_bytes = 0;
-       for (i = 0; i < priv->tx_ring_num; i++) {
-               stats->tx_packets += priv->tx_ring[i].packets;
-               stats->tx_bytes += priv->tx_ring[i].bytes;
-       }
-
-       stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
-                          be32_to_cpu(mlx4_en_stats->RdropLength) +
-                          be32_to_cpu(mlx4_en_stats->RJBBR) +
-                          be32_to_cpu(mlx4_en_stats->RCRC) +
-                          be32_to_cpu(mlx4_en_stats->RRUNT);
-       stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
-       stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
-                          be64_to_cpu(mlx4_en_stats->MCAST_novlan);
-       stats->collisions = 0;
-       stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
-       stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
-       stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
-       stats->rx_frame_errors = 0;
-       stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
-       stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
-       stats->tx_aborted_errors = 0;
-       stats->tx_carrier_errors = 0;
-       stats->tx_fifo_errors = 0;
-       stats->tx_heartbeat_errors = 0;
-       stats->tx_window_errors = 0;
-
-       priv->pkstats.broadcast =
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
-                               be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
-       priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
-       priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
-       priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
-       priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
-       priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
-       priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
-       priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
-       priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
-       priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
-       priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
-       priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
-       priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
-       priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
-       priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
-       priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
-       priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
-       spin_unlock_bh(&priv->stats_lock);
-
-out:
-       mlx4_free_cmd_mailbox(mdev->dev, mailbox);
-       return err;
-}
-
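mlx4_en_DUMP_ETH_STATS() mixes two counter sources: packets and bytes come from the per-ring software counters maintained in the datapath, while everything else is read from the firmware mailbox. The 'reset' argument rides in the input modifier next to the port number; a sketch of that packing, under the same bit-layout assumption as the SET_PORT helpers:

    /* Assumed input modifier for MLX4_CMD_DUMP_ETH_STATS as used above:
     * bits 15:8 = reset flag (clear counters after the read),
     * bits 7:0  = port number. */
    static inline u64 dump_stats_in_mod(u8 port, u8 reset)
    {
            return (u64)reset << 8 | port;
    }
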
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
deleted file mode 100644 (file)
index e3d73e4..0000000
+++ /dev/null
@@ -1,594 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef _MLX4_EN_PORT_H_
-#define _MLX4_EN_PORT_H_
-
-
-#define SET_PORT_GEN_ALL_VALID 0x7
-#define SET_PORT_PROMISC_SHIFT 31
-#define SET_PORT_MC_PROMISC_SHIFT      30
-
-enum {
-       MLX4_CMD_SET_VLAN_FLTR  = 0x47,
-       MLX4_CMD_SET_MCAST_FLTR = 0x48,
-       MLX4_CMD_DUMP_ETH_STATS = 0x49,
-};
-
-enum {
-       MCAST_DIRECT_ONLY       = 0,
-       MCAST_DIRECT            = 1,
-       MCAST_DEFAULT           = 2
-};
-
-struct mlx4_set_port_general_context {
-       u8 reserved[3];
-       u8 flags;
-       u16 reserved2;
-       __be16 mtu;
-       u8 pptx;
-       u8 pfctx;
-       u16 reserved3;
-       u8 pprx;
-       u8 pfcrx;
-       u16 reserved4;
-};
-
-struct mlx4_set_port_rqp_calc_context {
-       __be32 base_qpn;
-       u8 reserved;
-       u8 n_mac;
-       u8 n_vlan;
-       u8 n_prio;
-       u8 reserved2[3];
-       u8 mac_miss;
-       u8 intra_no_vlan;
-       u8 no_vlan;
-       u8 intra_vlan_miss;
-       u8 vlan_miss;
-       u8 reserved3[3];
-       u8 no_vlan_prio;
-       __be32 promisc;
-       __be32 mcast;
-};
-
-#define VLAN_FLTR_SIZE 128
-struct mlx4_set_vlan_fltr_mbox {
-       __be32 entry[VLAN_FLTR_SIZE];
-};
-
-
-enum {
-       MLX4_MCAST_CONFIG       = 0,
-       MLX4_MCAST_DISABLE      = 1,
-       MLX4_MCAST_ENABLE       = 2,
-};
-
-struct mlx4_en_query_port_context {
-       u8 link_up;
-#define MLX4_EN_LINK_UP_MASK   0x80
-       u8 reserved;
-       __be16 mtu;
-       u8 reserved2;
-       u8 link_speed;
-#define MLX4_EN_SPEED_MASK     0x3
-#define MLX4_EN_1G_SPEED       0x2
-       u16 reserved3[5];
-       __be64 mac;
-       u8 transceiver;
-};
-
-
-struct mlx4_en_stat_out_mbox {
-       /* Received frames with a length of 64 octets */
-       __be64 R64_prio_0;
-       __be64 R64_prio_1;
-       __be64 R64_prio_2;
-       __be64 R64_prio_3;
-       __be64 R64_prio_4;
-       __be64 R64_prio_5;
-       __be64 R64_prio_6;
-       __be64 R64_prio_7;
-       __be64 R64_novlan;
-       /* Received frames with a length of 65 to 127 octets */
-       __be64 R127_prio_0;
-       __be64 R127_prio_1;
-       __be64 R127_prio_2;
-       __be64 R127_prio_3;
-       __be64 R127_prio_4;
-       __be64 R127_prio_5;
-       __be64 R127_prio_6;
-       __be64 R127_prio_7;
-       __be64 R127_novlan;
-       /* Received frames with a length of 128 to 255 octets */
-       __be64 R255_prio_0;
-       __be64 R255_prio_1;
-       __be64 R255_prio_2;
-       __be64 R255_prio_3;
-       __be64 R255_prio_4;
-       __be64 R255_prio_5;
-       __be64 R255_prio_6;
-       __be64 R255_prio_7;
-       __be64 R255_novlan;
-       /* Received frames with a length of 256 to 511 octets */
-       __be64 R511_prio_0;
-       __be64 R511_prio_1;
-       __be64 R511_prio_2;
-       __be64 R511_prio_3;
-       __be64 R511_prio_4;
-       __be64 R511_prio_5;
-       __be64 R511_prio_6;
-       __be64 R511_prio_7;
-       __be64 R511_novlan;
-       /* Received frames with a length of 512 to 1023 octets */
-       __be64 R1023_prio_0;
-       __be64 R1023_prio_1;
-       __be64 R1023_prio_2;
-       __be64 R1023_prio_3;
-       __be64 R1023_prio_4;
-       __be64 R1023_prio_5;
-       __be64 R1023_prio_6;
-       __be64 R1023_prio_7;
-       __be64 R1023_novlan;
-       /* Received frames with a length of 1024 to 1518 octets */
-       __be64 R1518_prio_0;
-       __be64 R1518_prio_1;
-       __be64 R1518_prio_2;
-       __be64 R1518_prio_3;
-       __be64 R1518_prio_4;
-       __be64 R1518_prio_5;
-       __be64 R1518_prio_6;
-       __be64 R1518_prio_7;
-       __be64 R1518_novlan;
-       /* Received frames with a length of 1519 to 1522 octets */
-       __be64 R1522_prio_0;
-       __be64 R1522_prio_1;
-       __be64 R1522_prio_2;
-       __be64 R1522_prio_3;
-       __be64 R1522_prio_4;
-       __be64 R1522_prio_5;
-       __be64 R1522_prio_6;
-       __be64 R1522_prio_7;
-       __be64 R1522_novlan;
-       /* Received frames with a length of 1523 to 1548 octets */
-       __be64 R1548_prio_0;
-       __be64 R1548_prio_1;
-       __be64 R1548_prio_2;
-       __be64 R1548_prio_3;
-       __be64 R1548_prio_4;
-       __be64 R1548_prio_5;
-       __be64 R1548_prio_6;
-       __be64 R1548_prio_7;
-       __be64 R1548_novlan;
-       /* Received frames with a length of 1548 < octets < MTU */
-       __be64 R2MTU_prio_0;
-       __be64 R2MTU_prio_1;
-       __be64 R2MTU_prio_2;
-       __be64 R2MTU_prio_3;
-       __be64 R2MTU_prio_4;
-       __be64 R2MTU_prio_5;
-       __be64 R2MTU_prio_6;
-       __be64 R2MTU_prio_7;
-       __be64 R2MTU_novlan;
-       /* Received frames with a length greater than MTU octets and a good CRC */
-       __be64 RGIANT_prio_0;
-       __be64 RGIANT_prio_1;
-       __be64 RGIANT_prio_2;
-       __be64 RGIANT_prio_3;
-       __be64 RGIANT_prio_4;
-       __be64 RGIANT_prio_5;
-       __be64 RGIANT_prio_6;
-       __be64 RGIANT_prio_7;
-       __be64 RGIANT_novlan;
-       /* Received broadcast frames with good CRC */
-       __be64 RBCAST_prio_0;
-       __be64 RBCAST_prio_1;
-       __be64 RBCAST_prio_2;
-       __be64 RBCAST_prio_3;
-       __be64 RBCAST_prio_4;
-       __be64 RBCAST_prio_5;
-       __be64 RBCAST_prio_6;
-       __be64 RBCAST_prio_7;
-       __be64 RBCAST_novlan;
-       /* Received multicast frames with good CRC */
-       __be64 MCAST_prio_0;
-       __be64 MCAST_prio_1;
-       __be64 MCAST_prio_2;
-       __be64 MCAST_prio_3;
-       __be64 MCAST_prio_4;
-       __be64 MCAST_prio_5;
-       __be64 MCAST_prio_6;
-       __be64 MCAST_prio_7;
-       __be64 MCAST_novlan;
-       /* Received unicast not short or GIANT frames with good CRC */
-       __be64 RTOTG_prio_0;
-       __be64 RTOTG_prio_1;
-       __be64 RTOTG_prio_2;
-       __be64 RTOTG_prio_3;
-       __be64 RTOTG_prio_4;
-       __be64 RTOTG_prio_5;
-       __be64 RTOTG_prio_6;
-       __be64 RTOTG_prio_7;
-       __be64 RTOTG_novlan;
-
-       /* Count of total octets of received frames, includes framing characters */
-       __be64 RTTLOCT_prio_0;
-       /* Count of total octets of received frames, not including framing
-          characters */
-       __be64 RTTLOCT_NOFRM_prio_0;
-       /* Count of total octets received
-          (only for frames without errors) */
-       __be64 ROCT_prio_0;
-
-       __be64 RTTLOCT_prio_1;
-       __be64 RTTLOCT_NOFRM_prio_1;
-       __be64 ROCT_prio_1;
-
-       __be64 RTTLOCT_prio_2;
-       __be64 RTTLOCT_NOFRM_prio_2;
-       __be64 ROCT_prio_2;
-
-       __be64 RTTLOCT_prio_3;
-       __be64 RTTLOCT_NOFRM_prio_3;
-       __be64 ROCT_prio_3;
-
-       __be64 RTTLOCT_prio_4;
-       __be64 RTTLOCT_NOFRM_prio_4;
-       __be64 ROCT_prio_4;
-
-       __be64 RTTLOCT_prio_5;
-       __be64 RTTLOCT_NOFRM_prio_5;
-       __be64 ROCT_prio_5;
-
-       __be64 RTTLOCT_prio_6;
-       __be64 RTTLOCT_NOFRM_prio_6;
-       __be64 ROCT_prio_6;
-
-       __be64 RTTLOCT_prio_7;
-       __be64 RTTLOCT_NOFRM_prio_7;
-       __be64 ROCT_prio_7;
-
-       __be64 RTTLOCT_novlan;
-       __be64 RTTLOCT_NOFRM_novlan;
-       __be64 ROCT_novlan;
-
-       /* Count of total received frames, including bad frames */
-       __be64 RTOT_prio_0;
-       /* Count of total received frames with 802.1Q encapsulation */
-       __be64 R1Q_prio_0;
-       __be64 reserved1;
-
-       __be64 RTOT_prio_1;
-       __be64 R1Q_prio_1;
-       __be64 reserved2;
-
-       __be64 RTOT_prio_2;
-       __be64 R1Q_prio_2;
-       __be64 reserved3;
-
-       __be64 RTOT_prio_3;
-       __be64 R1Q_prio_3;
-       __be64 reserved4;
-
-       __be64 RTOT_prio_4;
-       __be64 R1Q_prio_4;
-       __be64 reserved5;
-
-       __be64 RTOT_prio_5;
-       __be64 R1Q_prio_5;
-       __be64 reserved6;
-
-       __be64 RTOT_prio_6;
-       __be64 R1Q_prio_6;
-       __be64 reserved7;
-
-       __be64 RTOT_prio_7;
-       __be64 R1Q_prio_7;
-       __be64 reserved8;
-
-       __be64 RTOT_novlan;
-       __be64 R1Q_novlan;
-       __be64 reserved9;
-
-       /* Total number of successfully received control frames */
-       __be64 RCNTL;
-       __be64 reserved10;
-       __be64 reserved11;
-       __be64 reserved12;
-       /* Count of received frames with a length/type field value between 46
-          (42 for VLAN-tagged frames) and 1500 (also 1500 for VLAN-tagged
-          frames), inclusive */
-       __be64 RInRangeLengthErr;
-       /* Count of received frames with length/type field between 1501 and 1535
-          decimal, inclusive */
-       __be64 ROutRangeLengthErr;
-       /* Count of received frames that are longer than max allowed size for
-          802.3 frames (1518/1522) */
-       __be64 RFrmTooLong;
-       /* Count frames received with PCS error */
-       __be64 PCS;
-
-       /* Transmit frames with a length of 64 octets */
-       __be64 T64_prio_0;
-       __be64 T64_prio_1;
-       __be64 T64_prio_2;
-       __be64 T64_prio_3;
-       __be64 T64_prio_4;
-       __be64 T64_prio_5;
-       __be64 T64_prio_6;
-       __be64 T64_prio_7;
-       __be64 T64_novlan;
-       __be64 T64_loopbk;
-       /* Transmit frames with a length of 65 to 127 octets. */
-       __be64 T127_prio_0;
-       __be64 T127_prio_1;
-       __be64 T127_prio_2;
-       __be64 T127_prio_3;
-       __be64 T127_prio_4;
-       __be64 T127_prio_5;
-       __be64 T127_prio_6;
-       __be64 T127_prio_7;
-       __be64 T127_novlan;
-       __be64 T127_loopbk;
-       /* Transmit frames with a length of 128 to 255 octets */
-       __be64 T255_prio_0;
-       __be64 T255_prio_1;
-       __be64 T255_prio_2;
-       __be64 T255_prio_3;
-       __be64 T255_prio_4;
-       __be64 T255_prio_5;
-       __be64 T255_prio_6;
-       __be64 T255_prio_7;
-       __be64 T255_novlan;
-       __be64 T255_loopbk;
-       /* Transmit frames with a length of 256 to 511 octets */
-       __be64 T511_prio_0;
-       __be64 T511_prio_1;
-       __be64 T511_prio_2;
-       __be64 T511_prio_3;
-       __be64 T511_prio_4;
-       __be64 T511_prio_5;
-       __be64 T511_prio_6;
-       __be64 T511_prio_7;
-       __be64 T511_novlan;
-       __be64 T511_loopbk;
-       /* Transmit frames with a length of 512 to 1023 octets */
-       __be64 T1023_prio_0;
-       __be64 T1023_prio_1;
-       __be64 T1023_prio_2;
-       __be64 T1023_prio_3;
-       __be64 T1023_prio_4;
-       __be64 T1023_prio_5;
-       __be64 T1023_prio_6;
-       __be64 T1023_prio_7;
-       __be64 T1023_novlan;
-       __be64 T1023_loopbk;
-       /* Transmit frames with a length of 1024 to 1518 octets */
-       __be64 T1518_prio_0;
-       __be64 T1518_prio_1;
-       __be64 T1518_prio_2;
-       __be64 T1518_prio_3;
-       __be64 T1518_prio_4;
-       __be64 T1518_prio_5;
-       __be64 T1518_prio_6;
-       __be64 T1518_prio_7;
-       __be64 T1518_novlan;
-       __be64 T1518_loopbk;
-       /* Transmit frames with a length of 1519 to 1522 octets */
-       __be64 T1522_prio_0;
-       __be64 T1522_prio_1;
-       __be64 T1522_prio_2;
-       __be64 T1522_prio_3;
-       __be64 T1522_prio_4;
-       __be64 T1522_prio_5;
-       __be64 T1522_prio_6;
-       __be64 T1522_prio_7;
-       __be64 T1522_novlan;
-       __be64 T1522_loopbk;
-       /* Transmit frames with a length of 1523 to 1548 octets */
-       __be64 T1548_prio_0;
-       __be64 T1548_prio_1;
-       __be64 T1548_prio_2;
-       __be64 T1548_prio_3;
-       __be64 T1548_prio_4;
-       __be64 T1548_prio_5;
-       __be64 T1548_prio_6;
-       __be64 T1548_prio_7;
-       __be64 T1548_novlan;
-       __be64 T1548_loopbk;
-       /* Transmit frames with a length of 1549 to MTU octets */
-       __be64 T2MTU_prio_0;
-       __be64 T2MTU_prio_1;
-       __be64 T2MTU_prio_2;
-       __be64 T2MTU_prio_3;
-       __be64 T2MTU_prio_4;
-       __be64 T2MTU_prio_5;
-       __be64 T2MTU_prio_6;
-       __be64 T2MTU_prio_7;
-       __be64 T2MTU_novlan;
-       __be64 T2MTU_loopbk;
-       /* Transmit frames with a length greater than MTU octets and a good CRC. */
-       __be64 TGIANT_prio_0;
-       __be64 TGIANT_prio_1;
-       __be64 TGIANT_prio_2;
-       __be64 TGIANT_prio_3;
-       __be64 TGIANT_prio_4;
-       __be64 TGIANT_prio_5;
-       __be64 TGIANT_prio_6;
-       __be64 TGIANT_prio_7;
-       __be64 TGIANT_novlan;
-       __be64 TGIANT_loopbk;
-       /* Transmit broadcast frames with a good CRC */
-       __be64 TBCAST_prio_0;
-       __be64 TBCAST_prio_1;
-       __be64 TBCAST_prio_2;
-       __be64 TBCAST_prio_3;
-       __be64 TBCAST_prio_4;
-       __be64 TBCAST_prio_5;
-       __be64 TBCAST_prio_6;
-       __be64 TBCAST_prio_7;
-       __be64 TBCAST_novlan;
-       __be64 TBCAST_loopbk;
-       /* Transmit multicast frames with a good CRC */
-       __be64 TMCAST_prio_0;
-       __be64 TMCAST_prio_1;
-       __be64 TMCAST_prio_2;
-       __be64 TMCAST_prio_3;
-       __be64 TMCAST_prio_4;
-       __be64 TMCAST_prio_5;
-       __be64 TMCAST_prio_6;
-       __be64 TMCAST_prio_7;
-       __be64 TMCAST_novlan;
-       __be64 TMCAST_loopbk;
-       /* Transmit good frames that are neither broadcast nor multicast */
-       __be64 TTOTG_prio_0;
-       __be64 TTOTG_prio_1;
-       __be64 TTOTG_prio_2;
-       __be64 TTOTG_prio_3;
-       __be64 TTOTG_prio_4;
-       __be64 TTOTG_prio_5;
-       __be64 TTOTG_prio_6;
-       __be64 TTOTG_prio_7;
-       __be64 TTOTG_novlan;
-       __be64 TTOTG_loopbk;
-
-       /* total octets of transmitted frames, including framing characters */
-       __be64 TTTLOCT_prio_0;
-       /* total octets of transmitted frames, not including framing characters */
-       __be64 TTTLOCT_NOFRM_prio_0;
-       /* ifOutOctets */
-       __be64 TOCT_prio_0;
-
-       __be64 TTTLOCT_prio_1;
-       __be64 TTTLOCT_NOFRM_prio_1;
-       __be64 TOCT_prio_1;
-
-       __be64 TTTLOCT_prio_2;
-       __be64 TTTLOCT_NOFRM_prio_2;
-       __be64 TOCT_prio_2;
-
-       __be64 TTTLOCT_prio_3;
-       __be64 TTTLOCT_NOFRM_prio_3;
-       __be64 TOCT_prio_3;
-
-       __be64 TTTLOCT_prio_4;
-       __be64 TTTLOCT_NOFRM_prio_4;
-       __be64 TOCT_prio_4;
-
-       __be64 TTTLOCT_prio_5;
-       __be64 TTTLOCT_NOFRM_prio_5;
-       __be64 TOCT_prio_5;
-
-       __be64 TTTLOCT_prio_6;
-       __be64 TTTLOCT_NOFRM_prio_6;
-       __be64 TOCT_prio_6;
-
-       __be64 TTTLOCT_prio_7;
-       __be64 TTTLOCT_NOFRM_prio_7;
-       __be64 TOCT_prio_7;
-
-       __be64 TTTLOCT_novlan;
-       __be64 TTTLOCT_NOFRM_novlan;
-       __be64 TOCT_novlan;
-
-       __be64 TTTLOCT_loopbk;
-       __be64 TTTLOCT_NOFRM_loopbk;
-       __be64 TOCT_loopbk;
-
-       /* Total frames transmitted with a good CRC that are not aborted  */
-       __be64 TTOT_prio_0;
-       /* Total number of frames transmitted with 802.1Q encapsulation */
-       __be64 T1Q_prio_0;
-       __be64 reserved13;
-
-       __be64 TTOT_prio_1;
-       __be64 T1Q_prio_1;
-       __be64 reserved14;
-
-       __be64 TTOT_prio_2;
-       __be64 T1Q_prio_2;
-       __be64 reserved15;
-
-       __be64 TTOT_prio_3;
-       __be64 T1Q_prio_3;
-       __be64 reserved16;
-
-       __be64 TTOT_prio_4;
-       __be64 T1Q_prio_4;
-       __be64 reserved17;
-
-       __be64 TTOT_prio_5;
-       __be64 T1Q_prio_5;
-       __be64 reserved18;
-
-       __be64 TTOT_prio_6;
-       __be64 T1Q_prio_6;
-       __be64 reserved19;
-
-       __be64 TTOT_prio_7;
-       __be64 T1Q_prio_7;
-       __be64 reserved20;
-
-       __be64 TTOT_novlan;
-       __be64 T1Q_novlan;
-       __be64 reserved21;
-
-       __be64 TTOT_loopbk;
-       __be64 T1Q_loopbk;
-       __be64 reserved22;
-
-       /* Received frames with a length greater than MTU octets and a bad CRC */
-       __be32 RJBBR;
-       /* Received frames with a bad CRC that are not runts, jabbers,
-          or alignment errors */
-       __be32 RCRC;
-       /* Received frames with an SFD, a length of less than 64 octets,
-          and a bad CRC */
-       __be32 RRUNT;
-       /* Received frames with a length less than 64 octets and a good CRC */
-       __be32 RSHORT;
-       /* Total Number of Received Packets Dropped */
-       __be32 RDROP;
-       /* Drop due to overflow  */
-       __be32 RdropOvflw;
-       /* Drop due to length error */
-       __be32 RdropLength;
-       /* Total of good frames. Does not include frames received with
-          frame-too-long, FCS, or length errors */
-       __be32 RTOTFRMS;
-       /* Total dropped transmitted packets */
-       __be32 TDROP;
-};
-
-
-#endif
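
A note on the layout above: each counter family (R64, MCAST, TTOT, ...) occupies consecutive __be64 words ordered _prio_0 through _prio_7, then _novlan (and _loopbk on the transmit side), which is why callers such as mlx4_en_DUMP_ETH_STATS() can sum a family term by term. A hypothetical helper that exploits the contiguous layout, shown for illustration only:

    /* Sum one counter family by walking from its first member; valid
     * only because the mailbox format keeps the family contiguous. */
    static u64 sum_counter_family(const __be64 *first, int n)
    {
            u64 total = 0;
            int i;

            for (i = 0; i < n; i++)
                    total += be64_to_cpu(first[i]);
            return total;
    }

    /* e.g. the multicast total: sum_counter_family(&mbox->MCAST_prio_0, 9) */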
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c
deleted file mode 100644 (file)
index 0dfb4ec..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mlx4/qp.h>
-
-#include "mlx4_en.h"
-
-void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
-                            int is_tx, int rss, int qpn, int cqn,
-                            struct mlx4_qp_context *context)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-
-       memset(context, 0, sizeof *context);
-       context->flags = cpu_to_be32(7 << 16 | rss << 13);
-       context->pd = cpu_to_be32(mdev->priv_pdn);
-       context->mtu_msgmax = 0xff;
-       if (!is_tx && !rss)
-               context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
-       if (is_tx)
-               context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
-       else
-               context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
-       context->usr_page = cpu_to_be32(mdev->priv_uar.index);
-       context->local_qpn = cpu_to_be32(qpn);
-       context->pri_path.ackto = 1 & 0x07;
-       context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
-       context->pri_path.counter_index = 0xff;
-       context->cqn_send = cpu_to_be32(cqn);
-       context->cqn_recv = cpu_to_be32(cqn);
-       context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
-}
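
The size/stride bytes written above pack log2 of the ring size into bits 7:3 and log2(stride) - 4 into bits 2:0, so a 1024-entry ring of 64-byte strides encodes as (10 << 3) | 2 = 0x52. A sketch of the encoding, assuming those bit positions:

    /* Assumed encoding of the QP context rq/sq_size_stride byte:
     * bits 7:3 = log2(ring size), bits 2:0 = log2(stride) - 4. */
    static inline u8 qp_size_stride(int size, int stride)
    {
            return (ilog2(size) << 3) | (ilog2(stride) - 4);
    }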
-
-
-int mlx4_en_map_buffer(struct mlx4_buf *buf)
-{
-       struct page **pages;
-       int i;
-
-       if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-               return 0;
-
-       pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
-       if (!pages)
-               return -ENOMEM;
-
-       for (i = 0; i < buf->nbufs; ++i)
-               pages[i] = virt_to_page(buf->page_list[i].buf);
-
-       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
-       kfree(pages);
-       if (!buf->direct.buf)
-               return -ENOMEM;
-
-       return 0;
-}
-
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
-{
-       if (BITS_PER_LONG == 64 || buf->nbufs == 1)
-               return;
-
-       vunmap(buf->direct.buf);
-}
-
-void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
-{
-    return;
-}
-
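mlx4_en_map_buffer() and mlx4_en_unmap_buffer() above only do real work on 32-bit kernels with multi-chunk buffers: there, the ring chunks are stitched into one virtually contiguous range with vmap(); on 64-bit kernels, or when the buffer is a single chunk, the direct mapping already suffices and both calls are no-ops. The shared guard, restated as a hypothetical predicate:

    /* When is a vmap() needed? Only for multi-chunk buffers on 32-bit
     * kernels, mirroring the early-return checks above. */
    static inline bool mlx4_en_needs_vmap(const struct mlx4_buf *buf)
    {
            return BITS_PER_LONG != 64 && buf->nbufs > 1;
    }
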
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
deleted file mode 100644 (file)
index 37cc9e5..0000000
+++ /dev/null
@@ -1,918 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx4/cq.h>
-#include <linux/slab.h>
-#include <linux/mlx4/qp.h>
-#include <linux/skbuff.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/vmalloc.h>
-
-#include "mlx4_en.h"
-
-
-static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
-                             struct mlx4_en_rx_desc *rx_desc,
-                             struct skb_frag_struct *skb_frags,
-                             struct mlx4_en_rx_alloc *ring_alloc,
-                             int i)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
-       struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
-       struct page *page;
-       dma_addr_t dma;
-
-       if (page_alloc->offset == frag_info->last_offset) {
-               /* Allocate new page */
-               page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
-               if (!page)
-                       return -ENOMEM;
-
-               skb_frags[i].page = page_alloc->page;
-               skb_frags[i].page_offset = page_alloc->offset;
-               page_alloc->page = page;
-               page_alloc->offset = frag_info->frag_align;
-       } else {
-               page = page_alloc->page;
-               get_page(page);
-
-               skb_frags[i].page = page;
-               skb_frags[i].page_offset = page_alloc->offset;
-               page_alloc->offset += frag_info->frag_stride;
-       }
-       dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
-                            skb_frags[i].page_offset, frag_info->frag_size,
-                            PCI_DMA_FROMDEVICE);
-       rx_desc->data[i].addr = cpu_to_be64(dma);
-       return 0;
-}
-
-static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
-                                 struct mlx4_en_rx_ring *ring)
-{
-       struct mlx4_en_rx_alloc *page_alloc;
-       int i;
-
-       for (i = 0; i < priv->num_frags; i++) {
-               page_alloc = &ring->page_alloc[i];
-               page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
-                                              MLX4_EN_ALLOC_ORDER);
-               if (!page_alloc->page)
-                       goto out;
-
-               page_alloc->offset = priv->frag_info[i].frag_align;
-               en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
-                      i, page_alloc->page);
-       }
-       return 0;
-
-out:
-       while (i--) {
-               page_alloc = &ring->page_alloc[i];
-               put_page(page_alloc->page);
-               page_alloc->page = NULL;
-       }
-       return -ENOMEM;
-}
-
-static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
-                                     struct mlx4_en_rx_ring *ring)
-{
-       struct mlx4_en_rx_alloc *page_alloc;
-       int i;
-
-       for (i = 0; i < priv->num_frags; i++) {
-               page_alloc = &ring->page_alloc[i];
-               en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
-                      i, page_count(page_alloc->page));
-
-               put_page(page_alloc->page);
-               page_alloc->page = NULL;
-       }
-}
-
-
-static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
-                                struct mlx4_en_rx_ring *ring, int index)
-{
-       struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
-       struct skb_frag_struct *skb_frags = ring->rx_info +
-                                           (index << priv->log_rx_info);
-       int possible_frags;
-       int i;
-
-       /* Set size and memtype fields */
-       for (i = 0; i < priv->num_frags; i++) {
-               skb_frags[i].size = priv->frag_info[i].frag_size;
-               rx_desc->data[i].byte_count =
-                       cpu_to_be32(priv->frag_info[i].frag_size);
-               rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
-       }
-
-       /* If the number of used fragments does not fill up the ring stride,
-        * remaining (unused) fragments must be padded with null address/size
-        * and a special memory key */
-       possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
-       for (i = priv->num_frags; i < possible_frags; i++) {
-               rx_desc->data[i].byte_count = 0;
-               rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
-               rx_desc->data[i].addr = 0;
-       }
-}
-
-
-static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
-                                  struct mlx4_en_rx_ring *ring, int index)
-{
-       struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
-       struct skb_frag_struct *skb_frags = ring->rx_info +
-                                           (index << priv->log_rx_info);
-       int i;
-
-       for (i = 0; i < priv->num_frags; i++)
-               if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
-                       goto err;
-
-       return 0;
-
-err:
-       while (i--)
-               put_page(skb_frags[i].page);
-       return -ENOMEM;
-}
-
-static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
-{
-       *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
-}
-
-static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
-                                struct mlx4_en_rx_ring *ring,
-                                int index)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct skb_frag_struct *skb_frags;
-       struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
-       dma_addr_t dma;
-       int nr;
-
-       skb_frags = ring->rx_info + (index << priv->log_rx_info);
-       for (nr = 0; nr < priv->num_frags; nr++) {
-               en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
-               dma = be64_to_cpu(rx_desc->data[nr].addr);
-
-               en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
-               pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
-                                PCI_DMA_FROMDEVICE);
-               put_page(skb_frags[nr].page);
-       }
-}
-
-static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
-{
-       struct mlx4_en_rx_ring *ring;
-       int ring_ind;
-       int buf_ind;
-       int new_size;
-
-       for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
-               for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-                       ring = &priv->rx_ring[ring_ind];
-
-                       if (mlx4_en_prepare_rx_desc(priv, ring,
-                                                   ring->actual_size)) {
-                               if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
-                                       en_err(priv, "Failed to allocate "
-                                                    "enough rx buffers\n");
-                                       return -ENOMEM;
-                               } else {
-                                       new_size = rounddown_pow_of_two(ring->actual_size);
-                                       en_warn(priv, "Only %d buffers allocated, "
-                                                     "reducing ring size to %d\n",
-                                               ring->actual_size, new_size);
-                                       goto reduce_rings;
-                               }
-                       }
-                       ring->actual_size++;
-                       ring->prod++;
-               }
-       }
-       return 0;
-
-reduce_rings:
-       for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-               ring = &priv->rx_ring[ring_ind];
-               while (ring->actual_size > new_size) {
-                       ring->actual_size--;
-                       ring->prod--;
-                       mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
-               }
-       }
-
-       return 0;
-}
-
-static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
-                               struct mlx4_en_rx_ring *ring)
-{
-       int index;
-
-       en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
-              ring->cons, ring->prod);
-
-       /* Unmap and free Rx buffers */
-       BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
-       while (ring->cons != ring->prod) {
-               index = ring->cons & ring->size_mask;
-               en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
-               mlx4_en_free_rx_desc(priv, ring, index);
-               ++ring->cons;
-       }
-}
-
-int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-                          struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int err;
-       int tmp;
-
-
-       ring->prod = 0;
-       ring->cons = 0;
-       ring->size = size;
-       ring->size_mask = size - 1;
-       ring->stride = stride;
-       ring->log_stride = ffs(ring->stride) - 1;
-       ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
-
-       tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
-                                       sizeof(struct skb_frag_struct));
-       ring->rx_info = vmalloc(tmp);
-       if (!ring->rx_info) {
-               en_err(priv, "Failed allocating rx_info ring\n");
-               return -ENOMEM;
-       }
-       en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
-                ring->rx_info, tmp);
-
-       err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
-                                ring->buf_size, 2 * PAGE_SIZE);
-       if (err)
-               goto err_ring;
-
-       err = mlx4_en_map_buffer(&ring->wqres.buf);
-       if (err) {
-               en_err(priv, "Failed to map RX buffer\n");
-               goto err_hwq;
-       }
-       ring->buf = ring->wqres.buf.direct.buf;
-
-       return 0;
-
-err_hwq:
-       mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-err_ring:
-       vfree(ring->rx_info);
-       ring->rx_info = NULL;
-       return err;
-}
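
Two sizing details above are worth calling out. ring->buf_size reserves an extra TXBB_SIZE of headroom because mlx4_en_activate_rx_rings() advances ring->buf by TXBB_SIZE when the stride fits in one basic block, and mlx4_en_destroy_rx_ring() frees buf_size + TXBB_SIZE to match. And rx_info allocates one power-of-two slot of fragment metadata per descriptor so slots can be addressed by shifting with log_rx_info; restated as a sketch:

    /* Hypothetical restatement of the rx_info sizing above: one
     * power-of-two slot of skb fragment metadata per descriptor. */
    static size_t rx_info_bytes(u32 size)
    {
            return size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
                                             sizeof(struct skb_frag_struct));
    }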
-
-int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
-{
-       struct mlx4_en_rx_ring *ring;
-       int i;
-       int ring_ind;
-       int err;
-       int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
-                                       DS_SIZE * priv->num_frags);
-
-       for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-               ring = &priv->rx_ring[ring_ind];
-
-               ring->prod = 0;
-               ring->cons = 0;
-               ring->actual_size = 0;
-               ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
-
-               ring->stride = stride;
-               if (ring->stride <= TXBB_SIZE)
-                       ring->buf += TXBB_SIZE;
-
-               ring->log_stride = ffs(ring->stride) - 1;
-               ring->buf_size = ring->size * ring->stride;
-
-               memset(ring->buf, 0, ring->buf_size);
-               mlx4_en_update_rx_prod_db(ring);
-
-               /* Initialize all descriptors */
-               for (i = 0; i < ring->size; i++)
-                       mlx4_en_init_rx_desc(priv, ring, i);
-
-               /* Initialize page allocators */
-               err = mlx4_en_init_allocator(priv, ring);
-               if (err) {
-                       en_err(priv, "Failed initializing ring allocator\n");
-                       if (ring->stride <= TXBB_SIZE)
-                               ring->buf -= TXBB_SIZE;
-                       ring_ind--;
-                       goto err_allocator;
-               }
-       }
-       err = mlx4_en_fill_rx_buffers(priv);
-       if (err)
-               goto err_buffers;
-
-       for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-               ring = &priv->rx_ring[ring_ind];
-
-               ring->size_mask = ring->actual_size - 1;
-               mlx4_en_update_rx_prod_db(ring);
-       }
-
-       return 0;
-
-err_buffers:
-       for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
-               mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
-
-       ring_ind = priv->rx_ring_num - 1;
-err_allocator:
-       while (ring_ind >= 0) {
-               if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
-                       priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
-               mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
-               ring_ind--;
-       }
-       return err;
-}
-
-void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
-                            struct mlx4_en_rx_ring *ring)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-
-       mlx4_en_unmap_buffer(&ring->wqres.buf);
-       mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
-       vfree(ring->rx_info);
-       ring->rx_info = NULL;
-}
-
-void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
-                               struct mlx4_en_rx_ring *ring)
-{
-       mlx4_en_free_rx_buf(priv, ring);
-       if (ring->stride <= TXBB_SIZE)
-               ring->buf -= TXBB_SIZE;
-       mlx4_en_destroy_allocator(priv, ring);
-}
-
-
-/* Unmap a completed descriptor and free unused pages */
-static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-                                   struct mlx4_en_rx_desc *rx_desc,
-                                   struct skb_frag_struct *skb_frags,
-                                   struct skb_frag_struct *skb_frags_rx,
-                                   struct mlx4_en_rx_alloc *page_alloc,
-                                   int length)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct mlx4_en_frag_info *frag_info;
-       int nr;
-       dma_addr_t dma;
-
-       /* Collect used fragments while replacing them in the HW descriptors */
-       for (nr = 0; nr < priv->num_frags; nr++) {
-               frag_info = &priv->frag_info[nr];
-               if (length <= frag_info->frag_prefix_size)
-                       break;
-
-               /* Save page reference in skb */
-               skb_frags_rx[nr].page = skb_frags[nr].page;
-               skb_frags_rx[nr].size = skb_frags[nr].size;
-               skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
-               dma = be64_to_cpu(rx_desc->data[nr].addr);
-
-               /* Allocate a replacement page */
-               if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
-                       goto fail;
-
-               /* Unmap buffer */
-               pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size,
-                                PCI_DMA_FROMDEVICE);
-       }
-       /* Adjust size of last fragment to match actual length */
-       if (nr > 0)
-               skb_frags_rx[nr - 1].size = length -
-                       priv->frag_info[nr - 1].frag_prefix_size;
-       return nr;
-
-fail:
-       /* Drop all accumulated fragments (which have already been replaced in
-        * the descriptor) of this packet; remaining fragments are reused... */
-       while (nr > 0) {
-               nr--;
-               put_page(skb_frags_rx[nr].page);
-       }
-       return 0;
-}
-
-
-static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-                                     struct mlx4_en_rx_desc *rx_desc,
-                                     struct skb_frag_struct *skb_frags,
-                                     struct mlx4_en_rx_alloc *page_alloc,
-                                     unsigned int length)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct sk_buff *skb;
-       void *va;
-       int used_frags;
-       dma_addr_t dma;
-
-       skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
-       if (!skb) {
-               en_dbg(RX_ERR, priv, "Failed allocating skb\n");
-               return NULL;
-       }
-       skb->dev = priv->dev;
-       skb_reserve(skb, NET_IP_ALIGN);
-       skb->len = length;
-       skb->truesize = length + sizeof(struct sk_buff);
-
-       /* Get pointer to first fragment so we could copy the headers into the
-        * (linear part of the) skb */
-       va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-
-       if (length <= SMALL_PACKET_SIZE) {
-               /* We are copying all relevant data to the skb - temporarily
-                * sync buffers for the copy */
-               dma = be64_to_cpu(rx_desc->data[0].addr);
-               dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length,
-                                       DMA_FROM_DEVICE);
-               skb_copy_to_linear_data(skb, va, length);
-               dma_sync_single_for_device(&mdev->pdev->dev, dma, length,
-                                          DMA_FROM_DEVICE);
-               skb->tail += length;
-       } else {
-
-               /* Move relevant fragments to skb */
-               used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-                                                     skb_shinfo(skb)->frags,
-                                                     page_alloc, length);
-               if (unlikely(!used_frags)) {
-                       kfree_skb(skb);
-                       return NULL;
-               }
-               skb_shinfo(skb)->nr_frags = used_frags;
-
-               /* Copy headers into the skb linear buffer */
-               memcpy(skb->data, va, HEADER_COPY_SIZE);
-               skb->tail += HEADER_COPY_SIZE;
-
-               /* Skip headers in first fragment */
-               skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
-
-               /* Adjust size of first fragment */
-               skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
-               skb->data_len = length - HEADER_COPY_SIZE;
-       }
-       return skb;
-}
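
mlx4_en_rx_skb() takes one of two paths around SMALL_PACKET_SIZE: short frames are copied wholesale into the skb's linear area, which is cheaper than attaching page fragments that must later be freed, while longer frames copy only HEADER_COPY_SIZE bytes of headers and attach the data pages as fragments, trimming the first fragment accordingly. The resulting split of linear versus paged data, as an illustrative sketch:

    /* For the fragment path above, assuming length > HEADER_COPY_SIZE:
     * header bytes land in skb->data, the rest stays in page fragments. */
    static void rx_skb_split(unsigned int length,
                             unsigned int *linear, unsigned int *paged)
    {
            *linear = HEADER_COPY_SIZE;
            *paged  = length - HEADER_COPY_SIZE;
    }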
-
-static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
-{
-       int i;
-       int offset = ETH_HLEN;
-
-       for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
-               if (*(skb->data + offset) != (unsigned char) (i & 0xff))
-                       goto out_loopback;
-       }
-       /* Loopback found */
-       priv->loopback_ok = 1;
-
-out_loopback:
-       dev_kfree_skb_any(skb);
-}
-
-int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_cqe *cqe;
-       struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
-       struct skb_frag_struct *skb_frags;
-       struct mlx4_en_rx_desc *rx_desc;
-       struct sk_buff *skb;
-       int index;
-       int nr;
-       unsigned int length;
-       int polled = 0;
-       int ip_summed;
-
-       if (!priv->port_up)
-               return 0;
-
-       /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
-        * descriptor offset can be deduced from the CQE index instead of
-        * reading 'cqe->index' */
-       index = cq->mcq.cons_index & ring->size_mask;
-       cqe = &cq->buf[index];
-
-       /* Process all completed CQEs */
-       while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
-                   cq->mcq.cons_index & cq->size)) {
-
-               skb_frags = ring->rx_info + (index << priv->log_rx_info);
-               rx_desc = ring->buf + (index << ring->log_stride);
-
-               /*
-                * make sure we read the CQE after we read the ownership bit
-                */
-               rmb();
-
-               /* Drop packet on bad receive or bad checksum */
-               if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
-                                               MLX4_CQE_OPCODE_ERROR)) {
-                       en_err(priv, "CQE completed in error - vendor "
-                                 "syndrome:%d syndrome:%d\n",
-                                 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
-                                 ((struct mlx4_err_cqe *) cqe)->syndrome);
-                       goto next;
-               }
-               if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
-                       en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
-                       goto next;
-               }
-
-               /*
-                * Packet is OK - process it.
-                */
-               length = be32_to_cpu(cqe->byte_cnt);
-               ring->bytes += length;
-               ring->packets++;
-
-               if (likely(dev->features & NETIF_F_RXCSUM)) {
-                       if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
-                           (cqe->checksum == cpu_to_be16(0xffff))) {
-                               priv->port_stats.rx_chksum_good++;
-                               /* This packet is eligible for LRO if it is:
-                                * - DIX Ethernet (type interpretation)
-                                * - TCP/IP (v4)
-                                * - without IP options
-                                * - not an IP fragment */
-                               if (dev->features & NETIF_F_GRO) {
-                                       struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
-                                       if (!gro_skb)
-                                               goto next;
-
-                                       nr = mlx4_en_complete_rx_desc(
-                                               priv, rx_desc,
-                                               skb_frags, skb_shinfo(gro_skb)->frags,
-                                               ring->page_alloc, length);
-                                       if (!nr)
-                                               goto next;
-
-                                       skb_shinfo(gro_skb)->nr_frags = nr;
-                                       gro_skb->len = length;
-                                       gro_skb->data_len = length;
-                                       gro_skb->truesize += length;
-                                       gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-                                       if (cqe->vlan_my_qpn &
-                                           cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) {
-                                               u16 vid = be16_to_cpu(cqe->sl_vid);
-
-                                               __vlan_hwaccel_put_tag(gro_skb, vid);
-                                       }
-
-                                       napi_gro_frags(&cq->napi);
-
-                                       goto next;
-                               }
-
-                               /* LRO not possible, complete processing here */
-                               ip_summed = CHECKSUM_UNNECESSARY;
-                       } else {
-                               ip_summed = CHECKSUM_NONE;
-                               priv->port_stats.rx_chksum_none++;
-                       }
-               } else {
-                       ip_summed = CHECKSUM_NONE;
-                       priv->port_stats.rx_chksum_none++;
-               }
-
-               skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
-                                    ring->page_alloc, length);
-               if (!skb) {
-                       priv->stats.rx_dropped++;
-                       goto next;
-               }
-
-               if (unlikely(priv->validate_loopback)) {
-                       validate_loopback(priv, skb);
-                       goto next;
-               }
-
-               skb->ip_summed = ip_summed;
-               skb->protocol = eth_type_trans(skb, dev);
-               skb_record_rx_queue(skb, cq->ring);
-
-               if (be32_to_cpu(cqe->vlan_my_qpn) &
-                   MLX4_CQE_VLAN_PRESENT_MASK)
-                       __vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));
-
-               /* Push it up the stack */
-               netif_receive_skb(skb);
-
-next:
-               ++cq->mcq.cons_index;
-               index = (cq->mcq.cons_index) & ring->size_mask;
-               cqe = &cq->buf[index];
-               if (++polled == budget) {
-                       /* We are here because we reached the NAPI budget -
-                        * flush only pending LRO sessions */
-                       goto out;
-               }
-       }
-
-out:
-       AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-       mlx4_cq_set_ci(&cq->mcq);
-       wmb(); /* ensure HW sees CQ consumer before we post new buffers */
-       ring->cons = cq->mcq.cons_index;
-       ring->prod += polled; /* Polled descriptors were reallocated in place */
-       mlx4_en_update_rx_prod_db(ring);
-       return polled;
-}
-
-
-void mlx4_en_rx_irq(struct mlx4_cq *mcq)
-{
-       struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
-       struct mlx4_en_priv *priv = netdev_priv(cq->dev);
-
-       if (priv->port_up)
-               napi_schedule(&cq->napi);
-       else
-               mlx4_en_arm_cq(priv, cq);
-}
-
-/* Rx CQ polling - called by NAPI */
-int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
-{
-       struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
-       struct net_device *dev = cq->dev;
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       int done;
-
-       done = mlx4_en_process_rx_cq(dev, cq, budget);
-
-       /* If we used up all the quota - we're probably not done yet... */
-       if (done == budget)
-               INC_PERF_COUNTER(priv->pstats.napi_quota);
-       else {
-               /* Done for now */
-               napi_complete(napi);
-               mlx4_en_arm_cq(priv, cq);
-       }
-       return done;
-}
-
-
-/* Calculate the last offset position that accommodates a full fragment
- * (assuming fragment size = stride-align) */
-static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
-{
-       u16 res = MLX4_EN_ALLOC_SIZE % stride;
-       u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
-
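-       /* 'res' is the unusable tail of the allocation: stepping one full
-        * stride back from the end of the usable area, plus the alignment
-        * headroom, yields the last offset that still fits a fragment. */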
-       en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
-                           "res:%d offset:%d\n", stride, align, res, offset);
-       return offset;
-}
-
-
-static int frag_sizes[] = {
-       FRAG_SZ0,
-       FRAG_SZ1,
-       FRAG_SZ2,
-       FRAG_SZ3
-};
-
-void mlx4_en_calc_rx_buf(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
-       int buf_size = 0;
-       int i = 0;
-
-       while (buf_size < eff_mtu) {
-               priv->frag_info[i].frag_size =
-                       (eff_mtu > buf_size + frag_sizes[i]) ?
-                               frag_sizes[i] : eff_mtu - buf_size;
-               priv->frag_info[i].frag_prefix_size = buf_size;
-               if (!i) {
-                       priv->frag_info[i].frag_align = NET_IP_ALIGN;
-                       priv->frag_info[i].frag_stride =
-                               ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
-               } else {
-                       priv->frag_info[i].frag_align = 0;
-                       priv->frag_info[i].frag_stride =
-                               ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
-               }
-               priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
-                                               priv, priv->frag_info[i].frag_stride,
-                                               priv->frag_info[i].frag_align);
-               buf_size += priv->frag_info[i].frag_size;
-               i++;
-       }
-
-       priv->num_frags = i;
-       priv->rx_skb_size = eff_mtu;
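-       /* log_rx_info lets rx_info be indexed with a shift instead of a
-        * multiply; round the per-descriptor info size up to a power of two. */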
-       priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
-
-       en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
-                 "num_frags:%d):\n", eff_mtu, priv->num_frags);
-       for (i = 0; i < priv->num_frags; i++) {
-               en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
-                               "stride:%d last_offset:%d\n", i,
-                               priv->frag_info[i].frag_size,
-                               priv->frag_info[i].frag_prefix_size,
-                               priv->frag_info[i].frag_align,
-                               priv->frag_info[i].frag_stride,
-                               priv->frag_info[i].last_offset);
-       }
-}
-
-/* RSS related functions */
-
-static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
-                                struct mlx4_en_rx_ring *ring,
-                                enum mlx4_qp_state *state,
-                                struct mlx4_qp *qp)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct mlx4_qp_context *context;
-       int err = 0;
-
-       context = kmalloc(sizeof *context, GFP_KERNEL);
-       if (!context) {
-               en_err(priv, "Failed to allocate qp context\n");
-               return -ENOMEM;
-       }
-
-       err = mlx4_qp_alloc(mdev->dev, qpn, qp);
-       if (err) {
-               en_err(priv, "Failed to allocate qp #%x\n", qpn);
-               goto out;
-       }
-       qp->event = mlx4_en_sqp_event;
-
-       memset(context, 0, sizeof *context);
-       mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
-                               qpn, ring->cqn, context);
-       context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
-
-       err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
-       if (err) {
-               mlx4_qp_remove(mdev->dev, qp);
-               mlx4_qp_free(mdev->dev, qp);
-       }
-       mlx4_en_update_rx_prod_db(ring);
-out:
-       kfree(context);
-       return err;
-}
-
-/* Allocate rx qp's and configure them according to rss map */
-int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct mlx4_en_rss_map *rss_map = &priv->rss_map;
-       struct mlx4_qp_context context;
-       struct mlx4_en_rss_context *rss_context;
-       void *ptr;
-       u8 rss_mask = 0x3f;
-       int i, qpn;
-       int err = 0;
-       int good_qps = 0;
-
-       en_dbg(DRV, priv, "Configuring rss steering\n");
-       err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
-                                   priv->rx_ring_num,
-                                   &rss_map->base_qpn);
-       if (err) {
-               en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
-               return err;
-       }
-
-       for (i = 0; i < priv->rx_ring_num; i++) {
-               qpn = rss_map->base_qpn + i;
-               err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
-                                           &rss_map->state[i],
-                                           &rss_map->qps[i]);
-               if (err)
-                       goto rss_err;
-
-               ++good_qps;
-       }
-
-       /* Configure RSS indirection qp */
-       err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
-       if (err) {
-               en_err(priv, "Failed to allocate RSS indirection QP\n");
-               goto rss_err;
-       }
-       rss_map->indir_qp.event = mlx4_en_sqp_event;
-       mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
-                               priv->rx_ring[0].cqn, &context);
-
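-       /* The RSS context sits at a fixed offset (0x3c) inside the QP
-        * context of the indirection QP. */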
-       ptr = ((void *) &context) + 0x3c;
-       rss_context = ptr;
-       rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
-                                           (rss_map->base_qpn));
-       rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
-       rss_context->flags = rss_mask;
-
-       if (priv->mdev->profile.udp_rss)
-               rss_context->base_qpn_udp = rss_context->default_qpn;
-       err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
-                              &rss_map->indir_qp, &rss_map->indir_state);
-       if (err)
-               goto indir_err;
-
-       return 0;
-
-indir_err:
-       mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
-                      MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
-       mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
-       mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
-rss_err:
-       for (i = 0; i < good_qps; i++) {
-               mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
-                              MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
-               mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
-               mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
-       }
-       mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
-       return err;
-}
-
-void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct mlx4_en_rss_map *rss_map = &priv->rss_map;
-       int i;
-
-       mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
-                      MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
-       mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
-       mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
-
-       for (i = 0; i < priv->rx_ring_num; i++) {
-               mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
-                              MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
-               mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
-               mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
-       }
-       mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
-}
-
-
-
-
-
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c
deleted file mode 100644 (file)
index 9fdbcec..0000000
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/ethtool.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/mlx4/driver.h>
-
-#include "mlx4_en.h"
-
-
-static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
-{
-       return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
-                       MLX4_CMD_TIME_CLASS_A);
-}
-
-static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
-{
-       struct sk_buff *skb;
-       struct ethhdr *ethh;
-       unsigned char *packet;
-       unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
-       unsigned int i;
-       int err;
-
-
-       /* build the pkt before xmit */
-       skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
-       if (!skb) {
-               en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
-               return -ENOMEM;
-       }
-       skb_reserve(skb, NET_IP_ALIGN);
-
-       ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
-       packet  = (unsigned char *)skb_put(skb, packet_size);
-       memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
-       memset(ethh->h_source, 0, ETH_ALEN);
-       ethh->h_proto = htons(ETH_P_ARP);
-       skb_set_mac_header(skb, 0);
-       for (i = 0; i < packet_size; ++i)       /* fill our packet */
-               packet[i] = (unsigned char)(i & 0xff);
-
-       /* xmit the pkt */
-       err = mlx4_en_xmit(skb, priv->dev);
-       return err;
-}
-
-static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
-{
-       u32 loopback_ok = 0;
-       int i;
-
-
-       priv->loopback_ok = 0;
-       priv->validate_loopback = 1;
-
-       /* xmit */
-       if (mlx4_en_test_loopback_xmit(priv)) {
-               en_err(priv, "Transmitting loopback packet failed\n");
-               goto mlx4_en_test_loopback_exit;
-       }
-
-       /* polling for result */
-       for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
-               msleep(MLX4_EN_LOOPBACK_TIMEOUT);
-               if (priv->loopback_ok) {
-                       loopback_ok = 1;
-                       break;
-               }
-       }
-       if (!loopback_ok)
-               en_err(priv, "Loopback packet didn't arrive\n");
-
-mlx4_en_test_loopback_exit:
-
-       priv->validate_loopback = 0;
-       return !loopback_ok;
-}
-
-
-static int mlx4_en_test_link(struct mlx4_en_priv *priv)
-{
-       if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
-               return -ENOMEM;
-       if (priv->port_state.link_state == 1)
-               return 0;
-       else
-               return 1;
-}
-
-static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
-{
-
-       if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
-               return -ENOMEM;
-
-       /* The device currently only supports 10G speed */
-       if (priv->port_state.link_speed != SPEED_10000)
-               return priv->port_state.link_speed;
-       return 0;
-}
-
-
-void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct mlx4_en_tx_ring *tx_ring;
-       int i, carrier_ok;
-
-       memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
-
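-       /* Result layout: buf[0] interrupts, buf[1] link, buf[2] speed;
-        * buf[3] registers and buf[4] loopback run only in offline mode,
-        * and only when the device supports unicast loopback. */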
-       if (*flags & ETH_TEST_FL_OFFLINE) {
-               /* disable the interface */
-               carrier_ok = netif_carrier_ok(dev);
-
-               netif_carrier_off(dev);
-retry_tx:
-               /* Wait until all tx queues are empty.
-                * There should not be any additional incoming traffic
-                * since we turned the carrier off */
-               msleep(200);
-               for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
-                       tx_ring = &priv->tx_ring[i];
-                       if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
-                               goto retry_tx;
-               }
-
-               if (priv->mdev->dev->caps.flags &
-                                       MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
-                       buf[3] = mlx4_en_test_registers(priv);
-                       buf[4] = mlx4_en_test_loopback(priv);
-               }
-
-               if (carrier_ok)
-                       netif_carrier_on(dev);
-
-       }
-       buf[0] = mlx4_test_interrupts(mdev->dev);
-       buf[1] = mlx4_en_test_link(priv);
-       buf[2] = mlx4_en_test_speed(priv);
-
-       for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
-               if (buf[i])
-                       *flags |= ETH_TEST_FL_FAILED;
-       }
-}
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
deleted file mode 100644 (file)
index 6e03de0..0000000
+++ /dev/null
@@ -1,828 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <asm/page.h>
-#include <linux/mlx4/cq.h>
-#include <linux/slab.h>
-#include <linux/mlx4/qp.h>
-#include <linux/skbuff.h>
-#include <linux/if_vlan.h>
-#include <linux/vmalloc.h>
-#include <linux/tcp.h>
-
-#include "mlx4_en.h"
-
-enum {
-       MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
-       MAX_BF = 256,
-};
-
-static int inline_thold __read_mostly = MAX_INLINE;
-
-module_param_named(inline_thold, inline_thold, int, 0444);
-MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
-
-int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
-                          struct mlx4_en_tx_ring *ring, int qpn, u32 size,
-                          u16 stride)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int tmp;
-       int err;
-
-       ring->size = size;
-       ring->size_mask = size - 1;
-       ring->stride = stride;
-
-       inline_thold = min(inline_thold, MAX_INLINE);
-
-       spin_lock_init(&ring->comp_lock);
-
-       tmp = size * sizeof(struct mlx4_en_tx_info);
-       ring->tx_info = vmalloc(tmp);
-       if (!ring->tx_info) {
-               en_err(priv, "Failed allocating tx_info ring\n");
-               return -ENOMEM;
-       }
-       en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
-                ring->tx_info, tmp);
-
-       ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
-       if (!ring->bounce_buf) {
-               en_err(priv, "Failed allocating bounce buffer\n");
-               err = -ENOMEM;
-               goto err_tx;
-       }
-       ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
-
-       err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
-                                2 * PAGE_SIZE);
-       if (err) {
-               en_err(priv, "Failed allocating hwq resources\n");
-               goto err_bounce;
-       }
-
-       err = mlx4_en_map_buffer(&ring->wqres.buf);
-       if (err) {
-               en_err(priv, "Failed to map TX buffer\n");
-               goto err_hwq_res;
-       }
-
-       ring->buf = ring->wqres.buf.direct.buf;
-
-       en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
-              "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
-              ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
-
-       ring->qpn = qpn;
-       err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
-       if (err) {
-               en_err(priv, "Failed allocating qp %d\n", ring->qpn);
-               goto err_map;
-       }
-       ring->qp.event = mlx4_en_sqp_event;
-
-       err = mlx4_bf_alloc(mdev->dev, &ring->bf);
-       if (err) {
-               en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
-               ring->bf.uar = &mdev->priv_uar;
-               ring->bf.uar->map = mdev->uar_map;
-               ring->bf_enabled = false;
-       } else
-               ring->bf_enabled = true;
-
-       return 0;
-
-err_map:
-       mlx4_en_unmap_buffer(&ring->wqres.buf);
-err_hwq_res:
-       mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-err_bounce:
-       kfree(ring->bounce_buf);
-       ring->bounce_buf = NULL;
-err_tx:
-       vfree(ring->tx_info);
-       ring->tx_info = NULL;
-       return err;
-}
-
-void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
-                            struct mlx4_en_tx_ring *ring)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
-
-       if (ring->bf_enabled)
-               mlx4_bf_free(mdev->dev, &ring->bf);
-       mlx4_qp_remove(mdev->dev, &ring->qp);
-       mlx4_qp_free(mdev->dev, &ring->qp);
-       mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
-       mlx4_en_unmap_buffer(&ring->wqres.buf);
-       mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-       kfree(ring->bounce_buf);
-       ring->bounce_buf = NULL;
-       vfree(ring->tx_info);
-       ring->tx_info = NULL;
-}
-
-int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
-                            struct mlx4_en_tx_ring *ring,
-                            int cq)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       int err;
-
-       ring->cqn = cq;
-       ring->prod = 0;
-       ring->cons = 0xffffffff;
-       ring->last_nr_txbb = 1;
-       ring->poll_cnt = 0;
-       ring->blocked = 0;
-       memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
-       memset(ring->buf, 0, ring->buf_size);
-
-       ring->qp_state = MLX4_QP_STATE_RST;
-       ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
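-       /* Pre-swap the doorbell value here so the hot xmit path can write
-        * it straight to the doorbell register without a per-packet swap. */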
-
-       mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
-                               ring->cqn, &ring->context);
-       if (ring->bf_enabled)
-               ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
-
-       err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
-                              &ring->qp, &ring->qp_state);
-
-       return err;
-}
-
-void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
-                               struct mlx4_en_tx_ring *ring)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-
-       mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
-                      MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
-}
-
-
-static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
-                               struct mlx4_en_tx_ring *ring,
-                               int index, u8 owner)
-{
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
-       struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
-       struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
-       struct sk_buff *skb = tx_info->skb;
-       struct skb_frag_struct *frag;
-       void *end = ring->buf + ring->buf_size;
-       int frags = skb_shinfo(skb)->nr_frags;
-       int i;
-       __be32 *ptr = (__be32 *)tx_desc;
-       __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
-
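-       /* Freed TXBBs are "stamped": a known pattern carrying the next
-        * owner bit is written every STAMP_STRIDE bytes across the
-        * descriptor so it reads as software-owned until it is reused. */
-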
-       /* Optimize the common case when there are no wraparounds */
-       if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
-               if (!tx_info->inl) {
-                       if (tx_info->linear) {
-                               pci_unmap_single(mdev->pdev,
-                                       (dma_addr_t) be64_to_cpu(data->addr),
-                                        be32_to_cpu(data->byte_count),
-                                        PCI_DMA_TODEVICE);
-                               ++data;
-                       }
-
-                       for (i = 0; i < frags; i++) {
-                               frag = &skb_shinfo(skb)->frags[i];
-                               pci_unmap_page(mdev->pdev,
-                                       (dma_addr_t) be64_to_cpu(data[i].addr),
-                                       frag->size, PCI_DMA_TODEVICE);
-                       }
-               }
-               /* Stamp the freed descriptor */
-               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
-                       *ptr = stamp;
-                       ptr += STAMP_DWORDS;
-               }
-
-       } else {
-               if (!tx_info->inl) {
-                       if ((void *) data >= end) {
-                               data = ring->buf + ((void *)data - end);
-                       }
-
-                       if (tx_info->linear) {
-                               pci_unmap_single(mdev->pdev,
-                                       (dma_addr_t) be64_to_cpu(data->addr),
-                                        be32_to_cpu(data->byte_count),
-                                        PCI_DMA_TODEVICE);
-                               ++data;
-                       }
-
-                       for (i = 0; i < frags; i++) {
-                               /* Check for wraparound before unmapping */
-                               if ((void *) data >= end)
-                                       data = ring->buf;
-                               frag = &skb_shinfo(skb)->frags[i];
-                               pci_unmap_page(mdev->pdev,
-                                       (dma_addr_t) be64_to_cpu(data->addr),
-                                        frag->size, PCI_DMA_TODEVICE);
-                               ++data;
-                       }
-               }
-               /* Stamp the freed descriptor */
-               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
-                       *ptr = stamp;
-                       ptr += STAMP_DWORDS;
-                       if ((void *) ptr >= end) {
-                               ptr = ring->buf;
-                               stamp ^= cpu_to_be32(0x80000000);
-                       }
-               }
-
-       }
-       dev_kfree_skb_any(skb);
-       return tx_info->nr_txbb;
-}
-
-
-int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       int cnt = 0;
-
-       /* Skip last polled descriptor */
-       ring->cons += ring->last_nr_txbb;
-       en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
-                ring->cons, ring->prod);
-
-       if ((u32) (ring->prod - ring->cons) > ring->size) {
-               if (netif_msg_tx_err(priv))
-                       en_warn(priv, "Tx consumer passed producer!\n");
-               return 0;
-       }
-
-       while (ring->cons != ring->prod) {
-               ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
-                                               ring->cons & ring->size_mask,
-                                               !!(ring->cons & ring->size));
-               ring->cons += ring->last_nr_txbb;
-               cnt++;
-       }
-
-       if (cnt)
-               en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
-
-       return cnt;
-}
-
-
-static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_cq *mcq = &cq->mcq;
-       struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
-       struct mlx4_cqe *cqe = cq->buf;
-       u16 index;
-       u16 new_index;
-       u32 txbbs_skipped = 0;
-       u32 cq_last_sav;
-
-       /* index always points to the first TXBB of the last polled descriptor */
-       index = ring->cons & ring->size_mask;
-       new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
-       if (index == new_index)
-               return;
-
-       if (!priv->port_up)
-               return;
-
-       /*
-        * We use a two-stage loop:
-        * - the first samples the HW-updated CQE
-        * - the second frees TXBBs until the last sample
-        * This lets us amortize CQE cache misses, while still polling the CQ
-        * until is quiescent.
-        * until it is quiescent.
-       cq_last_sav = mcq->cons_index;
-       do {
-               do {
-                       /* Skip over last polled CQE */
-                       index = (index + ring->last_nr_txbb) & ring->size_mask;
-                       txbbs_skipped += ring->last_nr_txbb;
-
-                       /* Poll next CQE */
-                       ring->last_nr_txbb = mlx4_en_free_tx_desc(
-                                               priv, ring, index,
-                                               !!((ring->cons + txbbs_skipped) &
-                                                  ring->size));
-                       ++mcq->cons_index;
-
-               } while (index != new_index);
-
-               new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
-       } while (index != new_index);
-       AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
-                        (u32) (mcq->cons_index - cq_last_sav));
-
-       /*
-        * To prevent CQ overflow we first update CQ consumer and only then
-        * the ring consumer.
-        */
-       mlx4_cq_set_ci(mcq);
-       wmb();
-       ring->cons += txbbs_skipped;
-
-       /* Wakeup Tx queue if this ring stopped it */
-       if (unlikely(ring->blocked)) {
-               if ((u32) (ring->prod - ring->cons) <=
-                    ring->size - HEADROOM - MAX_DESC_TXBBS) {
-                       ring->blocked = 0;
-                       netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
-                       priv->port_stats.wake_queue++;
-               }
-       }
-}
-
-void mlx4_en_tx_irq(struct mlx4_cq *mcq)
-{
-       struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
-       struct mlx4_en_priv *priv = netdev_priv(cq->dev);
-       struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
-
-       if (!spin_trylock(&ring->comp_lock))
-               return;
-       mlx4_en_process_tx_cq(cq->dev, cq);
-       mod_timer(&cq->timer, jiffies + 1);
-       spin_unlock(&ring->comp_lock);
-}
-
-
-void mlx4_en_poll_tx_cq(unsigned long data)
-{
-       struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
-       struct mlx4_en_priv *priv = netdev_priv(cq->dev);
-       struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
-       u32 inflight;
-
-       INC_PERF_COUNTER(priv->pstats.tx_poll);
-
-       if (!spin_trylock_irq(&ring->comp_lock)) {
-               mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-               return;
-       }
-       mlx4_en_process_tx_cq(cq->dev, cq);
-       inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
-
-       /* If there are still packets in flight and the timer has not already
-        * been scheduled by the Tx routine then schedule it here to guarantee
-        * completion processing of these packets */
-       if (inflight && priv->port_up)
-               mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-
-       spin_unlock_irq(&ring->comp_lock);
-}
-
-static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
-                                                     struct mlx4_en_tx_ring *ring,
-                                                     u32 index,
-                                                     unsigned int desc_size)
-{
-       u32 copy = (ring->size - index) * TXBB_SIZE;
-       int i;
-
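-       /* Copy back-to-front, issuing a barrier at every TXBB boundary, so
-        * no TXBB becomes visible before its contents are complete; the
-        * descriptor's first dword, which carries the ownership bit, is
-        * skipped here and written last by the caller. */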
-       for (i = desc_size - copy - 4; i >= 0; i -= 4) {
-               if ((i & (TXBB_SIZE - 1)) == 0)
-                       wmb();
-
-               *((u32 *) (ring->buf + i)) =
-                       *((u32 *) (ring->bounce_buf + copy + i));
-       }
-
-       for (i = copy - 4; i >= 4 ; i -= 4) {
-               if ((i & (TXBB_SIZE - 1)) == 0)
-                       wmb();
-
-               *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
-                       *((u32 *) (ring->bounce_buf + i));
-       }
-
-       /* Return real descriptor location */
-       return ring->buf + index * TXBB_SIZE;
-}
-
-static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
-{
-       struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
-       struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
-       unsigned long flags;
-
-       /* If we don't have a pending timer, set one up to catch our recent
-        * post in case the interface becomes idle */
-       if (!timer_pending(&cq->timer))
-               mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-
-       /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
-       if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-               if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
-                       mlx4_en_process_tx_cq(priv->dev, cq);
-                       spin_unlock_irqrestore(&ring->comp_lock, flags);
-               }
-}
-
-static void *get_frag_ptr(struct sk_buff *skb)
-{
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-       struct page *page = frag->page;
-       void *ptr;
-
-       ptr = page_address(page);
-       if (unlikely(!ptr))
-               return NULL;
-
-       return ptr + frag->page_offset;
-}
-
-static int is_inline(struct sk_buff *skb, void **pfrag)
-{
-       void *ptr;
-
-       if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
-               if (skb_shinfo(skb)->nr_frags == 1) {
-                       ptr = get_frag_ptr(skb);
-                       if (unlikely(!ptr))
-                               return 0;
-
-                       if (pfrag)
-                               *pfrag = ptr;
-
-                       return 1;
-               } else if (unlikely(skb_shinfo(skb)->nr_frags))
-                       return 0;
-               else
-                       return 1;
-       }
-
-       return 0;
-}
-
-static int inline_size(struct sk_buff *skb)
-{
-       if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
-           <= MLX4_INLINE_ALIGN)
-               return ALIGN(skb->len + CTRL_SIZE +
-                            sizeof(struct mlx4_wqe_inline_seg), 16);
-       else
-               return ALIGN(skb->len + CTRL_SIZE + 2 *
-                            sizeof(struct mlx4_wqe_inline_seg), 16);
-}
-
-static int get_real_size(struct sk_buff *skb, struct net_device *dev,
-                        int *lso_header_size)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       int real_size;
-
-       if (skb_is_gso(skb)) {
-               *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
-               real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
-                       ALIGN(*lso_header_size + 4, DS_SIZE);
-               if (unlikely(*lso_header_size != skb_headlen(skb))) {
-                       /* We add a segment for the skb linear buffer only if
-                        * it contains data */
-                       if (*lso_header_size < skb_headlen(skb))
-                               real_size += DS_SIZE;
-                       else {
-                               if (netif_msg_tx_err(priv))
-                                       en_warn(priv, "Non-linear headers\n");
-                               return 0;
-                       }
-               }
-       } else {
-               *lso_header_size = 0;
-               if (!is_inline(skb, NULL))
-                       real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
-               else
-                       real_size = inline_size(skb);
-       }
-
-       return real_size;
-}
-
-static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
-                            int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
-{
-       struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
-       int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
-
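-       /* Bit 31 of byte_count flags the segment as inline data. */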
-       if (skb->len <= spc) {
-               inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
-               skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
-               if (skb_shinfo(skb)->nr_frags)
-                       memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
-                              skb_shinfo(skb)->frags[0].size);
-
-       } else {
-               inl->byte_count = cpu_to_be32(1 << 31 | spc);
-               if (skb_headlen(skb) <= spc) {
-                       skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
-                       if (skb_headlen(skb) < spc) {
-                               memcpy(((void *)(inl + 1)) + skb_headlen(skb),
-                                       fragptr, spc - skb_headlen(skb));
-                               fragptr +=  spc - skb_headlen(skb);
-                       }
-                       inl = (void *) (inl + 1) + spc;
-                       memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
-               } else {
-                       skb_copy_from_linear_data(skb, inl + 1, spc);
-                       inl = (void *) (inl + 1) + spc;
-                       skb_copy_from_linear_data_offset(skb, spc, inl + 1,
-                                       skb_headlen(skb) - spc);
-                       if (skb_shinfo(skb)->nr_frags)
-                               memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
-                                       fragptr, skb_shinfo(skb)->frags[0].size);
-               }
-
-               wmb();
-               inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
-       }
-       tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
-       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
-       tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
-}
-
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       u16 vlan_tag = 0;
-
-       /* If we support per priority flow control and the packet contains
-        * a vlan tag, send the packet to the TX ring assigned to that priority
-        */
-       if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
-               vlan_tag = vlan_tx_tag_get(skb);
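-               /* Bits 13-15 of the tag hold the 802.1p priority */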
-               return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
-       }
-
-       return skb_tx_hash(dev, skb);
-}
-
-static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
-{
-       __iowrite64_copy(dst, src, bytecnt / 8);
-}
-
-netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_dev *mdev = priv->mdev;
-       struct mlx4_en_tx_ring *ring;
-       struct mlx4_en_cq *cq;
-       struct mlx4_en_tx_desc *tx_desc;
-       struct mlx4_wqe_data_seg *data;
-       struct skb_frag_struct *frag;
-       struct mlx4_en_tx_info *tx_info;
-       struct ethhdr *ethh;
-       u64 mac;
-       u32 mac_l, mac_h;
-       int tx_ind = 0;
-       int nr_txbb;
-       int desc_size;
-       int real_size;
-       dma_addr_t dma;
-       u32 index, bf_index;
-       __be32 op_own;
-       u16 vlan_tag = 0;
-       int i;
-       int lso_header_size;
-       void *fragptr;
-       bool bounce = false;
-
-       if (!priv->port_up)
-               goto tx_drop;
-
-       real_size = get_real_size(skb, dev, &lso_header_size);
-       if (unlikely(!real_size))
-               goto tx_drop;
-
-       /* Align descriptor to TXBB size */
-       desc_size = ALIGN(real_size, TXBB_SIZE);
-       nr_txbb = desc_size / TXBB_SIZE;
-       if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
-               if (netif_msg_tx_err(priv))
-                       en_warn(priv, "Oversized header or SG list\n");
-               goto tx_drop;
-       }
-
-       tx_ind = skb->queue_mapping;
-       ring = &priv->tx_ring[tx_ind];
-       if (vlan_tx_tag_present(skb))
-               vlan_tag = vlan_tx_tag_get(skb);
-
-       /* Check available TXBBs and 2K spare for prefetch */
-       if (unlikely(((int)(ring->prod - ring->cons)) >
-                    ring->size - HEADROOM - MAX_DESC_TXBBS)) {
-               /* every full Tx ring stops queue */
-               netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
-               ring->blocked = 1;
-               priv->port_stats.queue_stopped++;
-
-               /* Use interrupts to find out when queue opened */
-               cq = &priv->tx_cq[tx_ind];
-               mlx4_en_arm_cq(priv, cq);
-               return NETDEV_TX_BUSY;
-       }
-
-       /* Track current inflight packets for performance analysis */
-       AVG_PERF_COUNTER(priv->pstats.inflight_avg,
-                        (u32) (ring->prod - ring->cons - 1));
-
-       /* Packet is good - grab an index and transmit it */
-       index = ring->prod & ring->size_mask;
-       bf_index = ring->prod;
-
-       /* See if we have enough space for whole descriptor TXBB for setting
-        * SW ownership on next descriptor; if not, use a bounce buffer. */
-       if (likely(index + nr_txbb <= ring->size))
-               tx_desc = ring->buf + index * TXBB_SIZE;
-       else {
-               tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
-               bounce = true;
-       }
-
-       /* Save skb in tx_info ring */
-       tx_info = &ring->tx_info[index];
-       tx_info->skb = skb;
-       tx_info->nr_txbb = nr_txbb;
-
-       /* Prepare the ctrl segment, apart from opcode+ownership which
-        * depends on whether LSO is used */
-       tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
-       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
-       tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
-       tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
-                                               MLX4_WQE_CTRL_SOLICITED);
-       if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-               tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
-                                                        MLX4_WQE_CTRL_TCP_UDP_CSUM);
-               priv->port_stats.tx_chksum_offload++;
-       }
-
-       if (unlikely(priv->validate_loopback)) {
-               /* Copy dst mac address to wqe */
-               skb_reset_mac_header(skb);
-               ethh = eth_hdr(skb);
-               if (ethh && ethh->h_dest) {
-                       mac = mlx4_en_mac_to_u64(ethh->h_dest);
-                       mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
-                       mac_l = (u32) (mac & 0xffffffff);
-                       tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
-                       tx_desc->ctrl.imm = cpu_to_be32(mac_l);
-               }
-       }
-
-       /* Handle LSO (TSO) packets */
-       if (lso_header_size) {
-               /* Mark opcode as LSO */
-               op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
-                       ((ring->prod & ring->size) ?
-                               cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
-
-               /* Fill in the LSO prefix */
-               tx_desc->lso.mss_hdr_size = cpu_to_be32(
-                       skb_shinfo(skb)->gso_size << 16 | lso_header_size);
-
-               /* Copy headers;
-                * note that we already verified that it is linear */
-               memcpy(tx_desc->lso.header, skb->data, lso_header_size);
-               data = ((void *) &tx_desc->lso +
-                       ALIGN(lso_header_size + 4, DS_SIZE));
-
-               priv->port_stats.tso_packets++;
-               i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
-                       !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
-               ring->bytes += skb->len + (i - 1) * lso_header_size;
-               ring->packets += i;
-       } else {
-               /* Normal (Non LSO) packet */
-               op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
-                       ((ring->prod & ring->size) ?
-                        cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
-               data = &tx_desc->data;
-               ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
-               ring->packets++;
-
-       }
-       AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
-
-
-       /* valid only for none inline segments */
-       tx_info->data_offset = (void *) data - (void *) tx_desc;
-
-       tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
-       data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
-
-       if (!is_inline(skb, &fragptr)) {
-               /* Map fragments */
-               for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
-                       frag = &skb_shinfo(skb)->frags[i];
-                       dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
-                                          frag->size, PCI_DMA_TODEVICE);
-                       data->addr = cpu_to_be64(dma);
-                       data->lkey = cpu_to_be32(mdev->mr.key);
-                       wmb();
-                       data->byte_count = cpu_to_be32(frag->size);
-                       --data;
-               }
-
-               /* Map linear part */
-               if (tx_info->linear) {
-                       dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
-                                            skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
-                       data->addr = cpu_to_be64(dma);
-                       data->lkey = cpu_to_be32(mdev->mr.key);
-                       wmb();
-                       data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
-               }
-               tx_info->inl = 0;
-       } else {
-               build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
-               tx_info->inl = 1;
-       }
-
-       ring->prod += nr_txbb;
-
-       /* If we used a bounce buffer then copy descriptor back into place */
-       if (bounce)
-               tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
-
-       /* Run destructor before passing skb to HW */
-       if (likely(!skb_shared(skb)))
-               skb_orphan(skb);
-
-       if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
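-               /* BlueFlame: write the whole descriptor through the BF
-                * register instead of ringing the doorbell, sparing the
-                * HCA a DMA read for small descriptors. */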
-               *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
-               op_own |= htonl((bf_index & 0xffff) << 8);
-               /* Ensure the new descriptor hits memory before setting
-                * ownership of this descriptor to HW */
-               wmb();
-               tx_desc->ctrl.owner_opcode = op_own;
-
-               wmb();
-
-               mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
-                    desc_size);
-
-               wmb();
-
-               ring->bf.offset ^= ring->bf.buf_size;
-       } else {
-               /* Ensure the new descriptor hits memory before setting
-                * ownership of this descriptor to HW */
-               wmb();
-               tx_desc->ctrl.owner_opcode = op_own;
-               wmb();
-               writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
-       }
-
-       /* Poll CQ here */
-       mlx4_en_xmit_poll(priv, tx_ind);
-
-       return NETDEV_TX_OK;
-
-tx_drop:
-       dev_kfree_skb_any(skb);
-       priv->stats.tx_dropped++;
-       return NETDEV_TX_OK;
-}
-
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
deleted file mode 100644 (file)
index 1ad1f60..0000000
+++ /dev/null
@@ -1,842 +0,0 @@
-/*
- * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *     - Redistributions of source code must retain the above
- *       copyright notice, this list of conditions and the following
- *       disclaimer.
- *
- *     - Redistributions in binary form must reproduce the above
- *       copyright notice, this list of conditions and the following
- *       disclaimer in the documentation and/or other materials
- *       provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/mlx4/cmd.h>
-
-#include "mlx4.h"
-#include "fw.h"
-
-enum {
-       MLX4_IRQNAME_SIZE       = 32
-};
-
-enum {
-       MLX4_NUM_ASYNC_EQE      = 0x100,
-       MLX4_NUM_SPARE_EQE      = 0x80,
-       MLX4_EQ_ENTRY_SIZE      = 0x20
-};
-
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_eq_context {
-       __be32                  flags;
-       u16                     reserved1[3];
-       __be16                  page_offset;
-       u8                      log_eq_size;
-       u8                      reserved2[4];
-       u8                      eq_period;
-       u8                      reserved3;
-       u8                      eq_max_count;
-       u8                      reserved4[3];
-       u8                      intr;
-       u8                      log_page_size;
-       u8                      reserved5[2];
-       u8                      mtt_base_addr_h;
-       __be32                  mtt_base_addr_l;
-       u32                     reserved6[2];
-       __be32                  consumer_index;
-       __be32                  producer_index;
-       u32                     reserved7[4];
-};
-
-#define MLX4_EQ_STATUS_OK         ( 0 << 28)
-#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
-#define MLX4_EQ_OWNER_SW          ( 0 << 24)
-#define MLX4_EQ_OWNER_HW          ( 1 << 24)
-#define MLX4_EQ_FLAG_EC                   ( 1 << 18)
-#define MLX4_EQ_FLAG_OI                   ( 1 << 17)
-#define MLX4_EQ_STATE_ARMED       ( 9 <<  8)
-#define MLX4_EQ_STATE_FIRED       (10 <<  8)
-#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)
-
-#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)          | \
-                              (1ull << MLX4_EVENT_TYPE_COMM_EST)           | \
-                              (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)         | \
-                              (1ull << MLX4_EVENT_TYPE_CQ_ERROR)           | \
-                              (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)     | \
-                              (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
-                              (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
-                              (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
-                              (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
-                              (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)        | \
-                              (1ull << MLX4_EVENT_TYPE_ECC_DETECT)         | \
-                              (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
-                              (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
-                              (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
-                              (1ull << MLX4_EVENT_TYPE_CMD))
-
-struct mlx4_eqe {
-       u8                      reserved1;
-       u8                      type;
-       u8                      reserved2;
-       u8                      subtype;
-       union {
-               u32             raw[6];
-               struct {
-                       __be32  cqn;
-               } __packed comp;
-               struct {
-                       u16     reserved1;
-                       __be16  token;
-                       u32     reserved2;
-                       u8      reserved3[3];
-                       u8      status;
-                       __be64  out_param;
-               } __packed cmd;
-               struct {
-                       __be32  qpn;
-               } __packed qp;
-               struct {
-                       __be32  srqn;
-               } __packed srq;
-               struct {
-                       __be32  cqn;
-                       u32     reserved1;
-                       u8      reserved2[3];
-                       u8      syndrome;
-               } __packed cq_err;
-               struct {
-                       u32     reserved1[2];
-                       __be32  port;
-               } __packed port_change;
-       }                       event;
-       u8                      reserved3[3];
-       u8                      owner;
-} __packed;
-
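-/*
- * Ring the EQ doorbell: the low 24 bits carry the consumer index and
- * bit 31 (req_not) re-arms the EQ to request the next event
- * notification.
- */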
-static void eq_set_ci(struct mlx4_eq *eq, int req_not)
-{
-       __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
-                                              req_not << 31),
-                    eq->doorbell);
-       /* We still want ordering, just not swabbing, so add a barrier */
-       mb();
-}
-
-static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
-{
-       unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
-       return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
-}
-
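-/*
- * The hardware toggles the ownership bit on every pass around the
- * ring, so an EQE belongs to software when its owner bit matches the
- * parity of the current pass (cons_index & nent).
- */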
-static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
-{
-       struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
-       return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
-}
-
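-/*
- * Poll one EQ and demultiplex its entries: completions go to the CQ
- * layer, QP/SRQ/CMD/port events to their handlers, and anything else
- * is logged as unhandled.
- */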
-static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
-{
-       struct mlx4_eqe *eqe;
-       int cqn;
-       int eqes_found = 0;
-       int set_ci = 0;
-       int port;
-
-       while ((eqe = next_eqe_sw(eq))) {
-               /*
-                * Make sure we read EQ entry contents after we've
-                * checked the ownership bit.
-                */
-               rmb();
-
-               switch (eqe->type) {
-               case MLX4_EVENT_TYPE_COMP:
-                       cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
-                       mlx4_cq_completion(dev, cqn);
-                       break;
-
-               case MLX4_EVENT_TYPE_PATH_MIG:
-               case MLX4_EVENT_TYPE_COMM_EST:
-               case MLX4_EVENT_TYPE_SQ_DRAINED:
-               case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
-               case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
-               case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
-               case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
-               case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
-                       mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
-                                     eqe->type);
-                       break;
-
-               case MLX4_EVENT_TYPE_SRQ_LIMIT:
-               case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
-                       mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
-                                     eqe->type);
-                       break;
-
-               case MLX4_EVENT_TYPE_CMD:
-                       mlx4_cmd_event(dev,
-                                      be16_to_cpu(eqe->event.cmd.token),
-                                      eqe->event.cmd.status,
-                                      be64_to_cpu(eqe->event.cmd.out_param));
-                       break;
-
-               case MLX4_EVENT_TYPE_PORT_CHANGE:
-                       port = be32_to_cpu(eqe->event.port_change.port) >> 28;
-                       if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
-                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
-                                                   port);
-                               mlx4_priv(dev)->sense.do_sense_port[port] = 1;
-                       } else {
-                               mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
-                                                   port);
-                               mlx4_priv(dev)->sense.do_sense_port[port] = 0;
-                       }
-                       break;
-
-               case MLX4_EVENT_TYPE_CQ_ERROR:
-                       mlx4_warn(dev, "CQ %s on CQN %06x\n",
-                                 eqe->event.cq_err.syndrome == 1 ?
-                                 "overrun" : "access violation",
-                                 be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
-                       mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
-                                     eqe->type);
-                       break;
-
-               case MLX4_EVENT_TYPE_EQ_OVERFLOW:
-                       mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
-                       break;
-
-               case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
-               case MLX4_EVENT_TYPE_ECC_DETECT:
-               default:
-                       mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
-                                 eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
-                       break;
-               }
-
-               ++eq->cons_index;
-               eqes_found = 1;
-               ++set_ci;
-
-               /*
-                * The HCA will think the queue has overflowed if we
-                * don't tell it we've been processing events.  We
-                * create our EQs with MLX4_NUM_SPARE_EQE extra
-                * entries, so we must update our consumer index at
-                * least that often.
-                */
-               if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
-                       eq_set_ci(eq, 0);
-                       set_ci = 0;
-               }
-       }
-
-       eq_set_ci(eq, 1);
-
-       return eqes_found;
-}
-
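-/*
- * Legacy (INTA) interrupt handler: clear the interrupt, then poll
- * every EQ since the line is shared; IRQ_RETVAL() reports whether
- * any work was found.
- */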
-static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
-{
-       struct mlx4_dev *dev = dev_ptr;
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int work = 0;
-       int i;
-
-       writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
-
-       for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
-               work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
-
-       return IRQ_RETVAL(work);
-}
-
-static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
-{
-       struct mlx4_eq  *eq  = eq_ptr;
-       struct mlx4_dev *dev = eq->dev;
-
-       mlx4_eq_int(dev, eq);
-
-       /* MSI-X vectors always belong to us */
-       return IRQ_HANDLED;
-}
-
-static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
-                       int eq_num)
-{
-       return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
-                       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
-}
-
-static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-                        int eq_num)
-{
-       return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
-                       MLX4_CMD_TIME_CLASS_A);
-}
-
-static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-                        int eq_num)
-{
-       return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
-                           MLX4_CMD_TIME_CLASS_A);
-}
-
-static int mlx4_num_eq_uar(struct mlx4_dev *dev)
-{
-       /*
-        * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
-        * we need to map, take the difference of highest index and
-        * the lowest index we'll use and add 1.
-        */
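-       /*
-        * Hypothetical example: 4 completion vectors, 8 reserved EQs
-        * and an empty pool give 13/4 - 8/4 + 1 = 3 - 2 + 1 = 2 UARs.
-        */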
-       return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
-                dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
-}
-
-static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int index;
-
-       index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;
-
-       if (!priv->eq_table.uar_map[index]) {
-               priv->eq_table.uar_map[index] =
-                       ioremap(pci_resource_start(dev->pdev, 2) +
-                               ((eq->eqn / 4) << PAGE_SHIFT),
-                               PAGE_SIZE);
-               if (!priv->eq_table.uar_map[index]) {
-                       mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
-                                eq->eqn);
-                       return NULL;
-               }
-       }
-
-       return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
-}
-
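-/*
- * Create one EQ: allocate its pages, grab an EQN from the bitmap,
- * map its doorbell, write the MTT entries and finally hand the
- * context to firmware with SW2HW_EQ.
- */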
-static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
-                         u8 intr, struct mlx4_eq *eq)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_eq_context *eq_context;
-       int npages;
-       u64 *dma_list = NULL;
-       dma_addr_t t;
-       u64 mtt_addr;
-       int err = -ENOMEM;
-       int i;
-
-       eq->dev   = dev;
-       eq->nent  = roundup_pow_of_two(max(nent, 2));
-       npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
-
-       eq->page_list = kmalloc(npages * sizeof *eq->page_list,
-                               GFP_KERNEL);
-       if (!eq->page_list)
-               goto err_out;
-
-       for (i = 0; i < npages; ++i)
-               eq->page_list[i].buf = NULL;
-
-       dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-       if (!dma_list)
-               goto err_out_free;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               goto err_out_free;
-       eq_context = mailbox->buf;
-
-       for (i = 0; i < npages; ++i) {
-               eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
-                                                         PAGE_SIZE, &t, GFP_KERNEL);
-               if (!eq->page_list[i].buf)
-                       goto err_out_free_pages;
-
-               dma_list[i] = t;
-               eq->page_list[i].map = t;
-
-               memset(eq->page_list[i].buf, 0, PAGE_SIZE);
-       }
-
-       eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
-       if (eq->eqn == -1)
-               goto err_out_free_pages;
-
-       eq->doorbell = mlx4_get_eq_uar(dev, eq);
-       if (!eq->doorbell) {
-               err = -ENOMEM;
-               goto err_out_free_eq;
-       }
-
-       err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
-       if (err)
-               goto err_out_free_eq;
-
-       err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
-       if (err)
-               goto err_out_free_mtt;
-
-       memset(eq_context, 0, sizeof *eq_context);
-       eq_context->flags         = cpu_to_be32(MLX4_EQ_STATUS_OK   |
-                                               MLX4_EQ_STATE_ARMED);
-       eq_context->log_eq_size   = ilog2(eq->nent);
-       eq_context->intr          = intr;
-       eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;
-
-       mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
-       eq_context->mtt_base_addr_h = mtt_addr >> 32;
-       eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
-
-       err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
-       if (err) {
-               mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
-               goto err_out_free_mtt;
-       }
-
-       kfree(dma_list);
-       mlx4_free_cmd_mailbox(dev, mailbox);
-
-       eq->cons_index = 0;
-
-       return err;
-
-err_out_free_mtt:
-       mlx4_mtt_cleanup(dev, &eq->mtt);
-
-err_out_free_eq:
-       mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
-
-err_out_free_pages:
-       for (i = 0; i < npages; ++i)
-               if (eq->page_list[i].buf)
-                       dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                         eq->page_list[i].buf,
-                                         eq->page_list[i].map);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-
-err_out_free:
-       kfree(eq->page_list);
-       kfree(dma_list);
-
-err_out:
-       return err;
-}
-
-static void mlx4_free_eq(struct mlx4_dev *dev,
-                        struct mlx4_eq *eq)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_cmd_mailbox *mailbox;
-       int err;
-       int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
-       int i;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return;
-
-       err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
-       if (err)
-               mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
-
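-       /*
-        * Compiled-out debug aid: HW2SW_EQ returns the EQ context in
-        * the mailbox, so turning "if (0)" into "if (1)" dumps it.
-        */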
-       if (0) {
-               mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
-               for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
-                       if (i % 4 == 0)
-                               pr_cont("[%02x] ", i * 4);
-                       pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
-                       if ((i + 1) % 4 == 0)
-                               pr_cont("\n");
-               }
-       }
-
-       mlx4_mtt_cleanup(dev, &eq->mtt);
-       for (i = 0; i < npages; ++i)
-               pci_free_consistent(dev->pdev, PAGE_SIZE,
-                                   eq->page_list[i].buf,
-                                   eq->page_list[i].map);
-
-       kfree(eq->page_list);
-       mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
-       mlx4_free_cmd_mailbox(dev, mailbox);
-}
-
-static void mlx4_free_irqs(struct mlx4_dev *dev)
-{
-       struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int     i, vec;
-
-       if (eq_table->have_irq)
-               free_irq(dev->pdev->irq, dev);
-
-       for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
-               if (eq_table->eq[i].have_irq) {
-                       free_irq(eq_table->eq[i].irq, eq_table->eq + i);
-                       eq_table->eq[i].have_irq = 0;
-               }
-
-       for (i = 0; i < dev->caps.comp_pool; i++) {
-               /*
-                * Free the IRQs assigned from the pool.  All of these
-                * bits should already be 0, but validate to be safe.
-                */
-               if (priv->msix_ctl.pool_bm & 1ULL << i) {
-                       /* No locking needed; nothing else uses the pool now */
-                       vec = dev->caps.num_comp_vectors + 1 + i;
-                       free_irq(priv->eq_table.eq[vec].irq,
-                                &priv->eq_table.eq[vec]);
-               }
-       }
-
-       kfree(eq_table->irq_names);
-}
-
-static int mlx4_map_clr_int(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
-                                priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
-       if (!priv->clr_base) {
-               mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       iounmap(priv->clr_base);
-}
-
-int mlx4_alloc_eq_table(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
-                                   sizeof *priv->eq_table.eq, GFP_KERNEL);
-       if (!priv->eq_table.eq)
-               return -ENOMEM;
-
-       return 0;
-}
-
-void mlx4_free_eq_table(struct mlx4_dev *dev)
-{
-       kfree(mlx4_priv(dev)->eq_table.eq);
-}
-
-int mlx4_init_eq_table(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int err;
-       int i;
-
-       priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
-                                        sizeof *priv->eq_table.uar_map,
-                                        GFP_KERNEL);
-       if (!priv->eq_table.uar_map) {
-               err = -ENOMEM;
-               goto err_out_free;
-       }
-
-       err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
-                              dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
-       if (err)
-               goto err_out_free;
-
-       for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
-               priv->eq_table.uar_map[i] = NULL;
-
-       err = mlx4_map_clr_int(dev);
-       if (err)
-               goto err_out_bitmap;
-
-       priv->eq_table.clr_mask =
-               swab32(1 << (priv->eq_table.inta_pin & 31));
-       priv->eq_table.clr_int  = priv->clr_base +
-               (priv->eq_table.inta_pin < 32 ? 4 : 0);
-
-       priv->eq_table.irq_names =
-               kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
-                                            dev->caps.comp_pool),
-                       GFP_KERNEL);
-       if (!priv->eq_table.irq_names) {
-               err = -ENOMEM;
-               goto err_out_bitmap;
-       }
-
-       for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
-               err = mlx4_create_eq(dev, dev->caps.num_cqs -
-                                         dev->caps.reserved_cqs +
-                                         MLX4_NUM_SPARE_EQE,
-                                    (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
-                                    &priv->eq_table.eq[i]);
-               if (err) {
-                       --i;
-                       goto err_out_unmap;
-               }
-       }
-
-       err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
-                            (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
-                            &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-       if (err)
-               goto err_out_comp;
-
-       /* If the pool of additional completion vectors is empty, this loop will not run */
-       for (i = dev->caps.num_comp_vectors + 1;
-             i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
-
-               err = mlx4_create_eq(dev, dev->caps.num_cqs -
-                                         dev->caps.reserved_cqs +
-                                         MLX4_NUM_SPARE_EQE,
-                                    (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
-                                    &priv->eq_table.eq[i]);
-               if (err) {
-                       --i;
-                       goto err_out_unmap;
-               }
-       }
-
-       if (dev->flags & MLX4_FLAG_MSI_X) {
-               const char *eq_name;
-
-               for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
-                       if (i < dev->caps.num_comp_vectors) {
-                               snprintf(priv->eq_table.irq_names +
-                                        i * MLX4_IRQNAME_SIZE,
-                                        MLX4_IRQNAME_SIZE,
-                                        "mlx4-comp-%d@pci:%s", i,
-                                        pci_name(dev->pdev));
-                       } else {
-                               snprintf(priv->eq_table.irq_names +
-                                        i * MLX4_IRQNAME_SIZE,
-                                        MLX4_IRQNAME_SIZE,
-                                        "mlx4-async@pci:%s",
-                                        pci_name(dev->pdev));
-                       }
-
-                       eq_name = priv->eq_table.irq_names +
-                                 i * MLX4_IRQNAME_SIZE;
-                       err = request_irq(priv->eq_table.eq[i].irq,
-                                         mlx4_msi_x_interrupt, 0, eq_name,
-                                         priv->eq_table.eq + i);
-                       if (err)
-                               goto err_out_async;
-
-                       priv->eq_table.eq[i].have_irq = 1;
-               }
-       } else {
-               snprintf(priv->eq_table.irq_names,
-                        MLX4_IRQNAME_SIZE,
-                        DRV_NAME "@pci:%s",
-                        pci_name(dev->pdev));
-               err = request_irq(dev->pdev->irq, mlx4_interrupt,
-                                 IRQF_SHARED, priv->eq_table.irq_names, dev);
-               if (err)
-                       goto err_out_async;
-
-               priv->eq_table.have_irq = 1;
-       }
-
-       err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
-                         priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
-       if (err)
-               mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
-                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
-
-       for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
-               eq_set_ci(&priv->eq_table.eq[i], 1);
-
-       return 0;
-
-err_out_async:
-       mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-
-err_out_comp:
-       i = dev->caps.num_comp_vectors - 1;
-
-err_out_unmap:
-       while (i >= 0) {
-               mlx4_free_eq(dev, &priv->eq_table.eq[i]);
-               --i;
-       }
-       mlx4_unmap_clr_int(dev);
-       mlx4_free_irqs(dev);
-
-err_out_bitmap:
-       mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
-
-err_out_free:
-       kfree(priv->eq_table.uar_map);
-
-       return err;
-}
-
-void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int i;
-
-       mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
-                   priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
-
-       mlx4_free_irqs(dev);
-
-       for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
-               mlx4_free_eq(dev, &priv->eq_table.eq[i]);
-
-       mlx4_unmap_clr_int(dev);
-
-       for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
-               if (priv->eq_table.uar_map[i])
-                       iounmap(priv->eq_table.uar_map[i]);
-
-       mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
-
-       kfree(priv->eq_table.uar_map);
-}
-
-/* A test that verifies that we can accept interrupts on all
- * the irq vectors of the device.
- * Interrupts are checked using the NOP command.
- */
-int mlx4_test_interrupts(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int i;
-       int err;
-
-       err = mlx4_NOP(dev);
-       /* When not in MSI-X mode, there is only one IRQ to check */
-       if (!(dev->flags & MLX4_FLAG_MSI_X))
-               return err;
-
-       /*
-        * Loop over all completion vectors; verify each one by mapping
-        * command completions to that vector and issuing a NOP command.
-        */
-       for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
-               /* Temporarily use polling for command completions */
-               mlx4_cmd_use_polling(dev);
-
-               /* Map all asynchronous events to this completion EQ */
-               err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
-                                 priv->eq_table.eq[i].eqn);
-               if (err) {
-                       mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
-                       mlx4_cmd_use_events(dev);
-                       break;
-               }
-
-               /* Go back to using events */
-               mlx4_cmd_use_events(dev);
-               err = mlx4_NOP(dev);
-       }
-
-       /* Return to default */
-       mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
-                   priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
-       return err;
-}
-EXPORT_SYMBOL(mlx4_test_interrupts);
-
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int vec = 0, err = 0, i;
-
-       spin_lock(&priv->msix_ctl.pool_lock);
-       for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
-               if (~priv->msix_ctl.pool_bm & 1ULL << i) {
-                       priv->msix_ctl.pool_bm |= 1ULL << i;
-                       vec = dev->caps.num_comp_vectors + 1 + i;
-                       snprintf(priv->eq_table.irq_names +
-                                       vec * MLX4_IRQNAME_SIZE,
-                                       MLX4_IRQNAME_SIZE, "%s", name);
-                       err = request_irq(priv->eq_table.eq[vec].irq,
-                                         mlx4_msi_x_interrupt, 0,
-                                         &priv->eq_table.irq_names[vec * MLX4_IRQNAME_SIZE],
-                                         priv->eq_table.eq + vec);
-                       if (err) {
-                               /* Zero the bit out again by flipping it */
-                               priv->msix_ctl.pool_bm ^= 1ULL << i;
-                               vec = 0;
-                               /* Don't break; try the next pool entry */
-                               continue;
-                       }
-                       eq_set_ci(&priv->eq_table.eq[vec], 1);
-               }
-       }
-       spin_unlock(&priv->msix_ctl.pool_lock);
-
-       if (vec) {
-               *vector = vec;
-       } else {
-               *vector = 0;
-               err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
-       }
-       return err;
-}
-EXPORT_SYMBOL(mlx4_assign_eq);
-
-void mlx4_release_eq(struct mlx4_dev *dev, int vec)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       /* Index into the pool bitmap */
-       int i = vec - dev->caps.num_comp_vectors - 1;
-
-       if (likely(i >= 0)) {
-               /*
-                * Sanity check: make sure we're not trying to free an
-                * IRQ belonging to a legacy (non-pool) EQ.
-                */
-               spin_lock(&priv->msix_ctl.pool_lock);
-               if (priv->msix_ctl.pool_bm & 1ULL << i) {
-                       free_irq(priv->eq_table.eq[vec].irq,
-                                &priv->eq_table.eq[vec]);
-                       priv->msix_ctl.pool_bm &= ~(1ULL << i);
-               }
-               spin_unlock(&priv->msix_ctl.pool_lock);
-       }
-}
-EXPORT_SYMBOL(mlx4_release_eq);
-
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
deleted file mode 100644 (file)
index 7eb8ba8..0000000
+++ /dev/null
@@ -1,944 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mlx4/cmd.h>
-#include <linux/cache.h>
-
-#include "fw.h"
-#include "icm.h"
-
-enum {
-       MLX4_COMMAND_INTERFACE_MIN_REV          = 2,
-       MLX4_COMMAND_INTERFACE_MAX_REV          = 3,
-       MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS    = 3,
-};
-
-extern void __buggy_use_of_MLX4_GET(void);
-extern void __buggy_use_of_MLX4_PUT(void);
-
-static bool enable_qos;
-module_param(enable_qos, bool, 0444);
-MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
-
-#define MLX4_GET(dest, source, offset)                               \
-       do {                                                          \
-               void *__p = (char *) (source) + (offset);             \
-               switch (sizeof (dest)) {                              \
-               case 1: (dest) = *(u8 *) __p;       break;            \
-               case 2: (dest) = be16_to_cpup(__p); break;            \
-               case 4: (dest) = be32_to_cpup(__p); break;            \
-               case 8: (dest) = be64_to_cpup(__p); break;            \
-               default: __buggy_use_of_MLX4_GET();                   \
-               }                                                     \
-       } while (0)
-
-#define MLX4_PUT(dest, source, offset)                               \
-       do {                                                          \
-               void *__d = ((char *) (dest) + (offset));             \
-               switch (sizeof(source)) {                             \
-               case 1: *(u8 *) __d = (source);                break; \
-               case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
-               case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
-               case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
-               default: __buggy_use_of_MLX4_PUT();                   \
-               }                                                     \
-       } while (0)
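-/*
- * Usage sketch: with a u8 destination, MLX4_GET(field, outbox,
- * QUERY_DEV_CAP_RSVD_QP_OFFSET) compiles to a plain byte load, while
- * u16/u32/u64 destinations go through the matching be*_to_cpup()
- * big-endian conversion.
- */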
-
-static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
-{
-       static const char *fname[] = {
-               [ 0] = "RC transport",
-               [ 1] = "UC transport",
-               [ 2] = "UD transport",
-               [ 3] = "XRC transport",
-               [ 4] = "reliable multicast",
-               [ 5] = "FCoIB support",
-               [ 6] = "SRQ support",
-               [ 7] = "IPoIB checksum offload",
-               [ 8] = "P_Key violation counter",
-               [ 9] = "Q_Key violation counter",
-               [10] = "VMM",
-               [12] = "DPDP",
-               [15] = "Big LSO headers",
-               [16] = "MW support",
-               [17] = "APM support",
-               [18] = "Atomic ops support",
-               [19] = "Raw multicast support",
-               [20] = "Address vector port checking support",
-               [21] = "UD multicast support",
-               [24] = "Demand paging support",
-               [25] = "Router support",
-               [30] = "IBoE support",
-               [32] = "Unicast loopback support",
-               [38] = "Wake On LAN support",
-               [40] = "UDP RSS support",
-               [41] = "Unicast VEP steering support",
-               [42] = "Multicast VEP steering support",
-               [48] = "Counters support",
-       };
-       int i;
-
-       mlx4_dbg(dev, "DEV_CAP flags:\n");
-       for (i = 0; i < ARRAY_SIZE(fname); ++i)
-               if (fname[i] && (flags & (1LL << i)))
-                       mlx4_dbg(dev, "    %s\n", fname[i]);
-}
-
-int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       u32 *inbox;
-       int err = 0;
-
-#define MOD_STAT_CFG_IN_SIZE           0x100
-
-#define MOD_STAT_CFG_PG_SZ_M_OFFSET    0x002
-#define MOD_STAT_CFG_PG_SZ_OFFSET      0x003
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       inbox = mailbox->buf;
-
-       memset(inbox, 0, MOD_STAT_CFG_IN_SIZE);
-
-       MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
-       MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
-
-       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
-                       MLX4_CMD_TIME_CLASS_A);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       u32 *outbox;
-       u8 field;
-       u32 field32, flags, ext_flags;
-       u16 size;
-       u16 stat_rate;
-       int err;
-       int i;
-
-#define QUERY_DEV_CAP_OUT_SIZE                0x100
-#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET                0x10
-#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET         0x11
-#define QUERY_DEV_CAP_RSVD_QP_OFFSET           0x12
-#define QUERY_DEV_CAP_MAX_QP_OFFSET            0x13
-#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET          0x14
-#define QUERY_DEV_CAP_MAX_SRQ_OFFSET           0x15
-#define QUERY_DEV_CAP_RSVD_EEC_OFFSET          0x16
-#define QUERY_DEV_CAP_MAX_EEC_OFFSET           0x17
-#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET         0x19
-#define QUERY_DEV_CAP_RSVD_CQ_OFFSET           0x1a
-#define QUERY_DEV_CAP_MAX_CQ_OFFSET            0x1b
-#define QUERY_DEV_CAP_MAX_MPT_OFFSET           0x1d
-#define QUERY_DEV_CAP_RSVD_EQ_OFFSET           0x1e
-#define QUERY_DEV_CAP_MAX_EQ_OFFSET            0x1f
-#define QUERY_DEV_CAP_RSVD_MTT_OFFSET          0x20
-#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET                0x21
-#define QUERY_DEV_CAP_RSVD_MRW_OFFSET          0x22
-#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET       0x23
-#define QUERY_DEV_CAP_MAX_AV_OFFSET            0x27
-#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET                0x29
-#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET                0x2b
-#define QUERY_DEV_CAP_MAX_GSO_OFFSET           0x2d
-#define QUERY_DEV_CAP_MAX_RDMA_OFFSET          0x2f
-#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET           0x33
-#define QUERY_DEV_CAP_ACK_DELAY_OFFSET         0x35
-#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET         0x36
-#define QUERY_DEV_CAP_VL_PORT_OFFSET           0x37
-#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET                0x38
-#define QUERY_DEV_CAP_MAX_GID_OFFSET           0x3b
-#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET      0x3c
-#define QUERY_DEV_CAP_MAX_PKEY_OFFSET          0x3f
-#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET         0x40
-#define QUERY_DEV_CAP_FLAGS_OFFSET             0x44
-#define QUERY_DEV_CAP_RSVD_UAR_OFFSET          0x48
-#define QUERY_DEV_CAP_UAR_SZ_OFFSET            0x49
-#define QUERY_DEV_CAP_PAGE_SZ_OFFSET           0x4b
-#define QUERY_DEV_CAP_BF_OFFSET                        0x4c
-#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET     0x4d
-#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET  0x4e
-#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET  0x4f
-#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET         0x51
-#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET    0x52
-#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET         0x55
-#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET    0x56
-#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET                0x61
-#define QUERY_DEV_CAP_RSVD_MCG_OFFSET          0x62
-#define QUERY_DEV_CAP_MAX_MCG_OFFSET           0x63
-#define QUERY_DEV_CAP_RSVD_PD_OFFSET           0x64
-#define QUERY_DEV_CAP_MAX_PD_OFFSET            0x65
-#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET      0x68
-#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET   0x80
-#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET      0x82
-#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET      0x84
-#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET     0x86
-#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET      0x88
-#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET      0x8a
-#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET      0x8c
-#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET    0x8e
-#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET      0x90
-#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET    0x92
-#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET                0x94
-#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET         0x98
-#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET                0xa0
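-       /*
-        * The offsets above are byte offsets into the firmware's
-        * QUERY_DEV_CAP output mailbox; each MLX4_GET() pulls one
-        * big-endian field out of that buffer.
-        */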
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       outbox = mailbox->buf;
-
-       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
-                          MLX4_CMD_TIME_CLASS_A);
-       if (err)
-               goto out;
-
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
-       dev_cap->reserved_qps = 1 << (field & 0xf);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
-       dev_cap->max_qps = 1 << (field & 0x1f);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
-       dev_cap->reserved_srqs = 1 << (field >> 4);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
-       dev_cap->max_srqs = 1 << (field & 0x1f);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
-       dev_cap->max_cq_sz = 1 << field;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
-       dev_cap->reserved_cqs = 1 << (field & 0xf);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
-       dev_cap->max_cqs = 1 << (field & 0x1f);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
-       dev_cap->max_mpts = 1 << (field & 0x3f);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
-       dev_cap->reserved_eqs = field & 0xf;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
-       dev_cap->max_eqs = 1 << (field & 0xf);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
-       dev_cap->reserved_mtts = 1 << (field >> 4);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
-       dev_cap->max_mrw_sz = 1 << field;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
-       dev_cap->reserved_mrws = 1 << (field & 0xf);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
-       dev_cap->max_mtt_seg = 1 << (field & 0x3f);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
-       dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
-       dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
-       field &= 0x1f;
-       if (!field)
-               dev_cap->max_gso_sz = 0;
-       else
-               dev_cap->max_gso_sz = 1 << field;
-
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
-       dev_cap->max_rdma_global = 1 << (field & 0x3f);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
-       dev_cap->local_ca_ack_delay = field & 0x1f;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
-       dev_cap->num_ports = field & 0xf;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
-       dev_cap->max_msg_sz = 1 << (field & 0x1f);
-       MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
-       dev_cap->stat_rate_support = stat_rate;
-       MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
-       MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
-       dev_cap->flags = flags | (u64)ext_flags << 32;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
-       dev_cap->reserved_uars = field >> 4;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
-       dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
-       dev_cap->min_page_sz = 1 << field;
-
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
-       if (field & 0x80) {
-               MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
-               dev_cap->bf_reg_size = 1 << (field & 0x1f);
-               MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
-               if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
-                       field = 3;
-               dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
-               mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
-                        dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
-       } else {
-               dev_cap->bf_reg_size = 0;
-               mlx4_dbg(dev, "BlueFlame not available\n");
-       }
-
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
-       dev_cap->max_sq_sg = field;
-       MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
-       dev_cap->max_sq_desc_sz = size;
-
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
-       dev_cap->max_qp_per_mcg = 1 << field;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
-       dev_cap->reserved_mgms = field & 0xf;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
-       dev_cap->max_mcgs = 1 << field;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
-       dev_cap->reserved_pds = field >> 4;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
-       dev_cap->max_pds = 1 << (field & 0x3f);
-
-       MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
-       dev_cap->rdmarc_entry_sz = size;
-       MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
-       dev_cap->qpc_entry_sz = size;
-       MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
-       dev_cap->aux_entry_sz = size;
-       MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
-       dev_cap->altc_entry_sz = size;
-       MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
-       dev_cap->eqc_entry_sz = size;
-       MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
-       dev_cap->cqc_entry_sz = size;
-       MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
-       dev_cap->srq_entry_sz = size;
-       MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
-       dev_cap->cmpt_entry_sz = size;
-       MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
-       dev_cap->mtt_entry_sz = size;
-       MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
-       dev_cap->dmpt_entry_sz = size;
-
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
-       dev_cap->max_srq_sz = 1 << field;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
-       dev_cap->max_qp_sz = 1 << field;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
-       dev_cap->resize_srq = field & 1;
-       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
-       dev_cap->max_rq_sg = field;
-       MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
-       dev_cap->max_rq_desc_sz = size;
-
-       MLX4_GET(dev_cap->bmme_flags, outbox,
-                QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
-       MLX4_GET(dev_cap->reserved_lkey, outbox,
-                QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
-       MLX4_GET(dev_cap->max_icm_sz, outbox,
-                QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
-       if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
-               MLX4_GET(dev_cap->max_counters, outbox,
-                        QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
-
-       if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
-               for (i = 1; i <= dev_cap->num_ports; ++i) {
-                       MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
-                       dev_cap->max_vl[i]         = field >> 4;
-                       MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
-                       dev_cap->ib_mtu[i]         = field >> 4;
-                       dev_cap->max_port_width[i] = field & 0xf;
-                       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
-                       dev_cap->max_gids[i]       = 1 << (field & 0xf);
-                       MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
-                       dev_cap->max_pkeys[i]      = 1 << (field & 0xf);
-               }
-       } else {
-#define QUERY_PORT_SUPPORTED_TYPE_OFFSET       0x00
-#define QUERY_PORT_MTU_OFFSET                  0x01
-#define QUERY_PORT_ETH_MTU_OFFSET              0x02
-#define QUERY_PORT_WIDTH_OFFSET                        0x06
-#define QUERY_PORT_MAX_GID_PKEY_OFFSET         0x07
-#define QUERY_PORT_MAX_MACVLAN_OFFSET          0x0a
-#define QUERY_PORT_MAX_VL_OFFSET               0x0b
-#define QUERY_PORT_MAC_OFFSET                  0x10
-#define QUERY_PORT_TRANS_VENDOR_OFFSET         0x18
-#define QUERY_PORT_WAVELENGTH_OFFSET           0x1c
-#define QUERY_PORT_TRANS_CODE_OFFSET           0x20
-
-               for (i = 1; i <= dev_cap->num_ports; ++i) {
-                       err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
-                                          MLX4_CMD_TIME_CLASS_B);
-                       if (err)
-                               goto out;
-
-                       MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
-                       dev_cap->supported_port_types[i] = field & 3;
-                       MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
-                       dev_cap->ib_mtu[i]         = field & 0xf;
-                       MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
-                       dev_cap->max_port_width[i] = field & 0xf;
-                       MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
-                       dev_cap->max_gids[i]       = 1 << (field >> 4);
-                       dev_cap->max_pkeys[i]      = 1 << (field & 0xf);
-                       MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
-                       dev_cap->max_vl[i]         = field & 0xf;
-                       MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
-                       dev_cap->log_max_macs[i]  = field & 0xf;
-                       dev_cap->log_max_vlans[i] = field >> 4;
-                       MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
-                       MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
-                       MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
-                       dev_cap->trans_type[i] = field32 >> 24;
-                       dev_cap->vendor_oui[i] = field32 & 0xffffff;
-                       MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
-                       MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
-               }
-       }
-
-       mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
-                dev_cap->bmme_flags, dev_cap->reserved_lkey);
-
-       /*
-        * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
-        * we can't use any EQs whose doorbell falls on that page,
-        * even if the EQ itself isn't reserved.
-        */
-       dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
-                                   dev_cap->reserved_eqs);
-
-       mlx4_dbg(dev, "Max ICM size %lld MB\n",
-                (unsigned long long) dev_cap->max_icm_sz >> 20);
-       mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
-                dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
-       mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
-                dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
-       mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
-                dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
-       mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
-                dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz);
-       mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
-                dev_cap->reserved_mrws, dev_cap->reserved_mtts);
-       mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
-                dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
-       mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
-                dev_cap->max_qp_per_mcg, dev_cap->reserved_mgms);
-       mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
-                dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
-       mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
-                dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
-                dev_cap->max_port_width[1]);
-       mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
-                dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
-       mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
-                dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
-       mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
-       mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
-
-       dump_dev_cap_flags(dev, dev_cap->flags);
-
-out:
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_icm_iter iter;
-       __be64 *pages;
-       int lg;
-       int nent = 0;
-       int i;
-       int err = 0;
-       int ts = 0, tc = 0;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
-       pages = mailbox->buf;
-
-       for (mlx4_icm_first(icm, &iter);
-            !mlx4_icm_last(&iter);
-            mlx4_icm_next(&iter)) {
-               /*
-                * We have to pass pages that are aligned to their
-                * size, so find the least significant 1 in the
-                * address or size and use that as our log2 size.
-                */
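-               /*
-                * Hypothetical example: a chunk at address 0x12000 of
-                * size 0x2000 gives lg = ffs(0x12000 | 0x2000) - 1 = 13,
-                * so it is passed to firmware as one 8 KB page.
-                */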
-               lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
-               if (lg < MLX4_ICM_PAGE_SHIFT) {
-                       mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
-                                  MLX4_ICM_PAGE_SIZE,
-                                  (unsigned long long) mlx4_icm_addr(&iter),
-                                  mlx4_icm_size(&iter));
-                       err = -EINVAL;
-                       goto out;
-               }
-
-               for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
-                       if (virt != -1) {
-                               pages[nent * 2] = cpu_to_be64(virt);
-                               virt += 1 << lg;
-                       }
-
-                       pages[nent * 2 + 1] =
-                               cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
-                                           (lg - MLX4_ICM_PAGE_SHIFT));
-                       ts += 1 << (lg - 10);
-                       ++tc;
-
-                       if (++nent == MLX4_MAILBOX_SIZE / 16) {
-                               err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
-                                               MLX4_CMD_TIME_CLASS_B);
-                               if (err)
-                                       goto out;
-                               nent = 0;
-                       }
-               }
-       }
-
-       if (nent)
-               err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
-       if (err)
-               goto out;
-
-       switch (op) {
-       case MLX4_CMD_MAP_FA:
-               mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
-               break;
-       case MLX4_CMD_MAP_ICM_AUX:
-               mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
-               break;
-       case MLX4_CMD_MAP_ICM:
-               mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
-                         tc, ts, (unsigned long long) virt - (ts << 10));
-               break;
-       }
-
-out:
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
-{
-       return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
-}
-
-int mlx4_UNMAP_FA(struct mlx4_dev *dev)
-{
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
-}
-
-int mlx4_RUN_FW(struct mlx4_dev *dev)
-{
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
-}
-
-int mlx4_QUERY_FW(struct mlx4_dev *dev)
-{
-       struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
-       struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
-       struct mlx4_cmd_mailbox *mailbox;
-       u32 *outbox;
-       int err = 0;
-       u64 fw_ver;
-       u16 cmd_if_rev;
-       u8 lg;
-
-#define QUERY_FW_OUT_SIZE             0x100
-#define QUERY_FW_VER_OFFSET            0x00
-#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
-#define QUERY_FW_MAX_CMD_OFFSET        0x0f
-#define QUERY_FW_ERR_START_OFFSET      0x30
-#define QUERY_FW_ERR_SIZE_OFFSET       0x38
-#define QUERY_FW_ERR_BAR_OFFSET        0x3c
-
-#define QUERY_FW_SIZE_OFFSET           0x00
-#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
-#define QUERY_FW_CLR_INT_BAR_OFFSET    0x28
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       outbox = mailbox->buf;
-
-       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
-                           MLX4_CMD_TIME_CLASS_A);
-       if (err)
-               goto out;
-
-       MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
-       /*
-        * FW subminor version is at more significant bits than minor
-        * version, so swap here.
-        */
-       dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
-               ((fw_ver & 0xffff0000ull) >> 16) |
-               ((fw_ver & 0x0000ffffull) << 16);
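-       /*
-        * Hypothetical example: a raw value of 0x000200030001 (major 2,
-        * subminor 3, minor 1) is stored as 0x000200010003 and later
-        * printed as "2.1.003".
-        */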
-
-       MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
-       if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
-           cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
-               mlx4_err(dev, "Installed FW has unsupported "
-                        "command interface revision %d.\n",
-                        cmd_if_rev);
-               mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
-                        (int) (dev->caps.fw_ver >> 32),
-                        (int) (dev->caps.fw_ver >> 16) & 0xffff,
-                        (int) dev->caps.fw_ver & 0xffff);
-               mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
-                        MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
-               err = -ENODEV;
-               goto out;
-       }
-
-       if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
-               dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
-
-       MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
-       cmd->max_cmds = 1 << lg;
-
-       mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
-                (int) (dev->caps.fw_ver >> 32),
-                (int) (dev->caps.fw_ver >> 16) & 0xffff,
-                (int) dev->caps.fw_ver & 0xffff,
-                cmd_if_rev, cmd->max_cmds);
-
-       MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
-       MLX4_GET(fw->catas_size,   outbox, QUERY_FW_ERR_SIZE_OFFSET);
-       MLX4_GET(fw->catas_bar,    outbox, QUERY_FW_ERR_BAR_OFFSET);
-       fw->catas_bar = (fw->catas_bar >> 6) * 2;
-
-       mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
-                (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
-
-       MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
-       MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
-       MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
-       fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
-
-       mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
-
-       /*
-        * Round up number of system pages needed in case
-        * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
-        */
-       fw->fw_pages =
-               ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
-               (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
-
-       mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
-                (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
-
-out:
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-static void get_board_id(void *vsd, char *board_id)
-{
-       int i;
-
-#define VSD_OFFSET_SIG1                0x00
-#define VSD_OFFSET_SIG2                0xde
-#define VSD_OFFSET_MLX_BOARD_ID        0xd0
-#define VSD_OFFSET_TS_BOARD_ID 0x20
-
-#define VSD_SIGNATURE_TOPSPIN  0x5ad
-
-       memset(board_id, 0, MLX4_BOARD_ID_LEN);
-
-       if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
-           be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
-               strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
-       } else {
-               /*
-                * The board ID is a string but the firmware byte
-                * swaps each 4-byte word before passing it back to
-                * us.  Therefore we need to swab it before printing.
-                */
-               for (i = 0; i < 4; ++i)
-                       ((u32 *) board_id)[i] =
-                               swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
-       }
-}
-
-int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       u32 *outbox;
-       int err;
-
-#define QUERY_ADAPTER_OUT_SIZE             0x100
-#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
-#define QUERY_ADAPTER_VSD_OFFSET           0x20
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       outbox = mailbox->buf;
-
-       err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
-                          MLX4_CMD_TIME_CLASS_A);
-       if (err)
-               goto out;
-
-       MLX4_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);
-
-       get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
-                    adapter->board_id);
-
-out:
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       __be32 *inbox;
-       int err;
-
-#define INIT_HCA_IN_SIZE                0x200
-#define INIT_HCA_VERSION_OFFSET                 0x000
-#define         INIT_HCA_VERSION                2
-#define INIT_HCA_CACHELINE_SZ_OFFSET    0x0e
-#define INIT_HCA_FLAGS_OFFSET           0x014
-#define INIT_HCA_QPC_OFFSET             0x020
-#define         INIT_HCA_QPC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x10)
-#define         INIT_HCA_LOG_QP_OFFSET          (INIT_HCA_QPC_OFFSET + 0x17)
-#define         INIT_HCA_SRQC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x28)
-#define         INIT_HCA_LOG_SRQ_OFFSET         (INIT_HCA_QPC_OFFSET + 0x2f)
-#define         INIT_HCA_CQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x30)
-#define         INIT_HCA_LOG_CQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x37)
-#define         INIT_HCA_ALTC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x40)
-#define         INIT_HCA_AUXC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x50)
-#define         INIT_HCA_EQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x60)
-#define         INIT_HCA_LOG_EQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x67)
-#define         INIT_HCA_RDMARC_BASE_OFFSET     (INIT_HCA_QPC_OFFSET + 0x70)
-#define         INIT_HCA_LOG_RD_OFFSET          (INIT_HCA_QPC_OFFSET + 0x77)
-#define INIT_HCA_MCAST_OFFSET           0x0c0
-#define         INIT_HCA_MC_BASE_OFFSET         (INIT_HCA_MCAST_OFFSET + 0x00)
-#define         INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
-#define         INIT_HCA_LOG_MC_HASH_SZ_OFFSET  (INIT_HCA_MCAST_OFFSET + 0x16)
-#define  INIT_HCA_UC_STEERING_OFFSET    (INIT_HCA_MCAST_OFFSET + 0x18)
-#define         INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
-#define INIT_HCA_TPT_OFFSET             0x0f0
-#define         INIT_HCA_DMPT_BASE_OFFSET       (INIT_HCA_TPT_OFFSET + 0x00)
-#define         INIT_HCA_LOG_MPT_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x0b)
-#define         INIT_HCA_MTT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x10)
-#define         INIT_HCA_CMPT_BASE_OFFSET       (INIT_HCA_TPT_OFFSET + 0x18)
-#define INIT_HCA_UAR_OFFSET             0x120
-#define         INIT_HCA_LOG_UAR_SZ_OFFSET      (INIT_HCA_UAR_OFFSET + 0x0a)
-#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       inbox = mailbox->buf;
-
-       memset(inbox, 0, INIT_HCA_IN_SIZE);
-
-       *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
-
-       *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
-               (ilog2(cache_line_size()) - 4) << 5;
-
-#if defined(__LITTLE_ENDIAN)
-       *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
-#elif defined(__BIG_ENDIAN)
-       *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
-#else
-#error Host endianness not defined
-#endif
-       /* Check port for UD address vector: */
-       *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
-
-       /* Enable IPoIB checksumming if we can: */
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
-               *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
-
-       /* Enable QoS support if module parameter set */
-       if (enable_qos)
-               *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
-
-       /* enable counters */
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
-               *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
-
-       /* QPC/EEC/CQC/EQC/RDMARC attributes */
-
-       MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
-       MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
-       MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
-       MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
-       MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
-       MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
-       MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
-       MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
-       MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
-       MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
-       MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
-       MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
-
-       /* multicast attributes */
-
-       MLX4_PUT(inbox, param->mc_base,         INIT_HCA_MC_BASE_OFFSET);
-       MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
-       MLX4_PUT(inbox, param->log_mc_hash_sz,  INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
-               MLX4_PUT(inbox, (u8) (1 << 3),  INIT_HCA_UC_STEERING_OFFSET);
-       MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
-
-       /* TPT attributes */
-
-       MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
-       MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
-       MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
-       MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);
-
-       /* UAR attributes */
-
-       MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
-       MLX4_PUT(inbox, param->log_uar_sz,      INIT_HCA_LOG_UAR_SZ_OFFSET);
-
-       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
-
-       if (err)
-               mlx4_err(dev, "INIT_HCA returns %d\n", err);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
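
The cacheline byte written at INIT_HCA_CACHELINE_SZ_OFFSET above packs
ilog2 of the host cacheline size, biased by 4 and shifted into the top
three bits; the UAR page-size byte is simply PAGE_SHIFT - 12. A worked
check of the cacheline encoding (standalone C, illustrative only, not
driver code):

/* (ilog2(cacheline) - 4) << 5: 64 B -> 0x40, 128 B -> 0x60 */
#include <stdio.h>

static unsigned char cacheline_field(unsigned int cacheline_bytes)
{
	unsigned int log = 0;

	while ((1u << log) < cacheline_bytes)	/* ilog2 for a power of two */
		++log;

	return (unsigned char)((log - 4) << 5);
}

int main(void)
{
	printf("64 B  cacheline -> 0x%02x\n", cacheline_field(64));
	printf("128 B cacheline -> 0x%02x\n", cacheline_field(128));
	return 0;
}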
-
-int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       u32 *inbox;
-       int err;
-       u32 flags;
-       u16 field;
-
-       if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
-#define INIT_PORT_IN_SIZE          256
-#define INIT_PORT_FLAGS_OFFSET     0x00
-#define INIT_PORT_FLAG_SIG         (1 << 18)
-#define INIT_PORT_FLAG_NG          (1 << 17)
-#define INIT_PORT_FLAG_G0          (1 << 16)
-#define INIT_PORT_VL_SHIFT         4
-#define INIT_PORT_PORT_WIDTH_SHIFT 8
-#define INIT_PORT_MTU_OFFSET       0x04
-#define INIT_PORT_MAX_GID_OFFSET   0x06
-#define INIT_PORT_MAX_PKEY_OFFSET  0x0a
-#define INIT_PORT_GUID0_OFFSET     0x10
-#define INIT_PORT_NODE_GUID_OFFSET 0x18
-#define INIT_PORT_SI_GUID_OFFSET   0x20
-
-               mailbox = mlx4_alloc_cmd_mailbox(dev);
-               if (IS_ERR(mailbox))
-                       return PTR_ERR(mailbox);
-               inbox = mailbox->buf;
-
-               memset(inbox, 0, INIT_PORT_IN_SIZE);
-
-               flags = 0;
-               flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
-               flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
-               MLX4_PUT(inbox, flags,            INIT_PORT_FLAGS_OFFSET);
-
-               field = 128 << dev->caps.ib_mtu_cap[port];
-               MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
-               field = dev->caps.gid_table_len[port];
-               MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
-               field = dev->caps.pkey_table_len[port];
-               MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
-
-               err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
-                              MLX4_CMD_TIME_CLASS_A);
-
-               mlx4_free_cmd_mailbox(dev, mailbox);
-       } else
-               err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
-                              MLX4_CMD_TIME_CLASS_A);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
-
-int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
-{
-       return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
-}
-EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
-
-int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
-{
-       return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
-}
-
-int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
-{
-       int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
-                              MLX4_CMD_SET_ICM_SIZE,
-                              MLX4_CMD_TIME_CLASS_A);
-       if (ret)
-               return ret;
-
-       /*
-        * Round up number of system pages needed in case
-        * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
-        */
-       *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
-               (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
-
-       return 0;
-}
-
-int mlx4_NOP(struct mlx4_dev *dev)
-{
-       /* Input modifier of 0x1f means "finish as soon as possible." */
-       return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
-}
-
-#define MLX4_WOL_SETUP_MODE (5 << 28)
-int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
-{
-       u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
-
-       return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
-                           MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
-}
-EXPORT_SYMBOL_GPL(mlx4_wol_read);
-
-int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
-{
-       u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
-
-       return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
-                                       MLX4_CMD_TIME_CLASS_A);
-}
-EXPORT_SYMBOL_GPL(mlx4_wol_write);
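
The ALIGN-then-shift idiom used by both mlx4_QUERY_FW() and
mlx4_SET_ICM_SIZE() above converts a count of 4 KB ICM pages into whole
system pages when PAGE_SIZE is larger than MLX4_ICM_PAGE_SIZE. A worked
example (standalone C, illustrative only; the 16 KB page size is a
hypothetical host, not a driver assumption):

#include <stdio.h>
#include <stdint.h>

#define ICM_PAGE_SHIFT	12	/* mirrors MLX4_ICM_PAGE_SHIFT */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

static uint64_t icm_to_sys_pages(uint64_t icm_pages, unsigned int page_shift)
{
	uint64_t ratio = 1ULL << (page_shift - ICM_PAGE_SHIFT);

	return ALIGN_UP(icm_pages, ratio) >> (page_shift - ICM_PAGE_SHIFT);
}

int main(void)
{
	/* 10 ICM pages on a 16 KB-page host: ALIGN(10, 4) = 12, 12 >> 2 = 3 */
	printf("%llu system pages\n",
	       (unsigned long long)icm_to_sys_pages(10, 14));
	return 0;
}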
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
deleted file mode 100644 (file)
index 1e8ecc3..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX4_FW_H
-#define MLX4_FW_H
-
-#include "mlx4.h"
-#include "icm.h"
-
-struct mlx4_mod_stat_cfg {
-       u8 log_pg_sz;
-       u8 log_pg_sz_m;
-};
-
-struct mlx4_dev_cap {
-       int max_srq_sz;
-       int max_qp_sz;
-       int reserved_qps;
-       int max_qps;
-       int reserved_srqs;
-       int max_srqs;
-       int max_cq_sz;
-       int reserved_cqs;
-       int max_cqs;
-       int max_mpts;
-       int reserved_eqs;
-       int max_eqs;
-       int reserved_mtts;
-       int max_mrw_sz;
-       int reserved_mrws;
-       int max_mtt_seg;
-       int max_requester_per_qp;
-       int max_responder_per_qp;
-       int max_rdma_global;
-       int local_ca_ack_delay;
-       int num_ports;
-       u32 max_msg_sz;
-       int ib_mtu[MLX4_MAX_PORTS + 1];
-       int max_port_width[MLX4_MAX_PORTS + 1];
-       int max_vl[MLX4_MAX_PORTS + 1];
-       int max_gids[MLX4_MAX_PORTS + 1];
-       int max_pkeys[MLX4_MAX_PORTS + 1];
-       u64 def_mac[MLX4_MAX_PORTS + 1];
-       u16 eth_mtu[MLX4_MAX_PORTS + 1];
-       int trans_type[MLX4_MAX_PORTS + 1];
-       int vendor_oui[MLX4_MAX_PORTS + 1];
-       u16 wavelength[MLX4_MAX_PORTS + 1];
-       u64 trans_code[MLX4_MAX_PORTS + 1];
-       u16 stat_rate_support;
-       u64 flags;
-       int reserved_uars;
-       int uar_size;
-       int min_page_sz;
-       int bf_reg_size;
-       int bf_regs_per_page;
-       int max_sq_sg;
-       int max_sq_desc_sz;
-       int max_rq_sg;
-       int max_rq_desc_sz;
-       int max_qp_per_mcg;
-       int reserved_mgms;
-       int max_mcgs;
-       int reserved_pds;
-       int max_pds;
-       int qpc_entry_sz;
-       int rdmarc_entry_sz;
-       int altc_entry_sz;
-       int aux_entry_sz;
-       int srq_entry_sz;
-       int cqc_entry_sz;
-       int eqc_entry_sz;
-       int dmpt_entry_sz;
-       int cmpt_entry_sz;
-       int mtt_entry_sz;
-       int resize_srq;
-       u32 bmme_flags;
-       u32 reserved_lkey;
-       u64 max_icm_sz;
-       int max_gso_sz;
-       u8  supported_port_types[MLX4_MAX_PORTS + 1];
-       u8  log_max_macs[MLX4_MAX_PORTS + 1];
-       u8  log_max_vlans[MLX4_MAX_PORTS + 1];
-       u32 max_counters;
-};
-
-struct mlx4_adapter {
-       char board_id[MLX4_BOARD_ID_LEN];
-       u8   inta_pin;
-};
-
-struct mlx4_init_hca_param {
-       u64 qpc_base;
-       u64 rdmarc_base;
-       u64 auxc_base;
-       u64 altc_base;
-       u64 srqc_base;
-       u64 cqc_base;
-       u64 eqc_base;
-       u64 mc_base;
-       u64 dmpt_base;
-       u64 cmpt_base;
-       u64 mtt_base;
-       u16 log_mc_entry_sz;
-       u16 log_mc_hash_sz;
-       u8  log_num_qps;
-       u8  log_num_srqs;
-       u8  log_num_cqs;
-       u8  log_num_eqs;
-       u8  log_rd_per_qp;
-       u8  log_mc_table_sz;
-       u8  log_mpt_sz;
-       u8  log_uar_sz;
-};
-
-struct mlx4_init_ib_param {
-       int port_width;
-       int vl_cap;
-       int mtu_cap;
-       u16 gid_cap;
-       u16 pkey_cap;
-       int set_guid0;
-       u64 guid0;
-       int set_node_guid;
-       u64 node_guid;
-       int set_si_guid;
-       u64 si_guid;
-};
-
-struct mlx4_set_ib_param {
-       int set_si_guid;
-       int reset_qkey_viol;
-       u64 si_guid;
-       u32 cap_mask;
-};
-
-int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
-int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
-int mlx4_UNMAP_FA(struct mlx4_dev *dev);
-int mlx4_RUN_FW(struct mlx4_dev *dev);
-int mlx4_QUERY_FW(struct mlx4_dev *dev);
-int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
-int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
-int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
-int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
-int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
-int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
-int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
-int mlx4_NOP(struct mlx4_dev *dev);
-int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
-
-#endif /* MLX4_FW_H */
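
Taken together, the prototypes above are the firmware bring-up command
set. A hedged sketch of the order in which the driver issues them (the
real flow lives in main.c's mlx4_init_hca() path and adds ICM sizing,
mapping of the per-object ICM tables, and full error unwinding; the
function and parameter names below are illustrative):

static int example_fw_bringup(struct mlx4_dev *dev,
			      struct mlx4_icm *fw_area,
			      struct mlx4_icm *aux_icm,
			      struct mlx4_init_hca_param *param,
			      u64 icm_size)
{
	struct mlx4_dev_cap dev_cap;
	u64 aux_pages;
	int err;

	err = mlx4_QUERY_FW(dev);		/* learn FW area size     */
	if (err)
		return err;
	err = mlx4_MAP_FA(dev, fw_area);	/* hand the FW its memory */
	if (err)
		return err;
	err = mlx4_RUN_FW(dev);			/* start the firmware     */
	if (err)
		return err;
	err = mlx4_QUERY_DEV_CAP(dev, &dev_cap); /* read device limits    */
	if (err)
		return err;
	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err)
		return err;
	err = mlx4_MAP_ICM_AUX(dev, aux_icm);	/* map auxiliary ICM      */
	if (err)
		return err;
	return mlx4_INIT_HCA(dev, param);	/* bring the HCA up       */
}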
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
deleted file mode 100644 (file)
index 02393fd..0000000
+++ /dev/null
@@ -1,430 +0,0 @@
-/*
- * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-
-#include <linux/mlx4/cmd.h>
-
-#include "mlx4.h"
-#include "icm.h"
-#include "fw.h"
-
-/*
- * We allocate in the largest chunks we can, up to a maximum of 256 KB
- * per chunk.
- */
-enum {
-       MLX4_ICM_ALLOC_SIZE     = 1 << 18,
-       MLX4_TABLE_CHUNK_SIZE   = 1 << 18
-};
-
-static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
-{
-       int i;
-
-       if (chunk->nsg > 0)
-               pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
-                            PCI_DMA_BIDIRECTIONAL);
-
-       for (i = 0; i < chunk->npages; ++i)
-               __free_pages(sg_page(&chunk->mem[i]),
-                            get_order(chunk->mem[i].length));
-}
-
-static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
-{
-       int i;
-
-       for (i = 0; i < chunk->npages; ++i)
-               dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
-                                 lowmem_page_address(sg_page(&chunk->mem[i])),
-                                 sg_dma_address(&chunk->mem[i]));
-}
-
-void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
-{
-       struct mlx4_icm_chunk *chunk, *tmp;
-
-       if (!icm)
-               return;
-
-       list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
-               if (coherent)
-                       mlx4_free_icm_coherent(dev, chunk);
-               else
-                       mlx4_free_icm_pages(dev, chunk);
-
-               kfree(chunk);
-       }
-
-       kfree(icm);
-}
-
-static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
-{
-       struct page *page;
-
-       page = alloc_pages(gfp_mask, order);
-       if (!page)
-               return -ENOMEM;
-
-       sg_set_page(mem, page, PAGE_SIZE << order, 0);
-       return 0;
-}
-
-static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
-                                   int order, gfp_t gfp_mask)
-{
-       void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
-                                      &sg_dma_address(mem), gfp_mask);
-       if (!buf)
-               return -ENOMEM;
-
-       sg_set_buf(mem, buf, PAGE_SIZE << order);
-       BUG_ON(mem->offset);
-       sg_dma_len(mem) = PAGE_SIZE << order;
-       return 0;
-}
-
-struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
-                               gfp_t gfp_mask, int coherent)
-{
-       struct mlx4_icm *icm;
-       struct mlx4_icm_chunk *chunk = NULL;
-       int cur_order;
-       int ret;
-
-       /* We use sg_set_buf for coherent allocs, which assumes low memory */
-       BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
-
-       icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
-       if (!icm)
-               return NULL;
-
-       icm->refcount = 0;
-       INIT_LIST_HEAD(&icm->chunk_list);
-
-       cur_order = get_order(MLX4_ICM_ALLOC_SIZE);
-
-       while (npages > 0) {
-               if (!chunk) {
-                       chunk = kmalloc(sizeof *chunk,
-                                       gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
-                       if (!chunk)
-                               goto fail;
-
-                       sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
-                       chunk->npages = 0;
-                       chunk->nsg    = 0;
-                       list_add_tail(&chunk->list, &icm->chunk_list);
-               }
-
-               while (1 << cur_order > npages)
-                       --cur_order;
-
-               if (coherent)
-                       ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
-                                                     &chunk->mem[chunk->npages],
-                                                     cur_order, gfp_mask);
-               else
-                       ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
-                                                  cur_order, gfp_mask);
-
-               if (ret) {
-                       if (--cur_order < 0)
-                               goto fail;
-                       else
-                               continue;
-               }
-
-               ++chunk->npages;
-
-               if (coherent)
-                       ++chunk->nsg;
-               else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
-                       chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
-                                               chunk->npages,
-                                               PCI_DMA_BIDIRECTIONAL);
-
-                       if (chunk->nsg <= 0)
-                               goto fail;
-               }
-
-               if (chunk->npages == MLX4_ICM_CHUNK_LEN)
-                       chunk = NULL;
-
-               npages -= 1 << cur_order;
-       }
-
-       if (!coherent && chunk) {
-               chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
-                                       chunk->npages,
-                                       PCI_DMA_BIDIRECTIONAL);
-
-               if (chunk->nsg <= 0)
-                       goto fail;
-       }
-
-       return icm;
-
-fail:
-       mlx4_free_icm(dev, icm, coherent);
-       return NULL;
-}
-
-static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
-{
-       return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
-}
-
-static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
-{
-       return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
-                       MLX4_CMD_TIME_CLASS_B);
-}
-
-int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
-{
-       return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
-}
-
-int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
-{
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
-}
-
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
-{
-       int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
-       int ret = 0;
-
-       mutex_lock(&table->mutex);
-
-       if (table->icm[i]) {
-               ++table->icm[i]->refcount;
-               goto out;
-       }
-
-       table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
-                                      (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
-                                      __GFP_NOWARN, table->coherent);
-       if (!table->icm[i]) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
-                        (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
-               mlx4_free_icm(dev, table->icm[i], table->coherent);
-               table->icm[i] = NULL;
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       ++table->icm[i]->refcount;
-
-out:
-       mutex_unlock(&table->mutex);
-       return ret;
-}
-
-void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
-{
-       int i;
-
-       i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
-
-       mutex_lock(&table->mutex);
-
-       if (--table->icm[i]->refcount == 0) {
-               mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
-                              MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
-               mlx4_free_icm(dev, table->icm[i], table->coherent);
-               table->icm[i] = NULL;
-       }
-
-       mutex_unlock(&table->mutex);
-}
-
-void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
-{
-       int idx, offset, dma_offset, i;
-       struct mlx4_icm_chunk *chunk;
-       struct mlx4_icm *icm;
-       struct page *page = NULL;
-
-       if (!table->lowmem)
-               return NULL;
-
-       mutex_lock(&table->mutex);
-
-       idx = (obj & (table->num_obj - 1)) * table->obj_size;
-       icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
-       dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;
-
-       if (!icm)
-               goto out;
-
-       list_for_each_entry(chunk, &icm->chunk_list, list) {
-               for (i = 0; i < chunk->npages; ++i) {
-                       if (dma_handle && dma_offset >= 0) {
-                               if (sg_dma_len(&chunk->mem[i]) > dma_offset)
-                                       *dma_handle = sg_dma_address(&chunk->mem[i]) +
-                                               dma_offset;
-                               dma_offset -= sg_dma_len(&chunk->mem[i]);
-                       }
-                       /*
-                        * DMA mapping can merge pages but not split them,
-                        * so if we found the page, dma_handle has
-                        * already been assigned.
-                        */
-                       if (chunk->mem[i].length > offset) {
-                               page = sg_page(&chunk->mem[i]);
-                               goto out;
-                       }
-                       offset -= chunk->mem[i].length;
-               }
-       }
-
-out:
-       mutex_unlock(&table->mutex);
-       return page ? lowmem_page_address(page) + offset : NULL;
-}
-
-int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-                        int start, int end)
-{
-       int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
-       int i, err;
-
-       for (i = start; i <= end; i += inc) {
-               err = mlx4_table_get(dev, table, i);
-               if (err)
-                       goto fail;
-       }
-
-       return 0;
-
-fail:
-       while (i > start) {
-               i -= inc;
-               mlx4_table_put(dev, table, i);
-       }
-
-       return err;
-}
-
-void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-                         int start, int end)
-{
-       int i;
-
-       for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
-               mlx4_table_put(dev, table, i);
-}
-
-int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-                       u64 virt, int obj_size, int nobj, int reserved,
-                       int use_lowmem, int use_coherent)
-{
-       int obj_per_chunk;
-       int num_icm;
-       unsigned chunk_size;
-       int i;
-
-       obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
-       num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
-
-       table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
-       if (!table->icm)
-               return -ENOMEM;
-       table->virt     = virt;
-       table->num_icm  = num_icm;
-       table->num_obj  = nobj;
-       table->obj_size = obj_size;
-       table->lowmem   = use_lowmem;
-       table->coherent = use_coherent;
-       mutex_init(&table->mutex);
-
-       for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
-               chunk_size = MLX4_TABLE_CHUNK_SIZE;
-               if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
-                       chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);
-
-               table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
-                                              (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
-                                              __GFP_NOWARN, use_coherent);
-               if (!table->icm[i])
-                       goto err;
-               if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
-                       mlx4_free_icm(dev, table->icm[i], use_coherent);
-                       table->icm[i] = NULL;
-                       goto err;
-               }
-
-               /*
-                * Add a reference to this ICM chunk so that it never
-                * gets freed (since it contains reserved firmware objects).
-                */
-               ++table->icm[i]->refcount;
-       }
-
-       return 0;
-
-err:
-       for (i = 0; i < num_icm; ++i)
-               if (table->icm[i]) {
-                       mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
-                                      MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
-                       mlx4_free_icm(dev, table->icm[i], use_coherent);
-               }
-
-       return -ENOMEM;
-}
-
-void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
-{
-       int i;
-
-       for (i = 0; i < table->num_icm; ++i)
-               if (table->icm[i]) {
-                       mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
-                                      MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
-                       mlx4_free_icm(dev, table->icm[i], table->coherent);
-               }
-
-       kfree(table->icm);
-}
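
The chunk arithmetic in mlx4_table_get() and mlx4_table_find() above is
the heart of this file: a table is an array of 256 KB ICM chunks, and an
object index is split into a chunk number and a byte offset. A worked
example (standalone C, illustrative only; the num_obj wrap mask is
dropped for clarity):

#include <stdio.h>

#define TABLE_CHUNK_SIZE (1 << 18)	/* mirrors MLX4_TABLE_CHUNK_SIZE */

int main(void)
{
	int obj_size = 32;	/* hypothetical small context entry */
	int obj = 20000;

	int chunk  = obj / (TABLE_CHUNK_SIZE / obj_size);	/* -> 2      */
	int offset = obj * obj_size % TABLE_CHUNK_SIZE;		/* -> 115712 */

	printf("object %d: chunk %d, byte offset %d\n", obj, chunk, offset);
	return 0;
}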
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
deleted file mode 100644 (file)
index b10c07a..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX4_ICM_H
-#define MLX4_ICM_H
-
-#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/mutex.h>
-
-#define MLX4_ICM_CHUNK_LEN                                             \
-       ((256 - sizeof (struct list_head) - 2 * sizeof (int)) /         \
-        (sizeof (struct scatterlist)))
-
-enum {
-       MLX4_ICM_PAGE_SHIFT     = 12,
-       MLX4_ICM_PAGE_SIZE      = 1 << MLX4_ICM_PAGE_SHIFT,
-};
-
-struct mlx4_icm_chunk {
-       struct list_head        list;
-       int                     npages;
-       int                     nsg;
-       struct scatterlist      mem[MLX4_ICM_CHUNK_LEN];
-};
-
-struct mlx4_icm {
-       struct list_head        chunk_list;
-       int                     refcount;
-};
-
-struct mlx4_icm_iter {
-       struct mlx4_icm        *icm;
-       struct mlx4_icm_chunk  *chunk;
-       int                     page_idx;
-};
-
-struct mlx4_dev;
-
-struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
-                               gfp_t gfp_mask, int coherent);
-void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
-
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
-void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
-int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-                        int start, int end);
-void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-                         int start, int end);
-int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-                       u64 virt, int obj_size, int nobj, int reserved,
-                       int use_lowmem, int use_coherent);
-void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
-void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
-
-static inline void mlx4_icm_first(struct mlx4_icm *icm,
-                                 struct mlx4_icm_iter *iter)
-{
-       iter->icm      = icm;
-       iter->chunk    = list_empty(&icm->chunk_list) ?
-               NULL : list_entry(icm->chunk_list.next,
-                                 struct mlx4_icm_chunk, list);
-       iter->page_idx = 0;
-}
-
-static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
-{
-       return !iter->chunk;
-}
-
-static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
-{
-       if (++iter->page_idx >= iter->chunk->nsg) {
-               if (iter->chunk->list.next == &iter->icm->chunk_list) {
-                       iter->chunk = NULL;
-                       return;
-               }
-
-               iter->chunk = list_entry(iter->chunk->list.next,
-                                        struct mlx4_icm_chunk, list);
-               iter->page_idx = 0;
-       }
-}
-
-static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
-{
-       return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
-}
-
-static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
-{
-       return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
-}
-
-int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
-int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
-
-#endif /* MLX4_ICM_H */
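
The inline helpers above give fw.c's mlx4_map_cmd() a flat view of an
ICM's DMA-mapped regions. A minimal usage sketch (kernel context
assumed; the function name is illustrative):

static void example_walk(struct mlx4_icm *icm)
{
	struct mlx4_icm_iter iter;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		dma_addr_t    addr = mlx4_icm_addr(&iter);
		unsigned long size = mlx4_icm_size(&iter);

		/* each (addr, size) pair is one contiguous DMA region */
		(void)addr;
		(void)size;
	}
}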
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
deleted file mode 100644 (file)
index 73c94fc..0000000
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/slab.h>
-
-#include "mlx4.h"
-
-struct mlx4_device_context {
-       struct list_head        list;
-       struct mlx4_interface  *intf;
-       void                   *context;
-};
-
-static LIST_HEAD(intf_list);
-static LIST_HEAD(dev_list);
-static DEFINE_MUTEX(intf_mutex);
-
-static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
-{
-       struct mlx4_device_context *dev_ctx;
-
-       dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL);
-       if (!dev_ctx)
-               return;
-
-       dev_ctx->intf    = intf;
-       dev_ctx->context = intf->add(&priv->dev);
-
-       if (dev_ctx->context) {
-               spin_lock_irq(&priv->ctx_lock);
-               list_add_tail(&dev_ctx->list, &priv->ctx_list);
-               spin_unlock_irq(&priv->ctx_lock);
-       } else
-               kfree(dev_ctx);
-}
-
-static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
-{
-       struct mlx4_device_context *dev_ctx;
-
-       list_for_each_entry(dev_ctx, &priv->ctx_list, list)
-               if (dev_ctx->intf == intf) {
-                       spin_lock_irq(&priv->ctx_lock);
-                       list_del(&dev_ctx->list);
-                       spin_unlock_irq(&priv->ctx_lock);
-
-                       intf->remove(&priv->dev, dev_ctx->context);
-                       kfree(dev_ctx);
-                       return;
-               }
-}
-
-int mlx4_register_interface(struct mlx4_interface *intf)
-{
-       struct mlx4_priv *priv;
-
-       if (!intf->add || !intf->remove)
-               return -EINVAL;
-
-       mutex_lock(&intf_mutex);
-
-       list_add_tail(&intf->list, &intf_list);
-       list_for_each_entry(priv, &dev_list, dev_list)
-               mlx4_add_device(intf, priv);
-
-       mutex_unlock(&intf_mutex);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_register_interface);
-
-void mlx4_unregister_interface(struct mlx4_interface *intf)
-{
-       struct mlx4_priv *priv;
-
-       mutex_lock(&intf_mutex);
-
-       list_for_each_entry(priv, &dev_list, dev_list)
-               mlx4_remove_device(intf, priv);
-
-       list_del(&intf->list);
-
-       mutex_unlock(&intf_mutex);
-}
-EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
-
-void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_device_context *dev_ctx;
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->ctx_lock, flags);
-
-       list_for_each_entry(dev_ctx, &priv->ctx_list, list)
-               if (dev_ctx->intf->event)
-                       dev_ctx->intf->event(dev, dev_ctx->context, type, port);
-
-       spin_unlock_irqrestore(&priv->ctx_lock, flags);
-}
-
-int mlx4_register_device(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_interface *intf;
-
-       mutex_lock(&intf_mutex);
-
-       list_add_tail(&priv->dev_list, &dev_list);
-       list_for_each_entry(intf, &intf_list, list)
-               mlx4_add_device(intf, priv);
-
-       mutex_unlock(&intf_mutex);
-       mlx4_start_catas_poll(dev);
-
-       return 0;
-}
-
-void mlx4_unregister_device(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_interface *intf;
-
-       mlx4_stop_catas_poll(dev);
-       mutex_lock(&intf_mutex);
-
-       list_for_each_entry(intf, &intf_list, list)
-               mlx4_remove_device(intf, priv);
-
-       list_del(&priv->dev_list);
-
-       mutex_unlock(&intf_mutex);
-}
-
-void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_device_context *dev_ctx;
-       unsigned long flags;
-       void *result = NULL;
-
-       spin_lock_irqsave(&priv->ctx_lock, flags);
-
-       list_for_each_entry(dev_ctx, &priv->ctx_list, list)
-               if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
-                       result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
-                       break;
-               }
-
-       spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
-       return result;
-}
-EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
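
This file is the attachment point for the protocol drivers (mlx4_en and
mlx4_ib register themselves this way). A hedged sketch of a consumer
(kernel context assumed; all names here are illustrative):

struct example_ctx {
	struct mlx4_dev *dev;
};

static void *example_add(struct mlx4_dev *dev)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx)
		ctx->dev = dev;
	return ctx;			/* NULL declines this device */
}

static void example_remove(struct mlx4_dev *dev, void *context)
{
	kfree(context);
}

static struct mlx4_interface example_intf = {
	.add	= example_add,
	.remove	= example_remove,
	/* .event and .get_dev are optional, as the NULL checks above show */
};

static int __init example_init(void)
{
	/* add() is called for every existing device under intf_mutex */
	return mlx4_register_interface(&example_intf);
}

static void __exit example_exit(void)
{
	mlx4_unregister_interface(&example_intf);
}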
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
deleted file mode 100644 (file)
index f0ee35d..0000000
+++ /dev/null
@@ -1,1529 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/io-mapping.h>
-
-#include <linux/mlx4/device.h>
-#include <linux/mlx4/doorbell.h>
-
-#include "mlx4.h"
-#include "fw.h"
-#include "icm.h"
-
-MODULE_AUTHOR("Roland Dreier");
-MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
-
-struct workqueue_struct *mlx4_wq;
-
-#ifdef CONFIG_MLX4_DEBUG
-
-int mlx4_debug_level = 0;
-module_param_named(debug_level, mlx4_debug_level, int, 0644);
-MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
-
-#endif /* CONFIG_MLX4_DEBUG */
-
-#ifdef CONFIG_PCI_MSI
-
-static int msi_x = 1;
-module_param(msi_x, int, 0444);
-MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
-
-#else /* CONFIG_PCI_MSI */
-
-#define msi_x (0)
-
-#endif /* CONFIG_PCI_MSI */
-
-static char mlx4_version[] __devinitdata =
-       DRV_NAME ": Mellanox ConnectX core driver v"
-       DRV_VERSION " (" DRV_RELDATE ")\n";
-
-static struct mlx4_profile default_profile = {
-       .num_qp         = 1 << 17,
-       .num_srq        = 1 << 16,
-       .rdmarc_per_qp  = 1 << 4,
-       .num_cq         = 1 << 16,
-       .num_mcg        = 1 << 13,
-       .num_mpt        = 1 << 17,
-       .num_mtt        = 1 << 20,
-};
-
-static int log_num_mac = 2;
-module_param_named(log_num_mac, log_num_mac, int, 0444);
-MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
-
-static int log_num_vlan;
-module_param_named(log_num_vlan, log_num_vlan, int, 0444);
-MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
-
-static int use_prio;
-module_param_named(use_prio, use_prio, bool, 0444);
-MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
-                 "(0/1, default 0)");
-
-static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
-module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
-MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
-
-int mlx4_check_port_params(struct mlx4_dev *dev,
-                          enum mlx4_port_type *port_type)
-{
-       int i;
-
-       for (i = 0; i < dev->caps.num_ports - 1; i++) {
-               if (port_type[i] != port_type[i + 1]) {
-                       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
-                               mlx4_err(dev, "Only same port types supported "
-                                        "on this HCA, aborting.\n");
-                               return -EINVAL;
-                       }
-                       if (port_type[i] == MLX4_PORT_TYPE_ETH &&
-                           port_type[i + 1] == MLX4_PORT_TYPE_IB)
-                               return -EINVAL;
-               }
-       }
-
-       for (i = 0; i < dev->caps.num_ports; i++) {
-               if (!(port_type[i] & dev->caps.supported_type[i+1])) {
-                       mlx4_err(dev, "Requested port type for port %d is not "
-                                     "supported on this HCA\n", i + 1);
-                       return -EINVAL;
-               }
-       }
-       return 0;
-}
-
-static void mlx4_set_port_mask(struct mlx4_dev *dev)
-{
-       int i;
-
-       dev->caps.port_mask = 0;
-       for (i = 1; i <= dev->caps.num_ports; ++i)
-               if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
-                       dev->caps.port_mask |= 1 << (i - 1);
-}
-
-static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
-{
-       int err;
-       int i;
-
-       err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
-       if (err) {
-               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
-               return err;
-       }
-
-       if (dev_cap->min_page_sz > PAGE_SIZE) {
-               mlx4_err(dev, "HCA minimum page size of %d bigger than "
-                        "kernel PAGE_SIZE of %ld, aborting.\n",
-                        dev_cap->min_page_sz, PAGE_SIZE);
-               return -ENODEV;
-       }
-       if (dev_cap->num_ports > MLX4_MAX_PORTS) {
-               mlx4_err(dev, "HCA has %d ports, but we only support %d, "
-                        "aborting.\n",
-                        dev_cap->num_ports, MLX4_MAX_PORTS);
-               return -ENODEV;
-       }
-
-       if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
-               mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
-                        "PCI resource 2 size of 0x%llx, aborting.\n",
-                        dev_cap->uar_size,
-                        (unsigned long long) pci_resource_len(dev->pdev, 2));
-               return -ENODEV;
-       }
-
-       dev->caps.num_ports          = dev_cap->num_ports;
-       for (i = 1; i <= dev->caps.num_ports; ++i) {
-               dev->caps.vl_cap[i]         = dev_cap->max_vl[i];
-               dev->caps.ib_mtu_cap[i]     = dev_cap->ib_mtu[i];
-               dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
-               dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
-               dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
-               dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
-               dev->caps.def_mac[i]        = dev_cap->def_mac[i];
-               dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
-               dev->caps.trans_type[i]     = dev_cap->trans_type[i];
-               dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
-               dev->caps.wavelength[i]     = dev_cap->wavelength[i];
-               dev->caps.trans_code[i]     = dev_cap->trans_code[i];
-       }
-
-       dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
-       dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
-       dev->caps.bf_reg_size        = dev_cap->bf_reg_size;
-       dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
-       dev->caps.max_sq_sg          = dev_cap->max_sq_sg;
-       dev->caps.max_rq_sg          = dev_cap->max_rq_sg;
-       dev->caps.max_wqes           = dev_cap->max_qp_sz;
-       dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
-       dev->caps.max_srq_wqes       = dev_cap->max_srq_sz;
-       dev->caps.max_srq_sge        = dev_cap->max_rq_sg - 1;
-       dev->caps.reserved_srqs      = dev_cap->reserved_srqs;
-       dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
-       dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
-       dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
-       /*
-        * Subtract 1 from the limit because we need to allocate a
-        * spare CQE so the HCA HW can tell the difference between an
-        * empty CQ and a full CQ.
-        */
-       dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
-       dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
-       dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
-       dev->caps.mtts_per_seg       = 1 << log_mtts_per_seg;
-       dev->caps.reserved_mtts      = DIV_ROUND_UP(dev_cap->reserved_mtts,
-                                                   dev->caps.mtts_per_seg);
-       dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
-       dev->caps.reserved_uars      = dev_cap->reserved_uars;
-       dev->caps.reserved_pds       = dev_cap->reserved_pds;
-       dev->caps.mtt_entry_sz       = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
-       dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
-       dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
-       dev->caps.flags              = dev_cap->flags;
-       dev->caps.bmme_flags         = dev_cap->bmme_flags;
-       dev->caps.reserved_lkey      = dev_cap->reserved_lkey;
-       dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
-       dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
-
-       dev->caps.log_num_macs  = log_num_mac;
-       dev->caps.log_num_vlans = log_num_vlan;
-       dev->caps.log_num_prios = use_prio ? 3 : 0;
-
-       for (i = 1; i <= dev->caps.num_ports; ++i) {
-               if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
-                       dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
-               else
-                       dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
-               dev->caps.possible_type[i] = dev->caps.port_type[i];
-               mlx4_priv(dev)->sense.sense_allowed[i] =
-                       dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
-
-               if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
-                       dev->caps.log_num_macs = dev_cap->log_max_macs[i];
-                       mlx4_warn(dev, "Requested number of MACs is too much "
-                                 "for port %d, reducing to %d.\n",
-                                 i, 1 << dev->caps.log_num_macs);
-               }
-               if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
-                       dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
-                       mlx4_warn(dev, "Requested number of VLANs is too much "
-                                 "for port %d, reducing to %d.\n",
-                                 i, 1 << dev->caps.log_num_vlans);
-               }
-       }
-
-       mlx4_set_port_mask(dev);
-
-       dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
-
-       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
-       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
-               dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
-               (1 << dev->caps.log_num_macs) *
-               (1 << dev->caps.log_num_vlans) *
-               (1 << dev->caps.log_num_prios) *
-               dev->caps.num_ports;
-       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
-
-       dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
-               dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
-               dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
-               dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
-
-       return 0;
-}
-
-/*
- * Change the port configuration of the device.
- * Every user of this function must hold the port mutex.
- */
-int mlx4_change_port_types(struct mlx4_dev *dev,
-                          enum mlx4_port_type *port_types)
-{
-       int err = 0;
-       int change = 0;
-       int port;
-
-       for (port = 0; port <  dev->caps.num_ports; port++) {
-               /* Change the port type only if the new type is different
-                * from the current, and not set to Auto */
-               if (port_types[port] != dev->caps.port_type[port + 1]) {
-                       change = 1;
-                       dev->caps.port_type[port + 1] = port_types[port];
-               }
-       }
-       if (change) {
-               mlx4_unregister_device(dev);
-               for (port = 1; port <= dev->caps.num_ports; port++) {
-                       mlx4_CLOSE_PORT(dev, port);
-                       err = mlx4_SET_PORT(dev, port);
-                       if (err) {
-                               mlx4_err(dev, "Failed to set port %d, "
-                                             "aborting\n", port);
-                               goto out;
-                       }
-               }
-               mlx4_set_port_mask(dev);
-               err = mlx4_register_device(dev);
-       }
-
-out:
-       return err;
-}
-
-static ssize_t show_port_type(struct device *dev,
-                             struct device_attribute *attr,
-                             char *buf)
-{
-       struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
-                                                  port_attr);
-       struct mlx4_dev *mdev = info->dev;
-       char type[8];
-
-       sprintf(type, "%s",
-               (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
-               "ib" : "eth");
-       if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
-               sprintf(buf, "auto (%s)\n", type);
-       else
-               sprintf(buf, "%s\n", type);
-
-       return strlen(buf);
-}
-
-static ssize_t set_port_type(struct device *dev,
-                            struct device_attribute *attr,
-                            const char *buf, size_t count)
-{
-       struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
-                                                  port_attr);
-       struct mlx4_dev *mdev = info->dev;
-       struct mlx4_priv *priv = mlx4_priv(mdev);
-       enum mlx4_port_type types[MLX4_MAX_PORTS];
-       enum mlx4_port_type new_types[MLX4_MAX_PORTS];
-       int i;
-       int err = 0;
-
-       if (!strcmp(buf, "ib\n"))
-               info->tmp_type = MLX4_PORT_TYPE_IB;
-       else if (!strcmp(buf, "eth\n"))
-               info->tmp_type = MLX4_PORT_TYPE_ETH;
-       else if (!strcmp(buf, "auto\n"))
-               info->tmp_type = MLX4_PORT_TYPE_AUTO;
-       else {
-               mlx4_err(mdev, "%s is not supported port type\n", buf);
-               return -EINVAL;
-       }
-
-       mlx4_stop_sense(mdev);
-       mutex_lock(&priv->port_mutex);
-       /* Possible type is always the one that was delivered */
-       mdev->caps.possible_type[info->port] = info->tmp_type;
-
-       for (i = 0; i < mdev->caps.num_ports; i++) {
-               types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
-                                       mdev->caps.possible_type[i+1];
-               if (types[i] == MLX4_PORT_TYPE_AUTO)
-                       types[i] = mdev->caps.port_type[i+1];
-       }
-
-       if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
-               for (i = 1; i <= mdev->caps.num_ports; i++) {
-                       if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
-                               mdev->caps.possible_type[i] = mdev->caps.port_type[i];
-                               err = -EINVAL;
-                       }
-               }
-       }
-       if (err) {
-               mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
-                              "Set only 'eth' or 'ib' for both ports "
-                              "(should be the same)\n");
-               goto out;
-       }
-
-       mlx4_do_sense_ports(mdev, new_types, types);
-
-       err = mlx4_check_port_params(mdev, new_types);
-       if (err)
-               goto out;
-
-       /* We are about to apply the changes after the configuration
-        * was verified, no need to remember the temporary types
-        * any more */
-       for (i = 0; i < mdev->caps.num_ports; i++)
-               priv->port[i + 1].tmp_type = 0;
-
-       err = mlx4_change_port_types(mdev, new_types);
-
-out:
-       mlx4_start_sense(mdev);
-       mutex_unlock(&priv->port_mutex);
-       return err ? err : count;
-}
-
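-/* Illustrative shell usage of the two sysfs handlers above; the PCI
- * address is hypothetical:
- *
- *     # cat /sys/bus/pci/devices/0000:07:00.0/mlx4_port1
- *     auto (eth)
- *     # echo ib > /sys/bus/pci/devices/0000:07:00.0/mlx4_port1
- */
-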
-static int mlx4_load_fw(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int err;
-
-       priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
-                                        GFP_HIGHUSER | __GFP_NOWARN, 0);
-       if (!priv->fw.fw_icm) {
-               mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
-               return -ENOMEM;
-       }
-
-       err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
-       if (err) {
-               mlx4_err(dev, "MAP_FA command failed, aborting.\n");
-               goto err_free;
-       }
-
-       err = mlx4_RUN_FW(dev);
-       if (err) {
-               mlx4_err(dev, "RUN_FW command failed, aborting.\n");
-               goto err_unmap_fa;
-       }
-
-       return 0;
-
-err_unmap_fa:
-       mlx4_UNMAP_FA(dev);
-
-err_free:
-       mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-       return err;
-}
-
-static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
-                               int cmpt_entry_sz)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int err;
-
-       err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
-                                 cmpt_base +
-                                 ((u64) (MLX4_CMPT_TYPE_QP *
-                                         cmpt_entry_sz) << MLX4_CMPT_SHIFT),
-                                 cmpt_entry_sz, dev->caps.num_qps,
-                                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
-                                 0, 0);
-       if (err)
-               goto err;
-
-       err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
-                                 cmpt_base +
-                                 ((u64) (MLX4_CMPT_TYPE_SRQ *
-                                         cmpt_entry_sz) << MLX4_CMPT_SHIFT),
-                                 cmpt_entry_sz, dev->caps.num_srqs,
-                                 dev->caps.reserved_srqs, 0, 0);
-       if (err)
-               goto err_qp;
-
-       err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
-                                 cmpt_base +
-                                 ((u64) (MLX4_CMPT_TYPE_CQ *
-                                         cmpt_entry_sz) << MLX4_CMPT_SHIFT),
-                                 cmpt_entry_sz, dev->caps.num_cqs,
-                                 dev->caps.reserved_cqs, 0, 0);
-       if (err)
-               goto err_srq;
-
-       err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
-                                 cmpt_base +
-                                 ((u64) (MLX4_CMPT_TYPE_EQ *
-                                         cmpt_entry_sz) << MLX4_CMPT_SHIFT),
-                                 cmpt_entry_sz,
-                                 dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
-       if (err)
-               goto err_cq;
-
-       return 0;
-
-err_cq:
-       mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
-
-err_srq:
-       mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
-
-err_qp:
-       mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
-
-err:
-       return err;
-}
-
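-/* A note on the cMPT layout set up above: the four context types (QP,
- * SRQ, CQ, EQ) occupy fixed, equally spaced regions starting at
- * cmpt_base.  For illustration, assuming cmpt_entry_sz = 64 and
- * MLX4_CMPT_SHIFT = 24, the SRQ region would begin at
- * cmpt_base + ((u64) 64 << 24), i.e. 1 GB past the QP region.
- */
-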
-static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
-                        struct mlx4_init_hca_param *init_hca, u64 icm_size)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       u64 aux_pages;
-       int err;
-
-       err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
-       if (err) {
-               mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
-               return err;
-       }
-
-       mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
-                (unsigned long long) icm_size >> 10,
-                (unsigned long long) aux_pages << 2);
-
-       priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
-                                         GFP_HIGHUSER | __GFP_NOWARN, 0);
-       if (!priv->fw.aux_icm) {
-               mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
-               return -ENOMEM;
-       }
-
-       err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
-       if (err) {
-               mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
-               goto err_free_aux;
-       }
-
-       err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
-       if (err) {
-               mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
-               goto err_unmap_aux;
-       }
-
-       err = mlx4_init_icm_table(dev, &priv->eq_table.table,
-                                 init_hca->eqc_base, dev_cap->eqc_entry_sz,
-                                 dev->caps.num_eqs, dev->caps.num_eqs,
-                                 0, 0);
-       if (err) {
-               mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
-               goto err_unmap_cmpt;
-       }
-
-       /*
-        * Reserved MTT entries must be aligned up to a cacheline
-        * boundary, since the FW will write to them, while the driver
-        * writes to all other MTT entries. (The variable
-        * dev->caps.mtt_entry_sz below is really the MTT segment
-        * size, not the raw entry size)
-        */
-       dev->caps.reserved_mtts =
-               ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
-                     dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
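-       /* For illustration (hypothetical numbers): with 101 reserved
-        * segments, a 64-byte segment size and a 128-byte cache line,
-        * ALIGN(101 * 64, 128) / 64 rounds the reservation up to 102
-        * segments.
-        */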
-
-       err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
-                                 init_hca->mtt_base,
-                                 dev->caps.mtt_entry_sz,
-                                 dev->caps.num_mtt_segs,
-                                 dev->caps.reserved_mtts, 1, 0);
-       if (err) {
-               mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
-               goto err_unmap_eq;
-       }
-
-       err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
-                                 init_hca->dmpt_base,
-                                 dev_cap->dmpt_entry_sz,
-                                 dev->caps.num_mpts,
-                                 dev->caps.reserved_mrws, 1, 1);
-       if (err) {
-               mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
-               goto err_unmap_mtt;
-       }
-
-       err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
-                                 init_hca->qpc_base,
-                                 dev_cap->qpc_entry_sz,
-                                 dev->caps.num_qps,
-                                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
-                                 0, 0);
-       if (err) {
-               mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
-               goto err_unmap_dmpt;
-       }
-
-       err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
-                                 init_hca->auxc_base,
-                                 dev_cap->aux_entry_sz,
-                                 dev->caps.num_qps,
-                                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
-                                 0, 0);
-       if (err) {
-               mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
-               goto err_unmap_qp;
-       }
-
-       err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
-                                 init_hca->altc_base,
-                                 dev_cap->altc_entry_sz,
-                                 dev->caps.num_qps,
-                                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
-                                 0, 0);
-       if (err) {
-               mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
-               goto err_unmap_auxc;
-       }
-
-       err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
-                                 init_hca->rdmarc_base,
-                                 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
-                                 dev->caps.num_qps,
-                                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
-                                 0, 0);
-       if (err) {
-               mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
-               goto err_unmap_altc;
-       }
-
-       err = mlx4_init_icm_table(dev, &priv->cq_table.table,
-                                 init_hca->cqc_base,
-                                 dev_cap->cqc_entry_sz,
-                                 dev->caps.num_cqs,
-                                 dev->caps.reserved_cqs, 0, 0);
-       if (err) {
-               mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
-               goto err_unmap_rdmarc;
-       }
-
-       err = mlx4_init_icm_table(dev, &priv->srq_table.table,
-                                 init_hca->srqc_base,
-                                 dev_cap->srq_entry_sz,
-                                 dev->caps.num_srqs,
-                                 dev->caps.reserved_srqs, 0, 0);
-       if (err) {
-               mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
-               goto err_unmap_cq;
-       }
-
-       /*
-        * It's not strictly required, but for simplicity just map the
-        * whole multicast group table now.  The table isn't very big
-        * and it's a lot easier than trying to track ref counts.
-        */
-       err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
-                                 init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
-                                 dev->caps.num_mgms + dev->caps.num_amgms,
-                                 dev->caps.num_mgms + dev->caps.num_amgms,
-                                 0, 0);
-       if (err) {
-               mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
-               goto err_unmap_srq;
-       }
-
-       return 0;
-
-err_unmap_srq:
-       mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
-
-err_unmap_cq:
-       mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
-
-err_unmap_rdmarc:
-       mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
-
-err_unmap_altc:
-       mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
-
-err_unmap_auxc:
-       mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
-
-err_unmap_qp:
-       mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
-
-err_unmap_dmpt:
-       mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
-
-err_unmap_mtt:
-       mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
-
-err_unmap_eq:
-       mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
-
-err_unmap_cmpt:
-       mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
-       mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
-       mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
-       mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
-
-err_unmap_aux:
-       mlx4_UNMAP_ICM_AUX(dev);
-
-err_free_aux:
-       mlx4_free_icm(dev, priv->fw.aux_icm, 0);
-
-       return err;
-}
-
-static void mlx4_free_icms(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
-       mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
-       mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
-       mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
-       mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
-       mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
-       mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
-       mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
-       mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
-       mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
-       mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
-       mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
-       mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
-       mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
-
-       mlx4_UNMAP_ICM_AUX(dev);
-       mlx4_free_icm(dev, priv->fw.aux_icm, 0);
-}
-
-static int map_bf_area(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       resource_size_t bf_start;
-       resource_size_t bf_len;
-       int err = 0;
-
-       bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
-       bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
-       priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
-       if (!priv->bf_mapping)
-               err = -ENOMEM;
-
-       return err;
-}
-
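-/* A note on the mapping above: BAR 2 starts with num_uars UAR pages and
- * the remainder of the BAR is the BlueFlame area, which is mapped
- * write-combining so WQEs can be written straight to the device.
- */
-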
-static void unmap_bf_area(struct mlx4_dev *dev)
-{
-       if (mlx4_priv(dev)->bf_mapping)
-               io_mapping_free(mlx4_priv(dev)->bf_mapping);
-}
-
-static void mlx4_close_hca(struct mlx4_dev *dev)
-{
-       unmap_bf_area(dev);
-       mlx4_CLOSE_HCA(dev, 0);
-       mlx4_free_icms(dev);
-       mlx4_UNMAP_FA(dev);
-       mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
-}
-
-static int mlx4_init_hca(struct mlx4_dev *dev)
-{
-       struct mlx4_priv          *priv = mlx4_priv(dev);
-       struct mlx4_adapter        adapter;
-       struct mlx4_dev_cap        dev_cap;
-       struct mlx4_mod_stat_cfg   mlx4_cfg;
-       struct mlx4_profile        profile;
-       struct mlx4_init_hca_param init_hca;
-       u64 icm_size;
-       int err;
-
-       err = mlx4_QUERY_FW(dev);
-       if (err) {
-               if (err == -EACCES)
-                       mlx4_info(dev, "non-primary physical function, skipping.\n");
-               else
-                       mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
-               return err;
-       }
-
-       err = mlx4_load_fw(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to start FW, aborting.\n");
-               return err;
-       }
-
-       mlx4_cfg.log_pg_sz_m = 1;
-       mlx4_cfg.log_pg_sz = 0;
-       err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
-       if (err)
-               mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
-
-       err = mlx4_dev_cap(dev, &dev_cap);
-       if (err) {
-               mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
-               goto err_stop_fw;
-       }
-
-       profile = default_profile;
-
-       icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
-       if ((long long) icm_size < 0) {
-               err = icm_size;
-               goto err_stop_fw;
-       }
-
-       if (map_bf_area(dev))
-               mlx4_dbg(dev, "Failed to map blue flame area\n");
-
-       init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
-
-       err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
-       if (err)
-               goto err_stop_fw;
-
-       err = mlx4_INIT_HCA(dev, &init_hca);
-       if (err) {
-               mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
-               goto err_free_icm;
-       }
-
-       err = mlx4_QUERY_ADAPTER(dev, &adapter);
-       if (err) {
-               mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
-               goto err_close;
-       }
-
-       priv->eq_table.inta_pin = adapter.inta_pin;
-       memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
-
-       return 0;
-
-err_close:
-       mlx4_CLOSE_HCA(dev, 0);
-
-err_free_icm:
-       mlx4_free_icms(dev);
-
-err_stop_fw:
-       unmap_bf_area(dev);
-       mlx4_UNMAP_FA(dev);
-       mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-
-       return err;
-}
-
-static int mlx4_init_counters_table(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int nent;
-
-       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
-               return -ENOENT;
-
-       nent = dev->caps.max_counters;
-       return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
-}
-
-static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
-{
-       mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
-}
-
-int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
-               return -ENOENT;
-
-       *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
-       if (*idx == -1)
-               return -ENOMEM;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
-
-void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
-{
-       mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
-       return;
-}
-EXPORT_SYMBOL_GPL(mlx4_counter_free);
-
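-/* Minimal usage sketch for the counter API above (illustrative only;
- * "mdev" stands for a previously obtained struct mlx4_dev pointer):
- *
- *     u32 idx;
- *
- *     if (!mlx4_counter_alloc(mdev, &idx)) {
- *             ... attach idx to a QP context, read counters, etc. ...
- *             mlx4_counter_free(mdev, idx);
- *     }
- */
-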
-static int mlx4_setup_hca(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int err;
-       int port;
-       __be32 ib_port_default_caps;
-
-       err = mlx4_init_uar_table(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "user access region table, aborting.\n");
-               return err;
-       }
-
-       err = mlx4_uar_alloc(dev, &priv->driver_uar);
-       if (err) {
-               mlx4_err(dev, "Failed to allocate driver access region, "
-                        "aborting.\n");
-               goto err_uar_table_free;
-       }
-
-       priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
-       if (!priv->kar) {
-               mlx4_err(dev, "Couldn't map kernel access region, "
-                        "aborting.\n");
-               err = -ENOMEM;
-               goto err_uar_free;
-       }
-
-       err = mlx4_init_pd_table(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "protection domain table, aborting.\n");
-               goto err_kar_unmap;
-       }
-
-       err = mlx4_init_mr_table(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "memory region table, aborting.\n");
-               goto err_pd_table_free;
-       }
-
-       err = mlx4_init_eq_table(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "event queue table, aborting.\n");
-               goto err_mr_table_free;
-       }
-
-       err = mlx4_cmd_use_events(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to switch to event-driven "
-                        "firmware commands, aborting.\n");
-               goto err_eq_table_free;
-       }
-
-       err = mlx4_NOP(dev);
-       if (err) {
-               if (dev->flags & MLX4_FLAG_MSI_X) {
-                       mlx4_warn(dev, "NOP command failed to generate MSI-X "
-                                 "interrupt IRQ %d).\n",
-                                 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
-                       mlx4_warn(dev, "Trying again without MSI-X.\n");
-               } else {
-                       mlx4_err(dev, "NOP command failed to generate interrupt "
-                                "(IRQ %d), aborting.\n",
-                                priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
-                       mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
-               }
-
-               goto err_cmd_poll;
-       }
-
-       mlx4_dbg(dev, "NOP command IRQ test passed\n");
-
-       err = mlx4_init_cq_table(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "completion queue table, aborting.\n");
-               goto err_cmd_poll;
-       }
-
-       err = mlx4_init_srq_table(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "shared receive queue table, aborting.\n");
-               goto err_cq_table_free;
-       }
-
-       err = mlx4_init_qp_table(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "queue pair table, aborting.\n");
-               goto err_srq_table_free;
-       }
-
-       err = mlx4_init_mcg_table(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to initialize "
-                        "multicast group table, aborting.\n");
-               goto err_qp_table_free;
-       }
-
-       err = mlx4_init_counters_table(dev);
-       if (err && err != -ENOENT) {
-               mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
-               goto err_counters_table_free;
-       }
-
-       for (port = 1; port <= dev->caps.num_ports; port++) {
-               enum mlx4_port_type port_type = 0;
-               mlx4_SENSE_PORT(dev, port, &port_type);
-               if (port_type)
-                       dev->caps.port_type[port] = port_type;
-               ib_port_default_caps = 0;
-               err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
-               if (err)
-                       mlx4_warn(dev, "failed to get port %d default "
-                                 "ib capabilities (%d). Continuing with "
-                                 "caps = 0\n", port, err);
-               dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
-               err = mlx4_SET_PORT(dev, port);
-               if (err) {
-                       mlx4_err(dev, "Failed to set port %d, aborting\n",
-                               port);
-                       goto err_mcg_table_free;
-               }
-       }
-       mlx4_set_port_mask(dev);
-
-       return 0;
-
-err_mcg_table_free:
-       mlx4_cleanup_mcg_table(dev);
-
-err_counters_table_free:
-       mlx4_cleanup_counters_table(dev);
-
-err_qp_table_free:
-       mlx4_cleanup_qp_table(dev);
-
-err_srq_table_free:
-       mlx4_cleanup_srq_table(dev);
-
-err_cq_table_free:
-       mlx4_cleanup_cq_table(dev);
-
-err_cmd_poll:
-       mlx4_cmd_use_polling(dev);
-
-err_eq_table_free:
-       mlx4_cleanup_eq_table(dev);
-
-err_mr_table_free:
-       mlx4_cleanup_mr_table(dev);
-
-err_pd_table_free:
-       mlx4_cleanup_pd_table(dev);
-
-err_kar_unmap:
-       iounmap(priv->kar);
-
-err_uar_free:
-       mlx4_uar_free(dev, &priv->driver_uar);
-
-err_uar_table_free:
-       mlx4_cleanup_uar_table(dev);
-       return err;
-}
-
-static void mlx4_enable_msi_x(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct msix_entry *entries;
-       int nreq = min_t(int, dev->caps.num_ports *
-                        min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
-                               + MSIX_LEGACY_SZ, MAX_MSIX);
-       int err;
-       int i;
-
-       if (msi_x) {
-               nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
-                            nreq);
-               entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
-               if (!entries)
-                       goto no_msi;
-
-               for (i = 0; i < nreq; ++i)
-                       entries[i].entry = i;
-
-       retry:
-               err = pci_enable_msix(dev->pdev, entries, nreq);
-               if (err) {
-                       /* Try again if at least 2 vectors are available */
-                       if (err > 1) {
-                               mlx4_info(dev, "Requested %d vectors, "
-                                         "but only %d MSI-X vectors available, "
-                                         "trying again\n", nreq, err);
-                               nreq = err;
-                               goto retry;
-                       }
-                       kfree(entries);
-                       goto no_msi;
-               }
-
-               if (nreq <
-                   MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
-                       /* Working in legacy mode, all EQs shared */
-                       dev->caps.comp_pool           = 0;
-                       dev->caps.num_comp_vectors = nreq - 1;
-               } else {
-                       dev->caps.comp_pool           = nreq - MSIX_LEGACY_SZ;
-                       dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
-               }
-               for (i = 0; i < nreq; ++i)
-                       priv->eq_table.eq[i].irq = entries[i].vector;
-
-               dev->flags |= MLX4_FLAG_MSI_X;
-
-               kfree(entries);
-               return;
-       }
-
-no_msi:
-       dev->caps.num_comp_vectors = 1;
-       dev->caps.comp_pool        = 0;
-
-       for (i = 0; i < 2; ++i)
-               priv->eq_table.eq[i].irq = dev->pdev->irq;
-}
-
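-/* Sizing example for the MSI-X request above (hypothetical host;
- * assuming MAX_MSIX_P_PORT = 17, MSIX_LEGACY_SZ = 4 and MAX_MSIX = 64):
- * a 2-port HCA on an 8-CPU machine asks for
- * min(2 * min(8 + 1, 17) + 4, 64) = 22 vectors.
- */
-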
-static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
-{
-       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-       int err = 0;
-
-       info->dev = dev;
-       info->port = port;
-       mlx4_init_mac_table(dev, &info->mac_table);
-       mlx4_init_vlan_table(dev, &info->vlan_table);
-       info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
-                       (port - 1) * (1 << log_num_mac);
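-       /* e.g. (hypothetical values): with log_num_mac = 4, port 2 starts
-        * at reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + 16 */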
-
-       sprintf(info->dev_name, "mlx4_port%d", port);
-       info->port_attr.attr.name = info->dev_name;
-       info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
-       info->port_attr.show      = show_port_type;
-       info->port_attr.store     = set_port_type;
-       sysfs_attr_init(&info->port_attr.attr);
-
-       err = device_create_file(&dev->pdev->dev, &info->port_attr);
-       if (err) {
-               mlx4_err(dev, "Failed to create file for port %d\n", port);
-               info->port = -1;
-       }
-
-       return err;
-}
-
-static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
-{
-       if (info->port < 0)
-               return;
-
-       device_remove_file(&info->dev->pdev->dev, &info->port_attr);
-}
-
-static int mlx4_init_steering(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int num_entries = dev->caps.num_ports;
-       int i, j;
-
-       priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
-       if (!priv->steer)
-               return -ENOMEM;
-
-       for (i = 0; i < num_entries; i++) {
-               for (j = 0; j < MLX4_NUM_STEERS; j++) {
-                       INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
-                       INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
-               }
-               INIT_LIST_HEAD(&priv->steer[i].high_prios);
-       }
-       return 0;
-}
-
-static void mlx4_clear_steering(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_steer_index *entry, *tmp_entry;
-       struct mlx4_promisc_qp *pqp, *tmp_pqp;
-       int num_entries = dev->caps.num_ports;
-       int i, j;
-
-       for (i = 0; i < num_entries; i++) {
-               for (j = 0; j < MLX4_NUM_STEERS; j++) {
-                       list_for_each_entry_safe(pqp, tmp_pqp,
-                                                &priv->steer[i].promisc_qps[j],
-                                                list) {
-                               list_del(&pqp->list);
-                               kfree(pqp);
-                       }
-                       list_for_each_entry_safe(entry, tmp_entry,
-                                                &priv->steer[i].steer_entries[j],
-                                                list) {
-                               list_del(&entry->list);
-                               list_for_each_entry_safe(pqp, tmp_pqp,
-                                                        &entry->duplicates,
-                                                        list) {
-                                       list_del(&pqp->list);
-                                       kfree(pqp);
-                               }
-                               kfree(entry);
-                       }
-               }
-       }
-       kfree(priv->steer);
-}
-
-static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
-{
-       struct mlx4_priv *priv;
-       struct mlx4_dev *dev;
-       int err;
-       int port;
-
-       pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
-
-       err = pci_enable_device(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "Cannot enable PCI device, "
-                       "aborting.\n");
-               return err;
-       }
-
-       /*
-        * Check for BARs.  We expect BAR 0 to be 1MB in size.
-        */
-       if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
-           pci_resource_len(pdev, 0) != 1 << 20) {
-               dev_err(&pdev->dev, "Missing DCS, aborting.\n");
-               err = -ENODEV;
-               goto err_disable_pdev;
-       }
-       if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
-               dev_err(&pdev->dev, "Missing UAR, aborting.\n");
-               err = -ENODEV;
-               goto err_disable_pdev;
-       }
-
-       err = pci_request_regions(pdev, DRV_NAME);
-       if (err) {
-               dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
-               goto err_disable_pdev;
-       }
-
-       pci_set_master(pdev);
-
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-       if (err) {
-               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (err) {
-                       dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
-                       goto err_release_regions;
-               }
-       }
-       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-       if (err) {
-               dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
-                        "consistent PCI DMA mask.\n");
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (err) {
-                       dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
-                               "aborting.\n");
-                       goto err_release_regions;
-               }
-       }
-
-       /* Allow large DMA segments, up to the firmware limit of 1 GB */
-       dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
-
-       priv = kzalloc(sizeof *priv, GFP_KERNEL);
-       if (!priv) {
-               dev_err(&pdev->dev, "Device struct alloc failed, "
-                       "aborting.\n");
-               err = -ENOMEM;
-               goto err_release_regions;
-       }
-
-       dev       = &priv->dev;
-       dev->pdev = pdev;
-       INIT_LIST_HEAD(&priv->ctx_list);
-       spin_lock_init(&priv->ctx_lock);
-
-       mutex_init(&priv->port_mutex);
-
-       INIT_LIST_HEAD(&priv->pgdir_list);
-       mutex_init(&priv->pgdir_mutex);
-
-       INIT_LIST_HEAD(&priv->bf_list);
-       mutex_init(&priv->bf_mutex);
-
-       dev->rev_id = pdev->revision;
-
-       /*
-        * Now reset the HCA before we touch the PCI capabilities or
-        * attempt a firmware command, since a boot ROM may have left
-        * the HCA in an undefined state.
-        */
-       err = mlx4_reset(dev);
-       if (err) {
-               mlx4_err(dev, "Failed to reset HCA, aborting.\n");
-               goto err_free_dev;
-       }
-
-       if (mlx4_cmd_init(dev)) {
-               mlx4_err(dev, "Failed to init command interface, aborting.\n");
-               goto err_free_dev;
-       }
-
-       err = mlx4_init_hca(dev);
-       if (err)
-               goto err_cmd;
-
-       err = mlx4_alloc_eq_table(dev);
-       if (err)
-               goto err_close;
-
-       priv->msix_ctl.pool_bm = 0;
-       spin_lock_init(&priv->msix_ctl.pool_lock);
-
-       mlx4_enable_msi_x(dev);
-
-       err = mlx4_init_steering(dev);
-       if (err)
-               goto err_free_eq;
-
-       err = mlx4_setup_hca(dev);
-       if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
-               dev->flags &= ~MLX4_FLAG_MSI_X;
-               pci_disable_msix(pdev);
-               err = mlx4_setup_hca(dev);
-       }
-
-       if (err)
-               goto err_steer;
-
-       for (port = 1; port <= dev->caps.num_ports; port++) {
-               err = mlx4_init_port_info(dev, port);
-               if (err)
-                       goto err_port;
-       }
-
-       err = mlx4_register_device(dev);
-       if (err)
-               goto err_port;
-
-       mlx4_sense_init(dev);
-       mlx4_start_sense(dev);
-
-       pci_set_drvdata(pdev, dev);
-
-       return 0;
-
-err_port:
-       for (--port; port >= 1; --port)
-               mlx4_cleanup_port_info(&priv->port[port]);
-
-       mlx4_cleanup_counters_table(dev);
-       mlx4_cleanup_mcg_table(dev);
-       mlx4_cleanup_qp_table(dev);
-       mlx4_cleanup_srq_table(dev);
-       mlx4_cleanup_cq_table(dev);
-       mlx4_cmd_use_polling(dev);
-       mlx4_cleanup_eq_table(dev);
-       mlx4_cleanup_mr_table(dev);
-       mlx4_cleanup_pd_table(dev);
-       mlx4_cleanup_uar_table(dev);
-
-err_steer:
-       mlx4_clear_steering(dev);
-
-err_free_eq:
-       mlx4_free_eq_table(dev);
-
-err_close:
-       if (dev->flags & MLX4_FLAG_MSI_X)
-               pci_disable_msix(pdev);
-
-       mlx4_close_hca(dev);
-
-err_cmd:
-       mlx4_cmd_cleanup(dev);
-
-err_free_dev:
-       kfree(priv);
-
-err_release_regions:
-       pci_release_regions(pdev);
-
-err_disable_pdev:
-       pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
-       return err;
-}
-
-static int __devinit mlx4_init_one(struct pci_dev *pdev,
-                                  const struct pci_device_id *id)
-{
-       printk_once(KERN_INFO "%s", mlx4_version);
-
-       return __mlx4_init_one(pdev, id);
-}
-
-static void mlx4_remove_one(struct pci_dev *pdev)
-{
-       struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int p;
-
-       if (dev) {
-               mlx4_stop_sense(dev);
-               mlx4_unregister_device(dev);
-
-               for (p = 1; p <= dev->caps.num_ports; p++) {
-                       mlx4_cleanup_port_info(&priv->port[p]);
-                       mlx4_CLOSE_PORT(dev, p);
-               }
-
-               mlx4_cleanup_counters_table(dev);
-               mlx4_cleanup_mcg_table(dev);
-               mlx4_cleanup_qp_table(dev);
-               mlx4_cleanup_srq_table(dev);
-               mlx4_cleanup_cq_table(dev);
-               mlx4_cmd_use_polling(dev);
-               mlx4_cleanup_eq_table(dev);
-               mlx4_cleanup_mr_table(dev);
-               mlx4_cleanup_pd_table(dev);
-
-               iounmap(priv->kar);
-               mlx4_uar_free(dev, &priv->driver_uar);
-               mlx4_cleanup_uar_table(dev);
-               mlx4_clear_steering(dev);
-               mlx4_free_eq_table(dev);
-               mlx4_close_hca(dev);
-               mlx4_cmd_cleanup(dev);
-
-               if (dev->flags & MLX4_FLAG_MSI_X)
-                       pci_disable_msix(pdev);
-
-               kfree(priv);
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
-       }
-}
-
-int mlx4_restart_one(struct pci_dev *pdev)
-{
-       mlx4_remove_one(pdev);
-       return __mlx4_init_one(pdev, NULL);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
-       { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
-       { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
-       { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
-       { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
-       { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
-       { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
-       { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
-       { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
-       { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
-       { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
-       { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
-       { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
-       { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
-       { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
-
-static struct pci_driver mlx4_driver = {
-       .name           = DRV_NAME,
-       .id_table       = mlx4_pci_table,
-       .probe          = mlx4_init_one,
-       .remove         = __devexit_p(mlx4_remove_one)
-};
-
-static int __init mlx4_verify_params(void)
-{
-       if ((log_num_mac < 0) || (log_num_mac > 7)) {
-               pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
-               return -1;
-       }
-
-       if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
-               pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan);
-               return -1;
-       }
-
-       if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
-               pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
-               return -1;
-       }
-
-       return 0;
-}
-
-static int __init mlx4_init(void)
-{
-       int ret;
-
-       if (mlx4_verify_params())
-               return -EINVAL;
-
-       mlx4_catas_init();
-
-       mlx4_wq = create_singlethread_workqueue("mlx4");
-       if (!mlx4_wq)
-               return -ENOMEM;
-
-       ret = pci_register_driver(&mlx4_driver);
-       return ret < 0 ? ret : 0;
-}
-
-static void __exit mlx4_cleanup(void)
-{
-       pci_unregister_driver(&mlx4_driver);
-       destroy_workqueue(mlx4_wq);
-}
-
-module_init(mlx4_init);
-module_exit(mlx4_cleanup);
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
deleted file mode 100644 (file)
index cd17845..0000000
+++ /dev/null
@@ -1,928 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
- * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/string.h>
-#include <linux/etherdevice.h>
-
-#include <linux/mlx4/cmd.h>
-
-#include "mlx4.h"
-
-#define MGM_QPN_MASK       0x00FFFFFF
-#define MGM_BLCK_LB_BIT    30
-
-static const u8 zero_gid[16];  /* automatically initialized to 0 */
-
-static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
-                          struct mlx4_cmd_mailbox *mailbox)
-{
-       return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
-                           MLX4_CMD_TIME_CLASS_A);
-}
-
-static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
-                           struct mlx4_cmd_mailbox *mailbox)
-{
-       return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
-                       MLX4_CMD_TIME_CLASS_A);
-}
-
-static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
-                             struct mlx4_cmd_mailbox *mailbox)
-{
-       u32 in_mod;
-
-       in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
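-       /* in_mod packs vep_num into bits 31:24, the port into bits 23:16
-        * and the steering type into bit 1 */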
-       return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
-                       MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
-}
-
-static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-                        u16 *hash, u8 op_mod)
-{
-       u64 imm;
-       int err;
-
-       err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
-                          MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
-
-       if (!err)
-               *hash = imm;
-
-       return err;
-}
-
-static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
-                                             enum mlx4_steer_type steer,
-                                             u32 qpn)
-{
-       struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
-       struct mlx4_promisc_qp *pqp;
-
-       list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
-               if (pqp->qpn == qpn)
-                       return pqp;
-       }
-       /* not found */
-       return NULL;
-}
-
-/*
- * Add a new entry to the steering data structure.
- * All promisc QPs should be added as well.
- */
-static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
-                             enum mlx4_steer_type steer,
-                             unsigned int index, u32 qpn)
-{
-       struct mlx4_steer *s_steer;
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_mgm *mgm;
-       u32 members_count;
-       struct mlx4_steer_index *new_entry;
-       struct mlx4_promisc_qp *pqp;
-       struct mlx4_promisc_qp *dqp = NULL;
-       u32 prot;
-       int err;
-       u8 pf_num;
-
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
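-       /* e.g. on a dual-port device, vep_num 3 and port 2 map to
-        * pf_num = (3 << 1) | 1 = 7; single-port devices use vep_num as is */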
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
-       new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
-       if (!new_entry)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&new_entry->duplicates);
-       new_entry->index = index;
-       list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
-
-       /* If the given qpn is also a promisc qp,
-        * it should be inserted into the duplicates list
-        */
-       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
-       if (pqp) {
-               dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
-               if (!dqp) {
-                       err = -ENOMEM;
-                       goto out_alloc;
-               }
-               dqp->qpn = qpn;
-               list_add_tail(&dqp->list, &new_entry->duplicates);
-       }
-
-       /* if no promisc qps for this vep, we are done */
-       if (list_empty(&s_steer->promisc_qps[steer]))
-               return 0;
-
-       /* now need to add all the promisc qps to the new
-        * steering entry, as they should also receive the packets
-        * destined for this address */
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox)) {
-               err = -ENOMEM;
-               goto out_alloc;
-       }
-       mgm = mailbox->buf;
-
-       err = mlx4_READ_ENTRY(dev, index, mailbox);
-       if (err)
-               goto out_mailbox;
-
-       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-       prot = be32_to_cpu(mgm->members_count) >> 30;
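-       /* members_count and the protocol share one big-endian word:
-        * bits 23:0 hold the count, bits 31:30 the protocol */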
-       list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
-               /* don't add already existing qpn */
-               if (pqp->qpn == qpn)
-                       continue;
-               if (members_count == MLX4_QP_PER_MGM) {
-                       /* out of space */
-                       err = -ENOMEM;
-                       goto out_mailbox;
-               }
-
-               /* add the qpn */
-               mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
-       }
-       /* update the qp count and write the entry back with all the promisc qps */
-       mgm->members_count = cpu_to_be32(members_count | (prot << 30));
-       err = mlx4_WRITE_ENTRY(dev, index, mailbox);
-
-out_mailbox:
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       if (!err)
-               return 0;
-out_alloc:
-       if (dqp) {
-               list_del(&dqp->list);
-               kfree(dqp);
-       }
-       list_del(&new_entry->list);
-       kfree(new_entry);
-       return err;
-}
-
-/* update the data structures with an existing steering entry */
-static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
-                                  enum mlx4_steer_type steer,
-                                  unsigned int index, u32 qpn)
-{
-       struct mlx4_steer *s_steer;
-       struct mlx4_steer_index *tmp_entry, *entry = NULL;
-       struct mlx4_promisc_qp *pqp;
-       struct mlx4_promisc_qp *dqp;
-       u8 pf_num;
-
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
-
-       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
-       if (!pqp)
-               return 0; /* nothing to do */
-
-       list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
-               if (tmp_entry->index == index) {
-                       entry = tmp_entry;
-                       break;
-               }
-       }
-       if (unlikely(!entry)) {
-               mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
-               return -EINVAL;
-       }
-
-       /* the given qpn is listed as a promisc qpn;
-        * we need to add it as a duplicate to this entry
-        * for future reference */
-       list_for_each_entry(dqp, &entry->duplicates, list) {
-               if (qpn == dqp->qpn)
-                       return 0; /* qp is already duplicated */
-       }
-
-       /* add the qp as a duplicate on this index */
-       dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
-       if (!dqp)
-               return -ENOMEM;
-       dqp->qpn = qpn;
-       list_add_tail(&dqp->list, &entry->duplicates);
-
-       return 0;
-}
-
-/* Check whether a qpn is a duplicate on a steering entry;
- * if so, it should not be removed from the mgm */
-static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
-                                 enum mlx4_steer_type steer,
-                                 unsigned int index, u32 qpn)
-{
-       struct mlx4_steer *s_steer;
-       struct mlx4_steer_index *tmp_entry, *entry = NULL;
-       struct mlx4_promisc_qp *dqp, *tmp_dqp;
-       u8 pf_num;
-
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
-
-       /* if qp is not promisc, it cannot be duplicated */
-       if (!get_promisc_qp(dev, pf_num, steer, qpn))
-               return false;
-
-       /* The qp is a promisc qp, so it is a duplicate on this index.
-        * Find the index entry and remove the duplicate */
-       list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
-               if (tmp_entry->index == index) {
-                       entry = tmp_entry;
-                       break;
-               }
-       }
-       if (unlikely(!entry)) {
-               mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
-               return false;
-       }
-       list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
-               if (dqp->qpn == qpn) {
-                       list_del(&dqp->list);
-                       kfree(dqp);
-               }
-       }
-       return true;
-}
-
-/* If a steering entry contains only promisc QPs, it can be removed. */
-static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
-                                     enum mlx4_steer_type steer,
-                                     unsigned int index, u32 tqpn)
-{
-       struct mlx4_steer *s_steer;
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_mgm *mgm;
-       struct mlx4_steer_index *entry = NULL, *tmp_entry;
-       u32 qpn;
-       u32 members_count;
-       bool ret = false;
-       int i;
-       u8 pf_num;
-
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return false;
-       mgm = mailbox->buf;
-
-       if (mlx4_READ_ENTRY(dev, index, mailbox))
-               goto out;
-       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-       for (i = 0;  i < members_count; i++) {
-               qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
-               if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
-                       /* the qp is not promisc, the entry can't be removed */
-                       goto out;
-               }
-       }
-       /* All the qps currently registered for this entry are promiscuous;
-        * check for duplicates */
-       ret = true;
-       list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
-               if (entry->index == index) {
-                       if (list_empty(&entry->duplicates)) {
-                               list_del(&entry->list);
-                               kfree(entry);
-                       } else {
-                               /* This entry contains duplicates so it shouldn't be removed */
-                               ret = false;
-                               goto out;
-                       }
-               }
-       }
-
-out:
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return ret;
-}
-
-static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
-                         enum mlx4_steer_type steer, u32 qpn)
-{
-       struct mlx4_steer *s_steer;
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_mgm *mgm;
-       struct mlx4_steer_index *entry;
-       struct mlx4_promisc_qp *pqp;
-       struct mlx4_promisc_qp *dqp;
-       u32 members_count;
-       u32 prot;
-       int i;
-       bool found;
-       int last_index;
-       int err;
-       u8 pf_num;
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
-
-       mutex_lock(&priv->mcg_table.mutex);
-
-       if (get_promisc_qp(dev, pf_num, steer, qpn)) {
-               err = 0;  /* Nothing to do, already exists */
-               goto out_mutex;
-       }
-
-       pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
-       if (!pqp) {
-               err = -ENOMEM;
-               goto out_mutex;
-       }
-       pqp->qpn = qpn;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox)) {
-               err = -ENOMEM;
-               goto out_alloc;
-       }
-       mgm = mailbox->buf;
-
-       /* the promisc qp needs to be added for each one of the steering
-        * entries; if it already exists there, it needs to be added as a
-        * duplicate for this entry */
-       list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
-               err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
-               if (err)
-                       goto out_mailbox;
-
-               members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-               prot = be32_to_cpu(mgm->members_count) >> 30;
-               found = false;
-               for (i = 0; i < members_count; i++) {
-                       if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
-                               /* Entry already exists, add to duplicates */
-                               dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
-                               if (!dqp)
-                                       goto out_mailbox;
-                               dqp->qpn = qpn;
-                               list_add_tail(&dqp->list, &entry->duplicates);
-                               found = true;
-                       }
-               }
-               if (!found) {
-                       /* Need to add the qpn to mgm */
-                       if (members_count == MLX4_QP_PER_MGM) {
-                               /* entry is full */
-                               err = -ENOMEM;
-                               goto out_mailbox;
-                       }
-                       mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
-                       mgm->members_count = cpu_to_be32(members_count | (prot << 30));
-                       err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
-                       if (err)
-                               goto out_mailbox;
-               }
-               last_index = entry->index;
-       }
-
-       /* add the new qpn to the list of promisc qps */
-       list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
-       /* now need to add all the promisc qps to the default entry */
-       memset(mgm, 0, sizeof *mgm);
-       members_count = 0;
-       list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
-               mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
-       mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
-
-       err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
-       if (err)
-               goto out_list;
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       mutex_unlock(&priv->mcg_table.mutex);
-       return 0;
-
-out_list:
-       list_del(&pqp->list);
-out_mailbox:
-       mlx4_free_cmd_mailbox(dev, mailbox);
-out_alloc:
-       kfree(pqp);
-out_mutex:
-       mutex_unlock(&priv->mcg_table.mutex);
-       return err;
-}
-
-static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
-                            enum mlx4_steer_type steer, u32 qpn)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_steer *s_steer;
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_mgm *mgm;
-       struct mlx4_steer_index *entry;
-       struct mlx4_promisc_qp *pqp;
-       struct mlx4_promisc_qp *dqp;
-       u32 members_count;
-       bool found;
-       bool back_to_list = false;
-       int loc, i;
-       int err;
-       u8 pf_num;
-
-       pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
-       s_steer = &mlx4_priv(dev)->steer[pf_num];
-       mutex_lock(&priv->mcg_table.mutex);
-
-       pqp = get_promisc_qp(dev, pf_num, steer, qpn);
-       if (unlikely(!pqp)) {
-               mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn);
-               /* nothing to do */
-               err = 0;
-               goto out_mutex;
-       }
-
-       /* remove from list of promisc qps */
-       list_del(&pqp->list);
-
-       /* set the default entry not to include the removed one */
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox)) {
-               err = -ENOMEM;
-               back_to_list = true;
-               goto out_list;
-       }
-       mgm = mailbox->buf;
-       members_count = 0;
-       list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
-               mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
-       mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
-
-       err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
-       if (err)
-               goto out_mailbox;
-
-       /* remove the qp from all the steering entries */
-       list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
-               found = false;
-               list_for_each_entry(dqp, &entry->duplicates, list) {
-                       if (dqp->qpn == qpn) {
-                               found = true;
-                               break;
-                       }
-               }
-               if (found) {
-                       /* a duplicate, no need to change the mgm,
-                        * only update the duplicates list */
-                       list_del(&dqp->list);
-                       kfree(dqp);
-               } else {
-                       err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
-                       if (err)
-                               goto out_mailbox;
-                       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-                       for (loc = -1, i = 0; i < members_count; ++i)
-                               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
-                                       loc = i;
-
-                       mgm->members_count = cpu_to_be32(--members_count |
-                                                        (MLX4_PROT_ETH << 30));
-                       mgm->qp[loc] = mgm->qp[i - 1];
-                       mgm->qp[i - 1] = 0;
-
-                       err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
-                       if (err)
-                               goto out_mailbox;
-               }
-
-       }
-
-out_mailbox:
-       mlx4_free_cmd_mailbox(dev, mailbox);
-out_list:
-       if (back_to_list)
-               list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
-       else
-               kfree(pqp);
-out_mutex:
-       mutex_unlock(&priv->mcg_table.mutex);
-       return err;
-}
-
-/*
- * Caller must hold MCG table semaphore.  gid and mgm parameters must
- * be properly aligned for command interface.
- *
- * Returns 0 unless a firmware command error occurs.
- *
- * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
- * and *mgm holds MGM entry.
- *
- * If GID is found in AMGM, *index = index in AMGM, *prev = index of
- * previous entry in hash chain and *mgm holds AMGM entry.
- *
- * If no AMGM exists for given gid, *index = -1, *prev = index of last
- * entry in hash chain and *mgm holds end of hash chain.
- */
-static int find_entry(struct mlx4_dev *dev, u8 port,
-                     u8 *gid, enum mlx4_protocol prot,
-                     enum mlx4_steer_type steer,
-                     struct mlx4_cmd_mailbox *mgm_mailbox,
-                     u16 *hash, int *prev, int *index)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_mgm *mgm = mgm_mailbox->buf;
-       u8 *mgid;
-       int err;
-       u8 op_mod = (prot == MLX4_PROT_ETH) ?
-               !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return -ENOMEM;
-       mgid = mailbox->buf;
-
-       memcpy(mgid, gid, 16);
-
-       err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       if (err)
-               return err;
-
-       if (0)
-               mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash);
-
-       *index = *hash;
-       *prev  = -1;
-
-       do {
-               err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
-               if (err)
-                       return err;
-
-               if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
-                       if (*index != *hash) {
-                               mlx4_err(dev, "Found zero MGID in AMGM.\n");
-                               err = -EINVAL;
-                       }
-                       return err;
-               }
-
-               if (!memcmp(mgm->gid, gid, 16) &&
-                   be32_to_cpu(mgm->members_count) >> 30 == prot)
-                       return err;
-
-               *prev = *index;
-               *index = be32_to_cpu(mgm->next_gid_index) >> 6;
-       } while (*index);
-
-       *index = -1;
-       return err;
-}
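To make the contract above concrete, here is a minimal sketch of how a caller consumes find_entry()'s three outcomes; it mirrors what mlx4_qp_attach_common() below actually does and adds nothing new:

	err = find_entry(dev, port, gid, prot, steer, mailbox,
			 &hash, &prev, &index);
	if (err)
		return err;		/* firmware command failed */

	if (index == -1) {
		/* GID absent: *prev is the tail of the hash chain; allocate
		 * a fresh AMGM entry and link it in after *prev. */
	} else if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
		/* index == hash and the MGM slot is empty: claim it by
		 * copying the GID in. */
	} else {
		/* GID found (in MGM if prev == -1, otherwise in AMGM):
		 * append the QP to the existing member list. */
	}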
-
-int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-                         int block_mcast_loopback, enum mlx4_protocol prot,
-                         enum mlx4_steer_type steer)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_mgm *mgm;
-       u32 members_count;
-       u16 hash;
-       int index, prev;
-       int link = 0;
-       int i;
-       int err;
-       u8 port = gid[5];
-       u8 new_entry = 0;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       mgm = mailbox->buf;
-
-       mutex_lock(&priv->mcg_table.mutex);
-       err = find_entry(dev, port, gid, prot, steer,
-                        mailbox, &hash, &prev, &index);
-       if (err)
-               goto out;
-
-       if (index != -1) {
-               if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
-                       new_entry = 1;
-                       memcpy(mgm->gid, gid, 16);
-               }
-       } else {
-               link = 1;
-
-               index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
-               if (index == -1) {
-                       mlx4_err(dev, "No AMGM entries left\n");
-                       err = -ENOMEM;
-                       goto out;
-               }
-               index += dev->caps.num_mgms;
-
-               memset(mgm, 0, sizeof *mgm);
-               memcpy(mgm->gid, gid, 16);
-       }
-
-       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-       if (members_count == MLX4_QP_PER_MGM) {
-               mlx4_err(dev, "MGM at index %x is full.\n", index);
-               err = -ENOMEM;
-               goto out;
-       }
-
-       for (i = 0; i < members_count; ++i)
-               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
-                       mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
-                       err = 0;
-                       goto out;
-               }
-
-       if (block_mcast_loopback)
-               mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
-                                                      (1U << MGM_BLCK_LB_BIT));
-       else
-               mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
-
-       mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
-
-       err = mlx4_WRITE_ENTRY(dev, index, mailbox);
-       if (err)
-               goto out;
-
-       if (!link)
-               goto out;
-
-       err = mlx4_READ_ENTRY(dev, prev, mailbox);
-       if (err)
-               goto out;
-
-       mgm->next_gid_index = cpu_to_be32(index << 6);
-
-       err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
-       if (err)
-               goto out;
-
-out:
-       if (prot == MLX4_PROT_ETH) {
-               /* manage the steering entry for promisc mode */
-               if (new_entry)
-                       new_steering_entry(dev, 0, port, steer, index, qp->qpn);
-               else
-                       existing_steering_entry(dev, 0, port, steer,
-                                               index, qp->qpn);
-       }
-       if (err && link && index != -1) {
-               if (index < dev->caps.num_mgms)
-                       mlx4_warn(dev, "Got AMGM index %d < %d\n",
-                                 index, dev->caps.num_mgms);
-               else
-                       mlx4_bitmap_free(&priv->mcg_table.bitmap,
-                                        index - dev->caps.num_mgms);
-       }
-       mutex_unlock(&priv->mcg_table.mutex);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-                         enum mlx4_protocol prot, enum mlx4_steer_type steer)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_mgm *mgm;
-       u32 members_count;
-       u16 hash;
-       int prev, index;
-       int i, loc;
-       int err;
-       u8 port = gid[5];
-       bool removed_entry = false;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-       mgm = mailbox->buf;
-
-       mutex_lock(&priv->mcg_table.mutex);
-
-       err = find_entry(dev, port, gid, prot, steer,
-                        mailbox, &hash, &prev, &index);
-       if (err)
-               goto out;
-
-       if (index == -1) {
-               mlx4_err(dev, "MGID %pI6 not found\n", gid);
-               err = -EINVAL;
-               goto out;
-       }
-
-       /* if this QP is also a promisc QP, it shouldn't be removed */
-       if (prot == MLX4_PROT_ETH &&
-           check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
-               goto out;
-
-       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-       for (loc = -1, i = 0; i < members_count; ++i)
-               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
-                       loc = i;
-
-       if (loc == -1) {
-               mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
-               err = -EINVAL;
-               goto out;
-       }
-
-       mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
-       mgm->qp[loc]       = mgm->qp[i - 1];
-       mgm->qp[i - 1]     = 0;
-
-       if (prot == MLX4_PROT_ETH)
-               removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
-       if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
-               err = mlx4_WRITE_ENTRY(dev, index, mailbox);
-               goto out;
-       }
-
-       /* We are going to delete the entry; members count should be 0 */
-       mgm->members_count = cpu_to_be32((u32) prot << 30);
-
-       if (prev == -1) {
-               /* Remove entry from MGM */
-               int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
-               if (amgm_index) {
-                       err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
-                       if (err)
-                               goto out;
-               } else
-                       memset(mgm->gid, 0, 16);
-
-               err = mlx4_WRITE_ENTRY(dev, index, mailbox);
-               if (err)
-                       goto out;
-
-               if (amgm_index) {
-                       if (amgm_index < dev->caps.num_mgms)
-                               mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
-                                         index, amgm_index, dev->caps.num_mgms);
-                       else
-                               mlx4_bitmap_free(&priv->mcg_table.bitmap,
-                                                amgm_index - dev->caps.num_mgms);
-               }
-       } else {
-               /* Remove entry from AMGM */
-               int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
-               err = mlx4_READ_ENTRY(dev, prev, mailbox);
-               if (err)
-                       goto out;
-
-               mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
-
-               err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
-               if (err)
-                       goto out;
-
-               if (index < dev->caps.num_mgms)
-                       mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
-                                 prev, index, dev->caps.num_mgms);
-               else
-                       mlx4_bitmap_free(&priv->mcg_table.bitmap,
-                                        index - dev->caps.num_mgms);
-       }
-
-out:
-       mutex_unlock(&priv->mcg_table.mutex);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-
-int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-                         int block_mcast_loopback, enum mlx4_protocol prot)
-{
-       enum mlx4_steer_type steer;
-
-       steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
-
-       if (prot == MLX4_PROT_ETH &&
-                       !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
-               return 0;
-
-       if (prot == MLX4_PROT_ETH)
-               gid[7] |= (steer << 1);
-
-       return mlx4_qp_attach_common(dev, qp, gid,
-                                    block_mcast_loopback, prot,
-                                    steer);
-}
-EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
-
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-                         enum mlx4_protocol prot)
-{
-       enum mlx4_steer_type steer;
-
-       steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
-
-       if (prot == MLX4_PROT_ETH &&
-                       !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
-               return 0;
-
-       if (prot == MLX4_PROT_ETH)
-               gid[7] |= (steer << 1);
-
-       return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
-}
-EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
-
-
-int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
-{
-       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
-               return 0;
-
-       return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
-}
-EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
-
-int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
-{
-       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
-               return 0;
-
-       return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
-}
-EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
-
-int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
-{
-       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
-               return 0;
-
-       return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
-}
-EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
-
-int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
-{
-       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
-               return 0;
-
-       return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
-}
-EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
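Taken together, the four exports above give upper drivers a symmetric enable/disable interface. A hedged sketch of a consumer follows; the function name and QP number are hypothetical, and port numbering is 1-based as elsewhere in the driver:

	/* Illustrative only: toggle multicast promiscuous mode for a QP. */
	static int example_toggle_mc_promisc(struct mlx4_dev *dev, u32 qpn)
	{
		int err;

		err = mlx4_multicast_promisc_add(dev, qpn, 1);
		if (err)
			return err;

		/* ... RX path now sees all multicast traffic on port 1 ... */

		return mlx4_multicast_promisc_remove(dev, qpn, 1);
	}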
-
-int mlx4_init_mcg_table(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int err;
-
-       err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
-                              dev->caps.num_amgms - 1, 0, 0);
-       if (err)
-               return err;
-
-       mutex_init(&priv->mcg_table.mutex);
-
-       return 0;
-}
-
-void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
-{
-       mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
-}
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
deleted file mode 100644 (file)
index a2fcd84..0000000
+++ /dev/null
@@ -1,459 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
- * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX4_H
-#define MLX4_H
-
-#include <linux/mutex.h>
-#include <linux/radix-tree.h>
-#include <linux/timer.h>
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
-
-#include <linux/mlx4/device.h>
-#include <linux/mlx4/driver.h>
-#include <linux/mlx4/doorbell.h>
-
-#define DRV_NAME       "mlx4_core"
-#define DRV_VERSION    "1.0"
-#define DRV_RELDATE    "July 14, 2011"
-
-enum {
-       MLX4_HCR_BASE           = 0x80680,
-       MLX4_HCR_SIZE           = 0x0001c,
-       MLX4_CLR_INT_SIZE       = 0x00008
-};
-
-enum {
-       MLX4_MGM_ENTRY_SIZE     =  0x100,
-       MLX4_QP_PER_MGM         = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
-       MLX4_MTT_ENTRY_PER_SEG  = 8
-};
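As a quick check of the arithmetic: MLX4_MGM_ENTRY_SIZE is 0x100 = 256 bytes, so MLX4_QP_PER_MGM = 4 * (256/16 - 2) = 4 * 14 = 56. That agrees with struct mlx4_mgm further down in this header: a 32-byte header (next_gid_index, members_count, 8 reserved bytes and the 16-byte GID) leaves 224 bytes, i.e. 56 four-byte QP slots. A compile-time assertion along these lines could verify it (a sketch, not present in the original source; it would have to sit after the struct definition):

	BUILD_BUG_ON(MLX4_QP_PER_MGM != 56);
	BUILD_BUG_ON(sizeof(struct mlx4_mgm) != MLX4_MGM_ENTRY_SIZE);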
-
-enum {
-       MLX4_NUM_PDS            = 1 << 15
-};
-
-enum {
-       MLX4_CMPT_TYPE_QP       = 0,
-       MLX4_CMPT_TYPE_SRQ      = 1,
-       MLX4_CMPT_TYPE_CQ       = 2,
-       MLX4_CMPT_TYPE_EQ       = 3,
-       MLX4_CMPT_NUM_TYPE
-};
-
-enum {
-       MLX4_CMPT_SHIFT         = 24,
-       MLX4_NUM_CMPTS          = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
-};
-
-#ifdef CONFIG_MLX4_DEBUG
-extern int mlx4_debug_level;
-#else /* CONFIG_MLX4_DEBUG */
-#define mlx4_debug_level       (0)
-#endif /* CONFIG_MLX4_DEBUG */
-
-#define mlx4_dbg(mdev, format, arg...)                                 \
-do {                                                                   \
-       if (mlx4_debug_level)                                           \
-               dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
-} while (0)
-
-#define mlx4_err(mdev, format, arg...) \
-       dev_err(&mdev->pdev->dev, format, ##arg)
-#define mlx4_info(mdev, format, arg...) \
-       dev_info(&mdev->pdev->dev, format, ##arg)
-#define mlx4_warn(mdev, format, arg...) \
-       dev_warn(&mdev->pdev->dev, format, ##arg)
-
-struct mlx4_bitmap {
-       u32                     last;
-       u32                     top;
-       u32                     max;
-       u32                     reserved_top;
-       u32                     mask;
-       u32                     avail;
-       spinlock_t              lock;
-       unsigned long          *table;
-};
-
-struct mlx4_buddy {
-       unsigned long         **bits;
-       unsigned int           *num_free;
-       int                     max_order;
-       spinlock_t              lock;
-};
-
-struct mlx4_icm;
-
-struct mlx4_icm_table {
-       u64                     virt;
-       int                     num_icm;
-       int                     num_obj;
-       int                     obj_size;
-       int                     lowmem;
-       int                     coherent;
-       struct mutex            mutex;
-       struct mlx4_icm       **icm;
-};
-
-struct mlx4_eq {
-       struct mlx4_dev        *dev;
-       void __iomem           *doorbell;
-       int                     eqn;
-       u32                     cons_index;
-       u16                     irq;
-       u16                     have_irq;
-       int                     nent;
-       struct mlx4_buf_list   *page_list;
-       struct mlx4_mtt         mtt;
-};
-
-struct mlx4_profile {
-       int                     num_qp;
-       int                     rdmarc_per_qp;
-       int                     num_srq;
-       int                     num_cq;
-       int                     num_mcg;
-       int                     num_mpt;
-       int                     num_mtt;
-};
-
-struct mlx4_fw {
-       u64                     clr_int_base;
-       u64                     catas_offset;
-       struct mlx4_icm        *fw_icm;
-       struct mlx4_icm        *aux_icm;
-       u32                     catas_size;
-       u16                     fw_pages;
-       u8                      clr_int_bar;
-       u8                      catas_bar;
-};
-
-#define MGM_QPN_MASK       0x00FFFFFF
-#define MGM_BLCK_LB_BIT    30
-
-struct mlx4_promisc_qp {
-       struct list_head list;
-       u32 qpn;
-};
-
-struct mlx4_steer_index {
-       struct list_head list;
-       unsigned int index;
-       struct list_head duplicates;
-};
-
-struct mlx4_mgm {
-       __be32                  next_gid_index;
-       __be32                  members_count;
-       u32                     reserved[2];
-       u8                      gid[16];
-       __be32                  qp[MLX4_QP_PER_MGM];
-};
-struct mlx4_cmd {
-       struct pci_pool        *pool;
-       void __iomem           *hcr;
-       struct mutex            hcr_mutex;
-       struct semaphore        poll_sem;
-       struct semaphore        event_sem;
-       int                     max_cmds;
-       spinlock_t              context_lock;
-       int                     free_head;
-       struct mlx4_cmd_context *context;
-       u16                     token_mask;
-       u8                      use_events;
-       u8                      toggle;
-};
-
-struct mlx4_uar_table {
-       struct mlx4_bitmap      bitmap;
-};
-
-struct mlx4_mr_table {
-       struct mlx4_bitmap      mpt_bitmap;
-       struct mlx4_buddy       mtt_buddy;
-       u64                     mtt_base;
-       u64                     mpt_base;
-       struct mlx4_icm_table   mtt_table;
-       struct mlx4_icm_table   dmpt_table;
-};
-
-struct mlx4_cq_table {
-       struct mlx4_bitmap      bitmap;
-       spinlock_t              lock;
-       struct radix_tree_root  tree;
-       struct mlx4_icm_table   table;
-       struct mlx4_icm_table   cmpt_table;
-};
-
-struct mlx4_eq_table {
-       struct mlx4_bitmap      bitmap;
-       char                   *irq_names;
-       void __iomem           *clr_int;
-       void __iomem          **uar_map;
-       u32                     clr_mask;
-       struct mlx4_eq         *eq;
-       struct mlx4_icm_table   table;
-       struct mlx4_icm_table   cmpt_table;
-       int                     have_irq;
-       u8                      inta_pin;
-};
-
-struct mlx4_srq_table {
-       struct mlx4_bitmap      bitmap;
-       spinlock_t              lock;
-       struct radix_tree_root  tree;
-       struct mlx4_icm_table   table;
-       struct mlx4_icm_table   cmpt_table;
-};
-
-struct mlx4_qp_table {
-       struct mlx4_bitmap      bitmap;
-       u32                     rdmarc_base;
-       int                     rdmarc_shift;
-       spinlock_t              lock;
-       struct mlx4_icm_table   qp_table;
-       struct mlx4_icm_table   auxc_table;
-       struct mlx4_icm_table   altc_table;
-       struct mlx4_icm_table   rdmarc_table;
-       struct mlx4_icm_table   cmpt_table;
-};
-
-struct mlx4_mcg_table {
-       struct mutex            mutex;
-       struct mlx4_bitmap      bitmap;
-       struct mlx4_icm_table   table;
-};
-
-struct mlx4_catas_err {
-       u32 __iomem            *map;
-       struct timer_list       timer;
-       struct list_head        list;
-};
-
-#define MLX4_MAX_MAC_NUM       128
-#define MLX4_MAC_TABLE_SIZE    (MLX4_MAX_MAC_NUM << 3)
-
-struct mlx4_mac_table {
-       __be64                  entries[MLX4_MAX_MAC_NUM];
-       int                     refs[MLX4_MAX_MAC_NUM];
-       struct mutex            mutex;
-       int                     total;
-       int                     max;
-};
-
-#define MLX4_MAX_VLAN_NUM      128
-#define MLX4_VLAN_TABLE_SIZE   (MLX4_MAX_VLAN_NUM << 2)
-
-struct mlx4_vlan_table {
-       __be32                  entries[MLX4_MAX_VLAN_NUM];
-       int                     refs[MLX4_MAX_VLAN_NUM];
-       struct mutex            mutex;
-       int                     total;
-       int                     max;
-};
-
-struct mlx4_mac_entry {
-       u64 mac;
-};
-
-struct mlx4_port_info {
-       struct mlx4_dev        *dev;
-       int                     port;
-       char                    dev_name[16];
-       struct device_attribute port_attr;
-       enum mlx4_port_type     tmp_type;
-       struct mlx4_mac_table   mac_table;
-       struct radix_tree_root  mac_tree;
-       struct mlx4_vlan_table  vlan_table;
-       int                     base_qpn;
-};
-
-struct mlx4_sense {
-       struct mlx4_dev         *dev;
-       u8                      do_sense_port[MLX4_MAX_PORTS + 1];
-       u8                      sense_allowed[MLX4_MAX_PORTS + 1];
-       struct delayed_work     sense_poll;
-};
-
-struct mlx4_msix_ctl {
-       u64             pool_bm;
-       spinlock_t      pool_lock;
-};
-
-struct mlx4_steer {
-       struct list_head promisc_qps[MLX4_NUM_STEERS];
-       struct list_head steer_entries[MLX4_NUM_STEERS];
-       struct list_head high_prios;
-};
-
-struct mlx4_priv {
-       struct mlx4_dev         dev;
-
-       struct list_head        dev_list;
-       struct list_head        ctx_list;
-       spinlock_t              ctx_lock;
-
-       struct list_head        pgdir_list;
-       struct mutex            pgdir_mutex;
-
-       struct mlx4_fw          fw;
-       struct mlx4_cmd         cmd;
-
-       struct mlx4_bitmap      pd_bitmap;
-       struct mlx4_uar_table   uar_table;
-       struct mlx4_mr_table    mr_table;
-       struct mlx4_cq_table    cq_table;
-       struct mlx4_eq_table    eq_table;
-       struct mlx4_srq_table   srq_table;
-       struct mlx4_qp_table    qp_table;
-       struct mlx4_mcg_table   mcg_table;
-       struct mlx4_bitmap      counters_bitmap;
-
-       struct mlx4_catas_err   catas_err;
-
-       void __iomem           *clr_base;
-
-       struct mlx4_uar         driver_uar;
-       void __iomem           *kar;
-       struct mlx4_port_info   port[MLX4_MAX_PORTS + 1];
-       struct mlx4_sense       sense;
-       struct mutex            port_mutex;
-       struct mlx4_msix_ctl    msix_ctl;
-       struct mlx4_steer       *steer;
-       struct list_head        bf_list;
-       struct mutex            bf_mutex;
-       struct io_mapping       *bf_mapping;
-};
-
-static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
-{
-       return container_of(dev, struct mlx4_priv, dev);
-}
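mlx4_priv() is the usual container_of() accessor: the public struct mlx4_dev is embedded in struct mlx4_priv, so any function handed the public handle recovers the private state with no lookup table. For example, as the mcg code above already does:

	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mcg_table.mutex);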
-
-#define MLX4_SENSE_RANGE       (HZ * 3)
-
-extern struct workqueue_struct *mlx4_wq;
-
-u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
-void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
-u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
-void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
-u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
-                    u32 reserved_bot, u32 reserved_top);
-void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
-
-int mlx4_reset(struct mlx4_dev *dev);
-
-int mlx4_alloc_eq_table(struct mlx4_dev *dev);
-void mlx4_free_eq_table(struct mlx4_dev *dev);
-
-int mlx4_init_pd_table(struct mlx4_dev *dev);
-int mlx4_init_uar_table(struct mlx4_dev *dev);
-int mlx4_init_mr_table(struct mlx4_dev *dev);
-int mlx4_init_eq_table(struct mlx4_dev *dev);
-int mlx4_init_cq_table(struct mlx4_dev *dev);
-int mlx4_init_qp_table(struct mlx4_dev *dev);
-int mlx4_init_srq_table(struct mlx4_dev *dev);
-int mlx4_init_mcg_table(struct mlx4_dev *dev);
-
-void mlx4_cleanup_pd_table(struct mlx4_dev *dev);
-void mlx4_cleanup_uar_table(struct mlx4_dev *dev);
-void mlx4_cleanup_mr_table(struct mlx4_dev *dev);
-void mlx4_cleanup_eq_table(struct mlx4_dev *dev);
-void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
-void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
-void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
-void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
-
-void mlx4_start_catas_poll(struct mlx4_dev *dev);
-void mlx4_stop_catas_poll(struct mlx4_dev *dev);
-void mlx4_catas_init(void);
-int mlx4_restart_one(struct pci_dev *pdev);
-int mlx4_register_device(struct mlx4_dev *dev);
-void mlx4_unregister_device(struct mlx4_dev *dev);
-void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port);
-
-struct mlx4_dev_cap;
-struct mlx4_init_hca_param;
-
-u64 mlx4_make_profile(struct mlx4_dev *dev,
-                     struct mlx4_profile *request,
-                     struct mlx4_dev_cap *dev_cap,
-                     struct mlx4_init_hca_param *init_hca);
-
-int mlx4_cmd_init(struct mlx4_dev *dev);
-void mlx4_cmd_cleanup(struct mlx4_dev *dev);
-void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
-int mlx4_cmd_use_events(struct mlx4_dev *dev);
-void mlx4_cmd_use_polling(struct mlx4_dev *dev);
-
-void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
-void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
-
-void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
-
-void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
-
-void mlx4_handle_catas_err(struct mlx4_dev *dev);
-
-int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
-                   enum mlx4_port_type *type);
-void mlx4_do_sense_ports(struct mlx4_dev *dev,
-                        enum mlx4_port_type *stype,
-                        enum mlx4_port_type *defaults);
-void mlx4_start_sense(struct mlx4_dev *dev);
-void mlx4_stop_sense(struct mlx4_dev *dev);
-void mlx4_sense_init(struct mlx4_dev *dev);
-int mlx4_check_port_params(struct mlx4_dev *dev,
-                          enum mlx4_port_type *port_type);
-int mlx4_change_port_types(struct mlx4_dev *dev,
-                          enum mlx4_port_type *port_types);
-
-void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
-void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
-
-int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
-int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
-
-int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-                         enum mlx4_protocol prot, enum mlx4_steer_type steer);
-int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-                         int block_mcast_loopback, enum mlx4_protocol prot,
-                         enum mlx4_steer_type steer);
-#endif /* MLX4_H */
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
deleted file mode 100644 (file)
index ed84811..0000000
+++ /dev/null
@@ -1,607 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef _MLX4_EN_H_
-#define _MLX4_EN_H_
-
-#include <linux/bitops.h>
-#include <linux/compiler.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/netdevice.h>
-#include <linux/if_vlan.h>
-
-#include <linux/mlx4/device.h>
-#include <linux/mlx4/qp.h>
-#include <linux/mlx4/cq.h>
-#include <linux/mlx4/srq.h>
-#include <linux/mlx4/doorbell.h>
-#include <linux/mlx4/cmd.h>
-
-#include "en_port.h"
-
-#define DRV_NAME       "mlx4_en"
-#define DRV_VERSION    "1.5.4.1"
-#define DRV_RELDATE    "March 2011"
-
-#define MLX4_EN_MSG_LEVEL      (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
-
-/*
- * Device constants
- */
-
-
-#define MLX4_EN_PAGE_SHIFT     12
-#define MLX4_EN_PAGE_SIZE      (1 << MLX4_EN_PAGE_SHIFT)
-#define MAX_RX_RINGS           16
-#define MIN_RX_RINGS           4
-#define TXBB_SIZE              64
-#define HEADROOM               (2048 / TXBB_SIZE + 1)
-#define STAMP_STRIDE           64
-#define STAMP_DWORDS           (STAMP_STRIDE / 4)
-#define STAMP_SHIFT            31
-#define STAMP_VAL              0x7fffffff
-#define STATS_DELAY            (HZ / 4)
-
-/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
-#define MAX_DESC_SIZE          512
-#define MAX_DESC_TXBBS         (MAX_DESC_SIZE / TXBB_SIZE)
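In other words, a worst-case descriptor occupies MAX_DESC_SIZE / TXBB_SIZE = 512 / 64 = 8 TX building blocks, while the typical 352-byte TSO descriptor mentioned above needs only DIV_ROUND_UP(352, 64) = 6.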
-
-/*
- * OS related constants and tunables
- */
-
-#define MLX4_EN_WATCHDOG_TIMEOUT       (15 * HZ)
-
-#define MLX4_EN_ALLOC_ORDER    2
-#define MLX4_EN_ALLOC_SIZE     (PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
-
-#define MLX4_EN_MAX_LRO_DESCRIPTORS    32
-
-/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
- * and 4K allocations) */
-enum {
-       FRAG_SZ0 = 512 - NET_IP_ALIGN,
-       FRAG_SZ1 = 1024,
-       FRAG_SZ2 = 4096,
-       FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
-};
-#define MLX4_EN_MAX_RX_FRAGS   4
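A worked check of the "at most 4 fragments" claim, assuming 4 KB pages and the common NET_IP_ALIGN of 2: the first three fragments hold 510 + 1024 + 4096 = 5630 bytes, and FRAG_SZ3 = MLX4_EN_ALLOC_SIZE = PAGE_SIZE << MLX4_EN_ALLOC_ORDER = 16384 bytes, so a 9600-byte frame leaves only 3970 bytes for the last fragment, with room to spare.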
-
-/* Maximum ring sizes */
-#define MLX4_EN_MAX_TX_SIZE    8192
-#define MLX4_EN_MAX_RX_SIZE    8192
-
-/* Minimum ring size for our page-allocation scheme to work */
-#define MLX4_EN_MIN_RX_SIZE    (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
-#define MLX4_EN_MIN_TX_SIZE    (4096 / TXBB_SIZE)
-
-#define MLX4_EN_SMALL_PKT_SIZE         64
-#define MLX4_EN_NUM_TX_RINGS           8
-#define MLX4_EN_NUM_PPP_RINGS          8
-#define MAX_TX_RINGS                   (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
-#define MLX4_EN_DEF_TX_RING_SIZE       512
-#define MLX4_EN_DEF_RX_RING_SIZE       1024
-
-/* Target number of packets to coalesce with interrupt moderation */
-#define MLX4_EN_RX_COAL_TARGET 44
-#define MLX4_EN_RX_COAL_TIME   0x10
-
-#define MLX4_EN_TX_COAL_PKTS   5
-#define MLX4_EN_TX_COAL_TIME   0x80
-
-#define MLX4_EN_RX_RATE_LOW            400000
-#define MLX4_EN_RX_COAL_TIME_LOW       0
-#define MLX4_EN_RX_RATE_HIGH           450000
-#define MLX4_EN_RX_COAL_TIME_HIGH      128
-#define MLX4_EN_RX_SIZE_THRESH         1024
-#define MLX4_EN_RX_RATE_THRESH         (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
-#define MLX4_EN_SAMPLE_INTERVAL                0
-#define MLX4_EN_AVG_PKT_SMALL          256
-
-#define MLX4_EN_AUTO_CONF      0xffff
-
-#define MLX4_EN_DEF_RX_PAUSE   1
-#define MLX4_EN_DEF_TX_PAUSE   1
-
-/* Interval between successive polls in the Tx routine when polling is used
-   instead of interrupts (in per-core Tx rings) - should be power of 2 */
-#define MLX4_EN_TX_POLL_MODER  16
-#define MLX4_EN_TX_POLL_TIMEOUT        (HZ / 4)
-
-#define ETH_LLC_SNAP_SIZE      8
-
-#define SMALL_PACKET_SIZE      (256 - NET_IP_ALIGN)
-#define HEADER_COPY_SIZE       (128 - NET_IP_ALIGN)
-#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
-
-#define MLX4_EN_MIN_MTU                46
-#define ETH_BCAST              0xffffffffffffULL
-
-#define MLX4_EN_LOOPBACK_RETRIES       5
-#define MLX4_EN_LOOPBACK_TIMEOUT       100
-
-#ifdef MLX4_EN_PERF_STAT
-/* Number of samples to 'average' */
-#define AVG_SIZE                       128
-#define AVG_FACTOR                     1024
-#define NUM_PERF_STATS                 NUM_PERF_COUNTERS
-
-#define INC_PERF_COUNTER(cnt)          (++(cnt))
-#define ADD_PERF_COUNTER(cnt, add)     ((cnt) += (add))
-#define AVG_PERF_COUNTER(cnt, sample) \
-       ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
-#define GET_PERF_COUNTER(cnt)          (cnt)
-#define GET_AVG_PERF_COUNTER(cnt)      ((cnt) / AVG_FACTOR)
-
-#else
-
-#define NUM_PERF_STATS                 0
-#define INC_PERF_COUNTER(cnt)          do {} while (0)
-#define ADD_PERF_COUNTER(cnt, add)     do {} while (0)
-#define AVG_PERF_COUNTER(cnt, sample)  do {} while (0)
-#define GET_PERF_COUNTER(cnt)          (0)
-#define GET_AVG_PERF_COUNTER(cnt)      (0)
-#endif /* MLX4_EN_PERF_STAT */
-
-/*
- * Configurables
- */
-
-enum cq_type {
-       RX = 0,
-       TX = 1,
-};
-
-
-/*
- * Useful macros
- */
-#define ROUNDUP_LOG2(x)                ilog2(roundup_pow_of_two(x))
-#define XNOR(x, y)             (!(x) == !(y))
-#define ILLEGAL_MAC(addr)      ((addr) == 0xffffffffffffULL || (addr) == 0x0)
-
-
-struct mlx4_en_tx_info {
-       struct sk_buff *skb;
-       u32 nr_txbb;
-       u8 linear;
-       u8 data_offset;
-       u8 inl;
-};
-
-
-#define MLX4_EN_BIT_DESC_OWN   0x80000000
-#define CTRL_SIZE      sizeof(struct mlx4_wqe_ctrl_seg)
-#define MLX4_EN_MEMTYPE_PAD    0x100
-#define DS_SIZE                sizeof(struct mlx4_wqe_data_seg)
-
-
-struct mlx4_en_tx_desc {
-       struct mlx4_wqe_ctrl_seg ctrl;
-       union {
-               struct mlx4_wqe_data_seg data; /* at least one data segment */
-               struct mlx4_wqe_lso_seg lso;
-               struct mlx4_wqe_inline_seg inl;
-       };
-};
-
-#define MLX4_EN_USE_SRQ                0x01000000
-
-#define MLX4_EN_CX3_LOW_ID     0x1000
-#define MLX4_EN_CX3_HIGH_ID    0x1005
-
-struct mlx4_en_rx_alloc {
-       struct page *page;
-       u16 offset;
-};
-
-struct mlx4_en_tx_ring {
-       struct mlx4_hwq_resources wqres;
-       u32 size; /* number of TXBBs */
-       u32 size_mask;
-       u16 stride;
-       u16 cqn;        /* index of port CQ associated with this ring */
-       u32 prod;
-       u32 cons;
-       u32 buf_size;
-       u32 doorbell_qpn;
-       void *buf;
-       u16 poll_cnt;
-       int blocked;
-       struct mlx4_en_tx_info *tx_info;
-       u8 *bounce_buf;
-       u32 last_nr_txbb;
-       struct mlx4_qp qp;
-       struct mlx4_qp_context context;
-       int qpn;
-       enum mlx4_qp_state qp_state;
-       struct mlx4_srq dummy;
-       unsigned long bytes;
-       unsigned long packets;
-       spinlock_t comp_lock;
-       struct mlx4_bf bf;
-       bool bf_enabled;
-};
-
-struct mlx4_en_rx_desc {
-       /* actual number of entries depends on rx ring stride */
-       struct mlx4_wqe_data_seg data[0];
-};
-
-struct mlx4_en_rx_ring {
-       struct mlx4_hwq_resources wqres;
-       struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
-       u32 size;       /* number of Rx descs */
-       u32 actual_size;
-       u32 size_mask;
-       u16 stride;
-       u16 log_stride;
-       u16 cqn;        /* index of port CQ associated with this ring */
-       u32 prod;
-       u32 cons;
-       u32 buf_size;
-       void *buf;
-       void *rx_info;
-       unsigned long bytes;
-       unsigned long packets;
-};
-
-
-static inline int mlx4_en_can_lro(__be16 status)
-{
-       return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4       |
-                                    MLX4_CQE_STATUS_IPV4F      |
-                                    MLX4_CQE_STATUS_IPV6       |
-                                    MLX4_CQE_STATUS_IPV4OPT    |
-                                    MLX4_CQE_STATUS_TCP        |
-                                    MLX4_CQE_STATUS_UDP        |
-                                    MLX4_CQE_STATUS_IPOK)) ==
-               cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
-                           MLX4_CQE_STATUS_IPOK |
-                           MLX4_CQE_STATUS_TCP);
-}
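In words: the mask selects every fragment-, protocol- and checksum-related status bit, and the comparison accepts a CQE only when IPV4, IPOK and TCP are set while IPV4F, IPV4OPT, IPV6 and UDP are all clear. LRO is therefore attempted only for unfragmented, option-free IPv4 TCP packets whose IP checksum validated, the only flows it can safely aggregate.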
-
-struct mlx4_en_cq {
-       struct mlx4_cq          mcq;
-       struct mlx4_hwq_resources wqres;
-       int                     ring;
-       spinlock_t              lock;
-       struct net_device      *dev;
-       struct napi_struct      napi;
-       /* Per-core Tx cq processing support */
-       struct timer_list timer;
-       int size;
-       int buf_size;
-       unsigned vector;
-       enum cq_type is_tx;
-       u16 moder_time;
-       u16 moder_cnt;
-       struct mlx4_cqe *buf;
-#define MLX4_EN_OPCODE_ERROR   0x1e
-};
-
-struct mlx4_en_port_profile {
-       u32 flags;
-       u32 tx_ring_num;
-       u32 rx_ring_num;
-       u32 tx_ring_size;
-       u32 rx_ring_size;
-       u8 rx_pause;
-       u8 rx_ppp;
-       u8 tx_pause;
-       u8 tx_ppp;
-};
-
-struct mlx4_en_profile {
-       int rss_xor;
-       int tcp_rss;
-       int udp_rss;
-       u8 rss_mask;
-       u32 active_ports;
-       u32 small_pkt_int;
-       u8 no_reset;
-       struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
-};
-
-struct mlx4_en_dev {
-       struct mlx4_dev         *dev;
-       struct pci_dev          *pdev;
-       struct mutex            state_lock;
-       struct net_device       *pndev[MLX4_MAX_PORTS + 1];
-       u32                     port_cnt;
-       bool                    device_up;
-       struct mlx4_en_profile  profile;
-       u32                     LSO_support;
-       struct workqueue_struct *workqueue;
-       struct device           *dma_device;
-       void __iomem            *uar_map;
-       struct mlx4_uar         priv_uar;
-       struct mlx4_mr          mr;
-       u32                     priv_pdn;
-       spinlock_t              uar_lock;
-       u8                      mac_removed[MLX4_MAX_PORTS + 1];
-};
-
-
-struct mlx4_en_rss_map {
-       int base_qpn;
-       struct mlx4_qp qps[MAX_RX_RINGS];
-       enum mlx4_qp_state state[MAX_RX_RINGS];
-       struct mlx4_qp indir_qp;
-       enum mlx4_qp_state indir_state;
-};
-
-struct mlx4_en_rss_context {
-       __be32 base_qpn;
-       __be32 default_qpn;
-       u16 reserved;
-       u8 hash_fn;
-       u8 flags;
-       __be32 rss_key[10];
-       __be32 base_qpn_udp;
-};
-
-struct mlx4_en_port_state {
-       int link_state;
-       int link_speed;
-       int transciver;
-};
-
-struct mlx4_en_pkt_stats {
-       unsigned long broadcast;
-       unsigned long rx_prio[8];
-       unsigned long tx_prio[8];
-#define NUM_PKT_STATS          17
-};
-
-struct mlx4_en_port_stats {
-       unsigned long tso_packets;
-       unsigned long queue_stopped;
-       unsigned long wake_queue;
-       unsigned long tx_timeout;
-       unsigned long rx_alloc_failed;
-       unsigned long rx_chksum_good;
-       unsigned long rx_chksum_none;
-       unsigned long tx_chksum_offload;
-#define NUM_PORT_STATS         8
-};
-
-struct mlx4_en_perf_stats {
-       u32 tx_poll;
-       u64 tx_pktsz_avg;
-       u32 inflight_avg;
-       u16 tx_coal_avg;
-       u16 rx_coal_avg;
-       u32 napi_quota;
-#define NUM_PERF_COUNTERS              6
-};
-
-struct mlx4_en_frag_info {
-       u16 frag_size;
-       u16 frag_prefix_size;
-       u16 frag_stride;
-       u16 frag_align;
-       u16 last_offset;
-};
-
-struct mlx4_en_priv {
-       struct mlx4_en_dev *mdev;
-       struct mlx4_en_port_profile *prof;
-       struct net_device *dev;
-       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-       struct net_device_stats stats;
-       struct net_device_stats ret_stats;
-       struct mlx4_en_port_state port_state;
-       spinlock_t stats_lock;
-
-       unsigned long last_moder_packets;
-       unsigned long last_moder_tx_packets;
-       unsigned long last_moder_bytes;
-       unsigned long last_moder_jiffies;
-       int last_moder_time;
-       u16 rx_usecs;
-       u16 rx_frames;
-       u16 tx_usecs;
-       u16 tx_frames;
-       u32 pkt_rate_low;
-       u16 rx_usecs_low;
-       u32 pkt_rate_high;
-       u16 rx_usecs_high;
-       u16 sample_interval;
-       u16 adaptive_rx_coal;
-       u32 msg_enable;
-       u32 loopback_ok;
-       u32 validate_loopback;
-
-       struct mlx4_hwq_resources res;
-       int link_state;
-       int last_link_state;
-       bool port_up;
-       int port;
-       int registered;
-       int allocated;
-       int stride;
-       u64 mac;
-       int mac_index;
-       unsigned max_mtu;
-       int base_qpn;
-
-       struct mlx4_en_rss_map rss_map;
-       u32 flags;
-#define MLX4_EN_FLAG_PROMISC   0x1
-#define MLX4_EN_FLAG_MC_PROMISC        0x2
-       u32 tx_ring_num;
-       u32 rx_ring_num;
-       u32 rx_skb_size;
-       struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
-       u16 num_frags;
-       u16 log_rx_info;
-
-       struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
-       int tx_vector;
-       struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
-       struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
-       struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
-       struct work_struct mcast_task;
-       struct work_struct mac_task;
-       struct work_struct watchdog_task;
-       struct work_struct linkstate_task;
-       struct delayed_work stats_task;
-       struct mlx4_en_perf_stats pstats;
-       struct mlx4_en_pkt_stats pkstats;
-       struct mlx4_en_port_stats port_stats;
-       char *mc_addrs;
-       int mc_addrs_cnt;
-       struct mlx4_en_stat_out_mbox hw_stats;
-       int vids[128];
-       bool wol;
-};
-
-enum mlx4_en_wol {
-       MLX4_EN_WOL_MAGIC = (1ULL << 61),
-       MLX4_EN_WOL_ENABLED = (1ULL << 62),
-       MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
-};
-
-
-void mlx4_en_destroy_netdev(struct net_device *dev);
-int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
-                       struct mlx4_en_port_profile *prof);
-
-int mlx4_en_start_port(struct net_device *dev);
-void mlx4_en_stop_port(struct net_device *dev);
-
-void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors);
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
-
-int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
-                     int entries, int ring, enum cq_type mode);
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
-                       bool reserve_vectors);
-int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
-void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
-int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
-int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
-
-void mlx4_en_poll_tx_cq(unsigned long data);
-void mlx4_en_tx_irq(struct mlx4_cq *mcq);
-u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
-netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
-
-int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
-                          int qpn, u32 size, u16 stride);
-void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
-int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
-                            struct mlx4_en_tx_ring *ring,
-                            int cq);
-void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
-                               struct mlx4_en_tx_ring *ring);
-
-int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-                          struct mlx4_en_rx_ring *ring,
-                          u32 size, u16 stride);
-void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
-                            struct mlx4_en_rx_ring *ring);
-int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
-void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
-                               struct mlx4_en_rx_ring *ring);
-int mlx4_en_process_rx_cq(struct net_device *dev,
-                         struct mlx4_en_cq *cq,
-                         int budget);
-int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
-void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
-                            int is_tx, int rss, int qpn, int cqn,
-                            struct mlx4_qp_context *context);
-void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
-int mlx4_en_map_buffer(struct mlx4_buf *buf);
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
-
-void mlx4_en_calc_rx_buf(struct net_device *dev);
-int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
-void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
-int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
-void mlx4_en_rx_irq(struct mlx4_cq *mcq);
-
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
-int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
-int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
-                         u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
-int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
-                          u8 promisc);
-
-int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
-int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
-
-#define MLX4_EN_NUM_SELF_TEST  5
-void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
-u64 mlx4_en_mac_to_u64(u8 *addr);
-
-/*
- * Globals
- */
-extern const struct ethtool_ops mlx4_en_ethtool_ops;
-
-
-
-/*
- * printk / logging functions
- */
-
-int en_print(const char *level, const struct mlx4_en_priv *priv,
-            const char *format, ...) __attribute__ ((format (printf, 3, 4)));
-
-#define en_dbg(mlevel, priv, format, arg...)                   \
-do {                                                           \
-       if (NETIF_MSG_##mlevel & priv->msg_enable)              \
-               en_print(KERN_DEBUG, priv, format, ##arg);      \
-} while (0)
-#define en_warn(priv, format, arg...)                  \
-       en_print(KERN_WARNING, priv, format, ##arg)
-#define en_err(priv, format, arg...)                   \
-       en_print(KERN_ERR, priv, format, ##arg)
-#define en_info(priv, format, arg...)                  \
-       en_print(KERN_INFO, priv, format, ## arg)
-
-#define mlx4_err(mdev, format, arg...)                 \
-       pr_err("%s %s: " format, DRV_NAME,              \
-              dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_info(mdev, format, arg...)                        \
-       pr_info("%s %s: " format, DRV_NAME,             \
-               dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_warn(mdev, format, arg...)                        \
-       pr_warning("%s %s: " format, DRV_NAME,          \
-                  dev_name(&mdev->pdev->dev), ##arg)
-
-#endif
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
deleted file mode 100644 (file)
index 9c188bd..0000000
+++ /dev/null
@@ -1,667 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-#include <linux/mlx4/cmd.h>
-
-#include "mlx4.h"
-#include "icm.h"
-
-/*
- * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_mpt_entry {
-       __be32 flags;
-       __be32 qpn;
-       __be32 key;
-       __be32 pd_flags;
-       __be64 start;
-       __be64 length;
-       __be32 lkey;
-       __be32 win_cnt;
-       u8      reserved1[3];
-       u8      mtt_rep;
-       __be64 mtt_seg;
-       __be32 mtt_sz;
-       __be32 entity_size;
-       __be32 first_byte_offset;
-} __packed;
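The __packed annotation matters because the fields before mtt_seg total 4+4+4+4+8+8+4+4+3+1 = 44 bytes; without it the compiler would pad the 64-bit mtt_seg out to offset 48, and every subsequent field would stop matching the hardware's MPT layout.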
-
-#define MLX4_MPT_FLAG_SW_OWNS      (0xfUL << 28)
-#define MLX4_MPT_FLAG_FREE         (0x3UL << 28)
-#define MLX4_MPT_FLAG_MIO          (1 << 17)
-#define MLX4_MPT_FLAG_BIND_ENABLE   (1 << 15)
-#define MLX4_MPT_FLAG_PHYSICAL     (1 <<  9)
-#define MLX4_MPT_FLAG_REGION       (1 <<  8)
-
-#define MLX4_MPT_PD_FLAG_FAST_REG   (1 << 27)
-#define MLX4_MPT_PD_FLAG_RAE       (1 << 28)
-#define MLX4_MPT_PD_FLAG_EN_INV            (3 << 24)
-
-#define MLX4_MPT_STATUS_SW             0xF0
-#define MLX4_MPT_STATUS_HW             0x00
-
-static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
-{
-       int o;
-       int m;
-       u32 seg;
-
-       spin_lock(&buddy->lock);
-
-       for (o = order; o <= buddy->max_order; ++o)
-               if (buddy->num_free[o]) {
-                       m = 1 << (buddy->max_order - o);
-                       seg = find_first_bit(buddy->bits[o], m);
-                       if (seg < m)
-                               goto found;
-               }
-
-       spin_unlock(&buddy->lock);
-       return -1;
-
- found:
-       clear_bit(seg, buddy->bits[o]);
-       --buddy->num_free[o];
-
-       while (o > order) {
-               --o;
-               seg <<= 1;
-               set_bit(seg ^ 1, buddy->bits[o]);
-               ++buddy->num_free[o];
-       }
-
-       spin_unlock(&buddy->lock);
-
-       seg <<= order;
-
-       return seg;
-}
-
-static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
-{
-       seg >>= order;
-
-       spin_lock(&buddy->lock);
-
-       while (test_bit(seg ^ 1, buddy->bits[order])) {
-               clear_bit(seg ^ 1, buddy->bits[order]);
-               --buddy->num_free[order];
-               seg >>= 1;
-               ++order;
-       }
-
-       set_bit(seg, buddy->bits[order]);
-       ++buddy->num_free[order];
-
-       spin_unlock(&buddy->lock);
-}
-
-static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
-{
-       int i, s;
-
-       buddy->max_order = max_order;
-       spin_lock_init(&buddy->lock);
-
-       buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
-                             GFP_KERNEL);
-       buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int),
-                                 GFP_KERNEL);
-       if (!buddy->bits || !buddy->num_free)
-               goto err_out;
-
-       for (i = 0; i <= buddy->max_order; ++i) {
-               s = BITS_TO_LONGS(1 << (buddy->max_order - i));
-               buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
-               if (!buddy->bits[i])
-                       goto err_out_free;
-               bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
-       }
-
-       set_bit(0, buddy->bits[buddy->max_order]);
-       buddy->num_free[buddy->max_order] = 1;
-
-       return 0;
-
-err_out_free:
-       for (i = 0; i <= buddy->max_order; ++i)
-               kfree(buddy->bits[i]);
-
-err_out:
-       kfree(buddy->bits);
-       kfree(buddy->num_free);
-
-       return -ENOMEM;
-}
-
-static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
-{
-       int i;
-
-       for (i = 0; i <= buddy->max_order; ++i)
-               kfree(buddy->bits[i]);
-
-       kfree(buddy->bits);
-       kfree(buddy->num_free);
-}
-
-static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
-{
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
-       u32 seg;
-
-       seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
-       if (seg == -1)
-               return -1;
-
-       if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
-                                seg + (1 << order) - 1)) {
-               mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
-               return -1;
-       }
-
-       return seg;
-}
-
-int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
-                 struct mlx4_mtt *mtt)
-{
-       int i;
-
-       if (!npages) {
-               mtt->order      = -1;
-               mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
-               return 0;
-       } else
-               mtt->page_shift = page_shift;
-
-       for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
-               ++mtt->order;
-
-       mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
-       if (mtt->first_seg == -1)
-               return -ENOMEM;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_mtt_init);
-
-void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
-{
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
-
-       if (mtt->order < 0)
-               return;
-
-       mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
-       mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
-                            mtt->first_seg + (1 << mtt->order) - 1);
-}
-EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
-
-u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
-{
-       return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
-}
-EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
-
-static u32 hw_index_to_key(u32 ind)
-{
-       return (ind >> 24) | (ind << 8);
-}
-
-static u32 key_to_hw_index(u32 key)
-{
-       return (key << 24) | (key >> 8);
-}
-
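The two helpers above are inverse 32-bit rotations by 8 bits: keeping the MPT index in rotated form leaves the low byte of the memory key free to vary per mapping, which mlx4_map_phys_fmr() below exploits by adding num_mpts on every remap while the masked index still selects the same MPT entry. A minimal user-space sketch (the index value is hypothetical):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t hw_index_to_key(uint32_t ind) { return (ind >> 24) | (ind << 8); }
    static uint32_t key_to_hw_index(uint32_t key) { return (key << 24) | (key >> 8); }

    int main(void)
    {
            uint32_t index = 0x00012345;              /* hypothetical MPT index */
            uint32_t key = hw_index_to_key(index);    /* rotate left by 8 */

            assert(key_to_hw_index(key) == index);    /* exact inverses */
            printf("index %#x <-> key %#x\n", index, key);
            return 0;
    }
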
-static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-                         int mpt_index)
-{
-       return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
-                       MLX4_CMD_TIME_CLASS_B);
-}
-
-static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-                         int mpt_index)
-{
-       return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
-                           !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
-}
-
-int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
-                 int npages, int page_shift, struct mlx4_mr *mr)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       u32 index;
-       int err;
-
-       index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
-       if (index == -1)
-               return -ENOMEM;
-
-       mr->iova       = iova;
-       mr->size       = size;
-       mr->pd         = pd;
-       mr->access     = access;
-       mr->enabled    = 0;
-       mr->key        = hw_index_to_key(index);
-
-       err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
-       if (err)
-               mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
-
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int err;
-
-       if (mr->enabled) {
-               err = mlx4_HW2SW_MPT(dev, NULL,
-                                    key_to_hw_index(mr->key) &
-                                    (dev->caps.num_mpts - 1));
-               if (err)
-                       mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
-       }
-
-       mlx4_mtt_cleanup(dev, &mr->mtt);
-       mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
-}
-EXPORT_SYMBOL_GPL(mlx4_mr_free);
-
-int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
-{
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_mpt_entry *mpt_entry;
-       int err;
-
-       err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
-       if (err)
-               return err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox)) {
-               err = PTR_ERR(mailbox);
-               goto err_table;
-       }
-       mpt_entry = mailbox->buf;
-
-       memset(mpt_entry, 0, sizeof *mpt_entry);
-
-       mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO         |
-                                      MLX4_MPT_FLAG_REGION      |
-                                      mr->access);
-
-       mpt_entry->key         = cpu_to_be32(key_to_hw_index(mr->key));
-       mpt_entry->pd_flags    = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
-       mpt_entry->start       = cpu_to_be64(mr->iova);
-       mpt_entry->length      = cpu_to_be64(mr->size);
-       mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
-
-       if (mr->mtt.order < 0) {
-               mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
-               mpt_entry->mtt_seg = 0;
-       } else {
-               mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
-       }
-
-       if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
-               /* fast register MR in free state */
-               mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
-               mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
-                                                  MLX4_MPT_PD_FLAG_RAE);
-               mpt_entry->mtt_sz    = cpu_to_be32((1 << mr->mtt.order) *
-                                                  dev->caps.mtts_per_seg);
-       } else {
-               mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
-       }
-
-       err = mlx4_SW2HW_MPT(dev, mailbox,
-                            key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
-       if (err) {
-               mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
-               goto err_cmd;
-       }
-
-       mr->enabled = 1;
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-
-       return 0;
-
-err_cmd:
-       mlx4_free_cmd_mailbox(dev, mailbox);
-
-err_table:
-       mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_mr_enable);
-
-static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-                               int start_index, int npages, u64 *page_list)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       __be64 *mtts;
-       dma_addr_t dma_handle;
-       int i;
-       int s = start_index * sizeof (u64);
-
-       /* All MTTs must fit in the same page */
-       if (start_index / (PAGE_SIZE / sizeof (u64)) !=
-           (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
-               return -EINVAL;
-
-       if (start_index & (dev->caps.mtts_per_seg - 1))
-               return -EINVAL;
-
-       mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
-                               s / dev->caps.mtt_entry_sz, &dma_handle);
-       if (!mtts)
-               return -ENOMEM;
-
-       dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
-                               npages * sizeof (u64), DMA_TO_DEVICE);
-
-       for (i = 0; i < npages; ++i)
-               mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
-
-       dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
-                                  npages * sizeof (u64), DMA_TO_DEVICE);
-
-       return 0;
-}
-
-int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-                  int start_index, int npages, u64 *page_list)
-{
-       int chunk;
-       int err;
-
-       if (mtt->order < 0)
-               return -EINVAL;
-
-       while (npages > 0) {
-               chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
-               err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
-               if (err)
-                       return err;
-
-               npages      -= chunk;
-               start_index += chunk;
-               page_list   += chunk;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_write_mtt);
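mlx4_write_mtt_chunk() above refuses to span two pages of the MTT table, so mlx4_write_mtt() feeds it at most PAGE_SIZE / sizeof(u64) entries at a time. The same loop shape as a self-contained sketch; CHUNK stands in for that limit:

    #include <stdio.h>

    #define CHUNK 8   /* stand-in for PAGE_SIZE / sizeof(u64) */

    static int write_chunk(int start, int n)
    {
            printf("write entries [%d, %d)\n", start, start + n);
            return 0;
    }

    int main(void)
    {
            int start = 0, npages = 19;

            while (npages > 0) {
                    int chunk = npages < CHUNK ? npages : CHUNK;

                    if (write_chunk(start, chunk))
                            return 1;
                    npages -= chunk;
                    start  += chunk;
            }
            return 0;
    }
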
-
-int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-                      struct mlx4_buf *buf)
-{
-       u64 *page_list;
-       int err;
-       int i;
-
-       page_list = kmalloc(buf->npages * sizeof *page_list, GFP_KERNEL);
-       if (!page_list)
-               return -ENOMEM;
-
-       for (i = 0; i < buf->npages; ++i)
-               if (buf->nbufs == 1)
-                       page_list[i] = buf->direct.map + (i << buf->page_shift);
-               else
-                       page_list[i] = buf->page_list[i].map;
-
-       err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
-
-       kfree(page_list);
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
-
-int mlx4_init_mr_table(struct mlx4_dev *dev)
-{
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
-       int err;
-
-       err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
-                              ~0, dev->caps.reserved_mrws, 0);
-       if (err)
-               return err;
-
-       err = mlx4_buddy_init(&mr_table->mtt_buddy,
-                             ilog2(dev->caps.num_mtt_segs));
-       if (err)
-               goto err_buddy;
-
-       if (dev->caps.reserved_mtts) {
-               if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
-                       mlx4_warn(dev, "MTT table of order %d is too small.\n",
-                                 mr_table->mtt_buddy.max_order);
-                       err = -ENOMEM;
-                       goto err_reserve_mtts;
-               }
-       }
-
-       return 0;
-
-err_reserve_mtts:
-       mlx4_buddy_cleanup(&mr_table->mtt_buddy);
-
-err_buddy:
-       mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
-
-       return err;
-}
-
-void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
-{
-       struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
-
-       mlx4_buddy_cleanup(&mr_table->mtt_buddy);
-       mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
-}
-
-static inline int mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list,
-                                 int npages, u64 iova)
-{
-       int i, page_mask;
-
-       if (npages > fmr->max_pages)
-               return -EINVAL;
-
-       page_mask = (1 << fmr->page_shift) - 1;
-
-       /* We are getting page lists, so the iova must be page aligned. */
-       if (iova & page_mask)
-               return -EINVAL;
-
-       /* Trust the user not to pass misaligned data in page_list */
-       if (0)
-               for (i = 0; i < npages; ++i) {
-                       if (page_list[i] & ~page_mask)
-                               return -EINVAL;
-               }
-
-       if (fmr->maps >= fmr->max_maps)
-               return -EINVAL;
-
-       return 0;
-}
-
-int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
-                     int npages, u64 iova, u32 *lkey, u32 *rkey)
-{
-       u32 key;
-       int i, err;
-
-       err = mlx4_check_fmr(fmr, page_list, npages, iova);
-       if (err)
-               return err;
-
-       ++fmr->maps;
-
-       key = key_to_hw_index(fmr->mr.key);
-       key += dev->caps.num_mpts;
-       *lkey = *rkey = fmr->mr.key = hw_index_to_key(key);
-
-       *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
-
-       /* Make sure MPT status is visible before writing MTT entries */
-       wmb();
-
-       dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
-                               npages * sizeof(u64), DMA_TO_DEVICE);
-
-       for (i = 0; i < npages; ++i)
-               fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
-
-       dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
-                                  npages * sizeof(u64), DMA_TO_DEVICE);
-
-       fmr->mpt->key    = cpu_to_be32(key);
-       fmr->mpt->lkey   = cpu_to_be32(key);
-       fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift));
-       fmr->mpt->start  = cpu_to_be64(iova);
-
-       /* Make sure MTT entries are visible before setting MPT status */
-       wmb();
-
-       *(u8 *) fmr->mpt = MLX4_MPT_STATUS_HW;
-
-       /* Make sure MPT status is visible before consumer can use FMR */
-       wmb();
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_map_phys_fmr);
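The remap path above is a three-step ownership handoff: mark the MPT software-owned, rewrite the MTT entries and MPT fields, then mark it hardware-owned again, with a write barrier after each step. A user-space sketch of that ordering, with C11 release fences standing in for the kernel's wmb(); the struct and constants here are illustrative, not the hardware layout:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MPT_STATUS_SW 0xF0
    #define MPT_STATUS_HW 0x00

    struct mock_mpt {                         /* illustrative layout */
            uint8_t  status;
            uint64_t start;
            uint64_t length;
    };

    static void remap(struct mock_mpt *mpt, uint64_t *mtts,
                      const uint64_t *pages, int npages, uint64_t iova)
    {
            mpt->status = MPT_STATUS_SW;      /* take the entry away from HW */
            atomic_thread_fence(memory_order_release);

            for (int i = 0; i < npages; i++)
                    mtts[i] = pages[i] | 1;   /* bit 0 plays MTT_FLAG_PRESENT */
            mpt->start  = iova;
            mpt->length = (uint64_t)npages * 4096;
            atomic_thread_fence(memory_order_release);

            mpt->status = MPT_STATUS_HW;      /* hand it back in one store */
            atomic_thread_fence(memory_order_release);
    }

    int main(void)
    {
            struct mock_mpt mpt = { 0 };
            uint64_t mtts[2], pages[2] = { 0x1000, 0x2000 };

            remap(&mpt, mtts, pages, 2, 0x10000);
            printf("status %#x start %#llx\n", mpt.status,
                   (unsigned long long)mpt.start);
            return 0;
    }
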
-
-int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
-                  int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int err = -ENOMEM;
-
-       if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
-               return -EINVAL;
-
-       /* All MTTs must fit in the same page */
-       if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
-               return -EINVAL;
-
-       fmr->page_shift = page_shift;
-       fmr->max_pages  = max_pages;
-       fmr->max_maps   = max_maps;
-       fmr->maps = 0;
-
-       err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages,
-                           page_shift, &fmr->mr);
-       if (err)
-               return err;
-
-       fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
-                                   fmr->mr.mtt.first_seg,
-                                   &fmr->dma_handle);
-       if (!fmr->mtts) {
-               err = -ENOMEM;
-               goto err_free;
-       }
-
-       return 0;
-
-err_free:
-       mlx4_mr_free(dev, &fmr->mr);
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
-
-int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int err;
-
-       err = mlx4_mr_enable(dev, &fmr->mr);
-       if (err)
-               return err;
-
-       fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
-                                   key_to_hw_index(fmr->mr.key), NULL);
-       if (!fmr->mpt)
-               return -ENOMEM;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
-
-void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
-                   u32 *lkey, u32 *rkey)
-{
-       if (!fmr->maps)
-               return;
-
-       fmr->maps = 0;
-
-       *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
-
-int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
-{
-       if (fmr->maps)
-               return -EBUSY;
-
-       fmr->mr.enabled = 0;
-       mlx4_mr_free(dev, &fmr->mr);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_fmr_free);
-
-int mlx4_SYNC_TPT(struct mlx4_dev *dev)
-{
-       return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
-}
-EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
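That closes mr.c. Its MTT allocator is a classic binary buddy: mlx4_mtt_init() picks the smallest order such that mtts_per_seg << order covers npages, and mlx4_buddy_free() merges a freed block with its buddy at seg ^ 1 per order. Both pieces of arithmetic in a compact sketch, with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
            int mtts_per_seg = 8, npages = 13;   /* hypothetical values */
            int order = 0;

            /* same loop as mlx4_mtt_init(): smallest order with
             * mtts_per_seg << order >= npages */
            for (int i = mtts_per_seg; i < npages; i <<= 1)
                    ++order;
            printf("npages=%d -> order %d (%d MTTs)\n",
                   npages, order, mtts_per_seg << order);

            /* buddy pairing as in mlx4_buddy_free(): at a given order,
             * block s and block s ^ 1 merge into one block at order + 1 */
            unsigned seg = 12;
            unsigned s = seg >> 2;               /* view seg at order 2 */
            printf("order-2 block %u merges with buddy %u\n", s, s ^ 1);
            return 0;
    }
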
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
deleted file mode 100644 (file)
index 1286b88..0000000
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/io-mapping.h>
-
-#include <asm/page.h>
-
-#include "mlx4.h"
-#include "icm.h"
-
-enum {
-       MLX4_NUM_RESERVED_UARS = 8
-};
-
-int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
-       if (*pdn == -1)
-               return -ENOMEM;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
-
-void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
-{
-       mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn);
-}
-EXPORT_SYMBOL_GPL(mlx4_pd_free);
-
-int mlx4_init_pd_table(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
-                               (1 << 24) - 1, dev->caps.reserved_pds, 0);
-}
-
-void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
-{
-       mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
-}
-
-int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
-{
-       uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
-       if (uar->index == -1)
-               return -ENOMEM;
-
-       uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
-       uar->map = NULL;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
-
-void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
-{
-       mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index);
-}
-EXPORT_SYMBOL_GPL(mlx4_uar_free);
-
-int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_uar *uar;
-       int err = 0;
-       int idx;
-
-       if (!priv->bf_mapping)
-               return -ENOMEM;
-
-       mutex_lock(&priv->bf_mutex);
-       if (!list_empty(&priv->bf_list))
-               uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list);
-       else {
-               if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) {
-                       err = -ENOMEM;
-                       goto out;
-               }
-               uar = kmalloc(sizeof *uar, GFP_KERNEL);
-               if (!uar) {
-                       err = -ENOMEM;
-                       goto out;
-               }
-               err = mlx4_uar_alloc(dev, uar);
-               if (err)
-                       goto free_kmalloc;
-
-               uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
-               if (!uar->map) {
-                       err = -ENOMEM;
-                       goto free_uar;
-               }
-
-               uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
-               if (!uar->bf_map) {
-                       err = -ENOMEM;
-                       goto unmap_uar;
-               }
-               uar->free_bf_bmap = 0;
-               list_add(&uar->bf_list, &priv->bf_list);
-       }
-
-       bf->uar = uar;
-       idx = ffz(uar->free_bf_bmap);
-       uar->free_bf_bmap |= 1 << idx;
-       bf->offset = 0;
-       bf->buf_size = dev->caps.bf_reg_size / 2;
-       bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size;
-       if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1)
-               list_del_init(&uar->bf_list);
-
-       goto out;
-
-unmap_uar:
-       bf->uar = NULL;
-       iounmap(uar->map);
-
-free_uar:
-       mlx4_uar_free(dev, uar);
-
-free_kmalloc:
-       kfree(uar);
-
-out:
-       mutex_unlock(&priv->bf_mutex);
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_bf_alloc);
-
-void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int idx;
-
-       if (!bf->uar || !bf->uar->bf_map)
-               return;
-
-       mutex_lock(&priv->bf_mutex);
-       idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size;
-       bf->uar->free_bf_bmap &= ~(1 << idx);
-       if (!bf->uar->free_bf_bmap) {
-               if (!list_empty(&bf->uar->bf_list))
-                       list_del(&bf->uar->bf_list);
-
-               io_mapping_unmap(bf->uar->bf_map);
-               iounmap(bf->uar->map);
-               mlx4_uar_free(dev, bf->uar);
-               kfree(bf->uar);
-       } else if (list_empty(&bf->uar->bf_list))
-               list_add(&bf->uar->bf_list, &priv->bf_list);
-
-       mutex_unlock(&priv->bf_mutex);
-}
-EXPORT_SYMBOL_GPL(mlx4_bf_free);
-
-int mlx4_init_uar_table(struct mlx4_dev *dev)
-{
-       if (dev->caps.num_uars <= 128) {
-               mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
-                        dev->caps.num_uars);
-               mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
-               return -ENODEV;
-       }
-
-       return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
-                               dev->caps.num_uars, dev->caps.num_uars - 1,
-                               max(128, dev->caps.reserved_uars), 0);
-}
-
-void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
-{
-       mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap);
-}
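That closes pd.c. Its BlueFlame allocator shares one UAR page among bf_regs_per_page slots, using ffz() on free_bf_bmap to pick the next free slot and dropping the UAR from bf_list once the bitmap is full. A user-space sketch, with ffs(~x) standing in for the kernel's ffz() and a made-up slot count (the real one comes from firmware caps):

    #include <stdio.h>
    #include <strings.h>

    /* ffz() stand-in: index of the first zero bit */
    static int first_zero_bit(unsigned long x)
    {
            return ffs((int)~x) - 1;
    }

    int main(void)
    {
            unsigned long free_bf_bmap = 0;
            int bf_regs_per_page = 4;            /* hypothetical cap */

            for (int i = 0; i < bf_regs_per_page; i++) {
                    int idx = first_zero_bit(free_bf_bmap);

                    free_bf_bmap |= 1UL << idx;
                    printf("slot %d taken, bitmap %#lx\n", idx, free_bf_bmap);
            }
            if (free_bf_bmap == (1UL << bf_regs_per_page) - 1)
                    printf("page full: driver drops the UAR from bf_list\n");
            return 0;
    }
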
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
deleted file mode 100644 (file)
index 609e0ec..0000000
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/if_ether.h>
-
-#include <linux/mlx4/cmd.h>
-
-#include "mlx4.h"
-
-#define MLX4_MAC_VALID         (1ull << 63)
-#define MLX4_MAC_MASK          0xffffffffffffULL
-
-#define MLX4_VLAN_VALID                (1u << 31)
-#define MLX4_VLAN_MASK         0xfff
-
-void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
-{
-       int i;
-
-       mutex_init(&table->mutex);
-       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-               table->entries[i] = 0;
-               table->refs[i]   = 0;
-       }
-       table->max   = 1 << dev->caps.log_num_macs;
-       table->total = 0;
-}
-
-void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
-{
-       int i;
-
-       mutex_init(&table->mutex);
-       for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
-               table->entries[i] = 0;
-               table->refs[i]   = 0;
-       }
-       table->max   = 1 << dev->caps.log_num_vlans;
-       table->total = 0;
-}
-
-static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
-                                  __be64 *entries)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       u32 in_mod;
-       int err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-
-       memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
-
-       in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
-                            u64 mac, int *qpn, u8 reserve)
-{
-       struct mlx4_qp qp;
-       u8 gid[16] = {0};
-       int err;
-
-       if (reserve) {
-               err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
-               if (err) {
-                       mlx4_err(dev, "Failed to reserve qp for mac registration\n");
-                       return err;
-               }
-       }
-       qp.qpn = *qpn;
-
-       mac &= 0xffffffffffffULL;
-       mac = cpu_to_be64(mac << 16);
-       memcpy(&gid[10], &mac, ETH_ALEN);
-       gid[5] = port;
-       gid[7] = MLX4_UC_STEER << 1;
-
-       err = mlx4_qp_attach_common(dev, &qp, gid, 0,
-                                   MLX4_PROT_ETH, MLX4_UC_STEER);
-       if (err && reserve)
-               mlx4_qp_release_range(dev, *qpn, 1);
-
-       return err;
-}
-
-static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
-                                 u64 mac, int qpn, u8 free)
-{
-       struct mlx4_qp qp;
-       u8 gid[16] = {0};
-
-       qp.qpn = qpn;
-       mac &= 0xffffffffffffULL;
-       mac = cpu_to_be64(mac << 16);
-       memcpy(&gid[10], &mac, ETH_ALEN);
-       gid[5] = port;
-       gid[7] = MLX4_UC_STEER << 1;
-
-       mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
-       if (free)
-               mlx4_qp_release_range(dev, qpn, 1);
-}
-
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
-{
-       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-       struct mlx4_mac_table *table = &info->mac_table;
-       struct mlx4_mac_entry *entry;
-       int i, err = 0;
-       int free = -1;
-
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
-               err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
-               if (!err) {
-                       entry = kmalloc(sizeof *entry, GFP_KERNEL);
-                       if (!entry) {
-                               mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
-                               return -ENOMEM;
-                       }
-                       entry->mac = mac;
-                       err = radix_tree_insert(&info->mac_tree, *qpn, entry);
-                       if (err) {
-                               mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
-                               return err;
-                       }
-               } else
-                       return err;
-       }
-       mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
-       mutex_lock(&table->mutex);
-       for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
-               if (free < 0 && !table->refs[i]) {
-                       free = i;
-                       continue;
-               }
-
-               if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
-                       /* MAC already registered, increase reference count */
-                       ++table->refs[i];
-                       goto out;
-               }
-       }
-
-       if (free < 0) {
-               err = -ENOMEM;
-               goto out;
-       }
-
-       mlx4_dbg(dev, "Free MAC index is %d\n", free);
-
-       if (table->total == table->max) {
-               /* No free mac entries */
-               err = -ENOSPC;
-               goto out;
-       }
-
-       /* Register new MAC */
-       table->refs[free] = 1;
-       table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
-
-       err = mlx4_set_port_mac_table(dev, port, table->entries);
-       if (unlikely(err)) {
-               mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
-               table->refs[free] = 0;
-               table->entries[free] = 0;
-               goto out;
-       }
-
-       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
-               *qpn = info->base_qpn + free;
-       ++table->total;
-out:
-       mutex_unlock(&table->mutex);
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_register_mac);
-
-static int validate_index(struct mlx4_dev *dev,
-                         struct mlx4_mac_table *table, int index)
-{
-       int err = 0;
-
-       if (index < 0 || index >= table->max || !table->entries[index]) {
-               mlx4_warn(dev, "No valid Mac entry for the given index\n");
-               err = -EINVAL;
-       }
-       return err;
-}
-
-static int find_index(struct mlx4_dev *dev,
-                     struct mlx4_mac_table *table, u64 mac)
-{
-       int i;
-       for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-               if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
-                       return i;
-       }
-       /* MAC not found */
-       return -EINVAL;
-}
-
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
-{
-       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-       struct mlx4_mac_table *table = &info->mac_table;
-       int index = qpn - info->base_qpn;
-       struct mlx4_mac_entry *entry;
-
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
-               entry = radix_tree_lookup(&info->mac_tree, qpn);
-               if (entry) {
-                       mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
-                       radix_tree_delete(&info->mac_tree, qpn);
-                       index = find_index(dev, table, entry->mac);
-                       kfree(entry);
-               }
-       }
-
-       mutex_lock(&table->mutex);
-
-       if (validate_index(dev, table, index))
-               goto out;
-
-       /* Check whether this address has reference count */
-       if (!(--table->refs[index])) {
-               table->entries[index] = 0;
-               mlx4_set_port_mac_table(dev, port, table->entries);
-               --table->total;
-       }
-out:
-       mutex_unlock(&table->mutex);
-}
-EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
-
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
-{
-       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-       struct mlx4_mac_table *table = &info->mac_table;
-       int index = qpn - info->base_qpn;
-       struct mlx4_mac_entry *entry;
-       int err;
-
-       if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
-               entry = radix_tree_lookup(&info->mac_tree, qpn);
-               if (!entry)
-                       return -EINVAL;
-               index = find_index(dev, table, entry->mac);
-               mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
-               entry->mac = new_mac;
-               err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
-               if (err || index < 0)
-                       return err;
-       }
-
-       mutex_lock(&table->mutex);
-
-       err = validate_index(dev, table, index);
-       if (err)
-               goto out;
-
-       table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
-
-       err = mlx4_set_port_mac_table(dev, port, table->entries);
-       if (unlikely(err)) {
-               mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
-               table->entries[index] = 0;
-       }
-out:
-       mutex_unlock(&table->mutex);
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_replace_mac);
-
-static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
-                                   __be32 *entries)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       u32 in_mod;
-       int err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-
-       memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
-       in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
-       err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-
-       return err;
-}
-
-int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
-{
-       struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
-       int i;
-
-       for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
-               if (table->refs[i] &&
-                   (vid == (MLX4_VLAN_MASK &
-                             be32_to_cpu(table->entries[i])))) {
-                       /* VLAN already registered, return its index */
-                       *idx = i;
-                       return 0;
-               }
-       }
-
-       return -ENOENT;
-}
-EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
-
-int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
-{
-       struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
-       int i, err = 0;
-       int free = -1;
-
-       mutex_lock(&table->mutex);
-       for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
-               if (free < 0 && (table->refs[i] == 0)) {
-                       free = i;
-                       continue;
-               }
-
-               if (table->refs[i] &&
-                   (vlan == (MLX4_VLAN_MASK &
-                             be32_to_cpu(table->entries[i])))) {
-                       /* VLAN already registered, increase reference count */
-                       *index = i;
-                       ++table->refs[i];
-                       goto out;
-               }
-       }
-
-       if (free < 0) {
-               err = -ENOMEM;
-               goto out;
-       }
-
-       if (table->total == table->max) {
-               /* No free vlan entries */
-               err = -ENOSPC;
-               goto out;
-       }
-
-       /* Register new VLAN */
-       table->refs[free] = 1;
-       table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
-
-       err = mlx4_set_port_vlan_table(dev, port, table->entries);
-       if (unlikely(err)) {
-               mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
-               table->refs[free] = 0;
-               table->entries[free] = 0;
-               goto out;
-       }
-
-       *index = free;
-       ++table->total;
-out:
-       mutex_unlock(&table->mutex);
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_register_vlan);
-
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
-{
-       struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
-
-       if (index < MLX4_VLAN_REGULAR) {
-               mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
-               return;
-       }
-
-       mutex_lock(&table->mutex);
-       if (!table->refs[index]) {
-               mlx4_warn(dev, "No vlan entry for index %d\n", index);
-               goto out;
-       }
-       if (--table->refs[index]) {
-               mlx4_dbg(dev, "Have more references for index %d,"
-                        "no need to modify vlan table\n", index);
-               goto out;
-       }
-       table->entries[index] = 0;
-       mlx4_set_port_vlan_table(dev, port, table->entries);
-       --table->total;
-out:
-       mutex_unlock(&table->mutex);
-}
-EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
-
-int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
-{
-       struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
-       u8 *inbuf, *outbuf;
-       int err;
-
-       inmailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(inmailbox))
-               return PTR_ERR(inmailbox);
-
-       outmailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(outmailbox)) {
-               mlx4_free_cmd_mailbox(dev, inmailbox);
-               return PTR_ERR(outmailbox);
-       }
-
-       inbuf = inmailbox->buf;
-       outbuf = outmailbox->buf;
-       memset(inbuf, 0, 256);
-       memset(outbuf, 0, 256);
-       inbuf[0] = 1;
-       inbuf[1] = 1;
-       inbuf[2] = 1;
-       inbuf[3] = 1;
-       *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
-       *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
-
-       err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
-                          MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
-       if (!err)
-               *caps = *(__be32 *) (outbuf + 84);
-       mlx4_free_cmd_mailbox(dev, inmailbox);
-       mlx4_free_cmd_mailbox(dev, outmailbox);
-       return err;
-}
-
-int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       int err;
-
-       if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
-               return 0;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-
-       memset(mailbox->buf, 0, 256);
-
-       ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
-       err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
-                      MLX4_CMD_TIME_CLASS_B);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
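That closes port.c. Both the MAC and VLAN registration paths share one pattern: scan the table once, remembering the first free slot, and bump a reference count if the address is already present. A sketch of that scan (table size and MAC value are made up; the driver additionally stores entries big-endian with the valid bit set):

    #include <stdint.h>
    #include <stdio.h>

    #define MAC_VALID (1ull << 63)
    #define MAC_MASK  0xffffffffffffULL
    #define TABLE_SZ  4

    static uint64_t entries[TABLE_SZ];
    static int refs[TABLE_SZ];

    static int register_mac(uint64_t mac)
    {
            int free = -1;

            for (int i = 0; i < TABLE_SZ; i++) {
                    if (free < 0 && !refs[i]) {
                            free = i;             /* first free slot */
                            continue;
                    }
                    if (mac == (entries[i] & MAC_MASK)) {
                            ++refs[i];            /* already registered */
                            return i;
                    }
            }
            if (free < 0)
                    return -1;                    /* table full */
            refs[free] = 1;
            entries[free] = mac | MAC_VALID;
            return free;
    }

    int main(void)
    {
            printf("first:  index %d\n", register_mac(0x0002c9aabbccULL));
            printf("second: index %d (refcount bumped)\n",
                   register_mac(0x0002c9aabbccULL));
            return 0;
    }
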
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
deleted file mode 100644 (file)
index b967647..0000000
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/slab.h>
-
-#include "mlx4.h"
-#include "fw.h"
-
-enum {
-       MLX4_RES_QP,
-       MLX4_RES_RDMARC,
-       MLX4_RES_ALTC,
-       MLX4_RES_AUXC,
-       MLX4_RES_SRQ,
-       MLX4_RES_CQ,
-       MLX4_RES_EQ,
-       MLX4_RES_DMPT,
-       MLX4_RES_CMPT,
-       MLX4_RES_MTT,
-       MLX4_RES_MCG,
-       MLX4_RES_NUM
-};
-
-static const char *res_name[] = {
-       [MLX4_RES_QP]           = "QP",
-       [MLX4_RES_RDMARC]       = "RDMARC",
-       [MLX4_RES_ALTC]         = "ALTC",
-       [MLX4_RES_AUXC]         = "AUXC",
-       [MLX4_RES_SRQ]          = "SRQ",
-       [MLX4_RES_CQ]           = "CQ",
-       [MLX4_RES_EQ]           = "EQ",
-       [MLX4_RES_DMPT]         = "DMPT",
-       [MLX4_RES_CMPT]         = "CMPT",
-       [MLX4_RES_MTT]          = "MTT",
-       [MLX4_RES_MCG]          = "MCG",
-};
-
-u64 mlx4_make_profile(struct mlx4_dev *dev,
-                     struct mlx4_profile *request,
-                     struct mlx4_dev_cap *dev_cap,
-                     struct mlx4_init_hca_param *init_hca)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_resource {
-               u64 size;
-               u64 start;
-               int type;
-               int num;
-               int log_num;
-       };
-
-       u64 total_size = 0;
-       struct mlx4_resource *profile;
-       struct mlx4_resource tmp;
-       int i, j;
-
-       profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
-       if (!profile)
-               return -ENOMEM;
-
-       profile[MLX4_RES_QP].size     = dev_cap->qpc_entry_sz;
-       profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
-       profile[MLX4_RES_ALTC].size   = dev_cap->altc_entry_sz;
-       profile[MLX4_RES_AUXC].size   = dev_cap->aux_entry_sz;
-       profile[MLX4_RES_SRQ].size    = dev_cap->srq_entry_sz;
-       profile[MLX4_RES_CQ].size     = dev_cap->cqc_entry_sz;
-       profile[MLX4_RES_EQ].size     = dev_cap->eqc_entry_sz;
-       profile[MLX4_RES_DMPT].size   = dev_cap->dmpt_entry_sz;
-       profile[MLX4_RES_CMPT].size   = dev_cap->cmpt_entry_sz;
-       profile[MLX4_RES_MTT].size    = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
-       profile[MLX4_RES_MCG].size    = MLX4_MGM_ENTRY_SIZE;
-
-       profile[MLX4_RES_QP].num      = request->num_qp;
-       profile[MLX4_RES_RDMARC].num  = request->num_qp * request->rdmarc_per_qp;
-       profile[MLX4_RES_ALTC].num    = request->num_qp;
-       profile[MLX4_RES_AUXC].num    = request->num_qp;
-       profile[MLX4_RES_SRQ].num     = request->num_srq;
-       profile[MLX4_RES_CQ].num      = request->num_cq;
-       profile[MLX4_RES_EQ].num      = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
-       profile[MLX4_RES_DMPT].num    = request->num_mpt;
-       profile[MLX4_RES_CMPT].num    = MLX4_NUM_CMPTS;
-       profile[MLX4_RES_MTT].num     = request->num_mtt;
-       profile[MLX4_RES_MCG].num     = request->num_mcg;
-
-       for (i = 0; i < MLX4_RES_NUM; ++i) {
-               profile[i].type     = i;
-               profile[i].num      = roundup_pow_of_two(profile[i].num);
-               profile[i].log_num  = ilog2(profile[i].num);
-               profile[i].size    *= profile[i].num;
-               profile[i].size     = max(profile[i].size, (u64) PAGE_SIZE);
-       }
-
-       /*
-        * Sort the resources in decreasing order of size.  Since they
-        * all have sizes that are powers of 2, we'll be able to keep
-        * resources aligned to their size and pack them without gaps
-        * using the sorted order.
-        */
-       for (i = MLX4_RES_NUM; i > 0; --i)
-               for (j = 1; j < i; ++j) {
-                       if (profile[j].size > profile[j - 1].size) {
-                               tmp            = profile[j];
-                               profile[j]     = profile[j - 1];
-                               profile[j - 1] = tmp;
-                       }
-               }
-
-       for (i = 0; i < MLX4_RES_NUM; ++i) {
-               if (profile[i].size) {
-                       profile[i].start = total_size;
-                       total_size      += profile[i].size;
-               }
-
-               if (total_size > dev_cap->max_icm_sz) {
-                       mlx4_err(dev, "Profile requires 0x%llx bytes; "
-                                 "won't fit in 0x%llx bytes of context memory.\n",
-                                 (unsigned long long) total_size,
-                                 (unsigned long long) dev_cap->max_icm_sz);
-                       kfree(profile);
-                       return -ENOMEM;
-               }
-
-               if (profile[i].size)
-                       mlx4_dbg(dev, "  profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
-                                 "size 0x%10llx\n",
-                                i, res_name[profile[i].type], profile[i].log_num,
-                                (unsigned long long) profile[i].start,
-                                (unsigned long long) profile[i].size);
-       }
-
-       mlx4_dbg(dev, "HCA context memory: reserving %d KB\n",
-                (int) (total_size >> 10));
-
-       for (i = 0; i < MLX4_RES_NUM; ++i) {
-               switch (profile[i].type) {
-               case MLX4_RES_QP:
-                       dev->caps.num_qps     = profile[i].num;
-                       init_hca->qpc_base    = profile[i].start;
-                       init_hca->log_num_qps = profile[i].log_num;
-                       break;
-               case MLX4_RES_RDMARC:
-                       for (priv->qp_table.rdmarc_shift = 0;
-                            request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num;
-                            ++priv->qp_table.rdmarc_shift)
-                               ; /* nothing */
-                       dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift;
-                       priv->qp_table.rdmarc_base   = (u32) profile[i].start;
-                       init_hca->rdmarc_base        = profile[i].start;
-                       init_hca->log_rd_per_qp      = priv->qp_table.rdmarc_shift;
-                       break;
-               case MLX4_RES_ALTC:
-                       init_hca->altc_base = profile[i].start;
-                       break;
-               case MLX4_RES_AUXC:
-                       init_hca->auxc_base = profile[i].start;
-                       break;
-               case MLX4_RES_SRQ:
-                       dev->caps.num_srqs     = profile[i].num;
-                       init_hca->srqc_base    = profile[i].start;
-                       init_hca->log_num_srqs = profile[i].log_num;
-                       break;
-               case MLX4_RES_CQ:
-                       dev->caps.num_cqs     = profile[i].num;
-                       init_hca->cqc_base    = profile[i].start;
-                       init_hca->log_num_cqs = profile[i].log_num;
-                       break;
-               case MLX4_RES_EQ:
-                       dev->caps.num_eqs     = profile[i].num;
-                       init_hca->eqc_base    = profile[i].start;
-                       init_hca->log_num_eqs = profile[i].log_num;
-                       break;
-               case MLX4_RES_DMPT:
-                       dev->caps.num_mpts      = profile[i].num;
-                       priv->mr_table.mpt_base = profile[i].start;
-                       init_hca->dmpt_base     = profile[i].start;
-                       init_hca->log_mpt_sz    = profile[i].log_num;
-                       break;
-               case MLX4_RES_CMPT:
-                       init_hca->cmpt_base      = profile[i].start;
-                       break;
-               case MLX4_RES_MTT:
-                       dev->caps.num_mtt_segs   = profile[i].num;
-                       priv->mr_table.mtt_base  = profile[i].start;
-                       init_hca->mtt_base       = profile[i].start;
-                       break;
-               case MLX4_RES_MCG:
-                       dev->caps.num_mgms        = profile[i].num >> 1;
-                       dev->caps.num_amgms       = profile[i].num >> 1;
-                       init_hca->mc_base         = profile[i].start;
-                       init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
-                       init_hca->log_mc_table_sz = profile[i].log_num;
-                       init_hca->log_mc_hash_sz  = profile[i].log_num - 1;
-                       break;
-               default:
-                       break;
-               }
-       }
-
-       /*
-        * PDs don't take any HCA memory, but we assign them as part
-        * of the HCA profile anyway.
-        */
-       dev->caps.num_pds = MLX4_NUM_PDS;
-
-       kfree(profile);
-       return total_size;
-}
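That closes profile.c. The sort in mlx4_make_profile() is what makes the gap-free ICM layout work: every region size is rounded up to a power of two, so placing regions largest-first keeps each start offset naturally aligned to its own size. A small demonstration with hypothetical region sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t sizes[] = { 4096, 65536, 16384, 4096 };  /* hypothetical */
            int n = sizeof(sizes) / sizeof(sizes[0]);
            uint64_t start = 0;

            /* sort descending (selection sort is plenty for a demo) */
            for (int i = 0; i < n; i++)
                    for (int j = i + 1; j < n; j++)
                            if (sizes[j] > sizes[i]) {
                                    uint64_t t = sizes[i];
                                    sizes[i] = sizes[j];
                                    sizes[j] = t;
                            }

            for (int i = 0; i < n; i++) {
                    printf("region %d: start %#7llx size %#7llx %s\n", i,
                           (unsigned long long)start,
                           (unsigned long long)sizes[i],
                           start % sizes[i] ? "(misaligned!)" : "(aligned)");
                    start += sizes[i];
            }
            return 0;
    }
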
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
deleted file mode 100644 (file)
index ec9350e..0000000
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/gfp.h>
-#include <linux/mlx4/cmd.h>
-#include <linux/mlx4/qp.h>
-
-#include "mlx4.h"
-#include "icm.h"
-
-void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
-{
-       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-       struct mlx4_qp *qp;
-
-       spin_lock(&qp_table->lock);
-
-       qp = __mlx4_qp_lookup(dev, qpn);
-       if (qp)
-               atomic_inc(&qp->refcount);
-
-       spin_unlock(&qp_table->lock);
-
-       if (!qp) {
-               mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
-               return;
-       }
-
-       qp->event(qp, event_type);
-
-       if (atomic_dec_and_test(&qp->refcount))
-               complete(&qp->free);
-}
-
-int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-                  enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
-                  struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
-                  int sqd_event, struct mlx4_qp *qp)
-{
-       static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
-               [MLX4_QP_STATE_RST] = {
-                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
-                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
-                       [MLX4_QP_STATE_INIT]    = MLX4_CMD_RST2INIT_QP,
-               },
-               [MLX4_QP_STATE_INIT]  = {
-                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
-                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
-                       [MLX4_QP_STATE_INIT]    = MLX4_CMD_INIT2INIT_QP,
-                       [MLX4_QP_STATE_RTR]     = MLX4_CMD_INIT2RTR_QP,
-               },
-               [MLX4_QP_STATE_RTR]   = {
-                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
-                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
-                       [MLX4_QP_STATE_RTS]     = MLX4_CMD_RTR2RTS_QP,
-               },
-               [MLX4_QP_STATE_RTS]   = {
-                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
-                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
-                       [MLX4_QP_STATE_RTS]     = MLX4_CMD_RTS2RTS_QP,
-                       [MLX4_QP_STATE_SQD]     = MLX4_CMD_RTS2SQD_QP,
-               },
-               [MLX4_QP_STATE_SQD] = {
-                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
-                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
-                       [MLX4_QP_STATE_RTS]     = MLX4_CMD_SQD2RTS_QP,
-                       [MLX4_QP_STATE_SQD]     = MLX4_CMD_SQD2SQD_QP,
-               },
-               [MLX4_QP_STATE_SQER] = {
-                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
-                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
-                       [MLX4_QP_STATE_RTS]     = MLX4_CMD_SQERR2RTS_QP,
-               },
-               [MLX4_QP_STATE_ERR] = {
-                       [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
-                       [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
-               }
-       };
-
-       struct mlx4_cmd_mailbox *mailbox;
-       int ret = 0;
-
-       if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
-           !op[cur_state][new_state])
-               return -EINVAL;
-
-       if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
-               return mlx4_cmd(dev, 0, qp->qpn, 2,
-                               MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-
-       if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
-               u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
-               context->mtt_base_addr_h = mtt_addr >> 32;
-               context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
-               context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
-       }
-
-       *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
-       memcpy(mailbox->buf + 8, context, sizeof *context);
-
-       ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
-               cpu_to_be32(qp->qpn);
-
-       ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
-                      new_state == MLX4_QP_STATE_RST ? 2 : 0,
-                      op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(mlx4_qp_modify);
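The op[][] table above is a dense encoding of the QP state machine: a zero entry rejects the transition before any firmware command is built. The same pattern with a three-state mock:

    #include <stdio.h>

    enum { ST_RST, ST_INIT, ST_RTR, ST_NUM };

    static const char *op[ST_NUM][ST_NUM] = {
            [ST_RST]  = { [ST_INIT] = "RST2INIT" },
            [ST_INIT] = { [ST_RTR]  = "INIT2RTR" },
            [ST_RTR]  = { [ST_RST]  = "2RST" },
    };

    static int modify(int cur, int new)
    {
            if (cur >= ST_NUM || new >= ST_NUM || !op[cur][new])
                    return -1;                   /* -EINVAL in the driver */
            printf("would issue firmware command %s\n", op[cur][new]);
            return 0;
    }

    int main(void)
    {
            modify(ST_RST, ST_INIT);             /* legal transition */
            if (modify(ST_RST, ST_RTR) < 0)      /* skips INIT: rejected */
                    printf("RST -> RTR rejected\n");
            return 0;
    }
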
-
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_qp_table *qp_table = &priv->qp_table;
-       int qpn;
-
-       qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
-       if (qpn == -1)
-               return -ENOMEM;
-
-       *base = qpn;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
-
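-/*
- * Return a QPN range to the bitmap.  Ranges starting inside the
- * firmware-reserved and special QP block at the bottom of QP space
- * are never freed.
- */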
-void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_qp_table *qp_table = &priv->qp_table;
-
-       if (base_qpn < dev->caps.sqp_start + 8)
-               return;
-
-       mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
-}
-EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
-
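-/*
- * Pin the ICM entries backing this QPN (QP context, auxiliary context,
- * alternate path, RDMA responder and cMPT tables) and insert the QP
- * into the radix tree used by the event path.
- */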
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_qp_table *qp_table = &priv->qp_table;
-       int err;
-
-       if (!qpn)
-               return -EINVAL;
-
-       qp->qpn = qpn;
-
-       err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
-       if (err)
-               goto err_out;
-
-       err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
-       if (err)
-               goto err_put_qp;
-
-       err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
-       if (err)
-               goto err_put_auxc;
-
-       err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
-       if (err)
-               goto err_put_altc;
-
-       err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
-       if (err)
-               goto err_put_rdmarc;
-
-       spin_lock_irq(&qp_table->lock);
-       err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
-       spin_unlock_irq(&qp_table->lock);
-       if (err)
-               goto err_put_cmpt;
-
-       atomic_set(&qp->refcount, 1);
-       init_completion(&qp->free);
-
-       return 0;
-
-err_put_cmpt:
-       mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-
-err_put_rdmarc:
-       mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
-
-err_put_altc:
-       mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
-
-err_put_auxc:
-       mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
-
-err_put_qp:
-       mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
-
-err_out:
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
-
-void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
-{
-       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-       unsigned long flags;
-
-       spin_lock_irqsave(&qp_table->lock, flags);
-       radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
-       spin_unlock_irqrestore(&qp_table->lock, flags);
-}
-EXPORT_SYMBOL_GPL(mlx4_qp_remove);
-
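-/*
- * Drop the initial reference, wait for any event handlers still
- * holding the QP, then unpin its ICM entries.  Callers are expected
- * to have removed the QP from the radix tree first via mlx4_qp_remove().
- */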
-void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
-{
-       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-
-       if (atomic_dec_and_test(&qp->refcount))
-               complete(&qp->free);
-       wait_for_completion(&qp->free);
-
-       mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
-       mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
-}
-EXPORT_SYMBOL_GPL(mlx4_qp_free);
-
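-/* Tell the firmware where the block of special QPs begins (0 at cleanup). */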
-static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
-{
-       return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
-                       MLX4_CMD_TIME_CLASS_B);
-}
-
-int mlx4_init_qp_table(struct mlx4_dev *dev)
-{
-       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-       int err;
-       int reserved_from_top = 0;
-
-       spin_lock_init(&qp_table->lock);
-       INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
-
-       /*
-        * We reserve 2 extra QPs per port for the special QPs.  The
-        * block of special QPs must be aligned to a multiple of 8, so
-        * round up.
-        */
-       dev->caps.sqp_start =
-               ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
-
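-       /*
-        * Carve the remaining reserved regions out of the top of QP
-        * space, largest first, and count how many QPNs that takes so
-        * the bitmap can exclude them.
-        */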
-       {
-               int sort[MLX4_NUM_QP_REGION];
-               int i, j, tmp;
-               int last_base = dev->caps.num_qps;
-
-               for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
-                       sort[i] = i;
-
-               for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
-                       for (j = 2; j < i; ++j) {
-                               if (dev->caps.reserved_qps_cnt[sort[j]] >
-                                   dev->caps.reserved_qps_cnt[sort[j - 1]]) {
-                                       tmp             = sort[j];
-                                       sort[j]         = sort[j - 1];
-                                       sort[j - 1]     = tmp;
-                               }
-                       }
-               }
-
-               for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
-                       last_base -= dev->caps.reserved_qps_cnt[sort[i]];
-                       dev->caps.reserved_qps_base[sort[i]] = last_base;
-                       reserved_from_top +=
-                               dev->caps.reserved_qps_cnt[sort[i]];
-               }
-       }
-
-       err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
-                              (1 << 23) - 1, dev->caps.sqp_start + 8,
-                              reserved_from_top);
-       if (err)
-               return err;
-
-       return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
-}
-
-void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
-{
-       mlx4_CONF_SPECIAL_QP(dev, 0);
-       mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
-}
-
-int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
-                 struct mlx4_qp_context *context)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       int err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-
-       err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
-                          MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
-       if (!err)
-               memcpy(context, mailbox->buf + 8, sizeof *context);
-
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_qp_query);
-
-int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
-                    struct mlx4_qp_context *context,
-                    struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
-{
-       int err;
-       int i;
-       enum mlx4_qp_state states[] = {
-               MLX4_QP_STATE_RST,
-               MLX4_QP_STATE_INIT,
-               MLX4_QP_STATE_RTR,
-               MLX4_QP_STATE_RTS
-       };
-
-       for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
-               context->flags &= cpu_to_be32(~(0xf << 28));
-               context->flags |= cpu_to_be32(states[i + 1] << 28);
-               err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
-                                    context, 0, 0, qp);
-               if (err) {
-                       mlx4_err(dev, "Failed to bring QP to state %d "
-                                "(error %d)\n", states[i + 1], err);
-                       return err;
-               }
-
-               *qp_state = states[i + 1];
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
deleted file mode 100644 (file)
index 11e7c1c..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
- * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-
-#include "mlx4.h"
-
-int mlx4_reset(struct mlx4_dev *dev)
-{
-       void __iomem *reset;
-       u32 *hca_header = NULL;
-       int pcie_cap;
-       u16 devctl;
-       u16 linkctl;
-       u16 vendor;
-       unsigned long end;
-       u32 sem;
-       int i;
-       int err = 0;
-
-#define MLX4_RESET_BASE                0xf0000
-#define MLX4_RESET_SIZE                  0x400
-#define MLX4_SEM_OFFSET                  0x3fc
-#define MLX4_RESET_OFFSET         0x10
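-/* The reset register expects big-endian, hence the byte swap before writel(). */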
-#define MLX4_RESET_VALUE       swab32(1)
-
-#define MLX4_SEM_TIMEOUT_JIFFIES       (10 * HZ)
-#define MLX4_RESET_TIMEOUT_JIFFIES     (2 * HZ)
-
-       /*
-        * Reset the chip.  This is somewhat ugly because we have to
-        * save off the PCI header before reset and then restore it
-        * after the chip reboots.  We skip config space offsets 22
-        * and 23 since those have a special meaning.
-        */
-
-       /*
-        * Do we need to save off the full 4K PCI Express header?  For
-        * now only the 256-byte legacy config space is saved.
-        */
-       hca_header = kmalloc(256, GFP_KERNEL);
-       if (!hca_header) {
-               err = -ENOMEM;
-               mlx4_err(dev, "Couldn't allocate memory to save HCA "
-                         "PCI header, aborting.\n");
-               goto out;
-       }
-
-       pcie_cap = pci_pcie_cap(dev->pdev);
-
-       for (i = 0; i < 64; ++i) {
-               if (i == 22 || i == 23)
-                       continue;
-               if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
-                       err = -ENODEV;
-                       mlx4_err(dev, "Couldn't save HCA "
-                                 "PCI header, aborting.\n");
-                       goto out;
-               }
-       }
-
-       reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
-                       MLX4_RESET_SIZE);
-       if (!reset) {
-               err = -ENOMEM;
-               mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
-               goto out;
-       }
-
-       /* grab HW semaphore to lock out flash updates */
-       end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES;
-       do {
-               sem = readl(reset + MLX4_SEM_OFFSET);
-               if (!sem)
-                       break;
-
-               msleep(1);
-       } while (time_before(jiffies, end));
-
-       if (sem) {
-               mlx4_err(dev, "Failed to obtain HW semaphore, aborting.\n");
-               err = -EAGAIN;
-               iounmap(reset);
-               goto out;
-       }
-
-       /* actually hit reset */
-       writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
-       iounmap(reset);
-
-       /* Docs say to wait one second before accessing device */
-       msleep(1000);
-
-       end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
-       do {
-               if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
-                   vendor != 0xffff)
-                       break;
-
-               msleep(1);
-       } while (time_before(jiffies, end));
-
-       if (vendor == 0xffff) {
-               err = -ENODEV;
-               mlx4_err(dev, "PCI device did not come back after reset, "
-                         "aborting.\n");
-               goto out;
-       }
-
-       /* Now restore the PCI headers */
-       if (pcie_cap) {
-               devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
-               if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL,
-                                          devctl)) {
-                       err = -ENODEV;
-                       mlx4_err(dev, "Couldn't restore HCA PCI Express "
-                                "Device Control register, aborting.\n");
-                       goto out;
-               }
-               linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
-               if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL,
-                                          linkctl)) {
-                       err = -ENODEV;
-                       mlx4_err(dev, "Couldn't restore HCA PCI Express "
-                                "Link control register, aborting.\n");
-                       goto out;
-               }
-       }
-
-       for (i = 0; i < 16; ++i) {
-               if (i * 4 == PCI_COMMAND)
-                       continue;
-
-               if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
-                       err = -ENODEV;
-                       mlx4_err(dev, "Couldn't restore HCA reg %x, "
-                                 "aborting.\n", i);
-                       goto out;
-               }
-       }
-
-       if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
-                                  hca_header[PCI_COMMAND / 4])) {
-               err = -ENODEV;
-               mlx4_err(dev, "Couldn't restore HCA COMMAND, "
-                         "aborting.\n");
-               goto out;
-       }
-
-out:
-       kfree(hca_header);
-
-       return err;
-}
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
deleted file mode 100644 (file)
index e2337a7..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/errno.h>
-#include <linux/if_ether.h>
-
-#include <linux/mlx4/cmd.h>
-
-#include "mlx4.h"
-
-int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
-                   enum mlx4_port_type *type)
-{
-       u64 out_param;
-       int err = 0;
-
-       err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
-                          MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
-       if (err) {
-               mlx4_err(dev, "Sense command failed for port: %d\n", port);
-               return err;
-       }
-
-       if (out_param > 2) {
-               mlx4_err(dev, "Sense returned illegal value: 0x%llx\n",
-                        (unsigned long long) out_param);
-               return -EINVAL;
-       }
-
-       *type = out_param;
-       return 0;
-}
-
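-/*
- * Sense every auto-configured port, falling back to the supplied
- * defaults when sensing fails or is not allowed on a port.
- */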
-void mlx4_do_sense_ports(struct mlx4_dev *dev,
-                        enum mlx4_port_type *stype,
-                        enum mlx4_port_type *defaults)
-{
-       struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
-       int err;
-       int i;
-
-       for (i = 1; i <= dev->caps.num_ports; i++) {
-               stype[i - 1] = 0;
-               if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
-                   dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
-                       err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]);
-                       if (err)
-                               stype[i - 1] = defaults[i - 1];
-               } else
-                       stype[i - 1] = defaults[i - 1];
-       }
-
-       /*
-        * Adjust port configuration:
-        * If port 1 sensed nothing and port 2 is IB, set both as IB
-        * If port 2 sensed nothing and port 1 is Eth, set both as Eth
-        */
-       if (stype[0] == MLX4_PORT_TYPE_ETH) {
-               for (i = 1; i < dev->caps.num_ports; i++)
-                       stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
-       }
-       if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
-               for (i = 0; i < dev->caps.num_ports - 1; i++)
-                       stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
-       }
-
-       /*
-        * If sensed nothing, remain in current configuration.
-        */
-       for (i = 0; i < dev->caps.num_ports; i++)
-               stype[i] = stype[i] ? stype[i] : defaults[i];
-}
-
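-/*
- * Delayed-work handler: re-sense the ports, apply any resulting port
- * type change, and re-queue itself.
- */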
-static void mlx4_sense_port(struct work_struct *work)
-{
-       struct delayed_work *delay = to_delayed_work(work);
-       struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
-                                               sense_poll);
-       struct mlx4_dev *dev = sense->dev;
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       enum mlx4_port_type stype[MLX4_MAX_PORTS];
-
-       mutex_lock(&priv->port_mutex);
-       mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);
-
-       if (mlx4_check_port_params(dev, stype))
-               goto sense_again;
-
-       if (mlx4_change_port_types(dev, stype))
-               mlx4_err(dev, "Failed to change port_types\n");
-
-sense_again:
-       mutex_unlock(&priv->port_mutex);
-       queue_delayed_work(mlx4_wq, &sense->sense_poll,
-                          round_jiffies_relative(MLX4_SENSE_RANGE));
-}
-
-void mlx4_start_sense(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_sense *sense = &priv->sense;
-
-       if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
-               return;
-
-       queue_delayed_work(mlx4_wq, &sense->sense_poll,
-                          round_jiffies_relative(MLX4_SENSE_RANGE));
-}
-
-void mlx4_stop_sense(struct mlx4_dev *dev)
-{
-       cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
-}
-
-void mlx4_sense_init(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_sense *sense = &priv->sense;
-       int port;
-
-       sense->dev = dev;
-       for (port = 1; port <= dev->caps.num_ports; port++)
-               sense->do_sense_port[port] = 1;
-
-       INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
-}
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
deleted file mode 100644 (file)
index 3b07b80..0000000
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mlx4/cmd.h>
-#include <linux/gfp.h>
-
-#include "mlx4.h"
-#include "icm.h"
-
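-/* Hardware-format SRQ context, passed to the HCA by SW2HW_SRQ mailbox. */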
-struct mlx4_srq_context {
-       __be32                  state_logsize_srqn;
-       u8                      logstride;
-       u8                      reserved1[3];
-       u8                      pg_offset;
-       u8                      reserved2[3];
-       u32                     reserved3;
-       u8                      log_page_size;
-       u8                      reserved4[2];
-       u8                      mtt_base_addr_h;
-       __be32                  mtt_base_addr_l;
-       __be32                  pd;
-       __be16                  limit_watermark;
-       __be16                  wqe_cnt;
-       u16                     reserved5;
-       __be16                  wqe_counter;
-       u32                     reserved6;
-       __be64                  db_rec_addr;
-};
-
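-/*
- * Dispatch an asynchronous event to the SRQ's handler.  The reference
- * count keeps the SRQ alive until the handler returns; mlx4_srq_free()
- * waits for it to drop to zero.
- */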
-void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
-{
-       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
-       struct mlx4_srq *srq;
-
-       spin_lock(&srq_table->lock);
-
-       srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
-       if (srq)
-               atomic_inc(&srq->refcount);
-
-       spin_unlock(&srq_table->lock);
-
-       if (!srq) {
-               mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
-               return;
-       }
-
-       srq->event(srq, event_type);
-
-       if (atomic_dec_and_test(&srq->refcount))
-               complete(&srq->free);
-}
-
-static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-                         int srq_num)
-{
-       return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
-                       MLX4_CMD_TIME_CLASS_A);
-}
-
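-/*
- * With no mailbox, the opcode modifier is set so the firmware need
- * not write the SRQ context back.
- */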
-static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-                         int srq_num)
-{
-       return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
-                           mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
-                           MLX4_CMD_TIME_CLASS_A);
-}
-
-static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
-{
-       return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
-                       MLX4_CMD_TIME_CLASS_B);
-}
-
-static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
-                         int srq_num)
-{
-       return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
-                           MLX4_CMD_TIME_CLASS_A);
-}
-
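-/*
- * Allocate an SRQ number, pin its ICM entries, register the SRQ in
- * the radix tree, and hand the hardware its context via SW2HW_SRQ.
- */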
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
-                  u64 db_rec, struct mlx4_srq *srq)
-{
-       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_srq_context *srq_context;
-       u64 mtt_addr;
-       int err;
-
-       srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
-       if (srq->srqn == -1)
-               return -ENOMEM;
-
-       err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
-       if (err)
-               goto err_out;
-
-       err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
-       if (err)
-               goto err_put;
-
-       spin_lock_irq(&srq_table->lock);
-       err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
-       spin_unlock_irq(&srq_table->lock);
-       if (err)
-               goto err_cmpt_put;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox)) {
-               err = PTR_ERR(mailbox);
-               goto err_radix;
-       }
-
-       srq_context = mailbox->buf;
-       memset(srq_context, 0, sizeof *srq_context);
-
-       srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
-                                                     srq->srqn);
-       srq_context->logstride          = srq->wqe_shift - 4;
-       srq_context->log_page_size      = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
-
-       mtt_addr = mlx4_mtt_addr(dev, mtt);
-       srq_context->mtt_base_addr_h    = mtt_addr >> 32;
-       srq_context->mtt_base_addr_l    = cpu_to_be32(mtt_addr & 0xffffffff);
-       srq_context->pd                 = cpu_to_be32(pdn);
-       srq_context->db_rec_addr        = cpu_to_be64(db_rec);
-
-       err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       if (err)
-               goto err_radix;
-
-       atomic_set(&srq->refcount, 1);
-       init_completion(&srq->free);
-
-       return 0;
-
-err_radix:
-       spin_lock_irq(&srq_table->lock);
-       radix_tree_delete(&srq_table->tree, srq->srqn);
-       spin_unlock_irq(&srq_table->lock);
-
-err_cmpt_put:
-       mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
-
-err_put:
-       mlx4_table_put(dev, &srq_table->table, srq->srqn);
-
-err_out:
-       mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
-
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
-
-void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
-{
-       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
-       int err;
-
-       err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
-       if (err)
-               mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);
-
-       spin_lock_irq(&srq_table->lock);
-       radix_tree_delete(&srq_table->tree, srq->srqn);
-       spin_unlock_irq(&srq_table->lock);
-
-       if (atomic_dec_and_test(&srq->refcount))
-               complete(&srq->free);
-       wait_for_completion(&srq->free);
-
-       mlx4_table_put(dev, &srq_table->table, srq->srqn);
-       mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
-}
-EXPORT_SYMBOL_GPL(mlx4_srq_free);
-
-int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark)
-{
-       return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
-}
-EXPORT_SYMBOL_GPL(mlx4_srq_arm);
-
-int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark)
-{
-       struct mlx4_cmd_mailbox *mailbox;
-       struct mlx4_srq_context *srq_context;
-       int err;
-
-       mailbox = mlx4_alloc_cmd_mailbox(dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
-
-       srq_context = mailbox->buf;
-
-       err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
-       if (err)
-               goto err_out;
-       *limit_watermark = be16_to_cpu(srq_context->limit_watermark);
-
-err_out:
-       mlx4_free_cmd_mailbox(dev, mailbox);
-       return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_srq_query);
-
-int mlx4_init_srq_table(struct mlx4_dev *dev)
-{
-       struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
-       int err;
-
-       spin_lock_init(&srq_table->lock);
-       INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
-
-       err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
-                              dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
-       if (err)
-               return err;
-
-       return 0;
-}
-
-void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
-{
-       mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
-}