ioat: move to drivers/dma/ioat/
author Dan Williams <dan.j.williams@intel.com>
Tue, 28 Jul 2009 21:32:12 +0000 (14:32 -0700)
committer Dan Williams <dan.j.williams@intel.com>
Tue, 28 Jul 2009 21:32:12 +0000 (14:32 -0700)
When first created, the ioat driver was the only inhabitant of
drivers/dma/.  Now, it is the only multi-file (more than a .c and a .h)
driver in the directory.  Moving it to an ioat/ subdirectory allows the
naming convention to be cleaned up, and allows for future splitting of
the source files by hardware version (v1, v2, and v3).

Signed-off-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
15 files changed:
drivers/dma/Makefile
drivers/dma/ioat.c [deleted file]
drivers/dma/ioat/Makefile [new file with mode: 0644]
drivers/dma/ioat/dca.c [new file with mode: 0644]
drivers/dma/ioat/dma.c [new file with mode: 0644]
drivers/dma/ioat/dma.h [new file with mode: 0644]
drivers/dma/ioat/hw.h [new file with mode: 0644]
drivers/dma/ioat/pci.c [new file with mode: 0644]
drivers/dma/ioat/registers.h [new file with mode: 0644]
drivers/dma/ioat_dca.c [deleted file]
drivers/dma/ioat_dma.c [deleted file]
drivers/dma/ioatdma.h [deleted file]
drivers/dma/ioatdma_hw.h [deleted file]
drivers/dma/ioatdma_registers.h [deleted file]
drivers/idle/i7300_idle.c

index 2e5dc96700d20b677535b4576956ef7954bcd98d..a1cb2857bba6df1b3874b3e5946e2c0c8e702ac1 100644
@@ -1,8 +1,7 @@
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_DMATEST) += dmatest.o
-obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
+obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
deleted file mode 100644
index 2225bb6..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine, which does asynchronous
- * copy operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dca.h>
-#include "ioatdma.h"
-#include "ioatdma_registers.h"
-#include "ioatdma_hw.h"
-
-MODULE_VERSION(IOAT_DMA_VERSION);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Intel Corporation");
-
-static struct pci_device_id ioat_pci_tbl[] = {
-       /* I/OAT v1 platforms */
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB)  },
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
-       { PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
-
-       /* I/OAT v2 platforms */
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
-
-       /* I/OAT v3 platforms */
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
-       { 0, }
-};
-
-struct ioat_device {
-       struct pci_dev          *pdev;
-       void __iomem            *iobase;
-       struct ioatdma_device   *dma;
-       struct dca_provider     *dca;
-};
-
-static int __devinit ioat_probe(struct pci_dev *pdev,
-                               const struct pci_device_id *id);
-static void __devexit ioat_remove(struct pci_dev *pdev);
-
-static int ioat_dca_enabled = 1;
-module_param(ioat_dca_enabled, int, 0644);
-MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
-
-static struct pci_driver ioat_pci_driver = {
-       .name           = "ioatdma",
-       .id_table       = ioat_pci_tbl,
-       .probe          = ioat_probe,
-       .remove         = __devexit_p(ioat_remove),
-};
-
-static int __devinit ioat_probe(struct pci_dev *pdev,
-                               const struct pci_device_id *id)
-{
-       void __iomem *iobase;
-       struct ioat_device *device;
-       unsigned long mmio_start, mmio_len;
-       int err;
-
-       err = pci_enable_device(pdev);
-       if (err)
-               goto err_enable_device;
-
-       err = pci_request_regions(pdev, ioat_pci_driver.name);
-       if (err)
-               goto err_request_regions;
-
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-       if (err)
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-       if (err)
-               goto err_set_dma_mask;
-
-       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-       if (err)
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-       if (err)
-               goto err_set_dma_mask;
-
-       mmio_start = pci_resource_start(pdev, 0);
-       mmio_len = pci_resource_len(pdev, 0);
-       iobase = ioremap(mmio_start, mmio_len);
-       if (!iobase) {
-               err = -ENOMEM;
-               goto err_ioremap;
-       }
-
-       device = kzalloc(sizeof(*device), GFP_KERNEL);
-       if (!device) {
-               err = -ENOMEM;
-               goto err_kzalloc;
-       }
-       device->pdev = pdev;
-       pci_set_drvdata(pdev, device);
-       device->iobase = iobase;
-
-       pci_set_master(pdev);
-
-       switch (readb(iobase + IOAT_VER_OFFSET)) {
-       case IOAT_VER_1_2:
-               device->dma = ioat_dma_probe(pdev, iobase);
-               if (device->dma && ioat_dca_enabled)
-                       device->dca = ioat_dca_init(pdev, iobase);
-               break;
-       case IOAT_VER_2_0:
-               device->dma = ioat_dma_probe(pdev, iobase);
-               if (device->dma && ioat_dca_enabled)
-                       device->dca = ioat2_dca_init(pdev, iobase);
-               break;
-       case IOAT_VER_3_0:
-               device->dma = ioat_dma_probe(pdev, iobase);
-               if (device->dma && ioat_dca_enabled)
-                       device->dca = ioat3_dca_init(pdev, iobase);
-               break;
-       default:
-               err = -ENODEV;
-               break;
-       }
-       if (!device->dma)
-               err = -ENODEV;
-
-       if (err)
-               goto err_version;
-
-       return 0;
-
-err_version:
-       kfree(device);
-err_kzalloc:
-       iounmap(iobase);
-err_ioremap:
-err_set_dma_mask:
-       pci_release_regions(pdev);
-       pci_disable_device(pdev);
-err_request_regions:
-err_enable_device:
-       return err;
-}
-
-static void __devexit ioat_remove(struct pci_dev *pdev)
-{
-       struct ioat_device *device = pci_get_drvdata(pdev);
-
-       dev_err(&pdev->dev, "Removing dma and dca services\n");
-       if (device->dca) {
-               unregister_dca_provider(device->dca);
-               free_dca_provider(device->dca);
-               device->dca = NULL;
-       }
-
-       if (device->dma) {
-               ioat_dma_remove(device->dma);
-               device->dma = NULL;
-       }
-
-       kfree(device);
-}
-
-static int __init ioat_init_module(void)
-{
-       return pci_register_driver(&ioat_pci_driver);
-}
-module_init(ioat_init_module);
-
-static void __exit ioat_exit_module(void)
-{
-       pci_unregister_driver(&ioat_pci_driver);
-}
-module_exit(ioat_exit_module);
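
The probe above keys everything off the hardware version byte: one common DMA probe, plus a per-version DCA init. Below is a standalone C sketch of that dispatch, assuming the IOAT_VER_* constants encode the version in BCD (0x12, 0x20, 0x30); that matches their names but is not shown in this hunk.

    #include <stdio.h>

    /* Assumed BCD encodings, based on the IOAT_VER_1_2/2_0/3_0 names. */
    enum { IOAT_VER_1_2 = 0x12, IOAT_VER_2_0 = 0x20, IOAT_VER_3_0 = 0x30 };

    /* Map a hardware version byte to the DCA init routine the probe pairs
     * with the common ioat_dma_probe(); NULL means probe fails -ENODEV. */
    static const char *dca_init_for(unsigned char version)
    {
        switch (version) {
        case IOAT_VER_1_2: return "ioat_dca_init";
        case IOAT_VER_2_0: return "ioat2_dca_init";
        case IOAT_VER_3_0: return "ioat3_dca_init";
        default:           return NULL;
        }
    }

    int main(void)
    {
        unsigned char ver = 0x30;    /* hypothetical IOAT_VER_OFFSET read */
        const char *init = dca_init_for(ver);

        printf("version 0x%02x -> %s\n", ver, init ? init : "unsupported");
        return 0;
    }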
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
new file mode 100644
index 0000000..2ce3d3a
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
+ioatdma-objs := pci.o dma.o dca.o
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
new file mode 100644
index 0000000..af1c762
--- /dev/null
@@ -0,0 +1,681 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2007 - 2009 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/dca.h>
+
+/* either a kernel change is needed, or we need something like this in kernel */
+#ifndef CONFIG_SMP
+#include <asm/smp.h>
+#undef cpu_physical_id
+#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
+#endif
+
+#include "dma.h"
+#include "registers.h"
+
+/*
+ * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
+ * contain the bit number of the APIC ID to map into the DCA tag.  If the valid
+ * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
+ */
+#define DCA_TAG_MAP_VALID 0x80
+
+#define DCA3_TAG_MAP_BIT_TO_INV 0x80
+#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
+#define DCA3_TAG_MAP_LITERAL_VAL 0x1
+
+#define DCA_TAG_MAP_MASK 0xDF
+
+/* expected tag map bytes for I/OAT ver.2 */
+#define DCA2_TAG_MAP_BYTE0 0x80
+#define DCA2_TAG_MAP_BYTE1 0x0
+#define DCA2_TAG_MAP_BYTE2 0x81
+#define DCA2_TAG_MAP_BYTE3 0x82
+#define DCA2_TAG_MAP_BYTE4 0x82
+
+/* verify if tag map matches expected values */
+static inline int dca2_tag_map_valid(u8 *tag_map)
+{
+       return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
+               (tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
+               (tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
+               (tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
+               (tag_map[4] == DCA2_TAG_MAP_BYTE4));
+}
+
+/*
+ * "Legacy" DCA systems do not implement the DCA register set in the
+ * I/OAT device.  Software needs direct support for their tag mappings.
+ */
+
+#define APICID_BIT(x)          (DCA_TAG_MAP_VALID | (x))
+#define IOAT_TAG_MAP_LEN       8
+
+static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
+       1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
+static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
+       1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
+static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
+       1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
+static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
+
+/* pack PCI B/D/F into a u16 */
+static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
+{
+       return (pci->bus->number << 8) | pci->devfn;
+}
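
A quick worked example of the packing above, runnable in user space: PCI encodes devfn as (device << 3) | function, so a hypothetical bus 0x05, device 3, function 1 yields requester id 0x0519.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t bus = 0x05, dev = 3, fn = 1;           /* hypothetical B/D/F */
        uint8_t devfn = (uint8_t)((dev << 3) | fn);    /* 0x19 */
        uint16_t rid = (uint16_t)((bus << 8) | devfn);

        printf("rid = 0x%04x\n", rid);                 /* prints rid = 0x0519 */
        return 0;
    }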
+
+static int dca_enabled_in_bios(struct pci_dev *pdev)
+{
+       /* CPUID level 9 returns DCA configuration */
+       /* Bit 0 indicates DCA enabled by the BIOS */
+       unsigned long cpuid_level_9;
+       int res;
+
+       cpuid_level_9 = cpuid_eax(9);
+       res = test_bit(0, &cpuid_level_9);
+       if (!res)
+               dev_err(&pdev->dev, "DCA is disabled in BIOS\n");
+
+       return res;
+}
+
+static int system_has_dca_enabled(struct pci_dev *pdev)
+{
+       if (boot_cpu_has(X86_FEATURE_DCA))
+               return dca_enabled_in_bios(pdev);
+
+       dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
+       return 0;
+}
+
+struct ioat_dca_slot {
+       struct pci_dev *pdev;   /* requester device */
+       u16 rid;                /* requester id, as used by IOAT */
+};
+
+#define IOAT_DCA_MAX_REQ 6
+#define IOAT3_DCA_MAX_REQ 2
+
+struct ioat_dca_priv {
+       void __iomem            *iobase;
+       void __iomem            *dca_base;
+       int                      max_requesters;
+       int                      requester_count;
+       u8                       tag_map[IOAT_TAG_MAP_LEN];
+       struct ioat_dca_slot     req_slots[0];
+};
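
The zero-length req_slots[0] member is the classic trailing-array idiom: the provider is allocated as one block sized for the header plus max_requesters slots (see the alloc_dca_provider() calls below). A minimal user-space sketch of the same pattern, with simplified stand-in types and the C99 [] spelling:

    #include <stdio.h>
    #include <stdlib.h>

    struct slot { void *pdev; unsigned short rid; };

    struct priv {
        int max_requesters;
        struct slot req_slots[];    /* C99 flexible array member */
    };

    int main(void)
    {
        int n = 6;    /* IOAT_DCA_MAX_REQ for pre-v3 hardware */
        struct priv *p = calloc(1, sizeof(*p) + n * sizeof(struct slot));

        if (!p)
            return 1;
        p->max_requesters = n;
        printf("%zu bytes for %d slots\n",
               sizeof(*p) + n * sizeof(struct slot), n);
        free(p);
        return 0;
    }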
+
+/* 5000 series chipset DCA Port Requester ID Table Entry Format
+ * [15:8]      PCI-Express Bus Number
+ * [7:3]       PCI-Express Device Number
+ * [2:0]       PCI-Express Function Number
+ *
+ * 5000 series chipset DCA control register format
+ * [7:1]       Reserved (0)
+ * [0]         Ignore Function Number
+ */
+
+static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
+{
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       struct pci_dev *pdev;
+       int i;
+       u16 id;
+
+       /* This implementation only supports PCI-Express */
+       if (dev->bus != &pci_bus_type)
+               return -ENODEV;
+       pdev = to_pci_dev(dev);
+       id = dcaid_from_pcidev(pdev);
+
+       if (ioatdca->requester_count == ioatdca->max_requesters)
+               return -ENODEV;
+
+       for (i = 0; i < ioatdca->max_requesters; i++) {
+               if (ioatdca->req_slots[i].pdev == NULL) {
+                       /* found an empty slot */
+                       ioatdca->requester_count++;
+                       ioatdca->req_slots[i].pdev = pdev;
+                       ioatdca->req_slots[i].rid = id;
+                       writew(id, ioatdca->dca_base + (i * 4));
+                       /* make sure the ignore function bit is off */
+                       writeb(0, ioatdca->dca_base + (i * 4) + 2);
+                       return i;
+               }
+       }
+       /* Error, ioatdca->requester_count is out of whack */
+       return -EFAULT;
+}
+
+static int ioat_dca_remove_requester(struct dca_provider *dca,
+                                    struct device *dev)
+{
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       struct pci_dev *pdev;
+       int i;
+
+       /* This implementation only supports PCI-Express */
+       if (dev->bus != &pci_bus_type)
+               return -ENODEV;
+       pdev = to_pci_dev(dev);
+
+       for (i = 0; i < ioatdca->max_requesters; i++) {
+               if (ioatdca->req_slots[i].pdev == pdev) {
+                       writew(0, ioatdca->dca_base + (i * 4));
+                       ioatdca->req_slots[i].pdev = NULL;
+                       ioatdca->req_slots[i].rid = 0;
+                       ioatdca->requester_count--;
+                       return i;
+               }
+       }
+       return -ENODEV;
+}
+
+static u8 ioat_dca_get_tag(struct dca_provider *dca,
+                          struct device *dev,
+                          int cpu)
+{
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       int i, apic_id, bit, value;
+       u8 entry, tag;
+
+       tag = 0;
+       apic_id = cpu_physical_id(cpu);
+
+       for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
+               entry = ioatdca->tag_map[i];
+               if (entry & DCA_TAG_MAP_VALID) {
+                       bit = entry & ~DCA_TAG_MAP_VALID;
+                       value = (apic_id & (1 << bit)) ? 1 : 0;
+               } else {
+                       value = entry ? 1 : 0;
+               }
+               tag |= (value << i);
+       }
+       return tag;
+}
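
How the v1/v2 tag computation plays out, as a standalone program mirroring the loop above: with the BNB map, tag bit 0 is the literal 1 and tag bits 1-3 copy APIC ID bits 1, 2, and 2, so APIC ID 6 (0b110) produces tag 0xf. The APIC ID is made up for the example.

    #include <stdio.h>
    #include <stdint.h>

    #define DCA_TAG_MAP_VALID 0x80
    #define APICID_BIT(x)     (DCA_TAG_MAP_VALID | (x))

    /* Same decode as ioat_dca_get_tag(): a valid entry names an APIC ID
     * bit to copy; otherwise the entry itself is the literal tag bit. */
    static uint8_t get_tag(const uint8_t *map, int len, int apic_id)
    {
        uint8_t tag = 0;

        for (int i = 0; i < len; i++) {
            uint8_t entry = map[i];
            int value;

            if (entry & DCA_TAG_MAP_VALID)
                value = (apic_id >> (entry & ~DCA_TAG_MAP_VALID)) & 1;
            else
                value = entry ? 1 : 0;
            tag |= (uint8_t)(value << i);
        }
        return tag;
    }

    int main(void)
    {
        uint8_t bnb[8] = { 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2) };

        printf("tag = 0x%x\n", get_tag(bnb, 8, 6));    /* prints tag = 0xf */
        return 0;
    }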
+
+static int ioat_dca_dev_managed(struct dca_provider *dca,
+                               struct device *dev)
+{
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       struct pci_dev *pdev;
+       int i;
+
+       pdev = to_pci_dev(dev);
+       for (i = 0; i < ioatdca->max_requesters; i++) {
+               if (ioatdca->req_slots[i].pdev == pdev)
+                       return 1;
+       }
+       return 0;
+}
+
+static struct dca_ops ioat_dca_ops = {
+       .add_requester          = ioat_dca_add_requester,
+       .remove_requester       = ioat_dca_remove_requester,
+       .get_tag                = ioat_dca_get_tag,
+       .dev_managed            = ioat_dca_dev_managed,
+};
+
+
+struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+{
+       struct dca_provider *dca;
+       struct ioat_dca_priv *ioatdca;
+       u8 *tag_map = NULL;
+       int i;
+       int err;
+       u8 version;
+       u8 max_requesters;
+
+       if (!system_has_dca_enabled(pdev))
+               return NULL;
+
+       /* I/OAT v1 systems must have a known tag_map to support DCA */
+       switch (pdev->vendor) {
+       case PCI_VENDOR_ID_INTEL:
+               switch (pdev->device) {
+               case PCI_DEVICE_ID_INTEL_IOAT:
+                       tag_map = ioat_tag_map_BNB;
+                       break;
+               case PCI_DEVICE_ID_INTEL_IOAT_CNB:
+                       tag_map = ioat_tag_map_CNB;
+                       break;
+               case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
+                       tag_map = ioat_tag_map_SCNB;
+                       break;
+               }
+               break;
+       case PCI_VENDOR_ID_UNISYS:
+               switch (pdev->device) {
+               case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
+                       tag_map = ioat_tag_map_UNISYS;
+                       break;
+               }
+               break;
+       }
+       if (tag_map == NULL)
+               return NULL;
+
+       version = readb(iobase + IOAT_VER_OFFSET);
+       if (version == IOAT_VER_3_0)
+               max_requesters = IOAT3_DCA_MAX_REQ;
+       else
+               max_requesters = IOAT_DCA_MAX_REQ;
+
+       dca = alloc_dca_provider(&ioat_dca_ops,
+                       sizeof(*ioatdca) +
+                       (sizeof(struct ioat_dca_slot) * max_requesters));
+       if (!dca)
+               return NULL;
+
+       ioatdca = dca_priv(dca);
+       ioatdca->max_requesters = max_requesters;
+       ioatdca->dca_base = iobase + 0x54;
+
+       /* copy over the APIC ID to DCA tag mapping */
+       for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
+               ioatdca->tag_map[i] = tag_map[i];
+
+       err = register_dca_provider(dca, &pdev->dev);
+       if (err) {
+               free_dca_provider(dca);
+               return NULL;
+       }
+
+       return dca;
+}
+
+
+static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
+{
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       struct pci_dev *pdev;
+       int i;
+       u16 id;
+       u16 global_req_table;
+
+       /* This implementation only supports PCI-Express */
+       if (dev->bus != &pci_bus_type)
+               return -ENODEV;
+       pdev = to_pci_dev(dev);
+       id = dcaid_from_pcidev(pdev);
+
+       if (ioatdca->requester_count == ioatdca->max_requesters)
+               return -ENODEV;
+
+       for (i = 0; i < ioatdca->max_requesters; i++) {
+               if (ioatdca->req_slots[i].pdev == NULL) {
+                       /* found an empty slot */
+                       ioatdca->requester_count++;
+                       ioatdca->req_slots[i].pdev = pdev;
+                       ioatdca->req_slots[i].rid = id;
+                       global_req_table =
+                             readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
+                       writel(id | IOAT_DCA_GREQID_VALID,
+                              ioatdca->iobase + global_req_table + (i * 4));
+                       return i;
+               }
+       }
+       /* Error, ioatdca->requester_count is out of whack */
+       return -EFAULT;
+}
+
+static int ioat2_dca_remove_requester(struct dca_provider *dca,
+                                     struct device *dev)
+{
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       struct pci_dev *pdev;
+       int i;
+       u16 global_req_table;
+
+       /* This implementation only supports PCI-Express */
+       if (dev->bus != &pci_bus_type)
+               return -ENODEV;
+       pdev = to_pci_dev(dev);
+
+       for (i = 0; i < ioatdca->max_requesters; i++) {
+               if (ioatdca->req_slots[i].pdev == pdev) {
+                       global_req_table =
+                             readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
+                       writel(0, ioatdca->iobase + global_req_table + (i * 4));
+                       ioatdca->req_slots[i].pdev = NULL;
+                       ioatdca->req_slots[i].rid = 0;
+                       ioatdca->requester_count--;
+                       return i;
+               }
+       }
+       return -ENODEV;
+}
+
+static u8 ioat2_dca_get_tag(struct dca_provider *dca,
+                           struct device *dev,
+                           int cpu)
+{
+       u8 tag;
+
+       tag = ioat_dca_get_tag(dca, dev, cpu);
+       tag = (~tag) & 0x1F;
+       return tag;
+}
+
+static struct dca_ops ioat2_dca_ops = {
+       .add_requester          = ioat2_dca_add_requester,
+       .remove_requester       = ioat2_dca_remove_requester,
+       .get_tag                = ioat2_dca_get_tag,
+       .dev_managed            = ioat_dca_dev_managed,
+};
+
+static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
+{
+       int slots = 0;
+       u32 req;
+       u16 global_req_table;
+
+       global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
+       if (global_req_table == 0)
+               return 0;
+       do {
+               req = readl(iobase + global_req_table + (slots * sizeof(u32)));
+               slots++;
+       } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
+
+       return slots;
+}
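
The slot count is discovered by walking the global requester table until an entry with the LASTID flag turns up; that terminating entry is included in the count. Here is the same walk over a mock in-memory table instead of MMIO (the LASTID bit position is an assumption made for the illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define IOAT_DCA_GREQID_LASTID (1u << 31)    /* assumed bit, for illustration */

    static int count_slots(const uint32_t *table)
    {
        int slots = 0;
        uint32_t req;

        do {
            req = table[slots++];
        } while (!(req & IOAT_DCA_GREQID_LASTID));
        return slots;
    }

    int main(void)
    {
        uint32_t mock[] = { 0, 0, IOAT_DCA_GREQID_LASTID };

        printf("%d slots\n", count_slots(mock));    /* prints 3 slots */
        return 0;
    }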
+
+struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+{
+       struct dca_provider *dca;
+       struct ioat_dca_priv *ioatdca;
+       int slots;
+       int i;
+       int err;
+       u32 tag_map;
+       u16 dca_offset;
+       u16 csi_fsb_control;
+       u16 pcie_control;
+       u8 bit;
+
+       if (!system_has_dca_enabled(pdev))
+               return NULL;
+
+       dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
+       if (dca_offset == 0)
+               return NULL;
+
+       slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
+       if (slots == 0)
+               return NULL;
+
+       dca = alloc_dca_provider(&ioat2_dca_ops,
+                                sizeof(*ioatdca)
+                                     + (sizeof(struct ioat_dca_slot) * slots));
+       if (!dca)
+               return NULL;
+
+       ioatdca = dca_priv(dca);
+       ioatdca->iobase = iobase;
+       ioatdca->dca_base = iobase + dca_offset;
+       ioatdca->max_requesters = slots;
+
+       /* some bios might not know to turn these on */
+       csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
+       if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
+               csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
+               writew(csi_fsb_control,
+                      ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
+       }
+       pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
+       if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
+               pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
+               writew(pcie_control,
+                      ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
+       }
+
+
+       /* TODO version, compatibility and configuration checks */
+
+       /* copy out the APIC to DCA tag map */
+       tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
+       for (i = 0; i < 5; i++) {
+               bit = (tag_map >> (4 * i)) & 0x0f;
+               if (bit < 8)
+                       ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
+               else
+                       ioatdca->tag_map[i] = 0;
+       }
+
+       if (!dca2_tag_map_valid(ioatdca->tag_map)) {
+               dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, "
+                       "disabling DCA\n");
+               free_dca_provider(dca);
+               return NULL;
+       }
+
+       err = register_dca_provider(dca, &pdev->dev);
+       if (err) {
+               free_dca_provider(dca);
+               return NULL;
+       }
+
+       return dca;
+}
+
+static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
+{
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       struct pci_dev *pdev;
+       int i;
+       u16 id;
+       u16 global_req_table;
+
+       /* This implementation only supports PCI-Express */
+       if (dev->bus != &pci_bus_type)
+               return -ENODEV;
+       pdev = to_pci_dev(dev);
+       id = dcaid_from_pcidev(pdev);
+
+       if (ioatdca->requester_count == ioatdca->max_requesters)
+               return -ENODEV;
+
+       for (i = 0; i < ioatdca->max_requesters; i++) {
+               if (ioatdca->req_slots[i].pdev == NULL) {
+                       /* found an empty slot */
+                       ioatdca->requester_count++;
+                       ioatdca->req_slots[i].pdev = pdev;
+                       ioatdca->req_slots[i].rid = id;
+                       global_req_table =
+                             readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
+                       writel(id | IOAT_DCA_GREQID_VALID,
+                              ioatdca->iobase + global_req_table + (i * 4));
+                       return i;
+               }
+       }
+       /* Error, ioatdca->requester_count is out of whack */
+       return -EFAULT;
+}
+
+static int ioat3_dca_remove_requester(struct dca_provider *dca,
+                                     struct device *dev)
+{
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       struct pci_dev *pdev;
+       int i;
+       u16 global_req_table;
+
+       /* This implementation only supports PCI-Express */
+       if (dev->bus != &pci_bus_type)
+               return -ENODEV;
+       pdev = to_pci_dev(dev);
+
+       for (i = 0; i < ioatdca->max_requesters; i++) {
+               if (ioatdca->req_slots[i].pdev == pdev) {
+                       global_req_table =
+                             readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
+                       writel(0, ioatdca->iobase + global_req_table + (i * 4));
+                       ioatdca->req_slots[i].pdev = NULL;
+                       ioatdca->req_slots[i].rid = 0;
+                       ioatdca->requester_count--;
+                       return i;
+               }
+       }
+       return -ENODEV;
+}
+
+static u8 ioat3_dca_get_tag(struct dca_provider *dca,
+                           struct device *dev,
+                           int cpu)
+{
+       u8 tag;
+
+       struct ioat_dca_priv *ioatdca = dca_priv(dca);
+       int i, apic_id, bit, value;
+       u8 entry;
+
+       tag = 0;
+       apic_id = cpu_physical_id(cpu);
+
+       for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
+               entry = ioatdca->tag_map[i];
+               if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
+                       bit = entry &
+                               ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
+                       value = (apic_id & (1 << bit)) ? 1 : 0;
+               } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
+                       bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
+                       value = (apic_id & (1 << bit)) ? 0 : 1;
+               } else {
+                       value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
+               }
+               tag |= (value << i);
+       }
+
+       return tag;
+}
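
A worked example of the v3 decode above, runnable standalone: a SEL entry copies the named APIC ID bit, an INV entry copies and inverts it, and anything else contributes its literal bit 0. The map bytes and APIC ID below are invented for the illustration.

    #include <stdio.h>
    #include <stdint.h>

    #define DCA3_TAG_MAP_BIT_TO_INV  0x80
    #define DCA3_TAG_MAP_BIT_TO_SEL  0x40
    #define DCA3_TAG_MAP_LITERAL_VAL 0x1

    static uint8_t ioat3_tag(const uint8_t *map, int len, int apic_id)
    {
        uint8_t tag = 0;

        for (int i = 0; i < len; i++) {
            uint8_t entry = map[i];
            int bit, value;

            if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
                bit = entry & ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
                value = (apic_id >> bit) & 1;
            } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
                bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
                value = !((apic_id >> bit) & 1);
            } else {
                value = entry & DCA3_TAG_MAP_LITERAL_VAL;
            }
            tag |= (uint8_t)(value << i);
        }
        return tag;
    }

    int main(void)
    {
        /* bit 0: select APIC bit 1; bit 1: invert APIC bit 2; bit 2: literal 1 */
        uint8_t map[8] = { 0x41, 0x82, DCA3_TAG_MAP_LITERAL_VAL, 0 };

        printf("tag = 0x%x\n", ioat3_tag(map, 8, 2));    /* prints tag = 0x7 */
        return 0;
    }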
+
+static struct dca_ops ioat3_dca_ops = {
+       .add_requester          = ioat3_dca_add_requester,
+       .remove_requester       = ioat3_dca_remove_requester,
+       .get_tag                = ioat3_dca_get_tag,
+       .dev_managed            = ioat_dca_dev_managed,
+};
+
+static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
+{
+       int slots = 0;
+       u32 req;
+       u16 global_req_table;
+
+       global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
+       if (global_req_table == 0)
+               return 0;
+
+       do {
+               req = readl(iobase + global_req_table + (slots * sizeof(u32)));
+               slots++;
+       } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
+
+       return slots;
+}
+
+struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+{
+       struct dca_provider *dca;
+       struct ioat_dca_priv *ioatdca;
+       int slots;
+       int i;
+       int err;
+       u16 dca_offset;
+       u16 csi_fsb_control;
+       u16 pcie_control;
+       u8 bit;
+
+       union {
+               u64 full;
+               struct {
+                       u32 low;
+                       u32 high;
+               };
+       } tag_map;
+
+       if (!system_has_dca_enabled(pdev))
+               return NULL;
+
+       dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
+       if (dca_offset == 0)
+               return NULL;
+
+       slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
+       if (slots == 0)
+               return NULL;
+
+       dca = alloc_dca_provider(&ioat3_dca_ops,
+                                sizeof(*ioatdca)
+                                     + (sizeof(struct ioat_dca_slot) * slots));
+       if (!dca)
+               return NULL;
+
+       ioatdca = dca_priv(dca);
+       ioatdca->iobase = iobase;
+       ioatdca->dca_base = iobase + dca_offset;
+       ioatdca->max_requesters = slots;
+
+       /* some bios might not know to turn these on */
+       csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
+       if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
+               csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
+               writew(csi_fsb_control,
+                      ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
+       }
+       pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
+       if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
+               pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
+               writew(pcie_control,
+                      ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
+       }
+
+
+       /* TODO version, compatibility and configuration checks */
+
+       /* copy out the APIC to DCA tag map */
+       tag_map.low =
+               readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
+       tag_map.high =
+               readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
+       for (i = 0; i < 8; i++) {
+               bit = tag_map.full >> (8 * i);
+               ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
+       }
+
+       err = register_dca_provider(dca, &pdev->dev);
+       if (err) {
+               free_dca_provider(dca);
+               return NULL;
+       }
+
+       return dca;
+}
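
The low/high union above stitches two 32-bit register reads into the 64-bit tag map before masking out each byte. The same extraction in user space (the register values are stand-ins, and the byte order assumes little-endian, as on the x86 platforms this driver targets):

    #include <stdio.h>
    #include <stdint.h>

    #define DCA_TAG_MAP_MASK 0xDF

    int main(void)
    {
        union {
            uint64_t full;
            struct { uint32_t low; uint32_t high; } parts;    /* little-endian */
        } tag_map;
        uint8_t out[8];

        tag_map.parts.low  = 0x44434241;    /* stand-in for ..._OFFSET_LOW */
        tag_map.parts.high = 0x48474645;    /* stand-in for ..._OFFSET_HIGH */

        for (int i = 0; i < 8; i++)
            out[i] = (uint8_t)(tag_map.full >> (8 * i)) & DCA_TAG_MAP_MASK;

        for (int i = 0; i < 8; i++)
            printf("%02x ", out[i]);    /* 41 42 43 44 45 46 47 48 */
        printf("\n");
        return 0;
    }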
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
new file mode 100644
index 0000000..648797e
--- /dev/null
@@ -0,0 +1,1741 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2009 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/*
+ * This driver supports an Intel I/OAT DMA engine, which does asynchronous
+ * copy operations.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/i7300_idle.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
+#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
+#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
+#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
+
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
+static int ioat_pending_level = 4;
+module_param(ioat_pending_level, int, 0644);
+MODULE_PARM_DESC(ioat_pending_level,
+                "high-water mark for pushing ioat descriptors (default: 4)");
+
+#define RESET_DELAY  msecs_to_jiffies(100)
+#define WATCHDOG_DELAY  round_jiffies(msecs_to_jiffies(2000))
+static void ioat_dma_chan_reset_part2(struct work_struct *work);
+static void ioat_dma_chan_watchdog(struct work_struct *work);
+
+/*
+ * workaround for IOAT ver.3.0 null descriptor issue
+ * (channel returns error when size is 0)
+ */
+#define NULL_DESC_BUFFER_SIZE 1
+
+/* internal functions */
+static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
+static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
+
+static struct ioat_desc_sw *
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
+static struct ioat_desc_sw *
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
+
+static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
+                                               struct ioatdma_device *device,
+                                               int index)
+{
+       return device->idx[index];
+}
+
+/**
+ * ioat_dma_do_interrupt - handler used for single vector interrupt mode
+ * @irq: interrupt id
+ * @data: interrupt data
+ */
+static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+{
+       struct ioatdma_device *instance = data;
+       struct ioat_dma_chan *ioat_chan;
+       unsigned long attnstatus;
+       int bit;
+       u8 intrctrl;
+
+       intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
+
+       if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
+               return IRQ_NONE;
+
+       if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
+               writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+               return IRQ_NONE;
+       }
+
+       attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
+       for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
+               ioat_chan = ioat_lookup_chan_by_index(instance, bit);
+               tasklet_schedule(&ioat_chan->cleanup_task);
+       }
+
+       writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
+       return IRQ_HANDLED;
+}
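
In single-vector mode one interrupt fans out to every channel that raised attention: attnstatus is a bitmask, and each set bit schedules that channel's cleanup tasklet. A plain-C version of the same fan-out over a made-up mask (for_each_bit() just walks the set bits more efficiently):

    #include <stdio.h>

    int main(void)
    {
        unsigned long attnstatus = 0x0b;    /* channels 0, 1 and 3 raised */

        for (unsigned bit = 0; bit < 8 * sizeof(attnstatus); bit++)
            if (attnstatus & (1UL << bit))
                printf("schedule cleanup tasklet for channel %u\n", bit);
        return 0;
    }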
+
+/**
+ * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
+ * @irq: interrupt id
+ * @data: interrupt data
+ */
+static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+{
+       struct ioat_dma_chan *ioat_chan = data;
+
+       tasklet_schedule(&ioat_chan->cleanup_task);
+
+       return IRQ_HANDLED;
+}
+
+static void ioat_dma_cleanup_tasklet(unsigned long data);
+
+/**
+ * ioat_dma_enumerate_channels - find and initialize the device's channels
+ * @device: the device to be enumerated
+ */
+static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
+{
+       u8 xfercap_scale;
+       u32 xfercap;
+       int i;
+       struct ioat_dma_chan *ioat_chan;
+
+       /*
+        * IOAT ver.3 workarounds
+        */
+       if (device->version == IOAT_VER_3_0) {
+               u32 chan_err_mask;
+               u16 dev_id;
+               u32 dmauncerrsts;
+
+               /*
+                * Write CHANERRMSK_INT with 3E07h to mask out the errors
+                * that can cause stability issues for IOAT ver.3
+                */
+               chan_err_mask = 0x3E07;
+               pci_write_config_dword(device->pdev,
+                       IOAT_PCI_CHANERRMASK_INT_OFFSET,
+                       chan_err_mask);
+
+               /*
+                * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+                * (workaround for spurious config parity error after restart)
+                */
+               pci_read_config_word(device->pdev,
+                       IOAT_PCI_DEVICE_ID_OFFSET,
+                       &dev_id);
+               if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
+                       dmauncerrsts = 0x10;
+                       pci_write_config_dword(device->pdev,
+                               IOAT_PCI_DMAUNCERRSTS_OFFSET,
+                               dmauncerrsts);
+               }
+       }
+
+       device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
+       xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
+       xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
+
+#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
+       if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
+               device->common.chancnt--;
+       }
+#endif
+       for (i = 0; i < device->common.chancnt; i++) {
+               ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
+               if (!ioat_chan) {
+                       device->common.chancnt = i;
+                       break;
+               }
+
+               ioat_chan->device = device;
+               ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
+               ioat_chan->xfercap = xfercap;
+               ioat_chan->desccount = 0;
+               INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
+               if (ioat_chan->device->version == IOAT_VER_2_0)
+                       writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE |
+                              IOAT_DMA_DCA_ANY_CPU,
+                              ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+               else if (ioat_chan->device->version == IOAT_VER_3_0)
+                       writel(IOAT_DMA_DCA_ANY_CPU,
+                              ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+               spin_lock_init(&ioat_chan->cleanup_lock);
+               spin_lock_init(&ioat_chan->desc_lock);
+               INIT_LIST_HEAD(&ioat_chan->free_desc);
+               INIT_LIST_HEAD(&ioat_chan->used_desc);
+               /* This should be made common somewhere in dmaengine.c */
+               ioat_chan->common.device = &device->common;
+               list_add_tail(&ioat_chan->common.device_node,
+                             &device->common.channels);
+               device->idx[i] = ioat_chan;
+               tasklet_init(&ioat_chan->cleanup_task,
+                            ioat_dma_cleanup_tasklet,
+                            (unsigned long) ioat_chan);
+               tasklet_disable(&ioat_chan->cleanup_task);
+       }
+       return device->common.chancnt;
+}
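
The XFERCAP register holds a power-of-two scale for the largest transfer one descriptor can carry; a scale of 0 means no limit, which the code represents as all-ones. A runnable decode with hypothetical scale values:

    #include <stdio.h>

    int main(void)
    {
        unsigned char scales[] = { 0, 12, 16, 20 };    /* hypothetical readings */

        for (int i = 0; i < 4; i++) {
            unsigned char s = scales[i];
            unsigned int xfercap = (s == 0 ? (unsigned int)-1 : (1u << s));

            printf("scale %2u -> xfercap 0x%08x bytes\n", s, xfercap);
        }
        return 0;
    }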
+
+/**
+ * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
+ *                                 descriptors to hw
+ * @chan: DMA channel handle
+ */
+static inline void __ioat1_dma_memcpy_issue_pending(
+                                               struct ioat_dma_chan *ioat_chan)
+{
+       ioat_chan->pending = 0;
+       writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
+}
+
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+       if (ioat_chan->pending > 0) {
+               spin_lock_bh(&ioat_chan->desc_lock);
+               __ioat1_dma_memcpy_issue_pending(ioat_chan);
+               spin_unlock_bh(&ioat_chan->desc_lock);
+       }
+}
+
+static inline void __ioat2_dma_memcpy_issue_pending(
+                                               struct ioat_dma_chan *ioat_chan)
+{
+       ioat_chan->pending = 0;
+       writew(ioat_chan->dmacount,
+              ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+}
+
+static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+       if (ioat_chan->pending > 0) {
+               spin_lock_bh(&ioat_chan->desc_lock);
+               __ioat2_dma_memcpy_issue_pending(ioat_chan);
+               spin_unlock_bh(&ioat_chan->desc_lock);
+       }
+}
+
+
+/**
+ * ioat_dma_chan_reset_part2 - reinit the channel after a reset
+ */
+static void ioat_dma_chan_reset_part2(struct work_struct *work)
+{
+       struct ioat_dma_chan *ioat_chan =
+               container_of(work, struct ioat_dma_chan, work.work);
+       struct ioat_desc_sw *desc;
+
+       spin_lock_bh(&ioat_chan->cleanup_lock);
+       spin_lock_bh(&ioat_chan->desc_lock);
+
+       ioat_chan->completion_virt->low = 0;
+       ioat_chan->completion_virt->high = 0;
+       ioat_chan->pending = 0;
+
+       /*
+        * count the descriptors waiting, and be sure to do it
+        * right for both the CB1 line and the CB2 ring
+        */
+       ioat_chan->dmacount = 0;
+       if (ioat_chan->used_desc.prev) {
+               desc = to_ioat_desc(ioat_chan->used_desc.prev);
+               do {
+                       ioat_chan->dmacount++;
+                       desc = to_ioat_desc(desc->node.next);
+               } while (&desc->node != ioat_chan->used_desc.next);
+       }
+
+       /*
+        * write the new starting descriptor address
+        * this puts channel engine into ARMED state
+        */
+       desc = to_ioat_desc(ioat_chan->used_desc.prev);
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+               writel(((u64) desc->async_tx.phys) >> 32,
+                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+
+               writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
+                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+               break;
+       case IOAT_VER_2_0:
+               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+               writel(((u64) desc->async_tx.phys) >> 32,
+                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+
+               /* tell the engine to go with what's left to be done */
+               writew(ioat_chan->dmacount,
+                      ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+
+               break;
+       }
+       dev_err(&ioat_chan->device->pdev->dev,
+               "chan%d reset - %d descs waiting, %d total desc\n",
+               chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
+
+       spin_unlock_bh(&ioat_chan->desc_lock);
+       spin_unlock_bh(&ioat_chan->cleanup_lock);
+}
+
+/**
+ * ioat_dma_reset_channel - restart a channel
+ * @ioat_chan: IOAT DMA channel handle
+ */
+static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
+{
+       u32 chansts, chanerr;
+
+       if (!ioat_chan->used_desc.prev)
+               return;
+
+       chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+       chansts = (ioat_chan->completion_virt->low
+                                       & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
+       if (chanerr) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
+                       chan_num(ioat_chan), chansts, chanerr);
+               writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+       }
+
+       /*
+        * whack it upside the head with a reset
+        * and wait for things to settle out.
+        * force the pending count to a really big negative
+        * to make sure no one forces an issue_pending
+        * while we're waiting.
+        */
+
+       spin_lock_bh(&ioat_chan->desc_lock);
+       ioat_chan->pending = INT_MIN;
+       writeb(IOAT_CHANCMD_RESET,
+              ioat_chan->reg_base
+              + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+       spin_unlock_bh(&ioat_chan->desc_lock);
+
+       /* schedule the 2nd half instead of sleeping a long time */
+       schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
+}
+
+/**
+ * ioat_dma_chan_watchdog - watch for stuck channels
+ */
+static void ioat_dma_chan_watchdog(struct work_struct *work)
+{
+       struct ioatdma_device *device =
+               container_of(work, struct ioatdma_device, work.work);
+       struct ioat_dma_chan *ioat_chan;
+       int i;
+
+       union {
+               u64 full;
+               struct {
+                       u32 low;
+                       u32 high;
+               };
+       } completion_hw;
+       unsigned long compl_desc_addr_hw;
+
+       for (i = 0; i < device->common.chancnt; i++) {
+               ioat_chan = ioat_lookup_chan_by_index(device, i);
+
+               if (ioat_chan->device->version == IOAT_VER_1_2
+                       /* have we started processing anything yet */
+                   && ioat_chan->last_completion
+                       /* have we completed any since last watchdog cycle? */
+                   && (ioat_chan->last_completion ==
+                               ioat_chan->watchdog_completion)
+                       /* has TCP stuck on one cookie since last watchdog? */
+                   && (ioat_chan->watchdog_tcp_cookie ==
+                               ioat_chan->watchdog_last_tcp_cookie)
+                   && (ioat_chan->watchdog_tcp_cookie !=
+                               ioat_chan->completed_cookie)
+                       /* is there something in the chain to be processed? */
+                       /* CB1 chain always has at least the last one processed */
+                   && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
+                   && ioat_chan->pending == 0) {
+
+                       /*
+                        * check CHANSTS register for completed
+                        * descriptor address.
+                        * if it is different than completion writeback,
+                        * it is not zero
+                        * and it has changed since the last watchdog
+                        *     we can assume that channel
+                        *     is still working correctly
+                        *     and the problem is in completion writeback.
+                        *     update completion writeback
+                        *     with actual CHANSTS value
+                        * else
+                        *     try resetting the channel
+                        */
+
+                       completion_hw.low = readl(ioat_chan->reg_base +
+                               IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
+                       completion_hw.high = readl(ioat_chan->reg_base +
+                               IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
+#if (BITS_PER_LONG == 64)
+                       compl_desc_addr_hw =
+                               completion_hw.full
+                               & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+#else
+                       compl_desc_addr_hw =
+                               completion_hw.low & IOAT_LOW_COMPLETION_MASK;
+#endif
+
+                       if ((compl_desc_addr_hw != 0)
+                          && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
+                          && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
+                               ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
+                               ioat_chan->completion_virt->low = completion_hw.low;
+                               ioat_chan->completion_virt->high = completion_hw.high;
+                       } else {
+                               ioat_dma_reset_channel(ioat_chan);
+                               ioat_chan->watchdog_completion = 0;
+                               ioat_chan->last_compl_desc_addr_hw = 0;
+                       }
+
+               /*
+                * for version 2.0 if there are descriptors yet to be processed
+                * and the last completed hasn't changed since the last watchdog
+                *      if they haven't hit the pending level
+                *          issue the pending to push them through
+                *      else
+                *          try resetting the channel
+                */
+               } else if (ioat_chan->device->version == IOAT_VER_2_0
+                   && ioat_chan->used_desc.prev
+                   && ioat_chan->last_completion
+                   && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
+
+                       if (ioat_chan->pending < ioat_pending_level)
+                               ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
+                       else {
+                               ioat_dma_reset_channel(ioat_chan);
+                               ioat_chan->watchdog_completion = 0;
+                       }
+               } else {
+                       ioat_chan->last_compl_desc_addr_hw = 0;
+                       ioat_chan->watchdog_completion
+                                       = ioat_chan->last_completion;
+               }
+
+               ioat_chan->watchdog_last_tcp_cookie =
+                       ioat_chan->watchdog_tcp_cookie;
+       }
+
+       schedule_delayed_work(&device->work, WATCHDOG_DELAY);
+}
+
+static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+       struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
+       struct ioat_desc_sw *prev, *new;
+       struct ioat_dma_descriptor *hw;
+       dma_cookie_t cookie;
+       LIST_HEAD(new_chain);
+       u32 copy;
+       size_t len;
+       dma_addr_t src, dst;
+       unsigned long orig_flags;
+       unsigned int desc_count = 0;
+
+       /* src and dest and len are stored in the initial descriptor */
+       len = first->len;
+       src = first->src;
+       dst = first->dst;
+       orig_flags = first->async_tx.flags;
+       new = first;
+
+       spin_lock_bh(&ioat_chan->desc_lock);
+       prev = to_ioat_desc(ioat_chan->used_desc.prev);
+       prefetch(prev->hw);
+       do {
+               copy = min_t(size_t, len, ioat_chan->xfercap);
+
+               async_tx_ack(&new->async_tx);
+
+               hw = new->hw;
+               hw->size = copy;
+               hw->ctl = 0;
+               hw->src_addr = src;
+               hw->dst_addr = dst;
+               hw->next = 0;
+
+               /* chain together the physical address list for the HW */
+               wmb();
+               prev->hw->next = (u64) new->async_tx.phys;
+
+               len -= copy;
+               dst += copy;
+               src += copy;
+
+               list_add_tail(&new->node, &new_chain);
+               desc_count++;
+               prev = new;
+       } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
+
+       if (!new) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "tx submit failed\n");
+               spin_unlock_bh(&ioat_chan->desc_lock);
+               return -ENOMEM;
+       }
+
+       hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+       if (first->async_tx.callback) {
+               hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
+               if (first != new) {
+                       /* move callback to last desc */
+                       new->async_tx.callback = first->async_tx.callback;
+                       new->async_tx.callback_param
+                                       = first->async_tx.callback_param;
+                       first->async_tx.callback = NULL;
+                       first->async_tx.callback_param = NULL;
+               }
+       }
+
+       new->tx_cnt = desc_count;
+       new->async_tx.flags = orig_flags; /* client is in control of this ack */
+
+       /* store the original values for use in later cleanup */
+       if (new != first) {
+               new->src = first->src;
+               new->dst = first->dst;
+               new->len = first->len;
+       }
+
+       /* cookie incr and addition to used_list must be atomic */
+       cookie = ioat_chan->common.cookie;
+       cookie++;
+       if (cookie < 0)
+               cookie = 1;
+       ioat_chan->common.cookie = new->async_tx.cookie = cookie;
+
+       /* write address into NextDescriptor field of last desc in chain */
+       to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
+                                                       first->async_tx.phys;
+       list_splice_tail(&new_chain, &ioat_chan->used_desc);
+
+       ioat_chan->dmacount += desc_count;
+       ioat_chan->pending += desc_count;
+       if (ioat_chan->pending >= ioat_pending_level)
+               __ioat1_dma_memcpy_issue_pending(ioat_chan);
+       spin_unlock_bh(&ioat_chan->desc_lock);
+
+       return cookie;
+}
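
The submit path above turns one logical copy into a chain of hardware descriptors, each at most xfercap bytes, advancing src/dst as it goes; the callback then migrates to the last descriptor so it fires when the whole copy completes. The chunking arithmetic on its own, with made-up sizes:

    #include <stdio.h>
    #include <stddef.h>

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    int main(void)
    {
        size_t len = 150 * 1024, xfercap = 64 * 1024;    /* hypothetical */
        size_t src = 0, dst = 0;    /* offsets standing in for DMA addresses */
        int desc_count = 0;

        while (len) {
            size_t copy = min_sz(len, xfercap);

            printf("desc %d: %zu bytes (src +%zu, dst +%zu)\n",
                   desc_count, copy, src, dst);
            len -= copy;
            src += copy;
            dst += copy;
            desc_count++;
        }
        printf("%d descriptors\n", desc_count);    /* 3: 64K + 64K + 22K */
        return 0;
    }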
+
+static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+       struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
+       struct ioat_desc_sw *new;
+       struct ioat_dma_descriptor *hw;
+       dma_cookie_t cookie;
+       u32 copy;
+       size_t len;
+       dma_addr_t src, dst;
+       unsigned long orig_flags;
+       unsigned int desc_count = 0;
+
+       /* src and dest and len are stored in the initial descriptor */
+       len = first->len;
+       src = first->src;
+       dst = first->dst;
+       orig_flags = first->async_tx.flags;
+       new = first;
+
+       /*
+        * ioat_chan->desc_lock is still in force in version 2 path
+        * it gets unlocked at end of this function
+        */
+       do {
+               copy = min_t(size_t, len, ioat_chan->xfercap);
+
+               async_tx_ack(&new->async_tx);
+
+               hw = new->hw;
+               hw->size = copy;
+               hw->ctl = 0;
+               hw->src_addr = src;
+               hw->dst_addr = dst;
+
+               len -= copy;
+               dst += copy;
+               src += copy;
+               desc_count++;
+       } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
+
+       if (!new) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "tx submit failed\n");
+               spin_unlock_bh(&ioat_chan->desc_lock);
+               return -ENOMEM;
+       }
+
+       hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+       if (first->async_tx.callback) {
+               hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
+               if (first != new) {
+                       /* move callback to last desc */
+                       new->async_tx.callback = first->async_tx.callback;
+                       new->async_tx.callback_param
+                                       = first->async_tx.callback_param;
+                       first->async_tx.callback = NULL;
+                       first->async_tx.callback_param = NULL;
+               }
+       }
+
+       new->tx_cnt = desc_count;
+       new->async_tx.flags = orig_flags; /* client is in control of this ack */
+
+       /* store the original values for use in later cleanup */
+       if (new != first) {
+               new->src = first->src;
+               new->dst = first->dst;
+               new->len = first->len;
+       }
+
+       /* cookie incr and addition to used_list must be atomic */
+       cookie = ioat_chan->common.cookie;
+       cookie++;
+       if (cookie < 0)
+               cookie = 1;
+       ioat_chan->common.cookie = new->async_tx.cookie = cookie;
+
+       ioat_chan->dmacount += desc_count;
+       ioat_chan->pending += desc_count;
+       if (ioat_chan->pending >= ioat_pending_level)
+               __ioat2_dma_memcpy_issue_pending(ioat_chan);
+       spin_unlock_bh(&ioat_chan->desc_lock);
+
+       return cookie;
+}
+
+/**
+ * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
+ * @ioat_chan: the channel supplying the memory pool for the descriptors
+ * @flags: allocation flags
+ */
+static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
+                                       struct ioat_dma_chan *ioat_chan,
+                                       gfp_t flags)
+{
+       struct ioat_dma_descriptor *desc;
+       struct ioat_desc_sw *desc_sw;
+       struct ioatdma_device *ioatdma_device;
+       dma_addr_t phys;
+
+       ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
+       desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
+       if (unlikely(!desc))
+               return NULL;
+
+       desc_sw = kzalloc(sizeof(*desc_sw), flags);
+       if (unlikely(!desc_sw)) {
+               pci_pool_free(ioatdma_device->dma_pool, desc, phys);
+               return NULL;
+       }
+
+       memset(desc, 0, sizeof(*desc));
+       dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               desc_sw->async_tx.tx_submit = ioat1_tx_submit;
+               break;
+       case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
+               desc_sw->async_tx.tx_submit = ioat2_tx_submit;
+               break;
+       }
+
+       desc_sw->hw = desc;
+       desc_sw->async_tx.phys = phys;
+
+       return desc_sw;
+}
+
+static int ioat_initial_desc_count = 256;
+module_param(ioat_initial_desc_count, int, 0644);
+MODULE_PARM_DESC(ioat_initial_desc_count,
+                "initial descriptors per channel (default: 256)");
+
+/**
+ * ioat2_dma_massage_chan_desc - link the descriptors into a circle
+ * @ioat_chan: the channel to be massaged
+ */
+static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
+{
+       struct ioat_desc_sw *desc, *_desc;
+
+       /* setup used_desc */
+       ioat_chan->used_desc.next = ioat_chan->free_desc.next;
+       ioat_chan->used_desc.prev = NULL;
+
+       /* pull free_desc out of the circle so that every node is a hw
+        * descriptor, but leave it pointing to the list
+        */
+       ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
+       ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
+
+       /* circle link the hw descriptors */
+       desc = to_ioat_desc(ioat_chan->free_desc.next);
+       desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+       list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
+               desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+       }
+}
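+
+/*
+ * resulting layout (illustrative sketch, not from the original source):
+ *
+ *     desc0 -> desc1 -> ... -> descN
+ *       ^                        |
+ *       +------------------------+
+ *
+ * each hw->next holds the bus address of the following descriptor, so
+ * the channel can walk the ring without further cpu intervention.
+ */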
+
+/**
+ * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
+ * @chan: the channel to be filled out
+ */
+static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+       struct ioat_desc_sw *desc;
+       u16 chanctrl;
+       u32 chanerr;
+       int i;
+       LIST_HEAD(tmp_list);
+
+       /* have we already been set up? */
+       if (!list_empty(&ioat_chan->free_desc))
+               return ioat_chan->desccount;
+
+       /* Set up the register to interrupt and write completion status on error */
+       chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
+               IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
+               IOAT_CHANCTRL_ERR_COMPLETION_EN;
+       writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+
+       chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+       if (chanerr) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "CHANERR = %x, clearing\n", chanerr);
+               writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+       }
+
+       /* Allocate descriptors */
+       for (i = 0; i < ioat_initial_desc_count; i++) {
+               desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
+               if (!desc) {
+                       dev_err(&ioat_chan->device->pdev->dev,
+                               "Only %d initial descriptors\n", i);
+                       break;
+               }
+               list_add_tail(&desc->node, &tmp_list);
+       }
+       spin_lock_bh(&ioat_chan->desc_lock);
+       ioat_chan->desccount = i;
+       list_splice(&tmp_list, &ioat_chan->free_desc);
+       if (ioat_chan->device->version != IOAT_VER_1_2)
+               ioat2_dma_massage_chan_desc(ioat_chan);
+       spin_unlock_bh(&ioat_chan->desc_lock);
+
+       /*
+        * allocate a completion writeback area; do two 32-bit mmio
+        * writes below since a single 64-bit write doesn't work
+        */
+       ioat_chan->completion_virt =
+               pci_pool_alloc(ioat_chan->device->completion_pool,
+                              GFP_KERNEL,
+                              &ioat_chan->completion_addr);
+       memset(ioat_chan->completion_virt, 0,
+              sizeof(*ioat_chan->completion_virt));
+       writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
+              ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+       writel(((u64) ioat_chan->completion_addr) >> 32,
+              ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+       tasklet_enable(&ioat_chan->cleanup_task);
+       ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
+       return ioat_chan->desccount;
+}
+
+/**
+ * ioat_dma_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+ */
+static void ioat_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+       struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
+       struct ioat_desc_sw *desc, *_desc;
+       int in_use_descs = 0;
+
+       /*
+        * before freeing channel resources, first check that they have
+        * actually been allocated for this channel
+        */
+       if (ioat_chan->desccount == 0)
+               return;
+
+       tasklet_disable(&ioat_chan->cleanup_task);
+       ioat_dma_memcpy_cleanup(ioat_chan);
+
+       /* Delay 100ms after reset to allow internal DMA logic to quiesce
+        * before removing DMA descriptor resources.
+        */
+       writeb(IOAT_CHANCMD_RESET,
+              ioat_chan->reg_base
+                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+       mdelay(100);
+
+       spin_lock_bh(&ioat_chan->desc_lock);
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               list_for_each_entry_safe(desc, _desc,
+                                        &ioat_chan->used_desc, node) {
+                       in_use_descs++;
+                       list_del(&desc->node);
+                       pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+                                     desc->async_tx.phys);
+                       kfree(desc);
+               }
+               list_for_each_entry_safe(desc, _desc,
+                                        &ioat_chan->free_desc, node) {
+                       list_del(&desc->node);
+                       pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+                                     desc->async_tx.phys);
+                       kfree(desc);
+               }
+               break;
+       case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
+               list_for_each_entry_safe(desc, _desc,
+                                        ioat_chan->free_desc.next, node) {
+                       list_del(&desc->node);
+                       pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+                                     desc->async_tx.phys);
+                       kfree(desc);
+               }
+               desc = to_ioat_desc(ioat_chan->free_desc.next);
+               pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+                             desc->async_tx.phys);
+               kfree(desc);
+               INIT_LIST_HEAD(&ioat_chan->free_desc);
+               INIT_LIST_HEAD(&ioat_chan->used_desc);
+               break;
+       }
+       spin_unlock_bh(&ioat_chan->desc_lock);
+
+       pci_pool_free(ioatdma_device->completion_pool,
+                     ioat_chan->completion_virt,
+                     ioat_chan->completion_addr);
+
+       /* one is ok since we left it there on purpose */
+       if (in_use_descs > 1)
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "Freeing %d in use descriptors!\n",
+                       in_use_descs - 1);
+
+       ioat_chan->last_completion = ioat_chan->completion_addr = 0;
+       ioat_chan->pending = 0;
+       ioat_chan->dmacount = 0;
+       ioat_chan->desccount = 0;
+       ioat_chan->watchdog_completion = 0;
+       ioat_chan->last_compl_desc_addr_hw = 0;
+       ioat_chan->watchdog_tcp_cookie =
+               ioat_chan->watchdog_last_tcp_cookie = 0;
+}
+
+/**
+ * ioat1_dma_get_next_descriptor - return the next available descriptor
+ * @ioat_chan: IOAT DMA channel handle
+ *
+ * Gets the next descriptor from the chain, and must be called with the
+ * channel's desc_lock held.  Allocates more descriptors if the channel
+ * has run out.
+ */
+static struct ioat_desc_sw *
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+{
+       struct ioat_desc_sw *new;
+
+       if (!list_empty(&ioat_chan->free_desc)) {
+               new = to_ioat_desc(ioat_chan->free_desc.next);
+               list_del(&new->node);
+       } else {
+               /* try to get another desc */
+               new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
+               if (!new) {
+                       dev_err(&ioat_chan->device->pdev->dev,
+                               "alloc failed\n");
+                       return NULL;
+               }
+       }
+
+       prefetch(new->hw);
+       return new;
+}
+
+static struct ioat_desc_sw *
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+{
+       struct ioat_desc_sw *new;
+
+       /*
+        * used.prev points to where to start processing
+        * used.next points to next free descriptor
+        * if used.prev == NULL, there are none waiting to be processed
+        * if used.next == used.prev.prev, there is only one free descriptor,
+        *      and we need to use it as a noop descriptor before
+        *      linking in a new set of descriptors, since the device
+        *      has probably already read the pointer to it
+        */
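+       /*
+        * one-free-slot case, sketched (illustrative, not from the
+        * original source):
+        *
+        *   ... -> [last queued] -> [only free slot] -> [oldest pending] -> ...
+        *
+        * the lone free slot becomes a hw noop so the engine always has
+        * a valid descriptor to fetch while new ones are spliced in.
+        */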
+       if (ioat_chan->used_desc.prev &&
+           ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
+
+               struct ioat_desc_sw *desc;
+               struct ioat_desc_sw *noop_desc;
+               int i;
+
+               /* set up the noop descriptor */
+               noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
+               /* set size to non-zero value (channel returns error when size is 0) */
+               noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
+               noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
+               noop_desc->hw->src_addr = 0;
+               noop_desc->hw->dst_addr = 0;
+
+               ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
+               ioat_chan->pending++;
+               ioat_chan->dmacount++;
+
+               /* try to get a few more descriptors */
+               for (i = 16; i; i--) {
+                       desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
+                       if (!desc) {
+                               dev_err(&ioat_chan->device->pdev->dev,
+                                       "alloc failed\n");
+                               break;
+                       }
+                       list_add_tail(&desc->node, ioat_chan->used_desc.next);
+
+                       desc->hw->next
+                               = to_ioat_desc(desc->node.next)->async_tx.phys;
+                       to_ioat_desc(desc->node.prev)->hw->next
+                               = desc->async_tx.phys;
+                       ioat_chan->desccount++;
+               }
+
+               ioat_chan->used_desc.next = noop_desc->node.next;
+       }
+       new = to_ioat_desc(ioat_chan->used_desc.next);
+       prefetch(new);
+       ioat_chan->used_desc.next = new->node.next;
+
+       if (ioat_chan->used_desc.prev == NULL)
+               ioat_chan->used_desc.prev = &new->node;
+
+       prefetch(new->hw);
+       return new;
+}
+
+static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
+                                               struct ioat_dma_chan *ioat_chan)
+{
+       if (!ioat_chan)
+               return NULL;
+
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               return ioat1_dma_get_next_descriptor(ioat_chan);
+       case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
+               return ioat2_dma_get_next_descriptor(ioat_chan);
+       }
+       return NULL;
+}
+
+static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
+                                               struct dma_chan *chan,
+                                               dma_addr_t dma_dest,
+                                               dma_addr_t dma_src,
+                                               size_t len,
+                                               unsigned long flags)
+{
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+       struct ioat_desc_sw *new;
+
+       spin_lock_bh(&ioat_chan->desc_lock);
+       new = ioat_dma_get_next_descriptor(ioat_chan);
+       spin_unlock_bh(&ioat_chan->desc_lock);
+
+       if (new) {
+               new->len = len;
+               new->dst = dma_dest;
+               new->src = dma_src;
+               new->async_tx.flags = flags;
+               return &new->async_tx;
+       } else {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
+                       chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
+               return NULL;
+       }
+}
+
+static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
+                                               struct dma_chan *chan,
+                                               dma_addr_t dma_dest,
+                                               dma_addr_t dma_src,
+                                               size_t len,
+                                               unsigned long flags)
+{
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+       struct ioat_desc_sw *new;
+
+       spin_lock_bh(&ioat_chan->desc_lock);
+       new = ioat2_dma_get_next_descriptor(ioat_chan);
+
+       /*
+        * leave ioat_chan->desc_lock set in ioat 2 path
+        * it will get unlocked at end of tx_submit
+        */
+
+       if (new) {
+               new->len = len;
+               new->dst = dma_dest;
+               new->src = dma_src;
+               new->async_tx.flags = flags;
+               return &new->async_tx;
+       } else {
+               spin_unlock_bh(&ioat_chan->desc_lock);
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
+                       chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
+               return NULL;
+       }
+}
+
+static void ioat_dma_cleanup_tasklet(unsigned long data)
+{
+       struct ioat_dma_chan *chan = (void *)data;
+       ioat_dma_memcpy_cleanup(chan);
+       writew(IOAT_CHANCTRL_INT_DISABLE,
+              chan->reg_base + IOAT_CHANCTRL_OFFSET);
+}
+
+static void
+ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
+{
+       if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+               if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+                       pci_unmap_single(ioat_chan->device->pdev,
+                                        pci_unmap_addr(desc, dst),
+                                        pci_unmap_len(desc, len),
+                                        PCI_DMA_FROMDEVICE);
+               else
+                       pci_unmap_page(ioat_chan->device->pdev,
+                                      pci_unmap_addr(desc, dst),
+                                      pci_unmap_len(desc, len),
+                                      PCI_DMA_FROMDEVICE);
+       }
+
+       if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+               if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+                       pci_unmap_single(ioat_chan->device->pdev,
+                                        pci_unmap_addr(desc, src),
+                                        pci_unmap_len(desc, len),
+                                        PCI_DMA_TODEVICE);
+               else
+                       pci_unmap_page(ioat_chan->device->pdev,
+                                      pci_unmap_addr(desc, src),
+                                      pci_unmap_len(desc, len),
+                                      PCI_DMA_TODEVICE);
+       }
+}
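+
+/*
+ * note: the DMA_COMPL_*_UNMAP_SINGLE flags checked above record whether
+ * the client mapped the buffer with dma_map_single() or dma_map_page(),
+ * so the matching unmap variant is used at completion time.
+ */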
+
+/**
+ * ioat_dma_memcpy_cleanup - clean up finished descriptors
+ * @chan: ioat channel to be cleaned up
+ */
+static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
+{
+       unsigned long phys_complete;
+       struct ioat_desc_sw *desc, *_desc;
+       dma_cookie_t cookie = 0;
+       unsigned long desc_phys;
+       struct ioat_desc_sw *latest_desc;
+
+       prefetch(ioat_chan->completion_virt);
+
+       if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
+               return;
+
+       /*
+        * The completion writeback can happen at any time, so reads by
+        * the driver need to be atomic operations.  The descriptor
+        * physical addresses are limited to 32 bits when the CPU can
+        * only do a 32-bit mov.
+        */
+
+#if (BITS_PER_LONG == 64)
+       phys_complete =
+               ioat_chan->completion_virt->full
+               & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+#else
+       phys_complete =
+               ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
+#endif
+
+       if ((ioat_chan->completion_virt->full
+               & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
+                               IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "Channel halted, chanerr = %x\n",
+                       readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
+
+               /* TODO do something to salvage the situation */
+       }
+
+       if (phys_complete == ioat_chan->last_completion) {
+               spin_unlock_bh(&ioat_chan->cleanup_lock);
+               /*
+                * perhaps we're stuck so hard that the watchdog can't go off?
+                * try to catch it after 2 seconds
+                */
+               if (ioat_chan->device->version != IOAT_VER_3_0) {
+                       if (time_after(jiffies,
+                                      ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
+                               ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
+                               ioat_chan->last_completion_time = jiffies;
+                       }
+               }
+               return;
+       }
+       ioat_chan->last_completion_time = jiffies;
+
+       cookie = 0;
+       if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
+               spin_unlock_bh(&ioat_chan->cleanup_lock);
+               return;
+       }
+
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               list_for_each_entry_safe(desc, _desc,
+                                        &ioat_chan->used_desc, node) {
+
+                       /*
+                        * Incoming DMA requests may use multiple
+                        * descriptors when they exceed xfercap.  If so,
+                        * only the last one carries a cookie and
+                        * requires unmapping.
+                        */
+                       if (desc->async_tx.cookie) {
+                               cookie = desc->async_tx.cookie;
+                               ioat_dma_unmap(ioat_chan, desc);
+                               if (desc->async_tx.callback) {
+                                       desc->async_tx.callback(desc->async_tx.callback_param);
+                                       desc->async_tx.callback = NULL;
+                               }
+                       }
+
+                       if (desc->async_tx.phys != phys_complete) {
+                               /*
+                                * a completed entry, but not the last, so clean
+                                * up if the client is done with the descriptor
+                                */
+                               if (async_tx_test_ack(&desc->async_tx)) {
+                                       list_move_tail(&desc->node,
+                                                      &ioat_chan->free_desc);
+                               } else
+                                       desc->async_tx.cookie = 0;
+                       } else {
+                               /*
+                                * last used desc. Do not remove, so we can
+                                * append from it, but don't look at it next
+                                * time, either
+                                */
+                               desc->async_tx.cookie = 0;
+
+                               /* TODO check status bits? */
+                               break;
+                       }
+               }
+               break;
+       case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
+               /* has some other thread already cleaned up? */
+               if (ioat_chan->used_desc.prev == NULL)
+                       break;
+
+               /* work backwards to find latest finished desc */
+               desc = to_ioat_desc(ioat_chan->used_desc.next);
+               latest_desc = NULL;
+               do {
+                       desc = to_ioat_desc(desc->node.prev);
+                       desc_phys = (unsigned long)desc->async_tx.phys
+                                      & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+                       if (desc_phys == phys_complete) {
+                               latest_desc = desc;
+                               break;
+                       }
+               } while (&desc->node != ioat_chan->used_desc.prev);
+
+               if (latest_desc != NULL) {
+
+                       /* work forwards to clear finished descriptors */
+                       for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
+                            &desc->node != latest_desc->node.next &&
+                            &desc->node != ioat_chan->used_desc.next;
+                            desc = to_ioat_desc(desc->node.next)) {
+                               if (desc->async_tx.cookie) {
+                                       cookie = desc->async_tx.cookie;
+                                       desc->async_tx.cookie = 0;
+                                       ioat_dma_unmap(ioat_chan, desc);
+                                       if (desc->async_tx.callback) {
+                                               desc->async_tx.callback(desc->async_tx.callback_param);
+                                               desc->async_tx.callback = NULL;
+                                       }
+                               }
+                       }
+
+                       /* move used.prev up beyond those that are finished */
+                       if (&desc->node == ioat_chan->used_desc.next)
+                               ioat_chan->used_desc.prev = NULL;
+                       else
+                               ioat_chan->used_desc.prev = &desc->node;
+               }
+               break;
+       }
+
+       spin_unlock_bh(&ioat_chan->desc_lock);
+
+       ioat_chan->last_completion = phys_complete;
+       if (cookie != 0)
+               ioat_chan->completed_cookie = cookie;
+
+       spin_unlock_bh(&ioat_chan->cleanup_lock);
+}
+
+/**
+ * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
+ * @chan: IOAT DMA channel handle
+ * @cookie: DMA transaction identifier
+ * @done: if not %NULL, updated with last completed transaction
+ * @used: if not %NULL, updated with last used transaction
+ */
+static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
+                                           dma_cookie_t cookie,
+                                           dma_cookie_t *done,
+                                           dma_cookie_t *used)
+{
+       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+       dma_cookie_t last_used;
+       dma_cookie_t last_complete;
+       enum dma_status ret;
+
+       last_used = chan->cookie;
+       last_complete = ioat_chan->completed_cookie;
+       ioat_chan->watchdog_tcp_cookie = cookie;
+
+       if (done)
+               *done = last_complete;
+       if (used)
+               *used = last_used;
+
+       ret = dma_async_is_complete(cookie, last_complete, last_used);
+       if (ret == DMA_SUCCESS)
+               return ret;
+
+       ioat_dma_memcpy_cleanup(ioat_chan);
+
+       last_used = chan->cookie;
+       last_complete = ioat_chan->completed_cookie;
+
+       if (done)
+               *done = last_complete;
+       if (used)
+               *used = last_used;
+
+       return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
+{
+       struct ioat_desc_sw *desc;
+
+       spin_lock_bh(&ioat_chan->desc_lock);
+
+       desc = ioat_dma_get_next_descriptor(ioat_chan);
+
+       if (!desc) {
+               dev_err(&ioat_chan->device->pdev->dev,
+                       "Unable to start null desc - get next desc failed\n");
+               spin_unlock_bh(&ioat_chan->desc_lock);
+               return;
+       }
+
+       desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
+                               | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
+                               | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+       /* set size to non-zero value (channel returns error when size is 0) */
+       desc->hw->size = NULL_DESC_BUFFER_SIZE;
+       desc->hw->src_addr = 0;
+       desc->hw->dst_addr = 0;
+       async_tx_ack(&desc->async_tx);
+       switch (ioat_chan->device->version) {
+       case IOAT_VER_1_2:
+               desc->hw->next = 0;
+               list_add_tail(&desc->node, &ioat_chan->used_desc);
+
+               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+               writel(((u64) desc->async_tx.phys) >> 32,
+                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+
+               writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
+                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+               break;
+       case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
+               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+               writel(((u64) desc->async_tx.phys) >> 32,
+                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+
+               ioat_chan->dmacount++;
+               __ioat2_dma_memcpy_issue_pending(ioat_chan);
+               break;
+       }
+       spin_unlock_bh(&ioat_chan->desc_lock);
+}
+
+/*
+ * Perform an IOAT transaction to verify the HW works.
+ */
+#define IOAT_TEST_SIZE 2000
+
+static void ioat_dma_test_callback(void *dma_async_param)
+{
+       struct completion *cmp = dma_async_param;
+
+       complete(cmp);
+}
+
+/**
+ * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
+ * @device: device to be tested
+ */
+static int ioat_dma_self_test(struct ioatdma_device *device)
+{
+       int i;
+       u8 *src;
+       u8 *dest;
+       struct dma_chan *dma_chan;
+       struct dma_async_tx_descriptor *tx;
+       dma_addr_t dma_dest, dma_src;
+       dma_cookie_t cookie;
+       int err = 0;
+       struct completion cmp;
+       unsigned long tmo;
+       unsigned long flags;
+
+       src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+       if (!src)
+               return -ENOMEM;
+       dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+       if (!dest) {
+               kfree(src);
+               return -ENOMEM;
+       }
+
+       /* Fill in src buffer */
+       for (i = 0; i < IOAT_TEST_SIZE; i++)
+               src[i] = (u8)i;
+
+       /* Start copy, using first DMA channel */
+       dma_chan = container_of(device->common.channels.next,
+                               struct dma_chan,
+                               device_node);
+       if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
+               dev_err(&device->pdev->dev,
+                       "selftest cannot allocate chan resource\n");
+               err = -ENODEV;
+               goto out;
+       }
+
+       dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
+                                DMA_TO_DEVICE);
+       dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
+                                 DMA_FROM_DEVICE);
+       flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE;
+       tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+                                                  IOAT_TEST_SIZE, flags);
+       if (!tx) {
+               dev_err(&device->pdev->dev,
+                       "Self-test prep failed, disabling\n");
+               err = -ENODEV;
+               goto free_resources;
+       }
+
+       async_tx_ack(tx);
+       init_completion(&cmp);
+       tx->callback = ioat_dma_test_callback;
+       tx->callback_param = &cmp;
+       cookie = tx->tx_submit(tx);
+       if (cookie < 0) {
+               dev_err(&device->pdev->dev,
+                       "Self-test setup failed, disabling\n");
+               err = -ENODEV;
+               goto free_resources;
+       }
+       device->common.device_issue_pending(dma_chan);
+
+       tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+       if (tmo == 0 ||
+           device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+                                       != DMA_SUCCESS) {
+               dev_err(&device->pdev->dev,
+                       "Self-test copy timed out, disabling\n");
+               err = -ENODEV;
+               goto free_resources;
+       }
+       if (memcmp(src, dest, IOAT_TEST_SIZE)) {
+               dev_err(&device->pdev->dev,
+                       "Self-test copy failed compare, disabling\n");
+               err = -ENODEV;
+               goto free_resources;
+       }
+
+free_resources:
+       device->common.device_free_chan_resources(dma_chan);
+out:
+       kfree(src);
+       kfree(dest);
+       return err;
+}
+
+static char ioat_interrupt_style[32] = "msix";
+module_param_string(ioat_interrupt_style, ioat_interrupt_style,
+                   sizeof(ioat_interrupt_style), 0644);
+MODULE_PARM_DESC(ioat_interrupt_style,
+                "set ioat interrupt style: msix (default), "
+                "msix-single-vector, msi, intx)");
+
+/**
+ * ioat_dma_setup_interrupts - setup interrupt handler
+ * @device: ioat device
+ *
+ * Tries msix first, then falls back through msix-single-vector, msi,
+ * and finally legacy intx when a given style fails to initialize.
+ */
+static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+{
+       struct ioat_dma_chan *ioat_chan;
+       int err, i, j, msixcnt;
+       u8 intrctrl = 0;
+
+       if (!strcmp(ioat_interrupt_style, "msix"))
+               goto msix;
+       if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
+               goto msix_single_vector;
+       if (!strcmp(ioat_interrupt_style, "msi"))
+               goto msi;
+       if (!strcmp(ioat_interrupt_style, "intx"))
+               goto intx;
+       dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
+               ioat_interrupt_style);
+       goto err_no_irq;
+
+msix:
+       /* The number of MSI-X vectors should equal the number of channels */
+       msixcnt = device->common.chancnt;
+       for (i = 0; i < msixcnt; i++)
+               device->msix_entries[i].entry = i;
+
+       err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
+       if (err < 0)
+               goto msi;
+       if (err > 0)
+               goto msix_single_vector;
+
+       for (i = 0; i < msixcnt; i++) {
+               ioat_chan = ioat_lookup_chan_by_index(device, i);
+               err = request_irq(device->msix_entries[i].vector,
+                                 ioat_dma_do_interrupt_msix,
+                                 0, "ioat-msix", ioat_chan);
+               if (err) {
+                       for (j = 0; j < i; j++) {
+                               ioat_chan =
+                                       ioat_lookup_chan_by_index(device, j);
+                               free_irq(device->msix_entries[j].vector,
+                                        ioat_chan);
+                       }
+                       goto msix_single_vector;
+               }
+       }
+       intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+       device->irq_mode = msix_multi_vector;
+       goto done;
+
+msix_single_vector:
+       device->msix_entries[0].entry = 0;
+       err = pci_enable_msix(device->pdev, device->msix_entries, 1);
+       if (err)
+               goto msi;
+
+       err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
+                         0, "ioat-msix", device);
+       if (err) {
+               pci_disable_msix(device->pdev);
+               goto msi;
+       }
+       device->irq_mode = msix_single_vector;
+       goto done;
+
+msi:
+       err = pci_enable_msi(device->pdev);
+       if (err)
+               goto intx;
+
+       err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
+                         0, "ioat-msi", device);
+       if (err) {
+               pci_disable_msi(device->pdev);
+               goto intx;
+       }
+       /*
+        * CB 1.2 devices need a bit set in configuration space to enable MSI
+        */
+       if (device->version == IOAT_VER_1_2) {
+               u32 dmactrl;
+               pci_read_config_dword(device->pdev,
+                                     IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
+               dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
+               pci_write_config_dword(device->pdev,
+                                      IOAT_PCI_DMACTRL_OFFSET, dmactrl);
+       }
+       device->irq_mode = msi;
+       goto done;
+
+intx:
+       err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
+                         IRQF_SHARED, "ioat-intx", device);
+       if (err)
+               goto err_no_irq;
+       device->irq_mode = intx;
+
+done:
+       intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
+       writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
+       return 0;
+
+err_no_irq:
+       /* Disable all interrupt generation */
+       writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+       dev_err(&device->pdev->dev, "no usable interrupts\n");
+       device->irq_mode = none;
+       return -1;
+}
+
+/**
+ * ioat_dma_remove_interrupts - remove whatever interrupts were set
+ * @device: ioat device
+ */
+static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
+{
+       struct ioat_dma_chan *ioat_chan;
+       int i;
+
+       /* Disable all interrupt generation */
+       writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+
+       switch (device->irq_mode) {
+       case msix_multi_vector:
+               for (i = 0; i < device->common.chancnt; i++) {
+                       ioat_chan = ioat_lookup_chan_by_index(device, i);
+                       free_irq(device->msix_entries[i].vector, ioat_chan);
+               }
+               pci_disable_msix(device->pdev);
+               break;
+       case msix_single_vector:
+               free_irq(device->msix_entries[0].vector, device);
+               pci_disable_msix(device->pdev);
+               break;
+       case msi:
+               free_irq(device->pdev->irq, device);
+               pci_disable_msi(device->pdev);
+               break;
+       case intx:
+               free_irq(device->pdev->irq, device);
+               break;
+       case none:
+               dev_warn(&device->pdev->dev,
+                        "call to %s without interrupts set up\n", __func__);
+       }
+       device->irq_mode = none;
+}
+
+struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
+                                     void __iomem *iobase)
+{
+       int err;
+       struct ioatdma_device *device;
+
+       device = kzalloc(sizeof(*device), GFP_KERNEL);
+       if (!device) {
+               err = -ENOMEM;
+               goto err_kzalloc;
+       }
+       device->pdev = pdev;
+       device->reg_base = iobase;
+       device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+
+       /* DMA coherent memory pool for DMA descriptor allocations */
+       device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
+                                          sizeof(struct ioat_dma_descriptor),
+                                          64, 0);
+       if (!device->dma_pool) {
+               err = -ENOMEM;
+               goto err_dma_pool;
+       }
+
+       device->completion_pool = pci_pool_create("completion_pool", pdev,
+                                                 sizeof(u64), SMP_CACHE_BYTES,
+                                                 SMP_CACHE_BYTES);
+       if (!device->completion_pool) {
+               err = -ENOMEM;
+               goto err_completion_pool;
+       }
+
+       INIT_LIST_HEAD(&device->common.channels);
+       ioat_dma_enumerate_channels(device);
+
+       device->common.device_alloc_chan_resources =
+                                               ioat_dma_alloc_chan_resources;
+       device->common.device_free_chan_resources =
+                                               ioat_dma_free_chan_resources;
+       device->common.dev = &pdev->dev;
+
+       dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
+       device->common.device_is_tx_complete = ioat_dma_is_complete;
+       switch (device->version) {
+       case IOAT_VER_1_2:
+               device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
+               device->common.device_issue_pending =
+                                               ioat1_dma_memcpy_issue_pending;
+               break;
+       case IOAT_VER_2_0:
+       case IOAT_VER_3_0:
+               device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
+               device->common.device_issue_pending =
+                                               ioat2_dma_memcpy_issue_pending;
+               break;
+       }
+
+       dev_info(&device->pdev->dev,
+               "Intel(R) I/OAT DMA Engine found,"
+               " %d channels, device version 0x%02x, driver version %s\n",
+               device->common.chancnt, device->version, IOAT_DMA_VERSION);
+
+       if (!device->common.chancnt) {
+               dev_err(&device->pdev->dev,
+                       "Intel(R) I/OAT DMA Engine problem found: "
+                       "zero channels detected\n");
+               goto err_setup_interrupts;
+       }
+
+       err = ioat_dma_setup_interrupts(device);
+       if (err)
+               goto err_setup_interrupts;
+
+       err = ioat_dma_self_test(device);
+       if (err)
+               goto err_self_test;
+
+       ioat_set_tcp_copy_break(device);
+
+       dma_async_device_register(&device->common);
+
+       if (device->version != IOAT_VER_3_0) {
+               INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
+               schedule_delayed_work(&device->work,
+                                     WATCHDOG_DELAY);
+       }
+
+       return device;
+
+err_self_test:
+       ioat_dma_remove_interrupts(device);
+err_setup_interrupts:
+       pci_pool_destroy(device->completion_pool);
+err_completion_pool:
+       pci_pool_destroy(device->dma_pool);
+err_dma_pool:
+       kfree(device);
+err_kzalloc:
+       dev_err(&pdev->dev,
+               "Intel(R) I/OAT DMA Engine initialization failed\n");
+       return NULL;
+}
+
+void ioat_dma_remove(struct ioatdma_device *device)
+{
+       struct dma_chan *chan, *_chan;
+       struct ioat_dma_chan *ioat_chan;
+
+       if (device->version != IOAT_VER_3_0)
+               cancel_delayed_work(&device->work);
+
+       ioat_dma_remove_interrupts(device);
+
+       dma_async_device_unregister(&device->common);
+
+       pci_pool_destroy(device->dma_pool);
+       pci_pool_destroy(device->completion_pool);
+
+       iounmap(device->reg_base);
+       pci_release_regions(device->pdev);
+       pci_disable_device(device->pdev);
+
+       list_for_each_entry_safe(chan, _chan,
+                                &device->common.channels, device_node) {
+               ioat_chan = to_ioat_chan(chan);
+               list_del(&chan->device_node);
+               kfree(ioat_chan);
+       }
+       kfree(device);
+}
+
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
new file mode 100644 (file)
index 0000000..e80e787
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef IOATDMA_H
+#define IOATDMA_H
+
+#include <linux/dmaengine.h>
+#include "hw.h"
+#include <linux/init.h>
+#include <linux/dmapool.h>
+#include <linux/cache.h>
+#include <linux/pci_ids.h>
+#include <net/tcp.h>
+
+#define IOAT_DMA_VERSION  "3.64"
+
+enum ioat_interrupt {
+       none = 0,
+       msix_multi_vector = 1,
+       msix_single_vector = 2,
+       msi = 3,
+       intx = 4,
+};
+
+#define IOAT_LOW_COMPLETION_MASK       0xffffffc0
+#define IOAT_DMA_DCA_ANY_CPU           ~0
+#define IOAT_WATCHDOG_PERIOD           (2 * HZ)
+
+/**
+ * struct ioatdma_device - internal representation of an IOAT device
+ * @pdev: PCI-Express device
+ * @reg_base: MMIO register space base address
+ * @dma_pool: for allocating DMA descriptors
+ * @completion_pool: for allocating completion writeback areas
+ * @common: embedded struct dma_device
+ * @version: version of ioatdma device
+ * @irq_mode: which style irq to use
+ * @work: delayed work for the channel watchdog
+ * @msix_entries: irq handlers
+ * @idx: per channel data
+ */
+struct ioatdma_device {
+       struct pci_dev *pdev;
+       void __iomem *reg_base;
+       struct pci_pool *dma_pool;
+       struct pci_pool *completion_pool;
+       struct dma_device common;
+       u8 version;
+       enum ioat_interrupt irq_mode;
+       struct delayed_work work;
+       struct msix_entry msix_entries[4];
+       struct ioat_dma_chan *idx[4];
+};
+
+/**
+ * struct ioat_dma_chan - internal representation of a DMA channel
+ */
+struct ioat_dma_chan {
+       void __iomem *reg_base;
+
+       dma_cookie_t completed_cookie;
+       unsigned long last_completion;
+       unsigned long last_completion_time;
+
+       size_t xfercap; /* XFERCAP register value expanded out */
+
+       spinlock_t cleanup_lock;
+       spinlock_t desc_lock;
+       struct list_head free_desc;
+       struct list_head used_desc;
+       unsigned long watchdog_completion;
+       int watchdog_tcp_cookie;
+       u32 watchdog_last_tcp_cookie;
+       struct delayed_work work;
+
+       int pending;
+       int dmacount;
+       int desccount;
+
+       struct ioatdma_device *device;
+       struct dma_chan common;
+
+       dma_addr_t completion_addr;
+       union {
+               u64 full; /* HW completion writeback */
+               struct {
+                       u32 low;
+                       u32 high;
+               };
+       } *completion_virt;
+       unsigned long last_compl_desc_addr_hw;
+       struct tasklet_struct cleanup_task;
+};
+
+/* wrapper around hardware descriptor format + additional software fields */
+
+/**
+ * struct ioat_desc_sw - wrapper around hardware descriptor
+ * @hw: hardware DMA descriptor
+ * @node: this descriptor will either be on the free list,
+ *     or attached to a transaction list (async_tx.tx_list)
+ * @tx_cnt: number of descriptors required to complete the transaction
+ * @len: total transaction length, kept for later cleanup/unmap
+ * @src: source dma address, kept for later cleanup/unmap
+ * @dst: destination dma address, kept for later cleanup/unmap
+ * @async_tx: the generic software descriptor for all engines
+ */
+struct ioat_desc_sw {
+       struct ioat_dma_descriptor *hw;
+       struct list_head node;
+       int tx_cnt;
+       size_t len;
+       dma_addr_t src;
+       dma_addr_t dst;
+       struct dma_async_tx_descriptor async_tx;
+};
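+
+/*
+ * conversions from the embedded generic descriptor back to the sw
+ * descriptor are container_of() based, e.g. (sketch, assuming the
+ * helper defined alongside the channel code):
+ *
+ *   #define tx_to_ioat_desc(tx) \
+ *           container_of(tx, struct ioat_desc_sw, async_tx)
+ */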
+
+static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
+{
+       #ifdef CONFIG_NET_DMA
+       switch (dev->version) {
+       case IOAT_VER_1_2:
+               sysctl_tcp_dma_copybreak = 4096;
+               break;
+       case IOAT_VER_2_0:
+               sysctl_tcp_dma_copybreak = 2048;
+               break;
+       case IOAT_VER_3_0:
+               sysctl_tcp_dma_copybreak = 262144;
+               break;
+       }
+       #endif
+}
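+
+/*
+ * sysctl_tcp_dma_copybreak is the socket-read size below which the
+ * network stack copies with the cpu rather than offloading to the dma
+ * engine; the per-version values above set where offload is expected
+ * to win for each hardware generation.
+ */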
+
+#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
+struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
+                                     void __iomem *iobase);
+void ioat_dma_remove(struct ioatdma_device *device);
+struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+#else
+#define ioat_dma_probe(pdev, iobase)    NULL
+#define ioat_dma_remove(device)         do { } while (0)
+#define ioat_dca_init(pdev, iobase)    NULL
+#define ioat2_dca_init(pdev, iobase)   NULL
+#define ioat3_dca_init(pdev, iobase)   NULL
+#endif
+
+#endif /* IOATDMA_H */
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
new file mode 100644 (file)
index 0000000..afa57ee
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef _IOAT_HW_H_
+#define _IOAT_HW_H_
+
+/* PCI Configuration Space Values */
+#define IOAT_PCI_VID            0x8086
+
+/* CB device IDs */
+#define IOAT_PCI_DID_5000       0x1A38
+#define IOAT_PCI_DID_CNB        0x360B
+#define IOAT_PCI_DID_SCNB       0x65FF
+#define IOAT_PCI_DID_SNB        0x402F
+
+#define IOAT_PCI_RID            0x00
+#define IOAT_PCI_SVID           0x8086
+#define IOAT_PCI_SID            0x8086
+#define IOAT_VER_1_2            0x12    /* Version 1.2 */
+#define IOAT_VER_2_0            0x20    /* Version 2.0 */
+#define IOAT_VER_3_0            0x30    /* Version 3.0 */
+
+struct ioat_dma_descriptor {
+       uint32_t        size;
+       uint32_t        ctl;
+       uint64_t        src_addr;
+       uint64_t        dst_addr;
+       uint64_t        next;
+       uint64_t        rsv1;
+       uint64_t        rsv2;
+       uint64_t        user1;
+       uint64_t        user2;
+};
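+
+/*
+ * the hardware descriptor is 64 bytes (2 * 4 + 7 * 8); ioat_dma_probe()
+ * allocates these from a pci_pool with 64-byte alignment, keeping each
+ * descriptor within a single 64-byte cacheline.
+ */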
+
+#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN 0x00000001
+#define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN 0x00000002
+#define IOAT_DMA_DESCRIPTOR_CTL_DST_SN 0x00000004
+#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008
+#define IOAT_DMA_DESCRIPTOR_CTL_FRAME  0x00000010
+#define IOAT_DMA_DESCRIPTOR_NUL                0x00000020
+#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040
+#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080
+#define IOAT_DMA_DESCRIPTOR_CTL_BNDL   0x00000100
+#define IOAT_DMA_DESCRIPTOR_CTL_DCA    0x00000200
+#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT        0x00000400
+
+#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000
+#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA     0x00000000
+
+#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA    0x00000001
+#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK    0xFF000000
+
+#endif
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
new file mode 100644 (file)
index 0000000..d7948bf
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2007 - 2009 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/*
+ * This driver supports an Intel I/OAT DMA engine, which does asynchronous
+ * copy operations.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dca.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+MODULE_VERSION(IOAT_DMA_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static struct pci_device_id ioat_pci_tbl[] = {
+       /* I/OAT v1 platforms */
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB)  },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
+       { PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
+
+       /* I/OAT v2 platforms */
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
+
+       /* I/OAT v3 platforms */
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
+       { 0, }
+};
+
+struct ioat_device {
+       struct pci_dev          *pdev;
+       void __iomem            *iobase;
+       struct ioatdma_device   *dma;
+       struct dca_provider     *dca;
+};
+
+static int __devinit ioat_probe(struct pci_dev *pdev,
+                               const struct pci_device_id *id);
+static void __devexit ioat_remove(struct pci_dev *pdev);
+
+static int ioat_dca_enabled = 1;
+module_param(ioat_dca_enabled, int, 0644);
+MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
+
+static struct pci_driver ioat_pci_driver = {
+       .name           = "ioatdma",
+       .id_table       = ioat_pci_tbl,
+       .probe          = ioat_probe,
+       .remove         = __devexit_p(ioat_remove),
+};
+
+static int __devinit ioat_probe(struct pci_dev *pdev,
+                               const struct pci_device_id *id)
+{
+       void __iomem *iobase;
+       struct ioat_device *device;
+       unsigned long mmio_start, mmio_len;
+       int err;
+
+       err = pci_enable_device(pdev);
+       if (err)
+               goto err_enable_device;
+
+       err = pci_request_regions(pdev, ioat_pci_driver.name);
+       if (err)
+               goto err_request_regions;
+
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (err)
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (err)
+               goto err_set_dma_mask;
+
+       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (err)
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (err)
+               goto err_set_dma_mask;
+
+       mmio_start = pci_resource_start(pdev, 0);
+       mmio_len = pci_resource_len(pdev, 0);
+       iobase = ioremap(mmio_start, mmio_len);
+       if (!iobase) {
+               err = -ENOMEM;
+               goto err_ioremap;
+       }
+
+       device = kzalloc(sizeof(*device), GFP_KERNEL);
+       if (!device) {
+               err = -ENOMEM;
+               goto err_kzalloc;
+       }
+       device->pdev = pdev;
+       pci_set_drvdata(pdev, device);
+       device->iobase = iobase;
+
+       pci_set_master(pdev);
+
+       switch (readb(iobase + IOAT_VER_OFFSET)) {
+       case IOAT_VER_1_2:
+               device->dma = ioat_dma_probe(pdev, iobase);
+               if (device->dma && ioat_dca_enabled)
+                       device->dca = ioat_dca_init(pdev, iobase);
+               break;
+       case IOAT_VER_2_0:
+               device->dma = ioat_dma_probe(pdev, iobase);
+               if (device->dma && ioat_dca_enabled)
+                       device->dca = ioat2_dca_init(pdev, iobase);
+               break;
+       case IOAT_VER_3_0:
+               device->dma = ioat_dma_probe(pdev, iobase);
+               if (device->dma && ioat_dca_enabled)
+                       device->dca = ioat3_dca_init(pdev, iobase);
+               break;
+       default:
+               err = -ENODEV;
+               break;
+       }
+       if (!device->dma)
+               err = -ENODEV;
+
+       if (err)
+               goto err_version;
+
+       return 0;
+
+err_version:
+       kfree(device);
+err_kzalloc:
+       iounmap(iobase);
+err_ioremap:
+err_set_dma_mask:
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+err_request_regions:
+err_enable_device:
+       return err;
+}
+
+static void __devexit ioat_remove(struct pci_dev *pdev)
+{
+       struct ioat_device *device = pci_get_drvdata(pdev);
+
+       dev_info(&pdev->dev, "Removing dma and dca services\n");
+       if (device->dca) {
+               unregister_dca_provider(device->dca);
+               free_dca_provider(device->dca);
+               device->dca = NULL;
+       }
+
+       if (device->dma) {
+               ioat_dma_remove(device->dma);
+               device->dma = NULL;
+       }
+
+       kfree(device);
+}
+
+static int __init ioat_init_module(void)
+{
+       return pci_register_driver(&ioat_pci_driver);
+}
+module_init(ioat_init_module);
+
+static void __exit ioat_exit_module(void)
+{
+       pci_unregister_driver(&ioat_pci_driver);
+}
+module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
new file mode 100644 (file)
index 0000000..49bc277
--- /dev/null
@@ -0,0 +1,226 @@
+/*
+ * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef _IOAT_REGISTERS_H_
+#define _IOAT_REGISTERS_H_
+
+#define IOAT_PCI_DMACTRL_OFFSET                        0x48
+#define IOAT_PCI_DMACTRL_DMA_EN                        0x00000001
+#define IOAT_PCI_DMACTRL_MSI_EN                        0x00000002
+
+#define IOAT_PCI_DEVICE_ID_OFFSET              0x02
+#define IOAT_PCI_DMAUNCERRSTS_OFFSET           0x148
+#define IOAT_PCI_CHANERRMASK_INT_OFFSET                0x184
+
+/* MMIO Device Registers */
+#define IOAT_CHANCNT_OFFSET                    0x00    /*  8-bit */
+
+#define IOAT_XFERCAP_OFFSET                    0x01    /*  8-bit */
+#define IOAT_XFERCAP_4KB                       12
+#define IOAT_XFERCAP_8KB                       13
+#define IOAT_XFERCAP_16KB                      14
+#define IOAT_XFERCAP_32KB                      15
+#define IOAT_XFERCAP_32GB                      0
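
The XFERCAP register stores a power-of-two scale for the largest transfer a single descriptor may carry (12 encodes 4KB, 15 encodes 32KB), with 0 as a special encoding for the 32GB maximum. A minimal sketch of the decode, mirroring what ioat_dma_enumerate_channels() in ioat_dma.c does inline (the helper name is hypothetical):

/* Hypothetical helper: turn the XFERCAP scale read from MMIO into a
 * byte count.  A scale of 0 encodes the 32GB maximum, which the driver
 * represents with an all-ones sentinel since it exceeds 32 bits.
 */
static u32 ioat_decode_xfercap(u8 xfercap_scale)
{
	if (xfercap_scale == 0)		/* IOAT_XFERCAP_32GB */
		return ~0;
	return 1U << xfercap_scale;	/* e.g. 12 -> 4KB, 15 -> 32KB */
}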
+
+#define IOAT_GENCTRL_OFFSET                    0x02    /*  8-bit */
+#define IOAT_GENCTRL_DEBUG_EN                  0x01
+
+#define IOAT_INTRCTRL_OFFSET                   0x03    /*  8-bit */
+#define IOAT_INTRCTRL_MASTER_INT_EN            0x01    /* Master Interrupt Enable */
+#define IOAT_INTRCTRL_INT_STATUS               0x02    /* ATTNSTATUS -or- Channel Int */
+#define IOAT_INTRCTRL_INT                      0x04    /* INT_STATUS -and- MASTER_INT_EN */
+#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL      0x08    /* Enable all MSI-X vectors */
+
+#define IOAT_ATTNSTATUS_OFFSET                 0x04    /* Each bit is a channel */
+
+#define IOAT_VER_OFFSET                                0x08    /*  8-bit */
+#define IOAT_VER_MAJOR_MASK                    0xF0
+#define IOAT_VER_MINOR_MASK                    0x0F
+#define GET_IOAT_VER_MAJOR(x)                  (((x) & IOAT_VER_MAJOR_MASK) >> 4)
+#define GET_IOAT_VER_MINOR(x)                  ((x) & IOAT_VER_MINOR_MASK)
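
A usage sketch for the version macros, assuming pdev and iobase as set up in the probe path shown above:

	/* Sketch: the version register packs major.minor into one byte,
	 * so 0x12 reads back as v1.2 and 0x30 as v3.0.
	 */
	u8 ver = readb(iobase + IOAT_VER_OFFSET);
	dev_dbg(&pdev->dev, "ioat version %d.%d\n",
		GET_IOAT_VER_MAJOR(ver), GET_IOAT_VER_MINOR(ver));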
+
+#define IOAT_PERPORTOFFSET_OFFSET              0x0A    /* 16-bit */
+
+#define IOAT_INTRDELAY_OFFSET                  0x0C    /* 16-bit */
+#define IOAT_INTRDELAY_INT_DELAY_MASK          0x3FFF  /* Interrupt Delay Time */
+#define IOAT_INTRDELAY_COALESE_SUPPORT         0x8000  /* Interrupt Coalescing Supported */
+
+#define IOAT_DEVICE_STATUS_OFFSET              0x0E    /* 16-bit */
+#define IOAT_DEVICE_STATUS_DEGRADED_MODE       0x0001
+
+#define IOAT_CHANNEL_MMIO_SIZE                 0x80    /* Each Channel MMIO space is this size */
+
+/* DMA Channel Registers */
+#define IOAT_CHANCTRL_OFFSET                   0x00    /* 16-bit Channel Control Register */
+#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK    0xF000
+#define IOAT_CHANCTRL_CHANNEL_IN_USE           0x0100
+#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL    0x0020
+#define IOAT_CHANCTRL_ERR_INT_EN               0x0010
+#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN         0x0008
+#define IOAT_CHANCTRL_ERR_COMPLETION_EN                0x0004
+#define IOAT_CHANCTRL_INT_DISABLE              0x0001
+
+#define IOAT_DMA_COMP_OFFSET                   0x02    /* 16-bit DMA channel compatibility */
+#define IOAT_DMA_COMP_V1                       0x0001  /* Compatibility with DMA version 1 */
+#define IOAT_DMA_COMP_V2                       0x0002  /* Compatibility with DMA version 2 */
+
+
+#define IOAT1_CHANSTS_OFFSET           0x04    /* 64-bit Channel Status Register */
+#define IOAT2_CHANSTS_OFFSET           0x08    /* 64-bit Channel Status Register */
+#define IOAT_CHANSTS_OFFSET(ver)               ((ver) < IOAT_VER_2_0 \
+                                               ? IOAT1_CHANSTS_OFFSET : IOAT2_CHANSTS_OFFSET)
+#define IOAT1_CHANSTS_OFFSET_LOW       0x04
+#define IOAT2_CHANSTS_OFFSET_LOW       0x08
+#define IOAT_CHANSTS_OFFSET_LOW(ver)           ((ver) < IOAT_VER_2_0 \
+                                               ? IOAT1_CHANSTS_OFFSET_LOW : IOAT2_CHANSTS_OFFSET_LOW)
+#define IOAT1_CHANSTS_OFFSET_HIGH      0x08
+#define IOAT2_CHANSTS_OFFSET_HIGH      0x0C
+#define IOAT_CHANSTS_OFFSET_HIGH(ver)          ((ver) < IOAT_VER_2_0 \
+                                               ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH)
+#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F
+#define IOAT_CHANSTS_SOFT_ERR                  0x0000000000000010
+#define IOAT_CHANSTS_UNAFFILIATED_ERR          0x0000000000000008
+#define IOAT_CHANSTS_DMA_TRANSFER_STATUS       0x0000000000000007
+#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE        0x0
+#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE  0x1
+#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED     0x2
+#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED        0x3
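
A minimal sketch of decoding the channel status, assuming reg_base and ver from the surrounding driver context. The two 32-bit halves are read separately, the same pattern the watchdog in ioat_dma.c uses (note the pair of reads is not atomic):

	u64 status = ((u64) readl(reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)) << 32)
		| readl(reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
	/* bits 6 and up hold the last completed descriptor address */
	u64 compl_addr = status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;

	/* the low three bits report the transfer state */
	if ((status & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
	    IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		/* fatal channel error; a reset is required */
	}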
+
+
+
+#define IOAT_CHAN_DMACOUNT_OFFSET      0x06    /* 16-bit DMA Count register */
+
+#define IOAT_DCACTRL_OFFSET         0x30   /* 32-bit Direct Cache Access Control Register */
+#define IOAT_DCACTRL_CMPL_WRITE_ENABLE 0x10000
+#define IOAT_DCACTRL_TARGET_CPU_MASK   0xFFFF /* APIC ID */
+
+/* CB DCA Memory Space Registers */
+#define IOAT_DCAOFFSET_OFFSET       0x14
+/* CB_BAR + IOAT_DCAOFFSET value */
+#define IOAT_DCA_VER_OFFSET         0x00
+#define IOAT_DCA_VER_MAJOR_MASK     0xF0
+#define IOAT_DCA_VER_MINOR_MASK     0x0F
+
+#define IOAT_DCA_COMP_OFFSET        0x02
+#define IOAT_DCA_COMP_V1            0x1
+
+#define IOAT_FSB_CAPABILITY_OFFSET  0x04
+#define IOAT_FSB_CAPABILITY_PREFETCH    0x1
+
+#define IOAT_PCI_CAPABILITY_OFFSET  0x06
+#define IOAT_PCI_CAPABILITY_MEMWR   0x1
+
+#define IOAT_FSB_CAP_ENABLE_OFFSET  0x08
+#define IOAT_FSB_CAP_ENABLE_PREFETCH    0x1
+
+#define IOAT_PCI_CAP_ENABLE_OFFSET  0x0A
+#define IOAT_PCI_CAP_ENABLE_MEMWR   0x1
+
+#define IOAT_APICID_TAG_MAP_OFFSET  0x0C
+#define IOAT_APICID_TAG_MAP_TAG0    0x0000000F
+#define IOAT_APICID_TAG_MAP_TAG0_SHIFT 0
+#define IOAT_APICID_TAG_MAP_TAG1    0x000000F0
+#define IOAT_APICID_TAG_MAP_TAG1_SHIFT 4
+#define IOAT_APICID_TAG_MAP_TAG2    0x00000F00
+#define IOAT_APICID_TAG_MAP_TAG2_SHIFT 8
+#define IOAT_APICID_TAG_MAP_TAG3    0x0000F000
+#define IOAT_APICID_TAG_MAP_TAG3_SHIFT 12
+#define IOAT_APICID_TAG_MAP_TAG4    0x000F0000
+#define IOAT_APICID_TAG_MAP_TAG4_SHIFT 16
+#define IOAT_APICID_TAG_CB2_VALID   0x8080808080
+
+#define IOAT_DCA_GREQID_OFFSET      0x10
+#define IOAT_DCA_GREQID_SIZE        0x04
+#define IOAT_DCA_GREQID_MASK        0xFFFF
+#define IOAT_DCA_GREQID_IGNOREFUN   0x10000000
+#define IOAT_DCA_GREQID_VALID       0x20000000
+#define IOAT_DCA_GREQID_LASTID      0x80000000
+
+#define IOAT3_CSI_CAPABILITY_OFFSET 0x08
+#define IOAT3_CSI_CAPABILITY_PREFETCH    0x1
+
+#define IOAT3_PCI_CAPABILITY_OFFSET 0x0A
+#define IOAT3_PCI_CAPABILITY_MEMWR  0x1
+
+#define IOAT3_CSI_CONTROL_OFFSET    0x0C
+#define IOAT3_CSI_CONTROL_PREFETCH  0x1
+
+#define IOAT3_PCI_CONTROL_OFFSET    0x0E
+#define IOAT3_PCI_CONTROL_MEMWR     0x1
+
+#define IOAT3_APICID_TAG_MAP_OFFSET 0x10
+#define IOAT3_APICID_TAG_MAP_OFFSET_LOW  0x10
+#define IOAT3_APICID_TAG_MAP_OFFSET_HIGH 0x14
+
+#define IOAT3_DCA_GREQID_OFFSET     0x02
+
+#define IOAT1_CHAINADDR_OFFSET         0x0C    /* 64-bit Descriptor Chain Address Register */
+#define IOAT2_CHAINADDR_OFFSET         0x10    /* 64-bit Descriptor Chain Address Register */
+#define IOAT_CHAINADDR_OFFSET(ver)             ((ver) < IOAT_VER_2_0 \
+                                               ? IOAT1_CHAINADDR_OFFSET : IOAT2_CHAINADDR_OFFSET)
+#define IOAT1_CHAINADDR_OFFSET_LOW     0x0C
+#define IOAT2_CHAINADDR_OFFSET_LOW     0x10
+#define IOAT_CHAINADDR_OFFSET_LOW(ver)         ((ver) < IOAT_VER_2_0 \
+                                               ? IOAT1_CHAINADDR_OFFSET_LOW : IOAT2_CHAINADDR_OFFSET_LOW)
+#define IOAT1_CHAINADDR_OFFSET_HIGH    0x10
+#define IOAT2_CHAINADDR_OFFSET_HIGH    0x14
+#define IOAT_CHAINADDR_OFFSET_HIGH(ver)                ((ver) < IOAT_VER_2_0 \
+                                               ? IOAT1_CHAINADDR_OFFSET_HIGH : IOAT2_CHAINADDR_OFFSET_HIGH)
+
+#define IOAT1_CHANCMD_OFFSET           0x14    /*  8-bit DMA Channel Command Register */
+#define IOAT2_CHANCMD_OFFSET           0x04    /*  8-bit DMA Channel Command Register */
+#define IOAT_CHANCMD_OFFSET(ver)               ((ver) < IOAT_VER_2_0 \
+                                               ? IOAT1_CHANCMD_OFFSET : IOAT2_CHANCMD_OFFSET)
+#define IOAT_CHANCMD_RESET                     0x20
+#define IOAT_CHANCMD_RESUME                    0x10
+#define IOAT_CHANCMD_ABORT                     0x08
+#define IOAT_CHANCMD_SUSPEND                   0x04
+#define IOAT_CHANCMD_APPEND                    0x02
+#define IOAT_CHANCMD_START                     0x01
+
+#define IOAT_CHANCMP_OFFSET                    0x18    /* 64-bit Channel Completion Address Register */
+#define IOAT_CHANCMP_OFFSET_LOW                        0x18
+#define IOAT_CHANCMP_OFFSET_HIGH               0x1C
+
+#define IOAT_CDAR_OFFSET                       0x20    /* 64-bit Current Descriptor Address Register */
+#define IOAT_CDAR_OFFSET_LOW                   0x20
+#define IOAT_CDAR_OFFSET_HIGH                  0x24
+
+#define IOAT_CHANERR_OFFSET                    0x28    /* 32-bit Channel Error Register */
+#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001
+#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR        0x0002
+#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR  0x0004
+#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR     0x0008
+#define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR      0x0010
+#define IOAT_CHANERR_CHANCMD_ERR               0x0020
+#define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR  0x0040
+#define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR      0x0080
+#define IOAT_CHANERR_READ_DATA_ERR             0x0100
+#define IOAT_CHANERR_WRITE_DATA_ERR            0x0200
+#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR    0x0400
+#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR     0x0800
+#define IOAT_CHANERR_COMPLETION_ADDR_ERR       0x1000
+#define IOAT_CHANERR_INT_CONFIGURATION_ERR     0x2000
+#define IOAT_CHANERR_SOFT_ERR                  0x4000
+#define IOAT_CHANERR_UNAFFILIATED_ERR          0x8000
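
A sketch of acknowledging latched channel errors (reg_base assumed from the driver context); ioat_dma_reset_channel() in ioat_dma.c below uses this same read-then-write-back pattern to clear the register:

	u32 chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr)	/* write the value back to clear the latched bits */
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);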
+
+#define IOAT_CHANERR_MASK_OFFSET               0x2C    /* 32-bit Channel Error Mask Register */
+
+#endif /* _IOAT_REGISTERS_H_ */
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c
deleted file mode 100644 (file)
index c012a1e..0000000
+++ /dev/null
@@ -1,681 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/dca.h>
-
-/* either a kernel change is needed, or we need something like this in the kernel */
-#ifndef CONFIG_SMP
-#include <asm/smp.h>
-#undef cpu_physical_id
-#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
-#endif
-
-#include "ioatdma.h"
-#include "ioatdma_registers.h"
-
-/*
- * Bit 7 of a tag map entry is the "valid" bit.  If it is set, then bits 0:6
- * contain the bit number of the APIC ID to map into the DCA tag.  If the valid
- * bit is not set, then the value must be 0 or 1 and defines the bit in the tag.
- */
-#define DCA_TAG_MAP_VALID 0x80
-
-#define DCA3_TAG_MAP_BIT_TO_INV 0x80
-#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
-#define DCA3_TAG_MAP_LITERAL_VAL 0x1
-
-#define DCA_TAG_MAP_MASK 0xDF
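
Worked example of the encoding above (ioat_dca_get_tag() below implements the full per-tag loop; this helper is only an illustrative sketch): an entry of 0x82, i.e. DCA_TAG_MAP_VALID | 2, selects bit 2 of the target CPU's APIC ID for that tag position, while an entry without the valid bit is a literal 0 or 1.

/* Sketch: decode one v1/v2 tag-map entry for a given APIC ID */
static u8 dca_tag_bit(u8 entry, int apic_id)
{
	if (entry & DCA_TAG_MAP_VALID)	/* bits 0:6 name an APIC ID bit */
		return (apic_id >> (entry & ~DCA_TAG_MAP_VALID)) & 1;
	return entry ? 1 : 0;		/* literal tag bit */
}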
-
-/* expected tag map bytes for I/OAT ver.2 */
-#define DCA2_TAG_MAP_BYTE0 0x80
-#define DCA2_TAG_MAP_BYTE1 0x0
-#define DCA2_TAG_MAP_BYTE2 0x81
-#define DCA2_TAG_MAP_BYTE3 0x82
-#define DCA2_TAG_MAP_BYTE4 0x82
-
-/* verify that the tag map matches the expected values */
-static inline int dca2_tag_map_valid(u8 *tag_map)
-{
-       return ((tag_map[0] == DCA2_TAG_MAP_BYTE0) &&
-               (tag_map[1] == DCA2_TAG_MAP_BYTE1) &&
-               (tag_map[2] == DCA2_TAG_MAP_BYTE2) &&
-               (tag_map[3] == DCA2_TAG_MAP_BYTE3) &&
-               (tag_map[4] == DCA2_TAG_MAP_BYTE4));
-}
-
-/*
- * "Legacy" DCA systems do not implement the DCA register set in the
- * I/OAT device.  Software needs direct support for their tag mappings.
- */
-
-#define APICID_BIT(x)          (DCA_TAG_MAP_VALID | (x))
-#define IOAT_TAG_MAP_LEN       8
-
-static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
-       1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
-static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
-       1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
-static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
-       1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
-static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
-
-/* pack PCI B/D/F into a u16 */
-static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
-{
-       return (pci->bus->number << 8) | pci->devfn;
-}
-
-static int dca_enabled_in_bios(struct pci_dev *pdev)
-{
-       /* CPUID level 9 returns DCA configuration */
-       /* Bit 0 indicates DCA enabled by the BIOS */
-       unsigned long cpuid_level_9;
-       int res;
-
-       cpuid_level_9 = cpuid_eax(9);
-       res = test_bit(0, &cpuid_level_9);
-       if (!res)
-               dev_err(&pdev->dev, "DCA is disabled in BIOS\n");
-
-       return res;
-}
-
-static int system_has_dca_enabled(struct pci_dev *pdev)
-{
-       if (boot_cpu_has(X86_FEATURE_DCA))
-               return dca_enabled_in_bios(pdev);
-
-       dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
-       return 0;
-}
-
-struct ioat_dca_slot {
-       struct pci_dev *pdev;   /* requester device */
-       u16 rid;                /* requester id, as used by IOAT */
-};
-
-#define IOAT_DCA_MAX_REQ 6
-#define IOAT3_DCA_MAX_REQ 2
-
-struct ioat_dca_priv {
-       void __iomem            *iobase;
-       void __iomem            *dca_base;
-       int                      max_requesters;
-       int                      requester_count;
-       u8                       tag_map[IOAT_TAG_MAP_LEN];
-       struct ioat_dca_slot     req_slots[0];
-};
-
-/* 5000 series chipset DCA Port Requester ID Table Entry Format
- * [15:8]      PCI-Express Bus Number
- * [7:3]       PCI-Express Device Number
- * [2:0]       PCI-Express Function Number
- *
- * 5000 series chipset DCA control register format
- * [7:1]       Reserved (0)
- * [0]         Ignore Function Number
- */
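
Worked example of the table entry layout above, matching what dcaid_from_pcidev() below computes: PCI bus 0x05, device 0x03, function 1 gives devfn = (0x03 << 3) | 1 = 0x19, so the requester id is (0x05 << 8) | 0x19 = 0x0519.

	u16 rid = (0x05 << 8) | PCI_DEVFN(0x03, 1);	/* == 0x0519 */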
-
-static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
-       struct ioat_dca_priv *ioatdca = dca_priv(dca);
-       struct pci_dev *pdev;
-       int i;
-       u16 id;
-
-       /* This implementation only supports PCI-Express */
-       if (dev->bus != &pci_bus_type)
-               return -ENODEV;
-       pdev = to_pci_dev(dev);
-       id = dcaid_from_pcidev(pdev);
-
-       if (ioatdca->requester_count == ioatdca->max_requesters)
-               return -ENODEV;
-
-       for (i = 0; i < ioatdca->max_requesters; i++) {
-               if (ioatdca->req_slots[i].pdev == NULL) {
-                       /* found an empty slot */
-                       ioatdca->requester_count++;
-                       ioatdca->req_slots[i].pdev = pdev;
-                       ioatdca->req_slots[i].rid = id;
-                       writew(id, ioatdca->dca_base + (i * 4));
-                       /* make sure the ignore function bit is off */
-                       writeb(0, ioatdca->dca_base + (i * 4) + 2);
-                       return i;
-               }
-       }
-       /* Error, ioatdca->requester_count is out of whack */
-       return -EFAULT;
-}
-
-static int ioat_dca_remove_requester(struct dca_provider *dca,
-                                    struct device *dev)
-{
-       struct ioat_dca_priv *ioatdca = dca_priv(dca);
-       struct pci_dev *pdev;
-       int i;
-
-       /* This implementation only supports PCI-Express */
-       if (dev->bus != &pci_bus_type)
-               return -ENODEV;
-       pdev = to_pci_dev(dev);
-
-       for (i = 0; i < ioatdca->max_requesters; i++) {
-               if (ioatdca->req_slots[i].pdev == pdev) {
-                       writew(0, ioatdca->dca_base + (i * 4));
-                       ioatdca->req_slots[i].pdev = NULL;
-                       ioatdca->req_slots[i].rid = 0;
-                       ioatdca->requester_count--;
-                       return i;
-               }
-       }
-       return -ENODEV;
-}
-
-static u8 ioat_dca_get_tag(struct dca_provider *dca,
-                          struct device *dev,
-                          int cpu)
-{
-       struct ioat_dca_priv *ioatdca = dca_priv(dca);
-       int i, apic_id, bit, value;
-       u8 entry, tag;
-
-       tag = 0;
-       apic_id = cpu_physical_id(cpu);
-
-       for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
-               entry = ioatdca->tag_map[i];
-               if (entry & DCA_TAG_MAP_VALID) {
-                       bit = entry & ~DCA_TAG_MAP_VALID;
-                       value = (apic_id & (1 << bit)) ? 1 : 0;
-               } else {
-                       value = entry ? 1 : 0;
-               }
-               tag |= (value << i);
-       }
-       return tag;
-}
-
-static int ioat_dca_dev_managed(struct dca_provider *dca,
-                               struct device *dev)
-{
-       struct ioat_dca_priv *ioatdca = dca_priv(dca);
-       struct pci_dev *pdev;
-       int i;
-
-       pdev = to_pci_dev(dev);
-       for (i = 0; i < ioatdca->max_requesters; i++) {
-               if (ioatdca->req_slots[i].pdev == pdev)
-                       return 1;
-       }
-       return 0;
-}
-
-static struct dca_ops ioat_dca_ops = {
-       .add_requester          = ioat_dca_add_requester,
-       .remove_requester       = ioat_dca_remove_requester,
-       .get_tag                = ioat_dca_get_tag,
-       .dev_managed            = ioat_dca_dev_managed,
-};
-
-
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
-       struct dca_provider *dca;
-       struct ioat_dca_priv *ioatdca;
-       u8 *tag_map = NULL;
-       int i;
-       int err;
-       u8 version;
-       u8 max_requesters;
-
-       if (!system_has_dca_enabled(pdev))
-               return NULL;
-
-       /* I/OAT v1 systems must have a known tag_map to support DCA */
-       switch (pdev->vendor) {
-       case PCI_VENDOR_ID_INTEL:
-               switch (pdev->device) {
-               case PCI_DEVICE_ID_INTEL_IOAT:
-                       tag_map = ioat_tag_map_BNB;
-                       break;
-               case PCI_DEVICE_ID_INTEL_IOAT_CNB:
-                       tag_map = ioat_tag_map_CNB;
-                       break;
-               case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
-                       tag_map = ioat_tag_map_SCNB;
-                       break;
-               }
-               break;
-       case PCI_VENDOR_ID_UNISYS:
-               switch (pdev->device) {
-               case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
-                       tag_map = ioat_tag_map_UNISYS;
-                       break;
-               }
-               break;
-       }
-       if (tag_map == NULL)
-               return NULL;
-
-       version = readb(iobase + IOAT_VER_OFFSET);
-       if (version == IOAT_VER_3_0)
-               max_requesters = IOAT3_DCA_MAX_REQ;
-       else
-               max_requesters = IOAT_DCA_MAX_REQ;
-
-       dca = alloc_dca_provider(&ioat_dca_ops,
-                       sizeof(*ioatdca) +
-                       (sizeof(struct ioat_dca_slot) * max_requesters));
-       if (!dca)
-               return NULL;
-
-       ioatdca = dca_priv(dca);
-       ioatdca->max_requesters = max_requesters;
-       ioatdca->dca_base = iobase + 0x54;
-
-       /* copy over the APIC ID to DCA tag mapping */
-       for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
-               ioatdca->tag_map[i] = tag_map[i];
-
-       err = register_dca_provider(dca, &pdev->dev);
-       if (err) {
-               free_dca_provider(dca);
-               return NULL;
-       }
-
-       return dca;
-}
-
-
-static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
-       struct ioat_dca_priv *ioatdca = dca_priv(dca);
-       struct pci_dev *pdev;
-       int i;
-       u16 id;
-       u16 global_req_table;
-
-       /* This implementation only supports PCI-Express */
-       if (dev->bus != &pci_bus_type)
-               return -ENODEV;
-       pdev = to_pci_dev(dev);
-       id = dcaid_from_pcidev(pdev);
-
-       if (ioatdca->requester_count == ioatdca->max_requesters)
-               return -ENODEV;
-
-       for (i = 0; i < ioatdca->max_requesters; i++) {
-               if (ioatdca->req_slots[i].pdev == NULL) {
-                       /* found an empty slot */
-                       ioatdca->requester_count++;
-                       ioatdca->req_slots[i].pdev = pdev;
-                       ioatdca->req_slots[i].rid = id;
-                       global_req_table =
-                             readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
-                       writel(id | IOAT_DCA_GREQID_VALID,
-                              ioatdca->iobase + global_req_table + (i * 4));
-                       return i;
-               }
-       }
-       /* Error, ioatdca->requester_count is out of whack */
-       return -EFAULT;
-}
-
-static int ioat2_dca_remove_requester(struct dca_provider *dca,
-                                     struct device *dev)
-{
-       struct ioat_dca_priv *ioatdca = dca_priv(dca);
-       struct pci_dev *pdev;
-       int i;
-       u16 global_req_table;
-
-       /* This implementation only supports PCI-Express */
-       if (dev->bus != &pci_bus_type)
-               return -ENODEV;
-       pdev = to_pci_dev(dev);
-
-       for (i = 0; i < ioatdca->max_requesters; i++) {
-               if (ioatdca->req_slots[i].pdev == pdev) {
-                       global_req_table =
-                             readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
-                       writel(0, ioatdca->iobase + global_req_table + (i * 4));
-                       ioatdca->req_slots[i].pdev = NULL;
-                       ioatdca->req_slots[i].rid = 0;
-                       ioatdca->requester_count--;
-                       return i;
-               }
-       }
-       return -ENODEV;
-}
-
-static u8 ioat2_dca_get_tag(struct dca_provider *dca,
-                           struct device *dev,
-                           int cpu)
-{
-       u8 tag;
-
-       tag = ioat_dca_get_tag(dca, dev, cpu);
-       tag = (~tag) & 0x1F;
-       return tag;
-}
-
-static struct dca_ops ioat2_dca_ops = {
-       .add_requester          = ioat2_dca_add_requester,
-       .remove_requester       = ioat2_dca_remove_requester,
-       .get_tag                = ioat2_dca_get_tag,
-       .dev_managed            = ioat_dca_dev_managed,
-};
-
-static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
-{
-       int slots = 0;
-       u32 req;
-       u16 global_req_table;
-
-       global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
-       if (global_req_table == 0)
-               return 0;
-       do {
-               req = readl(iobase + global_req_table + (slots * sizeof(u32)));
-               slots++;
-       } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
-
-       return slots;
-}
-
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
-       struct dca_provider *dca;
-       struct ioat_dca_priv *ioatdca;
-       int slots;
-       int i;
-       int err;
-       u32 tag_map;
-       u16 dca_offset;
-       u16 csi_fsb_control;
-       u16 pcie_control;
-       u8 bit;
-
-       if (!system_has_dca_enabled(pdev))
-               return NULL;
-
-       dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
-       if (dca_offset == 0)
-               return NULL;
-
-       slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
-       if (slots == 0)
-               return NULL;
-
-       dca = alloc_dca_provider(&ioat2_dca_ops,
-                                sizeof(*ioatdca)
-                                     + (sizeof(struct ioat_dca_slot) * slots));
-       if (!dca)
-               return NULL;
-
-       ioatdca = dca_priv(dca);
-       ioatdca->iobase = iobase;
-       ioatdca->dca_base = iobase + dca_offset;
-       ioatdca->max_requesters = slots;
-
-       /* some BIOSes might not know to turn these on */
-       csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
-       if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
-               csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
-               writew(csi_fsb_control,
-                      ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
-       }
-       pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
-       if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
-               pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
-               writew(pcie_control,
-                      ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
-       }
-
-
-       /* TODO: version, compatibility and configuration checks */
-
-       /* copy out the APIC to DCA tag map */
-       tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
-       for (i = 0; i < 5; i++) {
-               bit = (tag_map >> (4 * i)) & 0x0f;
-               if (bit < 8)
-                       ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
-               else
-                       ioatdca->tag_map[i] = 0;
-       }
-
-       if (!dca2_tag_map_valid(ioatdca->tag_map)) {
-               dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, "
-                       "disabling DCA\n");
-               free_dca_provider(dca);
-               return NULL;
-       }
-
-       err = register_dca_provider(dca, &pdev->dev);
-       if (err) {
-               free_dca_provider(dca);
-               return NULL;
-       }
-
-       return dca;
-}
-
-static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
-       struct ioat_dca_priv *ioatdca = dca_priv(dca);
-       struct pci_dev *pdev;
-       int i;
-       u16 id;
-       u16 global_req_table;
-
-       /* This implementation only supports PCI-Express */
-       if (dev->bus != &pci_bus_type)
-               return -ENODEV;
-       pdev = to_pci_dev(dev);
-       id = dcaid_from_pcidev(pdev);
-
-       if (ioatdca->requester_count == ioatdca->max_requesters)
-               return -ENODEV;
-
-       for (i = 0; i < ioatdca->max_requesters; i++) {
-               if (ioatdca->req_slots[i].pdev == NULL) {
-                       /* found an empty slot */
-                       ioatdca->requester_count++;
-                       ioatdca->req_slots[i].pdev = pdev;
-                       ioatdca->req_slots[i].rid = id;
-                       global_req_table =
-                             readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
-                       writel(id | IOAT_DCA_GREQID_VALID,
-                              ioatdca->iobase + global_req_table + (i * 4));
-                       return i;
-               }
-       }
-       /* Error, ioatdca->requester_count is out of whack */
-       return -EFAULT;
-}
-
-static int ioat3_dca_remove_requester(struct dca_provider *dca,
-                                     struct device *dev)
-{
-       struct ioat_dca_priv *ioatdca = dca_priv(dca);
-       struct pci_dev *pdev;
-       int i;
-       u16 global_req_table;
-
-       /* This implementation only supports PCI-Express */
-       if (dev->bus != &pci_bus_type)
-               return -ENODEV;
-       pdev = to_pci_dev(dev);
-
-       for (i = 0; i < ioatdca->max_requesters; i++) {
-               if (ioatdca->req_slots[i].pdev == pdev) {
-                       global_req_table =
-                             readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
-                       writel(0, ioatdca->iobase + global_req_table + (i * 4));
-                       ioatdca->req_slots[i].pdev = NULL;
-                       ioatdca->req_slots[i].rid = 0;
-                       ioatdca->requester_count--;
-                       return i;
-               }
-       }
-       return -ENODEV;
-}
-
-static u8 ioat3_dca_get_tag(struct dca_provider *dca,
-                           struct device *dev,
-                           int cpu)
-{
-       u8 tag;
-
-       struct ioat_dca_priv *ioatdca = dca_priv(dca);
-       int i, apic_id, bit, value;
-       u8 entry;
-
-       tag = 0;
-       apic_id = cpu_physical_id(cpu);
-
-       for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
-               entry = ioatdca->tag_map[i];
-               if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
-                       bit = entry &
-                               ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
-                       value = (apic_id & (1 << bit)) ? 1 : 0;
-               } else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
-                       bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
-                       value = (apic_id & (1 << bit)) ? 0 : 1;
-               } else {
-                       value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
-               }
-               tag |= (value << i);
-       }
-
-       return tag;
-}
-
-static struct dca_ops ioat3_dca_ops = {
-       .add_requester          = ioat3_dca_add_requester,
-       .remove_requester       = ioat3_dca_remove_requester,
-       .get_tag                = ioat3_dca_get_tag,
-       .dev_managed            = ioat_dca_dev_managed,
-};
-
-static int ioat3_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
-{
-       int slots = 0;
-       u32 req;
-       u16 global_req_table;
-
-       global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
-       if (global_req_table == 0)
-               return 0;
-
-       do {
-               req = readl(iobase + global_req_table + (slots * sizeof(u32)));
-               slots++;
-       } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
-
-       return slots;
-}
-
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
-       struct dca_provider *dca;
-       struct ioat_dca_priv *ioatdca;
-       int slots;
-       int i;
-       int err;
-       u16 dca_offset;
-       u16 csi_fsb_control;
-       u16 pcie_control;
-       u8 bit;
-
-       union {
-               u64 full;
-               struct {
-                       u32 low;
-                       u32 high;
-               };
-       } tag_map;
-
-       if (!system_has_dca_enabled(pdev))
-               return NULL;
-
-       dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
-       if (dca_offset == 0)
-               return NULL;
-
-       slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
-       if (slots == 0)
-               return NULL;
-
-       dca = alloc_dca_provider(&ioat3_dca_ops,
-                                sizeof(*ioatdca)
-                                     + (sizeof(struct ioat_dca_slot) * slots));
-       if (!dca)
-               return NULL;
-
-       ioatdca = dca_priv(dca);
-       ioatdca->iobase = iobase;
-       ioatdca->dca_base = iobase + dca_offset;
-       ioatdca->max_requesters = slots;
-
-       /* some BIOSes might not know to turn these on */
-       csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
-       if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
-               csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
-               writew(csi_fsb_control,
-                      ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
-       }
-       pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
-       if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
-               pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
-               writew(pcie_control,
-                      ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
-       }
-
-
-       /* TODO: version, compatibility and configuration checks */
-
-       /* copy out the APIC to DCA tag map */
-       tag_map.low =
-               readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
-       tag_map.high =
-               readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
-       for (i = 0; i < 8; i++) {
-               bit = tag_map.full >> (8 * i);
-               ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
-       }
-
-       err = register_dca_provider(dca, &pdev->dev);
-       if (err) {
-               free_dca_provider(dca);
-               return NULL;
-       }
-
-       return dca;
-}
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
deleted file mode 100644 (file)
index a600fc0..0000000
+++ /dev/null
@@ -1,1741 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine, which does asynchronous
- * copy operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/i7300_idle.h>
-#include "ioatdma.h"
-#include "ioatdma_registers.h"
-#include "ioatdma_hw.h"
-
-#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
-#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
-#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
-#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
-
-#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
-static int ioat_pending_level = 4;
-module_param(ioat_pending_level, int, 0644);
-MODULE_PARM_DESC(ioat_pending_level,
-                "high-water mark for pushing ioat descriptors (default: 4)");
-
-#define RESET_DELAY  msecs_to_jiffies(100)
-#define WATCHDOG_DELAY  round_jiffies(msecs_to_jiffies(2000))
-static void ioat_dma_chan_reset_part2(struct work_struct *work);
-static void ioat_dma_chan_watchdog(struct work_struct *work);
-
-/*
- * workaround for IOAT ver.3.0 null descriptor issue
- * (channel returns error when size is 0)
- */
-#define NULL_DESC_BUFFER_SIZE 1
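
A sketch of how the workaround is applied when building a null descriptor (hw is assumed to point at a struct ioat_dma_descriptor, and IOAT_DMA_DESCRIPTOR_NUL is the null-op control bit from ioatdma_hw.h; compare ioat_dma_start_null_desc(), declared just below):

	/* v3 channels error out on size 0, so the no-op still names
	 * a 1-byte transfer
	 */
	hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
	hw->size = NULL_DESC_BUFFER_SIZE;	/* 1, never 0 */
	hw->src_addr = 0;
	hw->dst_addr = 0;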
-
-/* internal functions */
-static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
-
-static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
-static struct ioat_desc_sw *
-ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
-
-static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
-                                               struct ioatdma_device *device,
-                                               int index)
-{
-       return device->idx[index];
-}
-
-/**
- * ioat_dma_do_interrupt - handler used for single vector interrupt mode
- * @irq: interrupt id
- * @data: interrupt data
- */
-static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
-{
-       struct ioatdma_device *instance = data;
-       struct ioat_dma_chan *ioat_chan;
-       unsigned long attnstatus;
-       int bit;
-       u8 intrctrl;
-
-       intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
-
-       if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
-               return IRQ_NONE;
-
-       if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
-               writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
-               return IRQ_NONE;
-       }
-
-       attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
-       for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
-               ioat_chan = ioat_lookup_chan_by_index(instance, bit);
-               tasklet_schedule(&ioat_chan->cleanup_task);
-       }
-
-       writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
-       return IRQ_HANDLED;
-}
-
-/**
- * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
- * @irq: interrupt id
- * @data: interrupt data
- */
-static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
-{
-       struct ioat_dma_chan *ioat_chan = data;
-
-       tasklet_schedule(&ioat_chan->cleanup_task);
-
-       return IRQ_HANDLED;
-}
-
-static void ioat_dma_cleanup_tasklet(unsigned long data);
-
-/**
- * ioat_dma_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
-{
-       u8 xfercap_scale;
-       u32 xfercap;
-       int i;
-       struct ioat_dma_chan *ioat_chan;
-
-       /*
-        * IOAT ver.3 workarounds
-        */
-       if (device->version == IOAT_VER_3_0) {
-               u32 chan_err_mask;
-               u16 dev_id;
-               u32 dmauncerrsts;
-
-               /*
-                * Write CHANERRMSK_INT with 3E07h to mask out the errors
-                * that can cause stability issues for IOAT ver.3
-                */
-               chan_err_mask = 0x3E07;
-               pci_write_config_dword(device->pdev,
-                       IOAT_PCI_CHANERRMASK_INT_OFFSET,
-                       chan_err_mask);
-
-               /*
-                * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-                * (workaround for spurious config parity error after restart)
-                */
-               pci_read_config_word(device->pdev,
-                       IOAT_PCI_DEVICE_ID_OFFSET,
-                       &dev_id);
-               if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
-                       dmauncerrsts = 0x10;
-                       pci_write_config_dword(device->pdev,
-                               IOAT_PCI_DMAUNCERRSTS_OFFSET,
-                               dmauncerrsts);
-               }
-       }
-
-       device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
-       xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
-       xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
-
-#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
-       if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) {
-               device->common.chancnt--;
-       }
-#endif
-       for (i = 0; i < device->common.chancnt; i++) {
-               ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
-               if (!ioat_chan) {
-                       device->common.chancnt = i;
-                       break;
-               }
-
-               ioat_chan->device = device;
-               ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
-               ioat_chan->xfercap = xfercap;
-               ioat_chan->desccount = 0;
-               INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
-               if (ioat_chan->device->version == IOAT_VER_2_0)
-                       writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE |
-                              IOAT_DMA_DCA_ANY_CPU,
-                              ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
-               else if (ioat_chan->device->version == IOAT_VER_3_0)
-                       writel(IOAT_DMA_DCA_ANY_CPU,
-                              ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
-               spin_lock_init(&ioat_chan->cleanup_lock);
-               spin_lock_init(&ioat_chan->desc_lock);
-               INIT_LIST_HEAD(&ioat_chan->free_desc);
-               INIT_LIST_HEAD(&ioat_chan->used_desc);
-               /* This should be made common somewhere in dmaengine.c */
-               ioat_chan->common.device = &device->common;
-               list_add_tail(&ioat_chan->common.device_node,
-                             &device->common.channels);
-               device->idx[i] = ioat_chan;
-               tasklet_init(&ioat_chan->cleanup_task,
-                            ioat_dma_cleanup_tasklet,
-                            (unsigned long) ioat_chan);
-               tasklet_disable(&ioat_chan->cleanup_task);
-       }
-       return device->common.chancnt;
-}
-
-/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- *                                 descriptors to hw
- * @chan: DMA channel handle
- */
-static inline void __ioat1_dma_memcpy_issue_pending(
-                                               struct ioat_dma_chan *ioat_chan)
-{
-       ioat_chan->pending = 0;
-       writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
-}
-
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
-       if (ioat_chan->pending > 0) {
-               spin_lock_bh(&ioat_chan->desc_lock);
-               __ioat1_dma_memcpy_issue_pending(ioat_chan);
-               spin_unlock_bh(&ioat_chan->desc_lock);
-       }
-}
-
-static inline void __ioat2_dma_memcpy_issue_pending(
-                                               struct ioat_dma_chan *ioat_chan)
-{
-       ioat_chan->pending = 0;
-       writew(ioat_chan->dmacount,
-              ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-}
-
-static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-
-       if (ioat_chan->pending > 0) {
-               spin_lock_bh(&ioat_chan->desc_lock);
-               __ioat2_dma_memcpy_issue_pending(ioat_chan);
-               spin_unlock_bh(&ioat_chan->desc_lock);
-       }
-}
-
-
-/**
- * ioat_dma_chan_reset_part2 - reinit the channel after a reset
- */
-static void ioat_dma_chan_reset_part2(struct work_struct *work)
-{
-       struct ioat_dma_chan *ioat_chan =
-               container_of(work, struct ioat_dma_chan, work.work);
-       struct ioat_desc_sw *desc;
-
-       spin_lock_bh(&ioat_chan->cleanup_lock);
-       spin_lock_bh(&ioat_chan->desc_lock);
-
-       ioat_chan->completion_virt->low = 0;
-       ioat_chan->completion_virt->high = 0;
-       ioat_chan->pending = 0;
-
-       /*
-        * count the descriptors waiting, and be sure to do it
-        * right for both the CB1 line and the CB2 ring
-        */
-       ioat_chan->dmacount = 0;
-       if (ioat_chan->used_desc.prev) {
-               desc = to_ioat_desc(ioat_chan->used_desc.prev);
-               do {
-                       ioat_chan->dmacount++;
-                       desc = to_ioat_desc(desc->node.next);
-               } while (&desc->node != ioat_chan->used_desc.next);
-       }
-
-       /*
-        * write the new starting descriptor address
-        * this puts the channel engine into the ARMED state
-        */
-       desc = to_ioat_desc(ioat_chan->used_desc.prev);
-       switch (ioat_chan->device->version) {
-       case IOAT_VER_1_2:
-               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
-                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-               writel(((u64) desc->async_tx.phys) >> 32,
-                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-
-               writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
-                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
-               break;
-       case IOAT_VER_2_0:
-               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
-                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-               writel(((u64) desc->async_tx.phys) >> 32,
-                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-
-               /* tell the engine to go with what's left to be done */
-               writew(ioat_chan->dmacount,
-                      ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-
-               break;
-       }
-       dev_err(&ioat_chan->device->pdev->dev,
-               "chan%d reset - %d descs waiting, %d total desc\n",
-               chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
-
-       spin_unlock_bh(&ioat_chan->desc_lock);
-       spin_unlock_bh(&ioat_chan->cleanup_lock);
-}
-
-/**
- * ioat_dma_reset_channel - restart a channel
- * @ioat_chan: IOAT DMA channel handle
- */
-static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
-{
-       u32 chansts, chanerr;
-
-       if (!ioat_chan->used_desc.prev)
-               return;
-
-       chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-       chansts = (ioat_chan->completion_virt->low
-                                       & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
-       if (chanerr) {
-               dev_err(&ioat_chan->device->pdev->dev,
-                       "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
-                       chan_num(ioat_chan), chansts, chanerr);
-               writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-       }
-
-       /*
-        * whack it upside the head with a reset
-        * and wait for things to settle out.
-        * force the pending count to a really big negative
-        * to make sure no one forces an issue_pending
-        * while we're waiting.
-        */
-
-       spin_lock_bh(&ioat_chan->desc_lock);
-       ioat_chan->pending = INT_MIN;
-       writeb(IOAT_CHANCMD_RESET,
-              ioat_chan->reg_base
-              + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
-       spin_unlock_bh(&ioat_chan->desc_lock);
-
-       /* schedule the 2nd half instead of sleeping a long time */
-       schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
-}
-
-/**
- * ioat_dma_chan_watchdog - watch for stuck channels
- */
-static void ioat_dma_chan_watchdog(struct work_struct *work)
-{
-       struct ioatdma_device *device =
-               container_of(work, struct ioatdma_device, work.work);
-       struct ioat_dma_chan *ioat_chan;
-       int i;
-
-       union {
-               u64 full;
-               struct {
-                       u32 low;
-                       u32 high;
-               };
-       } completion_hw;
-       unsigned long compl_desc_addr_hw;
-
-       for (i = 0; i < device->common.chancnt; i++) {
-               ioat_chan = ioat_lookup_chan_by_index(device, i);
-
-               if (ioat_chan->device->version == IOAT_VER_1_2
-                       /* have we started processing anything yet? */
-                   && ioat_chan->last_completion
-                       /* have we completed any since last watchdog cycle? */
-                   && (ioat_chan->last_completion ==
-                               ioat_chan->watchdog_completion)
-                       /* has TCP been stuck on one cookie since the last watchdog? */
-                   && (ioat_chan->watchdog_tcp_cookie ==
-                               ioat_chan->watchdog_last_tcp_cookie)
-                   && (ioat_chan->watchdog_tcp_cookie !=
-                               ioat_chan->completed_cookie)
-                       /* is there something in the chain to be processed? */
-                       /* CB1 chain always has at least the last one processed */
-                   && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
-                   && ioat_chan->pending == 0) {
-
-                       /*
-                        * check CHANSTS register for completed
-                        * descriptor address.
-                        * if it is different than completion writeback,
-                        * it is not zero
-                        * and it has changed since the last watchdog
-                        *     we can assume that channel
-                        *     is still working correctly
-                        *     and the problem is in completion writeback.
-                        *     update completion writeback
-                        *     with actual CHANSTS value
-                        * else
-                        *     try resetting the channel
-                        */
-
-                       completion_hw.low = readl(ioat_chan->reg_base +
-                               IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
-                       completion_hw.high = readl(ioat_chan->reg_base +
-                               IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
-#if (BITS_PER_LONG == 64)
-                       compl_desc_addr_hw =
-                               completion_hw.full
-                               & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
-#else
-                       compl_desc_addr_hw =
-                               completion_hw.low & IOAT_LOW_COMPLETION_MASK;
-#endif
-
-                       if ((compl_desc_addr_hw != 0)
-                          && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
-                          && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
-                               ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
-                               ioat_chan->completion_virt->low = completion_hw.low;
-                               ioat_chan->completion_virt->high = completion_hw.high;
-                       } else {
-                               ioat_dma_reset_channel(ioat_chan);
-                               ioat_chan->watchdog_completion = 0;
-                               ioat_chan->last_compl_desc_addr_hw = 0;
-                       }
-
-               /*
-                * for version 2.0 if there are descriptors yet to be processed
-                * and the last completed hasn't changed since the last watchdog
-                *      if they haven't hit the pending level
-                *          issue the pending to push them through
-                *      else
-                *          try resetting the channel
-                */
-               } else if (ioat_chan->device->version == IOAT_VER_2_0
-                   && ioat_chan->used_desc.prev
-                   && ioat_chan->last_completion
-                   && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
-
-                       if (ioat_chan->pending < ioat_pending_level)
-                               ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
-                       else {
-                               ioat_dma_reset_channel(ioat_chan);
-                               ioat_chan->watchdog_completion = 0;
-                       }
-               } else {
-                       ioat_chan->last_compl_desc_addr_hw = 0;
-                       ioat_chan->watchdog_completion
-                                       = ioat_chan->last_completion;
-               }
-
-               ioat_chan->watchdog_last_tcp_cookie =
-                       ioat_chan->watchdog_tcp_cookie;
-       }
-
-       schedule_delayed_work(&device->work, WATCHDOG_DELAY);
-}
-
-static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
-       struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
-       struct ioat_desc_sw *prev, *new;
-       struct ioat_dma_descriptor *hw;
-       dma_cookie_t cookie;
-       LIST_HEAD(new_chain);
-       u32 copy;
-       size_t len;
-       dma_addr_t src, dst;
-       unsigned long orig_flags;
-       unsigned int desc_count = 0;
-
-       /* src and dest and len are stored in the initial descriptor */
-       len = first->len;
-       src = first->src;
-       dst = first->dst;
-       orig_flags = first->async_tx.flags;
-       new = first;
-
-       spin_lock_bh(&ioat_chan->desc_lock);
-       prev = to_ioat_desc(ioat_chan->used_desc.prev);
-       prefetch(prev->hw);
-       do {
-               copy = min_t(size_t, len, ioat_chan->xfercap);
-
-               async_tx_ack(&new->async_tx);
-
-               hw = new->hw;
-               hw->size = copy;
-               hw->ctl = 0;
-               hw->src_addr = src;
-               hw->dst_addr = dst;
-               hw->next = 0;
-
-               /* chain together the physical address list for the HW */
-               wmb();
-               prev->hw->next = (u64) new->async_tx.phys;
-
-               len -= copy;
-               dst += copy;
-               src += copy;
-
-               list_add_tail(&new->node, &new_chain);
-               desc_count++;
-               prev = new;
-       } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
-
-       if (!new) {
-               dev_err(&ioat_chan->device->pdev->dev,
-                       "tx submit failed\n");
-               spin_unlock_bh(&ioat_chan->desc_lock);
-               return -ENOMEM;
-       }
-
-       hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-       if (first->async_tx.callback) {
-               hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
-               if (first != new) {
-                       /* move the callback into the last desc */
-                       new->async_tx.callback = first->async_tx.callback;
-                       new->async_tx.callback_param
-                                       = first->async_tx.callback_param;
-                       first->async_tx.callback = NULL;
-                       first->async_tx.callback_param = NULL;
-               }
-       }
-
-       new->tx_cnt = desc_count;
-       new->async_tx.flags = orig_flags; /* client is in control of this ack */
-
-       /* store the original values for use in later cleanup */
-       if (new != first) {
-               new->src = first->src;
-               new->dst = first->dst;
-               new->len = first->len;
-       }
-
-       /* cookie incr and addition to used_list must be atomic */
-       cookie = ioat_chan->common.cookie;
-       cookie++;
-       if (cookie < 0)
-               cookie = 1;
-       ioat_chan->common.cookie = new->async_tx.cookie = cookie;
-
-       /* write address into NextDescriptor field of last desc in chain */
-       to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
-                                                       first->async_tx.phys;
-       list_splice_tail(&new_chain, &ioat_chan->used_desc);
-
-       ioat_chan->dmacount += desc_count;
-       ioat_chan->pending += desc_count;
-       if (ioat_chan->pending >= ioat_pending_level)
-               __ioat1_dma_memcpy_issue_pending(ioat_chan);
-       spin_unlock_bh(&ioat_chan->desc_lock);
-
-       return cookie;
-}
-
-static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
-       struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
-       struct ioat_desc_sw *new;
-       struct ioat_dma_descriptor *hw;
-       dma_cookie_t cookie;
-       u32 copy;
-       size_t len;
-       dma_addr_t src, dst;
-       unsigned long orig_flags;
-       unsigned int desc_count = 0;
-
-       /* src and dest and len are stored in the initial descriptor */
-       len = first->len;
-       src = first->src;
-       dst = first->dst;
-       orig_flags = first->async_tx.flags;
-       new = first;
-
-       /*
-        * ioat_chan->desc_lock is still held in the version 2 path;
-        * it gets unlocked at the end of this function
-        */
-       do {
-               copy = min_t(size_t, len, ioat_chan->xfercap);
-
-               async_tx_ack(&new->async_tx);
-
-               hw = new->hw;
-               hw->size = copy;
-               hw->ctl = 0;
-               hw->src_addr = src;
-               hw->dst_addr = dst;
-
-               len -= copy;
-               dst += copy;
-               src += copy;
-               desc_count++;
-       } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
-
-       if (!new) {
-               dev_err(&ioat_chan->device->pdev->dev,
-                       "tx submit failed\n");
-               spin_unlock_bh(&ioat_chan->desc_lock);
-               return -ENOMEM;
-       }
-
-       hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-       if (first->async_tx.callback) {
-               hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
-               if (first != new) {
-                       /* move callback to last desc */
-                       new->async_tx.callback = first->async_tx.callback;
-                       new->async_tx.callback_param
-                                       = first->async_tx.callback_param;
-                       first->async_tx.callback = NULL;
-                       first->async_tx.callback_param = NULL;
-               }
-       }
-
-       new->tx_cnt = desc_count;
-       new->async_tx.flags = orig_flags; /* client is in control of this ack */
-
-       /* store the original values for use in later cleanup */
-       if (new != first) {
-               new->src = first->src;
-               new->dst = first->dst;
-               new->len = first->len;
-       }
-
-       /* cookie incr and addition to used_list must be atomic */
-       cookie = ioat_chan->common.cookie;
-       cookie++;
-       if (cookie < 0)
-               cookie = 1;
-       ioat_chan->common.cookie = new->async_tx.cookie = cookie;
-
-       ioat_chan->dmacount += desc_count;
-       ioat_chan->pending += desc_count;
-       if (ioat_chan->pending >= ioat_pending_level)
-               __ioat2_dma_memcpy_issue_pending(ioat_chan);
-       spin_unlock_bh(&ioat_chan->desc_lock);
-
-       return cookie;
-}
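
Both tx_submit paths above split one logical memcpy into hardware descriptors of at most ioat_chan->xfercap bytes each. The chunking arithmetic can be sketched on its own, with hypothetical names and no hardware involved:

#include <stddef.h>
#include <stdio.h>

/* How many descriptors a 'len'-byte copy needs when each descriptor
 * moves at most 'xfercap' bytes; same loop shape as the do/while in
 * the tx_submit paths above. */
static unsigned int descs_needed(size_t len, size_t xfercap)
{
        unsigned int count = 0;

        do {
                size_t copy = len < xfercap ? len : xfercap;

                len -= copy;
                count++;
        } while (len);

        return count;
}

int main(void)
{
        /* a 5000-byte copy with a 4KB transfer cap takes 2 descriptors */
        printf("%u\n", descs_needed(5000, 4096));
        return 0;
}
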
-
-/**
- * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
- * @ioat_chan: the channel supplying the memory pool for the descriptors
- * @flags: allocation flags
- */
-static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
-                                       struct ioat_dma_chan *ioat_chan,
-                                       gfp_t flags)
-{
-       struct ioat_dma_descriptor *desc;
-       struct ioat_desc_sw *desc_sw;
-       struct ioatdma_device *ioatdma_device;
-       dma_addr_t phys;
-
-       ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
-       desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
-       if (unlikely(!desc))
-               return NULL;
-
-       desc_sw = kzalloc(sizeof(*desc_sw), flags);
-       if (unlikely(!desc_sw)) {
-               pci_pool_free(ioatdma_device->dma_pool, desc, phys);
-               return NULL;
-       }
-
-       memset(desc, 0, sizeof(*desc));
-       dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
-       switch (ioat_chan->device->version) {
-       case IOAT_VER_1_2:
-               desc_sw->async_tx.tx_submit = ioat1_tx_submit;
-               break;
-       case IOAT_VER_2_0:
-       case IOAT_VER_3_0:
-               desc_sw->async_tx.tx_submit = ioat2_tx_submit;
-               break;
-       }
-
-       desc_sw->hw = desc;
-       desc_sw->async_tx.phys = phys;
-
-       return desc_sw;
-}
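
ioat_dma_alloc_descriptor() pairs two allocations, a hardware descriptor from the PCI pool and a software wrapper from the slab, and unwinds the first when the second fails. A generic user-space sketch of that unwind pattern, with malloc/calloc standing in for pci_pool_alloc/kzalloc:

#include <stdlib.h>
#include <string.h>

struct hw_desc { char raw[64]; };                 /* stand-in descriptor */
struct sw_desc { struct hw_desc *hw; };

struct sw_desc *alloc_pair(void)
{
        struct hw_desc *hw = malloc(sizeof(*hw)); /* pci_pool_alloc stand-in */
        struct sw_desc *sw;

        if (!hw)
                return NULL;
        sw = calloc(1, sizeof(*sw));              /* kzalloc stand-in */
        if (!sw) {
                free(hw);                         /* unwind the first alloc */
                return NULL;
        }
        memset(hw, 0, sizeof(*hw));
        sw->hw = hw;
        return sw;
}
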
-
-static int ioat_initial_desc_count = 256;
-module_param(ioat_initial_desc_count, int, 0644);
-MODULE_PARM_DESC(ioat_initial_desc_count,
-                "initial descriptors per channel (default: 256)");
-
-/**
- * ioat2_dma_massage_chan_desc - link the descriptors into a circle
- * @ioat_chan: the channel to be massaged
- */
-static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
-{
-       struct ioat_desc_sw *desc, *_desc;
-
-       /* setup used_desc */
-       ioat_chan->used_desc.next = ioat_chan->free_desc.next;
-       ioat_chan->used_desc.prev = NULL;
-
-       /* pull free_desc out of the circle so that every node is a hw
-        * descriptor, but leave it pointing to the list
-        */
-       ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
-       ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
-
-       /* circle link the hw descriptors */
-       desc = to_ioat_desc(ioat_chan->free_desc.next);
-       desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
-       list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
-               desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
-       }
-}
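
The massage step leaves every list node a real descriptor and chains the hw->next physical addresses into a closed loop. A toy sketch of circular linking, with ordinary pointers standing in for bus addresses (names are illustrative):

#include <stdio.h>

struct desc {
        struct desc *next;      /* stands in for hw->next (a bus address) */
        int id;
};

/* Link an array of descriptors into a ring, as the massage step does
 * with the channel's free list. */
void link_ring(struct desc *d, int n)
{
        for (int i = 0; i < n; i++)
                d[i].next = &d[(i + 1) % n];
}

int main(void)
{
        struct desc ring[4] = { {0, 0}, {0, 1}, {0, 2}, {0, 3} };

        link_ring(ring, 4);
        printf("desc 3 wraps to desc %d\n", ring[3].next->id); /* 0 */
        return 0;
}
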
-
-/**
- * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
- * @chan: the channel to be filled out
- */
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
-{
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-       struct ioat_desc_sw *desc;
-       u16 chanctrl;
-       u32 chanerr;
-       int i;
-       LIST_HEAD(tmp_list);
-
-       /* have we already been set up? */
-       if (!list_empty(&ioat_chan->free_desc))
-               return ioat_chan->desccount;
-
-       /* Setup register to interrupt and write completion status on error */
-       chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
-               IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
-               IOAT_CHANCTRL_ERR_COMPLETION_EN;
-       writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
-       chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-       if (chanerr) {
-               dev_err(&ioat_chan->device->pdev->dev,
-                       "CHANERR = %x, clearing\n", chanerr);
-               writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
-       }
-
-       /* Allocate descriptors */
-       for (i = 0; i < ioat_initial_desc_count; i++) {
-               desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
-               if (!desc) {
-                       dev_err(&ioat_chan->device->pdev->dev,
-                               "Only %d initial descriptors\n", i);
-                       break;
-               }
-               list_add_tail(&desc->node, &tmp_list);
-       }
-       spin_lock_bh(&ioat_chan->desc_lock);
-       ioat_chan->desccount = i;
-       list_splice(&tmp_list, &ioat_chan->free_desc);
-       if (ioat_chan->device->version != IOAT_VER_1_2)
-               ioat2_dma_massage_chan_desc(ioat_chan);
-       spin_unlock_bh(&ioat_chan->desc_lock);
-
-       /* allocate a completion writeback area */
-       /* doing two 32-bit writes to MMIO since one 64-bit write doesn't work */
-       ioat_chan->completion_virt =
-               pci_pool_alloc(ioat_chan->device->completion_pool,
-                              GFP_KERNEL,
-                              &ioat_chan->completion_addr);
-       memset(ioat_chan->completion_virt, 0,
-              sizeof(*ioat_chan->completion_virt));
-       writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
-              ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-       writel(((u64) ioat_chan->completion_addr) >> 32,
-              ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
-       tasklet_enable(&ioat_chan->cleanup_task);
-       ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
-       return ioat_chan->desccount;
-}
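
The completion area is programmed with two 32-bit writes because, per the comment above, one 64-bit MMIO store does not work on this hardware. The split itself is plain masking and shifting; a sketch with a hypothetical write32() in place of writel():

#include <stdint.h>

/* Hypothetical 32-bit MMIO accessor standing in for writel(). */
static void write32(volatile uint32_t *reg, uint32_t val)
{
        *reg = val;
}

/* Program a 64-bit address via two 32-bit writes, low half first,
 * mirroring the IOAT_CHANCMP_OFFSET_LOW/HIGH sequence above. */
void write_addr64(volatile uint32_t *lo, volatile uint32_t *hi, uint64_t addr)
{
        write32(lo, (uint32_t)(addr & 0xffffffffu));
        write32(hi, (uint32_t)(addr >> 32));
}
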
-
-/**
- * ioat_dma_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-static void ioat_dma_free_chan_resources(struct dma_chan *chan)
-{
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-       struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
-       struct ioat_desc_sw *desc, *_desc;
-       int in_use_descs = 0;
-
-       /* Before freeing channel resources, first check
-        * whether they have been allocated for this channel.
-        */
-       if (ioat_chan->desccount == 0)
-               return;
-
-       tasklet_disable(&ioat_chan->cleanup_task);
-       ioat_dma_memcpy_cleanup(ioat_chan);
-
-       /* Delay 100ms after reset to allow internal DMA logic to quiesce
-        * before removing DMA descriptor resources.
-        */
-       writeb(IOAT_CHANCMD_RESET,
-              ioat_chan->reg_base
-                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
-       mdelay(100);
-
-       spin_lock_bh(&ioat_chan->desc_lock);
-       switch (ioat_chan->device->version) {
-       case IOAT_VER_1_2:
-               list_for_each_entry_safe(desc, _desc,
-                                        &ioat_chan->used_desc, node) {
-                       in_use_descs++;
-                       list_del(&desc->node);
-                       pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-                                     desc->async_tx.phys);
-                       kfree(desc);
-               }
-               list_for_each_entry_safe(desc, _desc,
-                                        &ioat_chan->free_desc, node) {
-                       list_del(&desc->node);
-                       pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-                                     desc->async_tx.phys);
-                       kfree(desc);
-               }
-               break;
-       case IOAT_VER_2_0:
-       case IOAT_VER_3_0:
-               list_for_each_entry_safe(desc, _desc,
-                                        ioat_chan->free_desc.next, node) {
-                       list_del(&desc->node);
-                       pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-                                     desc->async_tx.phys);
-                       kfree(desc);
-               }
-               desc = to_ioat_desc(ioat_chan->free_desc.next);
-               pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-                             desc->async_tx.phys);
-               kfree(desc);
-               INIT_LIST_HEAD(&ioat_chan->free_desc);
-               INIT_LIST_HEAD(&ioat_chan->used_desc);
-               break;
-       }
-       spin_unlock_bh(&ioat_chan->desc_lock);
-
-       pci_pool_free(ioatdma_device->completion_pool,
-                     ioat_chan->completion_virt,
-                     ioat_chan->completion_addr);
-
-       /* one is ok since we left it there on purpose */
-       if (in_use_descs > 1)
-               dev_err(&ioat_chan->device->pdev->dev,
-                       "Freeing %d in use descriptors!\n",
-                       in_use_descs - 1);
-
-       ioat_chan->last_completion = ioat_chan->completion_addr = 0;
-       ioat_chan->pending = 0;
-       ioat_chan->dmacount = 0;
-       ioat_chan->desccount = 0;
-       ioat_chan->watchdog_completion = 0;
-       ioat_chan->last_compl_desc_addr_hw = 0;
-       ioat_chan->watchdog_tcp_cookie =
-               ioat_chan->watchdog_last_tcp_cookie = 0;
-}
-
-/**
- * ioat1_dma_get_next_descriptor - return the next available descriptor
- * @ioat_chan: IOAT DMA channel handle
- *
- * Gets the next descriptor from the chain, and must be called with the
- * channel's desc_lock held.  Allocates more descriptors if the channel
- * has run out.
- */
-static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
-{
-       struct ioat_desc_sw *new;
-
-       if (!list_empty(&ioat_chan->free_desc)) {
-               new = to_ioat_desc(ioat_chan->free_desc.next);
-               list_del(&new->node);
-       } else {
-               /* try to get another desc */
-               new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-               if (!new) {
-                       dev_err(&ioat_chan->device->pdev->dev,
-                               "alloc failed\n");
-                       return NULL;
-               }
-       }
-
-       prefetch(new->hw);
-       return new;
-}
-
-static struct ioat_desc_sw *
-ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
-{
-       struct ioat_desc_sw *new;
-
-       /*
-        * used.prev points to where to start processing
-        * used.next points to next free descriptor
-        * if used.prev == NULL, there are none waiting to be processed
-        * if used.next == used.prev.prev, there is only one free descriptor,
- *      and we need to use it as a noop descriptor before
-        *      linking in a new set of descriptors, since the device
-        *      has probably already read the pointer to it
-        */
-       if (ioat_chan->used_desc.prev &&
-           ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
-
-               struct ioat_desc_sw *desc;
-               struct ioat_desc_sw *noop_desc;
-               int i;
-
-               /* set up the noop descriptor */
-               noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
-               /* set size to non-zero value (channel returns error when size is 0) */
-               noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
-               noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
-               noop_desc->hw->src_addr = 0;
-               noop_desc->hw->dst_addr = 0;
-
-               ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
-               ioat_chan->pending++;
-               ioat_chan->dmacount++;
-
-               /* try to get a few more descriptors */
-               for (i = 16; i; i--) {
-                       desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
-                       if (!desc) {
-                               dev_err(&ioat_chan->device->pdev->dev,
-                                       "alloc failed\n");
-                               break;
-                       }
-                       list_add_tail(&desc->node, ioat_chan->used_desc.next);
-
-                       desc->hw->next
-                               = to_ioat_desc(desc->node.next)->async_tx.phys;
-                       to_ioat_desc(desc->node.prev)->hw->next
-                               = desc->async_tx.phys;
-                       ioat_chan->desccount++;
-               }
-
-               ioat_chan->used_desc.next = noop_desc->node.next;
-       }
-       new = to_ioat_desc(ioat_chan->used_desc.next);
-       prefetch(new);
-       ioat_chan->used_desc.next = new->node.next;
-
-       if (ioat_chan->used_desc.prev == NULL)
-               ioat_chan->used_desc.prev = &new->node;
-
-       prefetch(new->hw);
-       return new;
-}
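
The comment at the top of this function describes the ring bookkeeping: used.next is where descriptors are handed out, used.prev is where cleanup resumes, and when only one free slot remains it is burned on a noop descriptor because the hardware may already have fetched its address. An index-based toy model of the "almost full" test (the real code walks list pointers rather than indices):

#include <stdbool.h>

struct ring {
        unsigned int size;      /* number of slots */
        unsigned int submit;    /* next free slot, like used.next */
        unsigned int cleanup;   /* first unprocessed slot, like used.prev */
};

/* True when handing out one more descriptor would leave no gap between
 * producer and consumer, the point where the driver inserts a noop. */
bool nearly_full(const struct ring *r)
{
        return (r->submit + 1) % r->size == r->cleanup;
}
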
-
-static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
-                                               struct ioat_dma_chan *ioat_chan)
-{
-       if (!ioat_chan)
-               return NULL;
-
-       switch (ioat_chan->device->version) {
-       case IOAT_VER_1_2:
-               return ioat1_dma_get_next_descriptor(ioat_chan);
-       case IOAT_VER_2_0:
-       case IOAT_VER_3_0:
-               return ioat2_dma_get_next_descriptor(ioat_chan);
-       }
-       return NULL;
-}
-
-static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
-                                               struct dma_chan *chan,
-                                               dma_addr_t dma_dest,
-                                               dma_addr_t dma_src,
-                                               size_t len,
-                                               unsigned long flags)
-{
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-       struct ioat_desc_sw *new;
-
-       spin_lock_bh(&ioat_chan->desc_lock);
-       new = ioat_dma_get_next_descriptor(ioat_chan);
-       spin_unlock_bh(&ioat_chan->desc_lock);
-
-       if (new) {
-               new->len = len;
-               new->dst = dma_dest;
-               new->src = dma_src;
-               new->async_tx.flags = flags;
-               return &new->async_tx;
-       } else {
-               dev_err(&ioat_chan->device->pdev->dev,
-                       "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
-                       chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
-               return NULL;
-       }
-}
-
-static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
-                                               struct dma_chan *chan,
-                                               dma_addr_t dma_dest,
-                                               dma_addr_t dma_src,
-                                               size_t len,
-                                               unsigned long flags)
-{
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-       struct ioat_desc_sw *new;
-
-       spin_lock_bh(&ioat_chan->desc_lock);
-       new = ioat2_dma_get_next_descriptor(ioat_chan);
-
-       /*
-        * leave ioat_chan->desc_lock set in ioat 2 path
-        * it will get unlocked at end of tx_submit
-        */
-
-       if (new) {
-               new->len = len;
-               new->dst = dma_dest;
-               new->src = dma_src;
-               new->async_tx.flags = flags;
-               return &new->async_tx;
-       } else {
-               spin_unlock_bh(&ioat_chan->desc_lock);
-               dev_err(&ioat_chan->device->pdev->dev,
-                       "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
-                       chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
-               return NULL;
-       }
-}
-
-static void ioat_dma_cleanup_tasklet(unsigned long data)
-{
-       struct ioat_dma_chan *chan = (void *)data;
-       ioat_dma_memcpy_cleanup(chan);
-       writew(IOAT_CHANCTRL_INT_DISABLE,
-              chan->reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-static void
-ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
-{
-       if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-               if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-                       pci_unmap_single(ioat_chan->device->pdev,
-                                        pci_unmap_addr(desc, dst),
-                                        pci_unmap_len(desc, len),
-                                        PCI_DMA_FROMDEVICE);
-               else
-                       pci_unmap_page(ioat_chan->device->pdev,
-                                      pci_unmap_addr(desc, dst),
-                                      pci_unmap_len(desc, len),
-                                      PCI_DMA_FROMDEVICE);
-       }
-
-       if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-               if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-                       pci_unmap_single(ioat_chan->device->pdev,
-                                        pci_unmap_addr(desc, src),
-                                        pci_unmap_len(desc, len),
-                                        PCI_DMA_TODEVICE);
-               else
-                       pci_unmap_page(ioat_chan->device->pdev,
-                                      pci_unmap_addr(desc, src),
-                                      pci_unmap_len(desc, len),
-                                      PCI_DMA_TODEVICE);
-       }
-}
-
-/**
- * ioat_dma_memcpy_cleanup - clean up finished descriptors
- * @chan: ioat channel to be cleaned up
- */
-static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
-{
-       unsigned long phys_complete;
-       struct ioat_desc_sw *desc, *_desc;
-       dma_cookie_t cookie = 0;
-       unsigned long desc_phys;
-       struct ioat_desc_sw *latest_desc;
-
-       prefetch(ioat_chan->completion_virt);
-
-       if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
-               return;
-
-       /* The completion writeback can happen at any time,
-          so reads by the driver need to be atomic operations.
-          The descriptor physical addresses are limited to 32 bits
-          when the CPU can only do a 32-bit mov. */
-
-#if (BITS_PER_LONG == 64)
-       phys_complete =
-               ioat_chan->completion_virt->full
-               & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
-#else
-       phys_complete =
-               ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
-#endif
-
-       if ((ioat_chan->completion_virt->full
-               & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
-                               IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
-               dev_err(&ioat_chan->device->pdev->dev,
-                       "Channel halted, chanerr = %x\n",
-                       readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
-
-               /* TODO do something to salvage the situation */
-       }
-
-       if (phys_complete == ioat_chan->last_completion) {
-               spin_unlock_bh(&ioat_chan->cleanup_lock);
-               /*
-                * perhaps we're stuck so hard that the watchdog can't go off?
-                * try to catch it after 2 seconds
-                */
-               if (ioat_chan->device->version != IOAT_VER_3_0) {
-                       if (time_after(jiffies,
-                                      ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
-                               ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
-                               ioat_chan->last_completion_time = jiffies;
-                       }
-               }
-               return;
-       }
-       ioat_chan->last_completion_time = jiffies;
-
-       cookie = 0;
-       if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
-               spin_unlock_bh(&ioat_chan->cleanup_lock);
-               return;
-       }
-
-       switch (ioat_chan->device->version) {
-       case IOAT_VER_1_2:
-               list_for_each_entry_safe(desc, _desc,
-                                        &ioat_chan->used_desc, node) {
-
-                       /*
-                        * Incoming DMA requests may use multiple descriptors,
-                        * due to exceeding xfercap, perhaps. If so, only the
-                        * last one will have a cookie, and require unmapping.
-                        */
-                       if (desc->async_tx.cookie) {
-                               cookie = desc->async_tx.cookie;
-                               ioat_dma_unmap(ioat_chan, desc);
-                               if (desc->async_tx.callback) {
-                                       desc->async_tx.callback(desc->async_tx.callback_param);
-                                       desc->async_tx.callback = NULL;
-                               }
-                       }
-
-                       if (desc->async_tx.phys != phys_complete) {
-                               /*
-                                * a completed entry, but not the last, so clean
-                                * up if the client is done with the descriptor
-                                */
-                               if (async_tx_test_ack(&desc->async_tx)) {
-                                       list_move_tail(&desc->node,
-                                                      &ioat_chan->free_desc);
-                               } else
-                                       desc->async_tx.cookie = 0;
-                       } else {
-                               /*
-                                * last used desc. Do not remove, so we can
-                                * append from it, but don't look at it next
-                                * time, either
-                                */
-                               desc->async_tx.cookie = 0;
-
-                               /* TODO check status bits? */
-                               break;
-                       }
-               }
-               break;
-       case IOAT_VER_2_0:
-       case IOAT_VER_3_0:
-               /* has some other thread already cleaned up? */
-               if (ioat_chan->used_desc.prev == NULL)
-                       break;
-
-               /* work backwards to find latest finished desc */
-               desc = to_ioat_desc(ioat_chan->used_desc.next);
-               latest_desc = NULL;
-               do {
-                       desc = to_ioat_desc(desc->node.prev);
-                       desc_phys = (unsigned long)desc->async_tx.phys
-                                      & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
-                       if (desc_phys == phys_complete) {
-                               latest_desc = desc;
-                               break;
-                       }
-               } while (&desc->node != ioat_chan->used_desc.prev);
-
-               if (latest_desc != NULL) {
-
-                       /* work forwards to clear finished descriptors */
-                       for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
-                            &desc->node != latest_desc->node.next &&
-                            &desc->node != ioat_chan->used_desc.next;
-                            desc = to_ioat_desc(desc->node.next)) {
-                               if (desc->async_tx.cookie) {
-                                       cookie = desc->async_tx.cookie;
-                                       desc->async_tx.cookie = 0;
-                                       ioat_dma_unmap(ioat_chan, desc);
-                                       if (desc->async_tx.callback) {
-                                               desc->async_tx.callback(desc->async_tx.callback_param);
-                                               desc->async_tx.callback = NULL;
-                                       }
-                               }
-                       }
-
-                       /* move used.prev up beyond those that are finished */
-                       if (&desc->node == ioat_chan->used_desc.next)
-                               ioat_chan->used_desc.prev = NULL;
-                       else
-                               ioat_chan->used_desc.prev = &desc->node;
-               }
-               break;
-       }
-
-       spin_unlock_bh(&ioat_chan->desc_lock);
-
-       ioat_chan->last_completion = phys_complete;
-       if (cookie != 0)
-               ioat_chan->completed_cookie = cookie;
-
-       spin_unlock_bh(&ioat_chan->cleanup_lock);
-}
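
Cleanup reads the 64-bit completion writeback, masks off the low six status bits with IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR to recover the completed descriptor address, and separately checks the transfer-status field for a halt. A sketch of that decode, restating the mask values from ioatdma_registers.h so it stands alone:

#include <stdbool.h>
#include <stdint.h>

#define COMPLETED_ADDR_MASK     (~0x3Full) /* IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR */
#define XFER_STATUS_MASK        0x7ull     /* IOAT_CHANSTS_DMA_TRANSFER_STATUS */
#define XFER_STATUS_HALTED      0x3ull     /* ..._STATUS_HALTED */

/* Split one completion writeback word into the completed descriptor
 * address and a halted flag, as the cleanup routine does. */
uint64_t decode_completion(uint64_t writeback, bool *halted)
{
        *halted = (writeback & XFER_STATUS_MASK) == XFER_STATUS_HALTED;
        return writeback & COMPLETED_ADDR_MASK;
}
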
-
-/**
- * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
- * @chan: IOAT DMA channel handle
- * @cookie: DMA transaction identifier
- * @done: if not %NULL, updated with last completed transaction
- * @used: if not %NULL, updated with last used transaction
- */
-static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
-                                           dma_cookie_t cookie,
-                                           dma_cookie_t *done,
-                                           dma_cookie_t *used)
-{
-       struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
-       dma_cookie_t last_used;
-       dma_cookie_t last_complete;
-       enum dma_status ret;
-
-       last_used = chan->cookie;
-       last_complete = ioat_chan->completed_cookie;
-       ioat_chan->watchdog_tcp_cookie = cookie;
-
-       if (done)
-               *done = last_complete;
-       if (used)
-               *used = last_used;
-
-       ret = dma_async_is_complete(cookie, last_complete, last_used);
-       if (ret == DMA_SUCCESS)
-               return ret;
-
-       ioat_dma_memcpy_cleanup(ioat_chan);
-
-       last_used = chan->cookie;
-       last_complete = ioat_chan->completed_cookie;
-
-       if (done)
-               *done = last_complete;
-       if (used)
-               *used = last_used;
-
-       return dma_async_is_complete(cookie, last_complete, last_used);
-}
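
The final decision here is made by dmaengine's dma_async_is_complete(), which tests whether the cookie falls inside the completed window while allowing for the counter having wrapped back to 1. A sketch of that comparison (illustrative names; the real helper lives in linux/dmaengine.h):

enum status { STATUS_DONE, STATUS_IN_PROGRESS };

/* A cookie is done once it lies outside the (last_complete, last_used]
 * window of still-pending transactions, with the two cases covering
 * whether the counter wrapped between them. */
enum status cookie_status(int cookie, int last_complete, int last_used)
{
        if (last_complete <= last_used) {
                if (cookie <= last_complete || cookie > last_used)
                        return STATUS_DONE;
        } else {
                if (cookie <= last_complete && cookie > last_used)
                        return STATUS_DONE;
        }
        return STATUS_IN_PROGRESS;
}
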
-
-static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
-{
-       struct ioat_desc_sw *desc;
-
-       spin_lock_bh(&ioat_chan->desc_lock);
-
-       desc = ioat_dma_get_next_descriptor(ioat_chan);
-
-       if (!desc) {
-               dev_err(&ioat_chan->device->pdev->dev,
-                       "Unable to start null desc - get next desc failed\n");
-               spin_unlock_bh(&ioat_chan->desc_lock);
-               return;
-       }
-
-       desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
-                               | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
-                               | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
-       /* set size to non-zero value (channel returns error when size is 0) */
-       desc->hw->size = NULL_DESC_BUFFER_SIZE;
-       desc->hw->src_addr = 0;
-       desc->hw->dst_addr = 0;
-       async_tx_ack(&desc->async_tx);
-       switch (ioat_chan->device->version) {
-       case IOAT_VER_1_2:
-               desc->hw->next = 0;
-               list_add_tail(&desc->node, &ioat_chan->used_desc);
-
-               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
-                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-               writel(((u64) desc->async_tx.phys) >> 32,
-                      ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-
-               writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
-                       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
-               break;
-       case IOAT_VER_2_0:
-       case IOAT_VER_3_0:
-               writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
-                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-               writel(((u64) desc->async_tx.phys) >> 32,
-                      ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-
-               ioat_chan->dmacount++;
-               __ioat2_dma_memcpy_issue_pending(ioat_chan);
-               break;
-       }
-       spin_unlock_bh(&ioat_chan->desc_lock);
-}
-
-/*
- * Perform an IOAT transaction to verify the HW works.
- */
-#define IOAT_TEST_SIZE 2000
-
-static void ioat_dma_test_callback(void *dma_async_param)
-{
-       struct completion *cmp = dma_async_param;
-
-       complete(cmp);
-}
-
-/**
- * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
- * @device: device to be tested
- */
-static int ioat_dma_self_test(struct ioatdma_device *device)
-{
-       int i;
-       u8 *src;
-       u8 *dest;
-       struct dma_chan *dma_chan;
-       struct dma_async_tx_descriptor *tx;
-       dma_addr_t dma_dest, dma_src;
-       dma_cookie_t cookie;
-       int err = 0;
-       struct completion cmp;
-       unsigned long tmo;
-       unsigned long flags;
-
-       src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
-       if (!src)
-               return -ENOMEM;
-       dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
-       if (!dest) {
-               kfree(src);
-               return -ENOMEM;
-       }
-
-       /* Fill in src buffer */
-       for (i = 0; i < IOAT_TEST_SIZE; i++)
-               src[i] = (u8)i;
-
-       /* Start copy, using first DMA channel */
-       dma_chan = container_of(device->common.channels.next,
-                               struct dma_chan,
-                               device_node);
-       if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
-               dev_err(&device->pdev->dev,
-                       "selftest cannot allocate chan resource\n");
-               err = -ENODEV;
-               goto out;
-       }
-
-       dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
-                                DMA_TO_DEVICE);
-       dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
-                                 DMA_FROM_DEVICE);
-       flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE;
-       tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
-                                                  IOAT_TEST_SIZE, flags);
-       if (!tx) {
-               dev_err(&device->pdev->dev,
-                       "Self-test prep failed, disabling\n");
-               err = -ENODEV;
-               goto free_resources;
-       }
-
-       async_tx_ack(tx);
-       init_completion(&cmp);
-       tx->callback = ioat_dma_test_callback;
-       tx->callback_param = &cmp;
-       cookie = tx->tx_submit(tx);
-       if (cookie < 0) {
-               dev_err(&device->pdev->dev,
-                       "Self-test setup failed, disabling\n");
-               err = -ENODEV;
-               goto free_resources;
-       }
-       device->common.device_issue_pending(dma_chan);
-
-       tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-       if (tmo == 0 ||
-           device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
-                                       != DMA_SUCCESS) {
-               dev_err(&device->pdev->dev,
-                       "Self-test copy timed out, disabling\n");
-               err = -ENODEV;
-               goto free_resources;
-       }
-       if (memcmp(src, dest, IOAT_TEST_SIZE)) {
-               dev_err(&device->pdev->dev,
-                       "Self-test copy failed compare, disabling\n");
-               err = -ENODEV;
-               goto free_resources;
-       }
-
-free_resources:
-       device->common.device_free_chan_resources(dma_chan);
-out:
-       kfree(src);
-       kfree(dest);
-       return err;
-}
-
-static char ioat_interrupt_style[32] = "msix";
-module_param_string(ioat_interrupt_style, ioat_interrupt_style,
-                   sizeof(ioat_interrupt_style), 0644);
-MODULE_PARM_DESC(ioat_interrupt_style,
-                "set ioat interrupt style: msix (default), "
-                "msix-single-vector, msi, intx");
-
-/**
- * ioat_dma_setup_interrupts - setup interrupt handler
- * @device: ioat device
- */
-static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
-{
-       struct ioat_dma_chan *ioat_chan;
-       int err, i, j, msixcnt;
-       u8 intrctrl = 0;
-
-       if (!strcmp(ioat_interrupt_style, "msix"))
-               goto msix;
-       if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
-               goto msix_single_vector;
-       if (!strcmp(ioat_interrupt_style, "msi"))
-               goto msi;
-       if (!strcmp(ioat_interrupt_style, "intx"))
-               goto intx;
-       dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
-               ioat_interrupt_style);
-       goto err_no_irq;
-
-msix:
-       /* The number of MSI-X vectors should equal the number of channels */
-       msixcnt = device->common.chancnt;
-       for (i = 0; i < msixcnt; i++)
-               device->msix_entries[i].entry = i;
-
-       err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
-       if (err < 0)
-               goto msi;
-       if (err > 0)
-               goto msix_single_vector;
-
-       for (i = 0; i < msixcnt; i++) {
-               ioat_chan = ioat_lookup_chan_by_index(device, i);
-               err = request_irq(device->msix_entries[i].vector,
-                                 ioat_dma_do_interrupt_msix,
-                                 0, "ioat-msix", ioat_chan);
-               if (err) {
-                       for (j = 0; j < i; j++) {
-                               ioat_chan =
-                                       ioat_lookup_chan_by_index(device, j);
-                               free_irq(device->msix_entries[j].vector,
-                                        ioat_chan);
-                       }
-                       goto msix_single_vector;
-               }
-       }
-       intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
-       device->irq_mode = msix_multi_vector;
-       goto done;
-
-msix_single_vector:
-       device->msix_entries[0].entry = 0;
-       err = pci_enable_msix(device->pdev, device->msix_entries, 1);
-       if (err)
-               goto msi;
-
-       err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
-                         0, "ioat-msix", device);
-       if (err) {
-               pci_disable_msix(device->pdev);
-               goto msi;
-       }
-       device->irq_mode = msix_single_vector;
-       goto done;
-
-msi:
-       err = pci_enable_msi(device->pdev);
-       if (err)
-               goto intx;
-
-       err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
-                         0, "ioat-msi", device);
-       if (err) {
-               pci_disable_msi(device->pdev);
-               goto intx;
-       }
-       /*
-        * CB 1.2 devices need a bit set in configuration space to enable MSI
-        */
-       if (device->version == IOAT_VER_1_2) {
-               u32 dmactrl;
-               pci_read_config_dword(device->pdev,
-                                     IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
-               dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
-               pci_write_config_dword(device->pdev,
-                                      IOAT_PCI_DMACTRL_OFFSET, dmactrl);
-       }
-       device->irq_mode = msi;
-       goto done;
-
-intx:
-       err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
-                         IRQF_SHARED, "ioat-intx", device);
-       if (err)
-               goto err_no_irq;
-       device->irq_mode = intx;
-
-done:
-       intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
-       writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
-       return 0;
-
-err_no_irq:
-       /* Disable all interrupt generation */
-       writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-       dev_err(&device->pdev->dev, "no usable interrupts\n");
-       device->irq_mode = none;
-       return -1;
-}
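
The goto chain above encodes a strict fallback order: per-channel MSI-X, then single-vector MSI-X, then MSI, then legacy INTx, giving up only when all four fail. A sketch of the same ordering, with hypothetical try_*() callbacks standing in for the pci_enable_*() plus request_irq() sequences:

enum irq_mode { IRQ_NONE, IRQ_MSIX, IRQ_MSIX_SINGLE, IRQ_MSI, IRQ_INTX };

/* Try each interrupt style in turn and report the first that sticks;
 * each callback returns 0 on success, as the kernel calls do. */
enum irq_mode setup_irq(int (*try_msix)(void), int (*try_msix_single)(void),
                        int (*try_msi)(void), int (*try_intx)(void))
{
        if (try_msix() == 0)
                return IRQ_MSIX;
        if (try_msix_single() == 0)
                return IRQ_MSIX_SINGLE;
        if (try_msi() == 0)
                return IRQ_MSI;
        if (try_intx() == 0)
                return IRQ_INTX;
        return IRQ_NONE;
}
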
-
-/**
- * ioat_dma_remove_interrupts - remove whatever interrupts were set
- * @device: ioat device
- */
-static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
-{
-       struct ioat_dma_chan *ioat_chan;
-       int i;
-
-       /* Disable all interrupt generation */
-       writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-
-       switch (device->irq_mode) {
-       case msix_multi_vector:
-               for (i = 0; i < device->common.chancnt; i++) {
-                       ioat_chan = ioat_lookup_chan_by_index(device, i);
-                       free_irq(device->msix_entries[i].vector, ioat_chan);
-               }
-               pci_disable_msix(device->pdev);
-               break;
-       case msix_single_vector:
-               free_irq(device->msix_entries[0].vector, device);
-               pci_disable_msix(device->pdev);
-               break;
-       case msi:
-               free_irq(device->pdev->irq, device);
-               pci_disable_msi(device->pdev);
-               break;
-       case intx:
-               free_irq(device->pdev->irq, device);
-               break;
-       case none:
-               dev_warn(&device->pdev->dev,
-                        "call to %s without interrupts setup\n", __func__);
-       }
-       device->irq_mode = none;
-}
-
-struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
-                                     void __iomem *iobase)
-{
-       int err;
-       struct ioatdma_device *device;
-
-       device = kzalloc(sizeof(*device), GFP_KERNEL);
-       if (!device) {
-               err = -ENOMEM;
-               goto err_kzalloc;
-       }
-       device->pdev = pdev;
-       device->reg_base = iobase;
-       device->version = readb(device->reg_base + IOAT_VER_OFFSET);
-
-       /* DMA coherent memory pool for DMA descriptor allocations */
-       device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
-                                          sizeof(struct ioat_dma_descriptor),
-                                          64, 0);
-       if (!device->dma_pool) {
-               err = -ENOMEM;
-               goto err_dma_pool;
-       }
-
-       device->completion_pool = pci_pool_create("completion_pool", pdev,
-                                                 sizeof(u64), SMP_CACHE_BYTES,
-                                                 SMP_CACHE_BYTES);
-       if (!device->completion_pool) {
-               err = -ENOMEM;
-               goto err_completion_pool;
-       }
-
-       INIT_LIST_HEAD(&device->common.channels);
-       ioat_dma_enumerate_channels(device);
-
-       device->common.device_alloc_chan_resources =
-                                               ioat_dma_alloc_chan_resources;
-       device->common.device_free_chan_resources =
-                                               ioat_dma_free_chan_resources;
-       device->common.dev = &pdev->dev;
-
-       dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
-       device->common.device_is_tx_complete = ioat_dma_is_complete;
-       switch (device->version) {
-       case IOAT_VER_1_2:
-               device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
-               device->common.device_issue_pending =
-                                               ioat1_dma_memcpy_issue_pending;
-               break;
-       case IOAT_VER_2_0:
-       case IOAT_VER_3_0:
-               device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
-               device->common.device_issue_pending =
-                                               ioat2_dma_memcpy_issue_pending;
-               break;
-       }
-
-       dev_err(&device->pdev->dev,
-               "Intel(R) I/OAT DMA Engine found,"
-               " %d channels, device version 0x%02x, driver version %s\n",
-               device->common.chancnt, device->version, IOAT_DMA_VERSION);
-
-       if (!device->common.chancnt) {
-               dev_err(&device->pdev->dev,
-                       "Intel(R) I/OAT DMA Engine problem found: "
-                       "zero channels detected\n");
-               goto err_setup_interrupts;
-       }
-
-       err = ioat_dma_setup_interrupts(device);
-       if (err)
-               goto err_setup_interrupts;
-
-       err = ioat_dma_self_test(device);
-       if (err)
-               goto err_self_test;
-
-       ioat_set_tcp_copy_break(device);
-
-       dma_async_device_register(&device->common);
-
-       if (device->version != IOAT_VER_3_0) {
-               INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
-               schedule_delayed_work(&device->work,
-                                     WATCHDOG_DELAY);
-       }
-
-       return device;
-
-err_self_test:
-       ioat_dma_remove_interrupts(device);
-err_setup_interrupts:
-       pci_pool_destroy(device->completion_pool);
-err_completion_pool:
-       pci_pool_destroy(device->dma_pool);
-err_dma_pool:
-       kfree(device);
-err_kzalloc:
-       dev_err(&pdev->dev,
-               "Intel(R) I/OAT DMA Engine initialization failed\n");
-       return NULL;
-}
-
-void ioat_dma_remove(struct ioatdma_device *device)
-{
-       struct dma_chan *chan, *_chan;
-       struct ioat_dma_chan *ioat_chan;
-
-       if (device->version != IOAT_VER_3_0)
-               cancel_delayed_work(&device->work);
-
-       ioat_dma_remove_interrupts(device);
-
-       dma_async_device_unregister(&device->common);
-
-       pci_pool_destroy(device->dma_pool);
-       pci_pool_destroy(device->completion_pool);
-
-       iounmap(device->reg_base);
-       pci_release_regions(device->pdev);
-       pci_disable_device(device->pdev);
-
-       list_for_each_entry_safe(chan, _chan,
-                                &device->common.channels, device_node) {
-               ioat_chan = to_ioat_chan(chan);
-               list_del(&chan->device_node);
-               kfree(ioat_chan);
-       }
-       kfree(device);
-}
-
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
deleted file mode 100644 (file)
index a52ff4b..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef IOATDMA_H
-#define IOATDMA_H
-
-#include <linux/dmaengine.h>
-#include "ioatdma_hw.h"
-#include <linux/init.h>
-#include <linux/dmapool.h>
-#include <linux/cache.h>
-#include <linux/pci_ids.h>
-#include <net/tcp.h>
-
-#define IOAT_DMA_VERSION  "3.64"
-
-enum ioat_interrupt {
-       none = 0,
-       msix_multi_vector = 1,
-       msix_single_vector = 2,
-       msi = 3,
-       intx = 4,
-};
-
-#define IOAT_LOW_COMPLETION_MASK       0xffffffc0
-#define IOAT_DMA_DCA_ANY_CPU           ~0
-#define IOAT_WATCHDOG_PERIOD           (2 * HZ)
-
-
-/**
- * struct ioatdma_device - internal representation of an IOAT device
- * @pdev: PCI-Express device
- * @reg_base: MMIO register space base address
- * @dma_pool: for allocating DMA descriptors
- * @completion_pool: for allocating completion writeback areas
- * @common: embedded struct dma_device
- * @version: version of ioatdma device
- * @irq_mode: which style irq to use
- * @work: delayed work for the channel watchdog
- * @msix_entries: MSI-X vector table entries
- * @idx: per channel data
- */
-
-struct ioatdma_device {
-       struct pci_dev *pdev;
-       void __iomem *reg_base;
-       struct pci_pool *dma_pool;
-       struct pci_pool *completion_pool;
-       struct dma_device common;
-       u8 version;
-       enum ioat_interrupt irq_mode;
-       struct delayed_work work;
-       struct msix_entry msix_entries[4];
-       struct ioat_dma_chan *idx[4];
-};
-
-/**
- * struct ioat_dma_chan - internal representation of a DMA channel
- */
-struct ioat_dma_chan {
-
-       void __iomem *reg_base;
-
-       dma_cookie_t completed_cookie;
-       unsigned long last_completion;
-       unsigned long last_completion_time;
-
-       size_t xfercap; /* XFERCAP register value expanded out */
-
-       spinlock_t cleanup_lock;
-       spinlock_t desc_lock;
-       struct list_head free_desc;
-       struct list_head used_desc;
-       unsigned long watchdog_completion;
-       int watchdog_tcp_cookie;
-       u32 watchdog_last_tcp_cookie;
-       struct delayed_work work;
-
-       int pending;
-       int dmacount;
-       int desccount;
-
-       struct ioatdma_device *device;
-       struct dma_chan common;
-
-       dma_addr_t completion_addr;
-       union {
-               u64 full; /* HW completion writeback */
-               struct {
-                       u32 low;
-                       u32 high;
-               };
-       } *completion_virt;
-       unsigned long last_compl_desc_addr_hw;
-       struct tasklet_struct cleanup_task;
-};
-
-/* wrapper around hardware descriptor format + additional software fields */
-
-/**
- * struct ioat_desc_sw - wrapper around hardware descriptor
- * @hw: hardware DMA descriptor
- * @node: this descriptor will either be on the free list,
- *     or attached to a transaction list (async_tx.tx_list)
- * @tx_cnt: number of descriptors required to complete the transaction
- * @len: total transaction length, saved for later cleanup
- * @src: original source address, saved for later cleanup
- * @dst: original destination address, saved for later cleanup
- * @async_tx: the generic software descriptor for all engines
- */
-struct ioat_desc_sw {
-       struct ioat_dma_descriptor *hw;
-       struct list_head node;
-       int tx_cnt;
-       size_t len;
-       dma_addr_t src;
-       dma_addr_t dst;
-       struct dma_async_tx_descriptor async_tx;
-};
-
-static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev)
-{
-       #ifdef CONFIG_NET_DMA
-       switch (dev->version) {
-       case IOAT_VER_1_2:
-               sysctl_tcp_dma_copybreak = 4096;
-               break;
-       case IOAT_VER_2_0:
-               sysctl_tcp_dma_copybreak = 2048;
-               break;
-       case IOAT_VER_3_0:
-               sysctl_tcp_dma_copybreak = 262144;
-               break;
-       }
-       #endif
-}
-
-#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE)
-struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
-                                     void __iomem *iobase);
-void ioat_dma_remove(struct ioatdma_device *device);
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-#else
-#define ioat_dma_probe(pdev, iobase)    NULL
-#define ioat_dma_remove(device)         do { } while (0)
-#define ioat_dca_init(pdev, iobase)    NULL
-#define ioat2_dca_init(pdev, iobase)   NULL
-#define ioat3_dca_init(pdev, iobase)   NULL
-#endif
-
-#endif /* IOATDMA_H */
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h
deleted file mode 100644 (file)
index afa57ee..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef _IOAT_HW_H_
-#define _IOAT_HW_H_
-
-/* PCI Configuration Space Values */
-#define IOAT_PCI_VID            0x8086
-
-/* CB device ID's */
-#define IOAT_PCI_DID_5000       0x1A38
-#define IOAT_PCI_DID_CNB        0x360B
-#define IOAT_PCI_DID_SCNB       0x65FF
-#define IOAT_PCI_DID_SNB        0x402F
-
-#define IOAT_PCI_RID            0x00
-#define IOAT_PCI_SVID           0x8086
-#define IOAT_PCI_SID            0x8086
-#define IOAT_VER_1_2            0x12    /* Version 1.2 */
-#define IOAT_VER_2_0            0x20    /* Version 2.0 */
-#define IOAT_VER_3_0            0x30    /* Version 3.0 */
-
-struct ioat_dma_descriptor {
-       uint32_t        size;
-       uint32_t        ctl;
-       uint64_t        src_addr;
-       uint64_t        dst_addr;
-       uint64_t        next;
-       uint64_t        rsv1;
-       uint64_t        rsv2;
-       uint64_t        user1;
-       uint64_t        user2;
-};
-
-#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN 0x00000001
-#define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN 0x00000002
-#define IOAT_DMA_DESCRIPTOR_CTL_DST_SN 0x00000004
-#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008
-#define IOAT_DMA_DESCRIPTOR_CTL_FRAME  0x00000010
-#define IOAT_DMA_DESCRIPTOR_NUL                0x00000020
-#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040
-#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080
-#define IOAT_DMA_DESCRIPTOR_CTL_BNDL   0x00000100
-#define IOAT_DMA_DESCRIPTOR_CTL_DCA    0x00000200
-#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT        0x00000400
-
-#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000
-#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA     0x00000000
-
-#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA    0x00000001
-#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK    0xFF000000
-
-#endif
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
deleted file mode 100644 (file)
index 49bc277..0000000
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef _IOAT_REGISTERS_H_
-#define _IOAT_REGISTERS_H_
-
-#define IOAT_PCI_DMACTRL_OFFSET                        0x48
-#define IOAT_PCI_DMACTRL_DMA_EN                        0x00000001
-#define IOAT_PCI_DMACTRL_MSI_EN                        0x00000002
-
-#define IOAT_PCI_DEVICE_ID_OFFSET              0x02
-#define IOAT_PCI_DMAUNCERRSTS_OFFSET           0x148
-#define IOAT_PCI_CHANERRMASK_INT_OFFSET                0x184
-
-/* MMIO Device Registers */
-#define IOAT_CHANCNT_OFFSET                    0x00    /*  8-bit */
-
-#define IOAT_XFERCAP_OFFSET                    0x01    /*  8-bit */
-#define IOAT_XFERCAP_4KB                       12
-#define IOAT_XFERCAP_8KB                       13
-#define IOAT_XFERCAP_16KB                      14
-#define IOAT_XFERCAP_32KB                      15
-#define IOAT_XFERCAP_32GB                      0
-
-#define IOAT_GENCTRL_OFFSET                    0x02    /*  8-bit */
-#define IOAT_GENCTRL_DEBUG_EN                  0x01
-
-#define IOAT_INTRCTRL_OFFSET                   0x03    /*  8-bit */
-#define IOAT_INTRCTRL_MASTER_INT_EN            0x01    /* Master Interrupt Enable */
-#define IOAT_INTRCTRL_INT_STATUS               0x02    /* ATTNSTATUS -or- Channel Int */
-#define IOAT_INTRCTRL_INT                      0x04    /* INT_STATUS -and- MASTER_INT_EN */
-#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL      0x08    /* Enable all MSI-X vectors */
-
-#define IOAT_ATTNSTATUS_OFFSET                 0x04    /* Each bit is a channel */
-
-#define IOAT_VER_OFFSET                                0x08    /*  8-bit */
-#define IOAT_VER_MAJOR_MASK                    0xF0
-#define IOAT_VER_MINOR_MASK                    0x0F
-#define GET_IOAT_VER_MAJOR(x)                  (((x) & IOAT_VER_MAJOR_MASK) >> 4)
-#define GET_IOAT_VER_MINOR(x)                  ((x) & IOAT_VER_MINOR_MASK)
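
The VER register packs the major version in the high nibble and the minor in the low nibble, which is what the two GET_IOAT_VER_* macros extract; 0x12 decodes as v1.2, matching IOAT_VER_1_2 in ioatdma_hw.h. A small usage example, restating the macros so it compiles on its own:

#include <stdint.h>
#include <stdio.h>

#define IOAT_VER_MAJOR_MASK     0xF0
#define IOAT_VER_MINOR_MASK     0x0F
#define GET_IOAT_VER_MAJOR(x)   (((x) & IOAT_VER_MAJOR_MASK) >> 4)
#define GET_IOAT_VER_MINOR(x)   ((x) & IOAT_VER_MINOR_MASK)

int main(void)
{
        uint8_t ver = 0x12;     /* value read from IOAT_VER_OFFSET */

        printf("v%d.%d\n", GET_IOAT_VER_MAJOR(ver), GET_IOAT_VER_MINOR(ver));
        return 0;
}
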
-
-#define IOAT_PERPORTOFFSET_OFFSET              0x0A    /* 16-bit */
-
-#define IOAT_INTRDELAY_OFFSET                  0x0C    /* 16-bit */
-#define IOAT_INTRDELAY_INT_DELAY_MASK          0x3FFF  /* Interrupt Delay Time */
-#define IOAT_INTRDELAY_COALESE_SUPPORT         0x8000  /* Interrupt Coalescing Supported */
-
-#define IOAT_DEVICE_STATUS_OFFSET              0x0E    /* 16-bit */
-#define IOAT_DEVICE_STATUS_DEGRADED_MODE       0x0001
-
-#define IOAT_CHANNEL_MMIO_SIZE                 0x80    /* Each Channel MMIO space is this size */
-
-/* DMA Channel Registers */
-#define IOAT_CHANCTRL_OFFSET                   0x00    /* 16-bit Channel Control Register */
-#define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK    0xF000
-#define IOAT_CHANCTRL_CHANNEL_IN_USE           0x0100
-#define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL    0x0020
-#define IOAT_CHANCTRL_ERR_INT_EN               0x0010
-#define IOAT_CHANCTRL_ANY_ERR_ABORT_EN         0x0008
-#define IOAT_CHANCTRL_ERR_COMPLETION_EN                0x0004
-#define IOAT_CHANCTRL_INT_DISABLE              0x0001
-
-#define IOAT_DMA_COMP_OFFSET                   0x02    /* 16-bit DMA channel compatibility */
-#define IOAT_DMA_COMP_V1                       0x0001  /* Compatibility with DMA version 1 */
-#define IOAT_DMA_COMP_V2                       0x0002  /* Compatibility with DMA version 2 */
-
-
-#define IOAT1_CHANSTS_OFFSET           0x04    /* 64-bit Channel Status Register */
-#define IOAT2_CHANSTS_OFFSET           0x08    /* 64-bit Channel Status Register */
-#define IOAT_CHANSTS_OFFSET(ver)               ((ver) < IOAT_VER_2_0 \
-                                               ? IOAT1_CHANSTS_OFFSET : IOAT2_CHANSTS_OFFSET)
-#define IOAT1_CHANSTS_OFFSET_LOW       0x04
-#define IOAT2_CHANSTS_OFFSET_LOW       0x08
-#define IOAT_CHANSTS_OFFSET_LOW(ver)           ((ver) < IOAT_VER_2_0 \
-                                               ? IOAT1_CHANSTS_OFFSET_LOW : IOAT2_CHANSTS_OFFSET_LOW)
-#define IOAT1_CHANSTS_OFFSET_HIGH      0x08
-#define IOAT2_CHANSTS_OFFSET_HIGH      0x0C
-#define IOAT_CHANSTS_OFFSET_HIGH(ver)          ((ver) < IOAT_VER_2_0 \
-                                               ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH)
-#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F
-#define IOAT_CHANSTS_SOFT_ERR                  0x0000000000000010
-#define IOAT_CHANSTS_UNAFFILIATED_ERR          0x0000000000000008
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS       0x0000000000000007
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE        0x0
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE  0x1
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED     0x2
-#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED        0x3
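(The duplicated IOAT1_/IOAT2_ offsets exist because hardware v2 relocated this register; the (ver) macros select the right one at run time, with IOAT_VER_2_0 defined elsewhere in the driver. A sketch of polling a channel, "chan_base" and "ver" assumed:)

	u64 status = readq(chan_base + IOAT_CHANSTS_OFFSET(ver));
	u64 completed = status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;

	switch (status & IOAT_CHANSTS_DMA_TRANSFER_STATUS) {
	case IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE:
	case IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED:
		/* "completed" holds the last finished descriptor address */
		break;
	case IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED:
		/* consult CHANERR (below) for the cause */
		break;
	}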
-
-
-
-#define IOAT_CHAN_DMACOUNT_OFFSET      0x06    /* 16-bit DMA Count register */
-
-#define IOAT_DCACTRL_OFFSET         0x30   /* 32 bit Direct Cache Access Control Register */
-#define IOAT_DCACTRL_CMPL_WRITE_ENABLE 0x10000
-#define IOAT_DCACTRL_TARGET_CPU_MASK   0xFFFF /* APIC ID */
-
-/* CB DCA Memory Space Registers */
-#define IOAT_DCAOFFSET_OFFSET       0x14
-/* CB_BAR + IOAT_DCAOFFSET value */
-#define IOAT_DCA_VER_OFFSET         0x00
-#define IOAT_DCA_VER_MAJOR_MASK     0xF0
-#define IOAT_DCA_VER_MINOR_MASK     0x0F
-
-#define IOAT_DCA_COMP_OFFSET        0x02
-#define IOAT_DCA_COMP_V1            0x1
-
-#define IOAT_FSB_CAPABILITY_OFFSET  0x04
-#define IOAT_FSB_CAPABILITY_PREFETCH    0x1
-
-#define IOAT_PCI_CAPABILITY_OFFSET  0x06
-#define IOAT_PCI_CAPABILITY_MEMWR   0x1
-
-#define IOAT_FSB_CAP_ENABLE_OFFSET  0x08
-#define IOAT_FSB_CAP_ENABLE_PREFETCH    0x1
-
-#define IOAT_PCI_CAP_ENABLE_OFFSET  0x0A
-#define IOAT_PCI_CAP_ENABLE_MEMWR   0x1
-
-#define IOAT_APICID_TAG_MAP_OFFSET  0x0C
-#define IOAT_APICID_TAG_MAP_TAG0    0x0000000F
-#define IOAT_APICID_TAG_MAP_TAG0_SHIFT 0
-#define IOAT_APICID_TAG_MAP_TAG1    0x000000F0
-#define IOAT_APICID_TAG_MAP_TAG1_SHIFT 4
-#define IOAT_APICID_TAG_MAP_TAG2    0x00000F00
-#define IOAT_APICID_TAG_MAP_TAG2_SHIFT 8
-#define IOAT_APICID_TAG_MAP_TAG3    0x0000F000
-#define IOAT_APICID_TAG_MAP_TAG3_SHIFT 12
-#define IOAT_APICID_TAG_MAP_TAG4    0x000F0000
-#define IOAT_APICID_TAG_MAP_TAG4_SHIFT 16
-#define IOAT_APICID_TAG_CB2_VALID   0x8080808080
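(A sketch of extracting one of the 4-bit TAG fields with its mask/shift pair; "dca_base" is an assumed pointer into the DCA memory space at CB_BAR plus the IOAT_DCAOFFSET value.)

	u32 tag_map = readl(dca_base + IOAT_APICID_TAG_MAP_OFFSET);
	u8 tag1 = (tag_map & IOAT_APICID_TAG_MAP_TAG1) >>
		  IOAT_APICID_TAG_MAP_TAG1_SHIFT;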
-
-#define IOAT_DCA_GREQID_OFFSET      0x10
-#define IOAT_DCA_GREQID_SIZE        0x04
-#define IOAT_DCA_GREQID_MASK        0xFFFF
-#define IOAT_DCA_GREQID_IGNOREFUN   0x10000000
-#define IOAT_DCA_GREQID_VALID       0x20000000
-#define IOAT_DCA_GREQID_LASTID      0x80000000
-
-#define IOAT3_CSI_CAPABILITY_OFFSET 0x08
-#define IOAT3_CSI_CAPABILITY_PREFETCH    0x1
-
-#define IOAT3_PCI_CAPABILITY_OFFSET 0x0A
-#define IOAT3_PCI_CAPABILITY_MEMWR  0x1
-
-#define IOAT3_CSI_CONTROL_OFFSET    0x0C
-#define IOAT3_CSI_CONTROL_PREFETCH  0x1
-
-#define IOAT3_PCI_CONTROL_OFFSET    0x0E
-#define IOAT3_PCI_CONTROL_MEMWR     0x1
-
-#define IOAT3_APICID_TAG_MAP_OFFSET 0x10
-#define IOAT3_APICID_TAG_MAP_OFFSET_LOW  0x10
-#define IOAT3_APICID_TAG_MAP_OFFSET_HIGH 0x14
-
-#define IOAT3_DCA_GREQID_OFFSET     0x02
-
-#define IOAT1_CHAINADDR_OFFSET         0x0C    /* 64-bit Descriptor Chain Address Register */
-#define IOAT2_CHAINADDR_OFFSET         0x10    /* 64-bit Descriptor Chain Address Register */
-#define IOAT_CHAINADDR_OFFSET(ver)             ((ver) < IOAT_VER_2_0 \
-                                               ? IOAT1_CHAINADDR_OFFSET : IOAT2_CHAINADDR_OFFSET)
-#define IOAT1_CHAINADDR_OFFSET_LOW     0x0C
-#define IOAT2_CHAINADDR_OFFSET_LOW     0x10
-#define IOAT_CHAINADDR_OFFSET_LOW(ver)         ((ver) < IOAT_VER_2_0 \
-                                               ? IOAT1_CHAINADDR_OFFSET_LOW : IOAT2_CHAINADDR_OFFSET_LOW)
-#define IOAT1_CHAINADDR_OFFSET_HIGH    0x10
-#define IOAT2_CHAINADDR_OFFSET_HIGH    0x14
-#define IOAT_CHAINADDR_OFFSET_HIGH(ver)                ((ver) < IOAT_VER_2_0 \
-                                               ? IOAT1_CHAINADDR_OFFSET_HIGH : IOAT2_CHAINADDR_OFFSET_HIGH)
-
-#define IOAT1_CHANCMD_OFFSET           0x14    /*  8-bit DMA Channel Command Register */
-#define IOAT2_CHANCMD_OFFSET           0x04    /*  8-bit DMA Channel Command Register */
-#define IOAT_CHANCMD_OFFSET(ver)               ((ver) < IOAT_VER_2_0 \
-                                               ? IOAT1_CHANCMD_OFFSET : IOAT2_CHANCMD_OFFSET)
-#define IOAT_CHANCMD_RESET                     0x20
-#define IOAT_CHANCMD_RESUME                    0x10
-#define IOAT_CHANCMD_ABORT                     0x08
-#define IOAT_CHANCMD_SUSPEND                   0x04
-#define IOAT_CHANCMD_APPEND                    0x02
-#define IOAT_CHANCMD_START                     0x01
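(A hedged sketch of a quiesce-and-reset sequence using these command bits; note that CHANCMD also moved between v1 (0x14) and v2 (0x04), hence the (ver) selector.)

	writeb(IOAT_CHANCMD_SUSPEND, chan_base + IOAT_CHANCMD_OFFSET(ver));
	/* ... poll CHANSTS until DMA_TRANSFER_STATUS_SUSPENDED ... */
	writeb(IOAT_CHANCMD_RESET, chan_base + IOAT_CHANCMD_OFFSET(ver));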
-
-#define IOAT_CHANCMP_OFFSET                    0x18    /* 64-bit Channel Completion Address Register */
-#define IOAT_CHANCMP_OFFSET_LOW                        0x18
-#define IOAT_CHANCMP_OFFSET_HIGH               0x1C
-
-#define IOAT_CDAR_OFFSET                       0x20    /* 64-bit Current Descriptor Address Register */
-#define IOAT_CDAR_OFFSET_LOW                   0x20
-#define IOAT_CDAR_OFFSET_HIGH                  0x24
-
-#define IOAT_CHANERR_OFFSET                    0x28    /* 32-bit Channel Error Register */
-#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001
-#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR        0x0002
-#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR  0x0004
-#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR     0x0008
-#define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR      0x0010
-#define IOAT_CHANERR_CHANCMD_ERR               0x0020
-#define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR  0x0040
-#define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR      0x0080
-#define IOAT_CHANERR_READ_DATA_ERR             0x0100
-#define IOAT_CHANERR_WRITE_DATA_ERR            0x0200
-#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR    0x0400
-#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR     0x0800
-#define IOAT_CHANERR_COMPLETION_ADDR_ERR       0x1000
-#define IOAT_CHANERR_INT_CONFIGURATION_ERR     0x2000
-#define IOAT_CHANERR_SOFT_ERR                  0x4000
-#define IOAT_CHANERR_UNAFFILIATED_ERR          0x8000
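(Illustrative only, assuming CHANERR is write-one-to-clear: read the error bits, act on them, then write the value back to clear.)

	u32 chanerr = readl(chan_base + IOAT_CHANERR_OFFSET);
	if (chanerr & (IOAT_CHANERR_SOFT_ERR | IOAT_CHANERR_UNAFFILIATED_ERR))
		; /* recoverable: log and continue */
	writel(chanerr, chan_base + IOAT_CHANERR_OFFSET);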
-
-#define IOAT_CHANERR_MASK_OFFSET               0x2C    /* 32-bit Channel Error Register */
-
-#endif /* _IOAT_REGISTERS_H_ */
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
index 949c97ff57e35bec6917c4aab4aca2ebaa165c32..f2ec7243549ed5df0b71b44f5cc5cdcdda6aaae3 100644 (file)
@@ -29,8 +29,8 @@
 
 #include <asm/idle.h>
 
-#include "../dma/ioatdma_hw.h"
-#include "../dma/ioatdma_registers.h"
+#include "../dma/ioat/hw.h"
+#include "../dma/ioat/registers.h"
 
 #define I7300_IDLE_DRIVER_VERSION      "1.55"
 #define I7300_PRINT                    "i7300_idle:"