/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static struct pci_device_id ioat_pci_tbl[] = {
	/* I/OAT v3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },

	/* I/OAT v3.2 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },

	/* I/OAT v3.3 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },

	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void ioat_remove(struct pci_dev *pdev);
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);

static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat+: upper limit for ring size (default: 16)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

struct kmem_cache *ioat_cache;
struct kmem_cache *ioat_sed_cache;

static bool is_jf_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
		return true;
	default:
		return false;
	}
}

static bool is_snb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
		return true;
	default:
		return false;
	}
}

static bool is_ivb_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
		return true;
	default:
		return false;
	}
}

static bool is_hsw_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
		return true;
	default:
		return false;
	}
}

static bool is_bdx_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
		return true;
	default:
		return false;
	}
}

static bool is_xeon_cb32(struct pci_dev *pdev)
{
	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
		is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
}

bool is_bwd_ioat(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	/* even though not Atom, BDX-DE has same DMA silicon */
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}
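
/* BWD2/BWD3 and the BDX-DE parts are treated as having no RAID (XOR/PQ)
 * engines; see the capability mask-off in ioat3_dma_probe().
 */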
static bool is_bwd_noraid(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
		return true;
	default:
		return false;
	}
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @ioat_dma: dma device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
						      dma_src, IOAT_TEST_SIZE,
						      flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
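
/*
 * Note that the self-test above doubles as a minimal example of the
 * dmaengine client sequence: allocate channel resources, map buffers,
 * prep a descriptor, hook a completion callback, tx_submit(), then
 * issue_pending() and check device_tx_status() once the completion
 * (or a timeout) fires.
 */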

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @ioat_dma: ioat dma device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = ioat_dma->dma_dev.chancnt;
	for (i = 0; i < msixcnt; i++)
		ioat_dma->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &ioat_dma->msix_entries[i];
		ioat_chan = ioat_chan_by_index(ioat_dma, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &ioat_dma->msix_entries[j];
				ioat_chan = ioat_chan_by_index(ioat_dma, j);
				devm_free_irq(dev, msix->vector, ioat_chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	ioat_dma->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", ioat_dma);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	ioat_dma->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", ioat_dma);
	if (err)
		goto err_no_irq;

	ioat_dma->irq_mode = IOAT_INTX;
done:
	if (is_bwd_ioat(pdev))
		ioat_intr_quirk(ioat_dma);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	ioat_dma->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
}

static int ioat_probe(struct ioatdma_device *ioat_dma)
{
	int err = -ENODEV;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					     sizeof(struct ioat_dma_descriptor),
					     64, 0);
	if (!ioat_dma->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);

	if (!ioat_dma->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	ioat_enumerate_channels(ioat_dma);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(ioat_dma);
	if (err)
		goto err_setup_interrupts;

	err = ioat3_dma_self_test(ioat_dma);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
	pci_pool_destroy(ioat_dma->completion_pool);
err_completion_pool:
	pci_pool_destroy(ioat_dma->dma_pool);
err_dma_pool:
	return err;
}

static int ioat_register(struct ioatdma_device *ioat_dma)
{
	int err = dma_async_device_register(&ioat_dma->dma_dev);

	if (err) {
		ioat_disable_interrupts(ioat_dma);
		pci_pool_destroy(ioat_dma->completion_pool);
		pci_pool_destroy(ioat_dma->dma_pool);
	}

	return err;
}

static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;

	ioat_disable_interrupts(ioat_dma);

	ioat_kobject_del(ioat_dma);

	dma_async_device_unregister(dma);

	pci_pool_destroy(ioat_dma->dma_pool);
	pci_pool_destroy(ioat_dma->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}

/**
 * ioat_enumerate_channels - find and initialize the device's channels
 * @ioat_dma: the ioat dma device to be enumerated
 */
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
	}
	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	for (i = 0; i < dma->chancnt; i++) {
		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan)
			break;

		ioat_init_channel(ioat_dma, ioat_chan, i);
		ioat_chan->xfercap_log = xfercap_log;
		spin_lock_init(&ioat_chan->prep_lock);
		if (ioat_reset_hw(ioat_chan)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

/**
 * ioat_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_free_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	const int total_descs = 1 << ioat_chan->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat_chan->ring)
		return;

	ioat_stop(ioat_chan);
	ioat_reset_hw(ioat_chan);

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	descs = ioat_ring_space(ioat_chan);
	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
		ioat_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
		dump_desc_dbg(ioat_chan, desc);
		ioat_free_ring_ent(desc, c);
	}

	kfree(ioat_chan->ring);
	ioat_chan->ring = NULL;
	ioat_chan->alloc_order = 0;
	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
		      ioat_chan->completion_dma);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_chan->last_completion = 0;
	ioat_chan->completion_dma = 0;
	ioat_chan->dmacount = 0;
}

/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
 * @chan: channel to be initialized
 */
static int ioat_alloc_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;
	u32 chanerr;

	/* have we already been set up? */
	if (ioat_chan->ring)
		return 1 << ioat_chan->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
			       GFP_KERNEL, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;

	memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64)ioat_chan->completion_dma) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	ioat_chan->ring = ring;
	ioat_chan->head = 0;
	ioat_chan->issued = 0;
	ioat_chan->tail = 0;
	ioat_chan->alloc_order = order;
	set_bit(IOAT_RUN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_start_null_desc(ioat_chan);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(ioat_chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status))
		return 1 << ioat_chan->alloc_order;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	dev_WARN(to_dev(ioat_chan),
		 "failed to start channel chanerr: %#x\n", chanerr);
	ioat_free_chan_resources(c);
	return -EFAULT;
}

/* common channel initialization */
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c = &ioat_chan->dma_chan;
	unsigned long data = (unsigned long) c;

	ioat_chan->ioat_dma = ioat_dma;
	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&ioat_chan->cleanup_lock);
	ioat_chan->dma_chan.device = dma;
	dma_cookie_init(&ioat_chan->dma_chan);
	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
	ioat_dma->idx[idx] = ioat_chan;
	init_timer(&ioat_chan->timer);
	ioat_chan->timer.function = ioat_timer_event;
	ioat_chan->timer.data = data;
	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 op = 0;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);

		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	op = IOAT_OP_XOR;

	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dest_dma))
		goto dma_unmap;

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto dma_unmap;
	}
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);

		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	op = IOAT_OP_XOR_VAL;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto dma_unmap;
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	memset(page_address(dest), 0, PAGE_SIZE);

	/* test for non-zero parity sum */
	op = IOAT_OP_XOR_VAL;

	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = DMA_ERROR_CODE;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_srcs[i]))
			goto dma_unmap;
	}
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto dma_unmap;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto dma_unmap;
	}

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

	goto free_resources;
dma_unmap:
	if (op == IOAT_OP_XOR) {
		if (dest_dma != DMA_ERROR_CODE)
			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	} else if (op == IOAT_OP_XOR_VAL) {
		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
			if (dma_srcs[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
	}
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int rc;

	rc = ioat_dma_self_test(ioat_dma);
	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(ioat_dma);

	return rc;
}
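
/*
 * On parts with descriptor write back error status (DWBES), RAID errors
 * are reported through the completion write back rather than through an
 * interrupt, so the corresponding XOR P/CRC and Q error interrupts are
 * masked per channel.
 */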
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	u32 errmask;

	dma = &ioat_dma->dma_dev;

	/*
	 * if we have descriptor write back error status, we mask the
	 * error interrupts
	 */
	if (ioat_dma->cap & IOAT_CAP_DWBES) {
		list_for_each_entry(c, &dma->channels, device_node) {
			ioat_chan = to_ioat_chan(c);
			errmask = readl(ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
				   IOAT_CHANERR_XOR_Q_ERR;
			writel(errmask, ioat_chan->reg_base +
					IOAT_CHANERR_MASK_OFFSET);
		}
	}
}

static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
{
	struct pci_dev *pdev = ioat_dma->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioatdma_chan *ioat_chan;
	bool is_raid_device = false;
	int err;

	dma = &ioat_dma->dma_dev;
	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat_issue_pending;
	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
	dma->device_free_chan_resources = ioat_free_chan_resources;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;

	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);

	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
		ioat_dma->cap &=
			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

	/* dca is incompatible with raid operations */
	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (ioat_dma->cap & IOAT_CAP_XOR) {
		is_raid_device = true;
		dma->max_xor = 8;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat_prep_xor_val;
	}

	if (ioat_dma->cap & IOAT_CAP_PQ) {
		is_raid_device = true;

		dma->device_prep_dma_pq = ioat_prep_pq;
		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

		if (ioat_dma->cap & IOAT_CAP_RAID16SS)
			dma_set_maxpq(dma, 16, 0);
		else
			dma_set_maxpq(dma, 8, 0);

		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
			dma->device_prep_dma_xor = ioat_prep_pqxor;
			dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

			if (ioat_dma->cap & IOAT_CAP_RAID16SS)
				dma->max_xor = 16;
			else
				dma->max_xor = 8;
		}
	}

	dma->device_tx_status = ioat_tx_status;

	/* starting with CB3.3 super extended descriptors are supported */
	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
		char pool_name[14];
		int i;

		for (i = 0; i < MAX_SED_POOLS; i++) {
			snprintf(pool_name, 14, "ioat_hw%d_sed", i);

			/* allocate SED DMA pool */
			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
					&pdev->dev,
					SED_SIZE * (i + 1), 64, 0);
			if (!ioat_dma->sed_hw_pool[i])
				return -ENOMEM;
		}
	}

	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
		dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	err = ioat_probe(ioat_dma);
	if (err)
		return err;

	list_for_each_entry(c, &dma->channels, device_node) {
		ioat_chan = to_ioat_chan(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(ioat_dma);
	if (err)
		return err;

	ioat_kobject_add(ioat_dma, &ioat_ktype);

	if (dca)
		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);

	return 0;
}

#define DRV_NAME "ioatdma"

static struct pci_driver ioat_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= ioat_pci_tbl,
	.probe		= ioat_pci_probe,
	.remove		= ioat_remove,
};

static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
	struct device *dev = &pdev->dev;
	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	d->pdev = pdev;
	d->reg_base = iobase;
	return d;
}

static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct ioatdma_device *device;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
	if (err)
		return err;
	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		return err;

	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
	if (!device)
		return -ENOMEM;
	pci_set_master(pdev);
	pci_set_drvdata(pdev, device);

	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
	if (device->version >= IOAT_VER_3_0)
		err = ioat3_dma_probe(device, ioat_dca_enabled);
	else
		return -ENODEV;

	if (err) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
		return -ENODEV;
	}

	return 0;
}

static void ioat_remove(struct pci_dev *pdev)
{
	struct ioatdma_device *device = pci_get_drvdata(pdev);

	if (!device)
		return;

	dev_err(&pdev->dev, "Removing dma and dca services\n");
	if (device->dca) {
		unregister_dca_provider(device->dca, &pdev->dev);
		free_dca_provider(device->dca);
		device->dca = NULL;
	}
	ioat_dma_remove(device);
}

static int __init ioat_init_module(void)
{
	int err = -ENOMEM;

	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
		DRV_NAME, IOAT_DMA_VERSION);

	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ioat_cache)
		return -ENOMEM;

	ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
	if (!ioat_sed_cache)
		goto err_ioat_cache;

	err = pci_register_driver(&ioat_pci_driver);
	if (err)
		goto err_ioat3_cache;

	return 0;

 err_ioat3_cache:
	kmem_cache_destroy(ioat_sed_cache);

 err_ioat_cache:
	kmem_cache_destroy(ioat_cache);

	return err;
}
module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
	kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);