/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>

#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "../dmaengine.h"
MODULE_VERSION(IOAT_DMA_VERSION);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
static struct pci_device_id ioat_pci_tbl[] = {
        /* I/OAT v3 platforms */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },

        /* I/OAT v3.2 platforms */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },

        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },

        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },

        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },

        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },

        /* I/OAT v3.3 platforms */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },

        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },

        { 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
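
/*
 * MODULE_DEVICE_TABLE(pci, ...) exports the ID list above in the module's
 * alias metadata so userspace (udev/modprobe) can autoload ioatdma when a
 * matching PCI function is discovered; the same table is handed to the PCI
 * core through ioat_pci_driver.id_table below to drive probe matching.
 */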
static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void ioat_remove(struct pci_dev *pdev);
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
                  struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
static int ioat_dca_enabled = 1;
module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
                 "high-water mark for pushing ioat descriptors (default: 4)");
int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
                 "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
                 "ioat+: upper limit for ring size (default: 16)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
                    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
                 "set ioat interrupt style: msix (default), msi, intx");
struct kmem_cache *ioat_cache;
struct kmem_cache *ioat_sed_cache;
static bool is_jf_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
                return true;
        default:
                return false;
        }
}
static bool is_snb_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
                return true;
        default:
                return false;
        }
}
static bool is_ivb_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
                return true;
        default:
                return false;
        }
}
static bool is_hsw_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
                return true;
        default:
                return false;
        }
}
static bool is_bdx_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
        case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
                return true;
        default:
                return false;
        }
}
static bool is_xeon_cb32(struct pci_dev *pdev)
{
        return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
                is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
}
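
/*
 * The is_*_ioat() helpers above classify devices by platform generation;
 * is_xeon_cb32() covers every Xeon I/OAT v3.2 part in the ID table (JSF,
 * SNB, IVB, HSW, BDX) and is used in ioat3_dma_probe() to mask off the
 * XOR/PQ/RAID16SS capabilities on those platforms.
 */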
bool is_bwd_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
        /* even though not Atom, BDX-DE has same DMA silicon */
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
                return true;
        default:
                return false;
        }
}
static bool is_bwd_noraid(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
        case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
                return true;
        default:
                return false;
        }
}
/*
 * Perform an I/OAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
        struct completion *cmp = dma_async_param;

        complete(cmp);
}
/**
 * ioat_dma_self_test - Perform an I/OAT transaction to verify the HW works.
 * @ioat_dma: dma device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
{
        int i;
        u8 *src;
        u8 *dest;
        struct dma_device *dma = &ioat_dma->dma_dev;
        struct device *dev = &ioat_dma->pdev->dev;
        struct dma_chan *dma_chan;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int err = 0;
        struct completion cmp;
        unsigned long tmo;
        unsigned long flags;

        src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;
        dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < IOAT_TEST_SIZE; i++)
                src[i] = (u8)i;

        /* Start copy, using first DMA channel */
        dma_chan = container_of(dma->channels.next, struct dma_chan,
                                device_node);
        if (dma->device_alloc_chan_resources(dma_chan) < 1) {
                dev_err(dev, "selftest cannot allocate chan resource\n");
                err = -ENODEV;
                goto out;
        }

        dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_src)) {
                dev_err(dev, "mapping src buffer failed\n");
                err = -ENOMEM;
                goto free_resources;
        }
        dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma_dest)) {
                dev_err(dev, "mapping dest buffer failed\n");
                err = -ENOMEM;
                goto unmap_src;
        }
        flags = DMA_PREP_INTERRUPT;
        tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
                                                      dma_src, IOAT_TEST_SIZE,
                                                      flags);
        if (!tx) {
                dev_err(dev, "Self-test prep failed, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test setup failed, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (tmo == 0 ||
            dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
        }
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                dev_err(dev, "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
        }

unmap_dma:
        dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
        dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
        dma->device_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @ioat_dma: ioat dma device
 *
 * Falls back from MSI-X to MSI to legacy INTx, in that order, honoring
 * the ioat_interrupt_style module parameter as the starting point.
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
{
        struct ioatdma_chan *ioat_chan;
        struct pci_dev *pdev = ioat_dma->pdev;
        struct device *dev = &pdev->dev;
        struct msix_entry *msix;
        int i, j, msixcnt;
        int err = -EINVAL;
        u8 intrctrl = 0;

        if (!strcmp(ioat_interrupt_style, "msix"))
                goto msix;
        if (!strcmp(ioat_interrupt_style, "msi"))
                goto msi;
        if (!strcmp(ioat_interrupt_style, "intx"))
                goto intx;
        dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
        goto err_no_irq;

msix:
        /* The number of MSI-X vectors should equal the number of channels */
        msixcnt = ioat_dma->dma_dev.chancnt;
        for (i = 0; i < msixcnt; i++)
                ioat_dma->msix_entries[i].entry = i;

        err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
        if (err)
                goto msi;

        for (i = 0; i < msixcnt; i++) {
                msix = &ioat_dma->msix_entries[i];
                ioat_chan = ioat_chan_by_index(ioat_dma, i);
                err = devm_request_irq(dev, msix->vector,
                                       ioat_dma_do_interrupt_msix, 0,
                                       "ioat-msix", ioat_chan);
                if (err) {
                        for (j = 0; j < i; j++) {
                                msix = &ioat_dma->msix_entries[j];
                                ioat_chan = ioat_chan_by_index(ioat_dma, j);
                                devm_free_irq(dev, msix->vector, ioat_chan);
                        }
                        goto msi;
                }
        }
        intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
        ioat_dma->irq_mode = IOAT_MSIX;
        goto done;

msi:
        err = pci_enable_msi(pdev);
        if (err)
                goto intx;

        err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
                               "ioat-msi", ioat_dma);
        if (err) {
                pci_disable_msi(pdev);
                goto intx;
        }
        ioat_dma->irq_mode = IOAT_MSI;
        goto done;

intx:
        err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
                               IRQF_SHARED, "ioat-intx", ioat_dma);
        if (err)
                goto err_no_irq;

        ioat_dma->irq_mode = IOAT_INTX;
done:
        if (is_bwd_ioat(pdev))
                ioat_intr_quirk(ioat_dma);
        intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
        writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
        return 0;

err_no_irq:
        /* Disable all interrupt generation */
        writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
        ioat_dma->irq_mode = IOAT_NOIRQ;
        dev_err(dev, "no usable interrupts\n");
        return err;
}
static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
        /* Disable all interrupt generation */
        writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
}
static int ioat_probe(struct ioatdma_device *ioat_dma)
{
        int err = -ENODEV;
        struct dma_device *dma = &ioat_dma->dma_dev;
        struct pci_dev *pdev = ioat_dma->pdev;
        struct device *dev = &pdev->dev;

        /* DMA coherent memory pool for DMA descriptor allocations */
        ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
                                             sizeof(struct ioat_dma_descriptor),
                                             64, 0);
        if (!ioat_dma->dma_pool) {
                err = -ENOMEM;
                goto err_dma_pool;
        }

        ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
                                                    sizeof(u64),
                                                    SMP_CACHE_BYTES,
                                                    SMP_CACHE_BYTES);

        if (!ioat_dma->completion_pool) {
                err = -ENOMEM;
                goto err_completion_pool;
        }

        ioat_enumerate_channels(ioat_dma);

        dma_cap_set(DMA_MEMCPY, dma->cap_mask);
        dma->dev = &pdev->dev;

        if (!dma->chancnt) {
                dev_err(dev, "channel enumeration error\n");
                goto err_setup_interrupts;
        }

        err = ioat_dma_setup_interrupts(ioat_dma);
        if (err)
                goto err_setup_interrupts;

        err = ioat3_dma_self_test(ioat_dma);
        if (err)
                goto err_self_test;

        return 0;

err_self_test:
        ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
        pci_pool_destroy(ioat_dma->completion_pool);
err_completion_pool:
        pci_pool_destroy(ioat_dma->dma_pool);
err_dma_pool:
        return err;
}
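
/*
 * Probe-order summary for the function above: the descriptor and completion
 * pools are created first, then channels are enumerated, interrupts are
 * wired up, and finally the self-tests run; the error labels unwind those
 * steps in reverse.
 */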
static int ioat_register(struct ioatdma_device *ioat_dma)
{
        int err = dma_async_device_register(&ioat_dma->dma_dev);

        if (err) {
                ioat_disable_interrupts(ioat_dma);
                pci_pool_destroy(ioat_dma->completion_pool);
                pci_pool_destroy(ioat_dma->dma_pool);
        }

        return err;
}
static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
        struct dma_device *dma = &ioat_dma->dma_dev;

        ioat_disable_interrupts(ioat_dma);

        ioat_kobject_del(ioat_dma);

        dma_async_device_unregister(dma);

        pci_pool_destroy(ioat_dma->dma_pool);
        pci_pool_destroy(ioat_dma->completion_pool);

        INIT_LIST_HEAD(&dma->channels);
}
/**
 * ioat_enumerate_channels - find and initialize the device's channels
 * @ioat_dma: the ioat dma device to be enumerated
 */
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
        struct ioatdma_chan *ioat_chan;
        struct device *dev = &ioat_dma->pdev->dev;
        struct dma_device *dma = &ioat_dma->dma_dev;
        u8 xfercap_log;
        int i;

        INIT_LIST_HEAD(&dma->channels);
        dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
        dma->chancnt &= 0x1f; /* bits [4:0] valid */
        if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
                dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
                         dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
                dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
        }
        xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap_log &= 0x1f; /* bits [4:0] valid */
        if (xfercap_log == 0)
                return 0;
        dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

        for (i = 0; i < dma->chancnt; i++) {
                ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
                if (!ioat_chan)
                        break;

                ioat_init_channel(ioat_dma, ioat_chan, i);
                ioat_chan->xfercap_log = xfercap_log;
                spin_lock_init(&ioat_chan->prep_lock);
                if (ioat_reset_hw(ioat_chan)) {
                        i = 0;
                        break;
                }
        }
        dma->chancnt = i;
        return i;
}
/**
 * ioat_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_free_chan_resources(struct dma_chan *c)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct ioat_ring_ent *desc;
        const int total_descs = 1 << ioat_chan->alloc_order;
        int descs;
        int i;

        /* Before freeing channel resources first check
         * if they have been previously allocated for this channel.
         */
        if (!ioat_chan->ring)
                return;

        ioat_stop(ioat_chan);
        ioat_reset_hw(ioat_chan);

        spin_lock_bh(&ioat_chan->cleanup_lock);
        spin_lock_bh(&ioat_chan->prep_lock);
        descs = ioat_ring_space(ioat_chan);
        dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
        for (i = 0; i < descs; i++) {
                desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
                ioat_free_ring_ent(desc, c);
        }

        if (descs < total_descs)
                dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
                        total_descs - descs);

        for (i = 0; i < total_descs - descs; i++) {
                desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
                dump_desc_dbg(ioat_chan, desc);
                ioat_free_ring_ent(desc, c);
        }

        kfree(ioat_chan->ring);
        ioat_chan->ring = NULL;
        ioat_chan->alloc_order = 0;
        pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
                      ioat_chan->completion_dma);
        spin_unlock_bh(&ioat_chan->prep_lock);
        spin_unlock_bh(&ioat_chan->cleanup_lock);

        ioat_chan->last_completion = 0;
        ioat_chan->completion_dma = 0;
        ioat_chan->dmacount = 0;
}
/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
 * @chan: channel to be initialized
 */
static int ioat_alloc_chan_resources(struct dma_chan *c)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        struct ioat_ring_ent **ring;
        u64 status;
        int order;
        int i = 0;
        u32 chanerr;

        /* have we already been set up? */
        if (ioat_chan->ring)
                return 1 << ioat_chan->alloc_order;

        /* Setup register to interrupt and write completion status on error */
        writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        ioat_chan->completion =
                pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
                               GFP_KERNEL, &ioat_chan->completion_dma);
        if (!ioat_chan->completion)
                return -ENOMEM;

        memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
        writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64)ioat_chan->completion_dma) >> 32,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        order = ioat_get_alloc_order();
        ring = ioat_alloc_ring(c, order, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        spin_lock_bh(&ioat_chan->cleanup_lock);
        spin_lock_bh(&ioat_chan->prep_lock);
        ioat_chan->ring = ring;
        ioat_chan->head = 0;
        ioat_chan->issued = 0;
        ioat_chan->tail = 0;
        ioat_chan->alloc_order = order;
        set_bit(IOAT_RUN, &ioat_chan->state);
        spin_unlock_bh(&ioat_chan->prep_lock);
        spin_unlock_bh(&ioat_chan->cleanup_lock);

        ioat_start_null_desc(ioat_chan);

        /* check that we got off the ground */
        do {
                udelay(1);
                status = ioat_chansts(ioat_chan);
        } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

        if (is_ioat_active(status) || is_ioat_idle(status))
                return 1 << ioat_chan->alloc_order;

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

        dev_WARN(to_dev(ioat_chan),
                 "failed to start channel chanerr: %#x\n", chanerr);
        ioat_free_chan_resources(c);
        return -EFAULT;
}
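
/*
 * The alloc/free hooks above are not called directly by consumers; they run
 * when a dmaengine client acquires or releases the channel. A minimal
 * client-side sketch (illustrative, not part of this driver; error handling
 * elided):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *				// -> ioat_alloc_chan_resources()
 *	// ...prepare and issue transfers via the dmaengine prep calls
 *	// and dma_async_issue_pending()...
 *	dma_release_channel(chan);
 *				// -> ioat_free_chan_resources()
 */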
/* common channel initialization */
static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
                  struct ioatdma_chan *ioat_chan, int idx)
{
        struct dma_device *dma = &ioat_dma->dma_dev;
        struct dma_chan *c = &ioat_chan->dma_chan;
        unsigned long data = (unsigned long) c;

        ioat_chan->ioat_dma = ioat_dma;
        ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
        spin_lock_init(&ioat_chan->cleanup_lock);
        ioat_chan->dma_chan.device = dma;
        dma_cookie_init(&ioat_chan->dma_chan);
        list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
        ioat_dma->idx[idx] = ioat_chan;
        init_timer(&ioat_chan->timer);
        ioat_chan->timer.function = ioat_timer_event;
        ioat_chan->timer.data = data;
        tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
}
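
/*
 * Register layout implied by the math above: device-global registers live
 * at the start of the MMIO BAR, and channel N's register block begins at
 * reg_base + 0x80 * (N + 1), i.e. each channel owns a 0x80-byte window.
 */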
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
{
        int i, src_idx;
        struct page *dest;
        struct page *xor_srcs[IOAT_NUM_SRC_TEST];
        struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
        dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
        dma_addr_t dest_dma;
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        u8 cmp_byte = 0;
        u32 cmp_word;
        u32 xor_val_result;
        int err = 0;
        struct completion cmp;
        unsigned long tmo;
        struct device *dev = &ioat_dma->pdev->dev;
        struct dma_device *dma = &ioat_dma->dma_dev;
        u8 op = 0;

        dev_dbg(dev, "%s\n", __func__);

        if (!dma_has_cap(DMA_XOR, dma->cap_mask))
                return 0;

        for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
                if (!xor_srcs[src_idx]) {
                        while (src_idx--)
                                __free_page(xor_srcs[src_idx]);
                        return -ENOMEM;
                }
        }

        dest = alloc_page(GFP_KERNEL);
        if (!dest) {
                while (src_idx--)
                        __free_page(xor_srcs[src_idx]);
                return -ENOMEM;
        }

        /* Fill in src buffers */
        for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
                u8 *ptr = page_address(xor_srcs[src_idx]);

                for (i = 0; i < PAGE_SIZE; i++)
                        ptr[i] = (1 << src_idx);
        }

        /*
         * Source page N is filled with (1 << N), so the XOR of all
         * IOAT_NUM_SRC_TEST pages is a constant byte; replicate it into a
         * 32-bit word for the compare loop below.
         */
        for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
                cmp_byte ^= (u8) (1 << src_idx);

        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
                        (cmp_byte << 8) | cmp_byte;

        memset(page_address(dest), 0, PAGE_SIZE);

        dma_chan = container_of(dma->channels.next, struct dma_chan,
                                device_node);
        if (dma->device_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        /* test xor */
        op = IOAT_OP_XOR;

        dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dest_dma))
                goto dma_unmap;

        for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                dma_srcs[i] = DMA_ERROR_CODE;
        for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
                dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma_srcs[i]))
                        goto dma_unmap;
        }
        tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                      IOAT_NUM_SRC_TEST, PAGE_SIZE,
                                      DMA_PREP_INTERRUPT);

        if (!tx) {
                dev_err(dev, "Self-test xor prep failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test xor setup failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (tmo == 0 ||
            dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test xor timed out\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

        dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
                u32 *ptr = page_address(dest);

                if (ptr[i] != cmp_word) {
                        dev_err(dev, "Self-test xor failed compare\n");
                        err = -ENODEV;
                        goto free_resources;
                }
        }
        dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

        dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

        /* skip validate if the capability is not present */
        if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
                goto free_resources;

        op = IOAT_OP_XOR_VAL;

        /* validate the sources with the destination page */
        for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                xor_val_srcs[i] = xor_srcs[i];
        xor_val_srcs[i] = dest;

        xor_val_result = 1;

        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_srcs[i] = DMA_ERROR_CODE;
        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
                dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma_srcs[i]))
                        goto dma_unmap;
        }
        tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                          IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
                                          &xor_val_result, DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(dev, "Self-test zero prep failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test zero setup failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (tmo == 0 ||
            dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

        if (xor_val_result != 0) {
                dev_err(dev, "Self-test validate failed compare\n");
                err = -ENODEV;
                goto free_resources;
        }

        memset(page_address(dest), 0, PAGE_SIZE);

        /* test for non-zero parity sum */
        op = IOAT_OP_XOR_VAL;

        xor_val_result = 0;
        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_srcs[i] = DMA_ERROR_CODE;
        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
                dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma_srcs[i]))
                        goto dma_unmap;
        }
        tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                          IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
                                          &xor_val_result, DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(dev, "Self-test 2nd zero prep failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test 2nd zero setup failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (tmo == 0 ||
            dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test 2nd validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        if (xor_val_result != SUM_CHECK_P_RESULT) {
                dev_err(dev, "Self-test validate failed compare\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

        goto free_resources;
dma_unmap:
        if (op == IOAT_OP_XOR) {
                if (dest_dma != DMA_ERROR_CODE)
                        dma_unmap_page(dev, dest_dma, PAGE_SIZE,
                                       DMA_FROM_DEVICE);
                for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                        if (dma_srcs[i] != DMA_ERROR_CODE)
                                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
                                               DMA_TO_DEVICE);
        } else if (op == IOAT_OP_XOR_VAL) {
                for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                        if (dma_srcs[i] != DMA_ERROR_CODE)
                                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
                                               DMA_TO_DEVICE);
        }
free_resources:
        dma->device_free_chan_resources(dma_chan);
out:
        src_idx = IOAT_NUM_SRC_TEST;
        while (src_idx--)
                __free_page(xor_srcs[src_idx]);
        __free_page(dest);
        return err;
}
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
{
        int rc;

        rc = ioat_dma_self_test(ioat_dma);
        if (rc)
                return rc;

        rc = ioat_xor_val_self_test(ioat_dma);

        return rc;
}
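
/*
 * ioat3_dma_self_test() gates device registration: ioat_probe() fails the
 * whole probe if either the memcpy self-test or the xor/xor-validate
 * self-test above reports an error.
 */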
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
{
        struct dma_device *dma;
        struct dma_chan *c;
        struct ioatdma_chan *ioat_chan;
        u32 errmask;

        dma = &ioat_dma->dma_dev;

        /*
         * if we have descriptor write back error status, we mask the
         * error interrupts
         */
        if (ioat_dma->cap & IOAT_CAP_DWBES) {
                list_for_each_entry(c, &dma->channels, device_node) {
                        ioat_chan = to_ioat_chan(c);
                        errmask = readl(ioat_chan->reg_base +
                                        IOAT_CHANERR_MASK_OFFSET);
                        errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
                                   IOAT_CHANERR_XOR_Q_ERR;
                        writel(errmask, ioat_chan->reg_base +
                                        IOAT_CHANERR_MASK_OFFSET);
                }
        }
}
static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
{
        struct pci_dev *pdev = ioat_dma->pdev;
        int dca_en = system_has_dca_enabled(pdev);
        struct dma_device *dma;
        struct dma_chan *c;
        struct ioatdma_chan *ioat_chan;
        bool is_raid_device = false;
        int err;

        dma = &ioat_dma->dma_dev;
        dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
        dma->device_issue_pending = ioat_issue_pending;
        dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
        dma->device_free_chan_resources = ioat_free_chan_resources;

        dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
        dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;

        ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);

        if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
                ioat_dma->cap &=
                        ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

        /* dca is incompatible with raid operations */
        if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
                ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

        if (ioat_dma->cap & IOAT_CAP_XOR) {
                is_raid_device = true;
                dma->max_xor = 8;

                dma_cap_set(DMA_XOR, dma->cap_mask);
                dma->device_prep_dma_xor = ioat_prep_xor;

                dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
                dma->device_prep_dma_xor_val = ioat_prep_xor_val;
        }

        if (ioat_dma->cap & IOAT_CAP_PQ) {
                is_raid_device = true;

                dma->device_prep_dma_pq = ioat_prep_pq;
                dma->device_prep_dma_pq_val = ioat_prep_pq_val;
                dma_cap_set(DMA_PQ, dma->cap_mask);
                dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

                if (ioat_dma->cap & IOAT_CAP_RAID16SS)
                        dma_set_maxpq(dma, 16, 0);
                else
                        dma_set_maxpq(dma, 8, 0);

                if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
                        dma->device_prep_dma_xor = ioat_prep_pqxor;
                        dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
                        dma_cap_set(DMA_XOR, dma->cap_mask);
                        dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

                        if (ioat_dma->cap & IOAT_CAP_RAID16SS)
                                dma->max_xor = 16;
                        else
                                dma->max_xor = 8;
                }
        }

        dma->device_tx_status = ioat_tx_status;

        /* starting with CB3.3 super extended descriptors are supported */
        if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
                char pool_name[14];
                int i;

                for (i = 0; i < MAX_SED_POOLS; i++) {
                        snprintf(pool_name, 14, "ioat_hw%d_sed", i);

                        /* allocate SED DMA pool */
                        ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
                                        &pdev->dev,
                                        SED_SIZE * (i + 1), 64, 0);
                        if (!ioat_dma->sed_hw_pool[i])
                                return -ENOMEM;
                }
        }

        if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
                dma_cap_set(DMA_PRIVATE, dma->cap_mask);

        err = ioat_probe(ioat_dma);
        if (err)
                return err;

        list_for_each_entry(c, &dma->channels, device_node) {
                ioat_chan = to_ioat_chan(c);
                writel(IOAT_DMA_DCA_ANY_CPU,
                       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
        }

        err = ioat_register(ioat_dma);
        if (err)
                return err;

        ioat_kobject_add(ioat_dma, &ioat_ktype);

        if (dca)
                ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);

        return 0;
}
static void ioat_shutdown(struct pci_dev *pdev)
{
        struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);
        struct ioatdma_chan *ioat_chan;
        int i;

        if (!ioat_dma)
                return;

        for (i = 0; i < IOAT_MAX_CHANS; i++) {
                ioat_chan = ioat_dma->idx[i];
                if (!ioat_chan)
                        continue;

                spin_lock_bh(&ioat_chan->prep_lock);
                set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
                del_timer_sync(&ioat_chan->timer);
                spin_unlock_bh(&ioat_chan->prep_lock);
                /* this should quiesce then reset */
                ioat_reset_hw(ioat_chan);
        }

        ioat_disable_interrupts(ioat_dma);
}
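
/*
 * Note on the shutdown sequence above: setting IOAT_CHAN_DOWN under
 * prep_lock blocks new descriptor submissions, del_timer_sync() stops the
 * per-channel timer, and ioat_reset_hw() quiesces and resets each channel
 * before interrupt generation is disabled at the device level.
 */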
void ioat_resume(struct ioatdma_device *ioat_dma)
{
        struct ioatdma_chan *ioat_chan;
        u32 chanerr;
        int i;

        for (i = 0; i < IOAT_MAX_CHANS; i++) {
                ioat_chan = ioat_dma->idx[i];
                if (!ioat_chan)
                        continue;

                spin_lock_bh(&ioat_chan->prep_lock);
                clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
                spin_unlock_bh(&ioat_chan->prep_lock);

                chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
                writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

                /* no need to reset as shutdown already did that */
        }
}
#define DRV_NAME "ioatdma"
static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
                                                 enum pci_channel_state error)
{
        dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error);

        /* quiesce and block I/O */
        ioat_shutdown(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
{
        pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
        int err;

        dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);

        if (pci_enable_device_mem(pdev) < 0) {
                dev_err(&pdev->dev,
                        "Failed to enable PCIe device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
                pci_save_state(pdev);
                pci_wake_from_d3(pdev, false);
        }

        err = pci_cleanup_aer_uncorrect_error_status(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "AER uncorrect error status clear failed: %#x\n", err);
        }

        return result;
}
static void ioat_pcie_error_resume(struct pci_dev *pdev)
{
        struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev);

        dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME);

        /* initialize and bring everything back */
        ioat_resume(ioat_dma);
}
static const struct pci_error_handlers ioat_err_handler = {
        .error_detected = ioat_pcie_error_detected,
        .slot_reset = ioat_pcie_error_slot_reset,
        .resume = ioat_pcie_error_resume,
};
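
/*
 * AER recovery flow as wired up above: the PCI core invokes
 * .error_detected (quiesce via ioat_shutdown), then resets the slot and
 * calls .slot_reset (re-enable and restore the device), and finally
 * .resume (clear channel errors and lift IOAT_CHAN_DOWN via ioat_resume).
 */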
static struct pci_driver ioat_pci_driver = {
        .name = DRV_NAME,
        .id_table = ioat_pci_tbl,
        .probe = ioat_pci_probe,
        .remove = ioat_remove,
        .shutdown = ioat_shutdown,
        .err_handler = &ioat_err_handler,
};
static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{
        struct device *dev = &pdev->dev;
        struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);

        if (!d)
                return NULL;
        d->pdev = pdev;
        d->reg_base = iobase;
        return d;
}
static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void __iomem * const *iomap;
        struct device *dev = &pdev->dev;
        struct ioatdma_device *device;
        int err;

        err = pcim_enable_device(pdev);
        if (err)
                return err;

        err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
        if (err)
                return err;
        iomap = pcim_iomap_table(pdev);
        if (!iomap)
                return -ENOMEM;

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err)
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err)
                return err;

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err)
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (err)
                return err;

        device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
        if (!device)
                return -ENOMEM;
        pci_set_master(pdev);
        pci_set_drvdata(pdev, device);

        device->version = readb(device->reg_base + IOAT_VER_OFFSET);
        if (device->version >= IOAT_VER_3_0) {
                err = ioat3_dma_probe(device, ioat_dca_enabled);

                if (device->version >= IOAT_VER_3_3)
                        pci_enable_pcie_error_reporting(pdev);
        } else
                return -ENODEV;

        if (err) {
                dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
                pci_disable_pcie_error_reporting(pdev);
                return -ENODEV;
        }

        return 0;
}
static void ioat_remove(struct pci_dev *pdev)
{
        struct ioatdma_device *device = pci_get_drvdata(pdev);

        if (!device)
                return;

        dev_err(&pdev->dev, "Removing dma and dca services\n");
        if (device->dca) {
                unregister_dca_provider(device->dca, &pdev->dev);
                free_dca_provider(device->dca);
                device->dca = NULL;
        }

        pci_disable_pcie_error_reporting(pdev);
        ioat_dma_remove(device);
}
static int __init ioat_init_module(void)
{
        int err = -ENOMEM;

        pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
                DRV_NAME, IOAT_DMA_VERSION);

        ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!ioat_cache)
                return -ENOMEM;

        ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
        if (!ioat_sed_cache)
                goto err_ioat_cache;

        err = pci_register_driver(&ioat_pci_driver);
        if (err)
                goto err_ioat3_cache;

        return 0;

err_ioat3_cache:
        kmem_cache_destroy(ioat_sed_cache);

err_ioat_cache:
        kmem_cache_destroy(ioat_cache);

        return err;
}
module_init(ioat_init_module);
static void __exit ioat_exit_module(void)
{
        pci_unregister_driver(&ioat_pci_driver);
        /* destroy both caches created in ioat_init_module() to avoid
         * leaking ioat_sed_cache across module unload/reload */
        kmem_cache_destroy(ioat_sed_cache);
        kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);