/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"

#define MAX_SCF	256

/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
				       2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
					0, 1, 2, 3, 4, 5, 6 };
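
/*
 * How the tables above are used: xor_idx_to_desc and pq_idx_to_desc are
 * per-source bitmasks (0xe0 covers sources 5-7, 0xf8 covers sources 3-7);
 * the expression "mask >> idx & 1" below selects either the base (0) or
 * the extended (1) descriptor for a given source index.  pq16_idx_to_desc
 * instead indexes a three-entry array (the base descriptor plus the two
 * halves of a sed entry).  The matching *_idx_to_field tables then give
 * the u64 slot for the address within the chosen descriptor.
 */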

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
{
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	return raw->field[pq16_idx_to_field[idx]];
}

static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}

static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
			dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
	struct ioat_pq16a_descriptor *pq16 =
		(struct ioat_pq16a_descriptor *)desc[1];
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	raw->field[pq16_idx_to_field[idx]] = addr + offset;

	if (idx < 8)
		pq->coef[idx] = coef;
	else
		pq16->coef[idx - 8] = coef;
}
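
/*
 * A sed ("super extended descriptor") entry supplies the extra source
 * slots that the 16-source pq operations need; its hardware memory comes
 * from one of the ioat_dma->sed_hw_pool dma pools, selected by hw_pool.
 */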
static struct ioat_sed_ent *
ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool)
{
	struct ioat_sed_ent *sed;
	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;

	sed = kmem_cache_alloc(ioat_sed_cache, flags);
	if (!sed)
		return NULL;

	sed->hw_pool = hw_pool;
	sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool],
				 flags, &sed->dma);
	if (!sed->hw) {
		kmem_cache_free(ioat_sed_cache, sed);
		return NULL;
	}

	return sed;
}
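
/*
 * Transfers longer than 1 << xfercap_log are split across multiple ring
 * descriptors; only the final descriptor of the chain gets the interrupt,
 * fence, and completion-write control bits.
 */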
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs, idx, i;

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
	if (likely(num_descs) &&
	    ioat_check_space_lock(ioat_chan, num_descs) == 0)
		idx = ioat_chan->head;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat_chan, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat_chan, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}
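
/*
 * For xor with more than 5 sources each slice of the operation occupies a
 * pair of ring entries: a base descriptor plus an extended descriptor that
 * holds the overflow source addresses (see xor_idx_to_desc above).
 */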
static struct dma_async_tx_descriptor *
__ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		      size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	int num_descs, with_ext, idx, i;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat_check_space_lock(ioat_chan, num_descs+1) == 0)
		idx = ioat_chan->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t,
					 len, 1 << ioat_chan->xfercap_log);
		int s;

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor xor_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat_get_ring_ent(ioat_chan, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat_chan, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat_chan, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}
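
/*
 * Validate entry point: routing through the same prep path with a non-NULL
 * result pointer selects IOAT_OP_XOR_VAL; src[0] is handed in as the
 * "dest" and the remaining src_cnt - 1 addresses as sources.
 */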
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat_prep_xor_lock(chan, result, src[0], &src[1],
				    src_cnt - 1, len, flags);
}

static void
dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc,
		 struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(ioat_chan);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op,
		pq->ctl_f.int_en, pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}

static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan,
			       struct ioat_ring_ent *desc)
{
	struct device *dev = to_dev(ioat_chan);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_raw_descriptor *descs[] = { (void *)pq,
						(void *)pq,
						(void *)pq };
	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	if (desc->sed) {
		descs[1] = (void *)desc->sed->hw;
		descs[2] = (void *)desc->sed->hw + 64;
	}

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) pq->next,
		desc->txd.flags, pq->size, pq->ctl,
		pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++) {
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq16_get_src(descs, i),
			pq->coef[i]);
	}
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}
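
/*
 * Core pq prep path for up to 8 sources.  Continuation operations
 * (DMA_PREP_CONTINUE) fold the previous P/Q results back in as implied
 * sources with fixed coefficients; see the dma_maxpq comment in
 * include/linux/dmaengine.h for the accounting.
 */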
static struct dma_async_tx_descriptor *
__ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		     const dma_addr_t *dst, const dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
	int i, s, idx, with_ext, num_descs;
	int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0;

	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case.
	 */
	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0)
		idx = ioat_chan->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len,
					 1 << ioat_chan->xfercap_log);

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor pq_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		/* we turn on descriptor write back error status */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while ((i += 1 + with_ext) < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat_chan, desc, ext);

	if (!cb32) {
		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		pq->ctl_f.compl_write = 1;
		compl_desc = desc;
	} else {
		/* completion descriptor carries interrupt bit */
		compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
		hw = compl_desc->hw;
		hw->ctl = 0;
		hw->ctl_f.null = 1;
		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		hw->ctl_f.compl_write = 1;
		hw->size = NULL_DESC_BUFFER_SIZE;
		dump_desc_dbg(ioat_chan, compl_desc);
	}

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}
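
/*
 * 16-source variant: instead of pairing ring entries, the overflow source
 * slots live in a separately allocated sed entry (see ioat3_alloc_sed()),
 * and the hardware reaches them through pq->sed_addr.
 */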
static struct dma_async_tx_descriptor *
__ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
		       const dma_addr_t *dst, const dma_addr_t *src,
		       unsigned int src_cnt, const unsigned char *scf,
		       size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	u32 offset = 0;
	u8 op;
	int i, s, idx, num_descs;

	/* this function is only called with 9-16 sources */
	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;

	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);

	/*
	 * 16 source pq is only available on cb3.3 and has no completion
	 * write back errors
	 */
	if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0)
		idx = ioat_chan->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[4];
		size_t xfer_size = min_t(size_t, len,
					 1 << ioat_chan->xfercap_log);

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		pq = desc->pq;

		descs[0] = (struct ioat_raw_descriptor *) pq;

		desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3);
		if (!desc->sed) {
			dev_err(to_dev(ioat_chan),
				"%s: no free sed entries\n", __func__);
			return NULL;
		}

		pq->sed_addr = desc->sed->dma;
		desc->sed->parent = desc;

		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
		descs[2] = (void *)descs[1] + 64;

		for (s = 0; s < src_cnt; s++)
			pq16_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq16_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq16_set_src(descs, dst[0], offset, 0, s++);
			pq16_set_src(descs, dst[1], offset, 1, s++);
			pq16_set_src(descs, dst[1], offset, 0, s++);
		}

		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
		/* we turn on descriptor write back error status */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while (++i < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* with cb3.3 we should be able to do completion w/o a null desc */
	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	pq->ctl_f.compl_write = 1;

	dump_pq16_desc_dbg(ioat_chan, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}
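
/*
 * Effective hardware source count: continuation operations add implied
 * sources beyond what the caller passes in (one for a q-only continue,
 * three for a p+q continue), which is what decides between the 8- and
 * 16-source prep paths below.
 */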
static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
{
	if (dmaf_p_disabled_continue(flags))
		return src_cnt + 1;
	else if (dmaf_continue(flags))
		return src_cnt + 3;
	else
		return src_cnt;
}

struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];

	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat_prep_pq16_lock(chan, NULL, dst, single_source,
					       2, single_source_coef, len,
					       flags) :
			__ioat_prep_pq_lock(chan, NULL, dst, single_source, 2,
					     single_source_coef, len, flags);

	} else {
		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
					       scf, len, flags) :
			__ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt,
					     scf, len, flags);
	}
}

struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		pq[0] = pq[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		pq[1] = pq[0];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
				       flags) :
		__ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				     flags);
}
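
/*
 * xor via the pq engine: with DMA_PREP_PQ_DISABLE_Q set only the P result
 * is computed, and P is the plain xor of the sources, so a zeroed
 * coefficient array and a dummy q address turn a pq operation into an xor.
 */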
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[MAX_SCF];
	dma_addr_t pq[2];

	if (src_cnt > MAX_SCF)
		return NULL;

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
				       flags) :
		__ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				     flags);
}

struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[MAX_SCF];
	dma_addr_t pq[2];

	if (src_cnt > MAX_SCF)
		return NULL;

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
				       scf, len, flags) :
		__ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
				     scf, len, flags);
}
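
/*
 * A null descriptor raises an interrupt (and a completion write) without
 * transferring any data, implementing the dmaengine DMA_INTERRUPT
 * capability.
 */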
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_check_space_lock(ioat_chan, 1) == 0)
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;
	dump_desc_dbg(ioat_chan, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}