dma: mv_xor: Reduce interrupts by enabling EOD only when needed
authorLior Amsalem <alior@marvell.com>
Wed, 27 Aug 2014 13:52:53 +0000 (10:52 -0300)
committerVinod Koul <vinod.koul@intel.com>
Tue, 23 Sep 2014 14:47:01 +0000 (20:17 +0530)
This commit unmasks the end-of-chain interrupt and removes the
end-of-descriptor command setting on all transactions, except those
explicitly flagged with DMA_PREP_INTERRUPT.

This allows an interrupt to be raised only on chain completion, instead of
on each descriptor completion, which reduces the interrupt count.

Signed-off-by: Lior Amsalem <alior@marvell.com>
Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
drivers/dma/mv_xor.c
drivers/dma/mv_xor.h

index 4ee5bb194fd558766f813a3f1359de501749b964..cbc90e5df7ffeba286e00fbfdf7be82b8bb074ce 100644 (file)
@@ -46,13 +46,16 @@ static void mv_xor_issue_pending(struct dma_chan *chan);
        ((chan)->dmadev.dev)
 
 static void mv_desc_init(struct mv_xor_desc_slot *desc,
-                        dma_addr_t addr, u32 byte_count)
+                        dma_addr_t addr, u32 byte_count,
+                        enum dma_ctrl_flags flags)
 {
        struct mv_xor_desc *hw_desc = desc->hw_desc;
 
        hw_desc->status = XOR_DESC_DMA_OWNED;
        hw_desc->phy_next_desc = 0;
-       hw_desc->desc_command = XOR_DESC_EOD_INT_EN;
+       /* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
+       hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
+                               XOR_DESC_EOD_INT_EN : 0;
        hw_desc->phy_dest_addr = addr;
        hw_desc->byte_count = byte_count;
 }
@@ -107,7 +110,10 @@ static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
 
 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
-       u32 val = ~(XOR_INT_END_OF_DESC << (chan->idx * 16));
+       u32 val;
+
+       val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
+       val = ~(val << (chan->idx * 16));
        dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
        writel_relaxed(val, XOR_INTR_CAUSE(chan));
 }
@@ -510,7 +516,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
        if (sw_desc) {
                sw_desc->type = DMA_XOR;
                sw_desc->async_tx.flags = flags;
-               mv_desc_init(sw_desc, dest, len);
+               mv_desc_init(sw_desc, dest, len, flags);
                sw_desc->unmap_src_cnt = src_cnt;
                sw_desc->unmap_len = len;
                while (src_cnt--)
index ae41c31c6ea5a7773ce44f8cdb8e3b12d106bd48..21b0828f9697613ae0b1761fbbf09deeeac7d0fb 100644 (file)
@@ -67,7 +67,7 @@
                                 XOR_INT_ERR_WRPROT | XOR_INT_ERR_OWN    | \
                                 XOR_INT_ERR_PAR    | XOR_INT_ERR_MBUS)
 
-#define XOR_INTR_MASK_VALUE    (XOR_INT_END_OF_DESC | \
+#define XOR_INTR_MASK_VALUE    (XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | \
                                 XOR_INT_STOPPED     | XOR_INTR_ERRORS)
 
 #define WINDOW_BASE(w)         (0x50 + ((w) << 2))