[ARM/tegra] dma: add transfer count query, check interrupt pending status, stop DMA after last req transfer
author Jay Cheng <jacheng@nvidia.com>
Sat, 16 Oct 2010 03:55:02 +0000 (23:55 -0400)
committer Colin Cross <ccross@android.com>
Mon, 18 Oct 2010 22:44:51 +0000 (15:44 -0700)

Add an API to return the completed transfer count of a pending, active,
or finished DMA request.
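
A rough usage sketch from a client driver's point of view (the query
function name tegra_dma_transferred_req() and its exact signature are
assumptions for illustration only; the counting itself is done by the
dma_active_count() helper added in the diff below):

	/* hypothetical client-side query of an in-flight request */
	struct tegra_dma_req *req = &my_req;	/* previously enqueued */
	int done;

	done = tegra_dma_transferred_req(ch, req);	/* assumed API name */
	pr_debug("dma: %d bytes transferred so far\n", done);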

originally fixed by Gary King <gking@nvidia.com>

It is observed that the DMA interrupt has lower priority than its
client's interrupt. When the client's ISR queries the transfer count,
the DMA status may not have been updated yet because the DMA ISR has
not been served. So before reading the status, explicitly check the
interrupt pending status and handle it accordingly.
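
A minimal sketch of that ordering (register and helper names are taken
from the driver; get_transfer_count() itself is a hypothetical wrapper
shown only to illustrate where the check belongs):

	static unsigned int get_transfer_count(struct tegra_dma_channel *ch,
		struct tegra_dma_req *req)
	{
		unsigned int status = readl(ch->addr + APB_DMA_CHAN_STA);

		/* The lower-priority DMA ISR may not have run yet.  If an
		 * end-of-transfer interrupt is still pending, handle that
		 * completion first, then re-read the status so the count
		 * below is computed from up-to-date state. */
		if (status & STA_ISE_EOC) {
			/* ...service the pending interrupt here... */
			status = readl(ch->addr + APB_DMA_CHAN_STA);
		}

		return dma_active_count(ch, req, status);
	}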

Another observed issue: if the DMA has transferred (requested - 4) bytes
and the channel is switched to the invalid requestor before being stopped,
the status gets reset and the transferred byte count reads as 0. This
appears to be APB DMA hardware behavior. The suggested workaround (see the
sketch below) is:
- Disable the global DMA enable bit.
- Read the channel status.
- Stop the DMA channel.
- Re-enable the global DMA enable bit.

Added this workaround and it worked fine.
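
Condensed, the sequence in tegra_dma_dequeue_req() looks like this (it is
the same code as in the hunk below, pulled out for readability; the 20 us
delay and the new enable_lock come straight from the change):

	spin_lock(&enable_lock);
	writel(0, addr + APB_DMA_GEN);			/* globally disable DMA */
	udelay(20);					/* let the in-flight burst settle */
	status = readl(ch->addr + APB_DMA_CHAN_STA);	/* read channel status */
	tegra_dma_stop(ch);				/* stop this channel */
	writel(GEN_ENABLE, addr + APB_DMA_GEN);		/* re-enable DMA globally */
	spin_unlock(&enable_lock);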

originally fixed by Laxman Dewangan <ldewangan@nvidia.com>

In continuous mode, the DMA should stop after the last transfer has
completed if there is no more request pending.
If there is a pending request, check whether it has already been
programmed into the hardware for the next transfer; if it has not been
started, stop the DMA and start the new request immediately (see the
sketch below).
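
Roughly, the end-of-buffer path in handle_continuous_dma() becomes
(condensed from the hunk below):

	if (list_is_last(&req->node, &ch->list)) {
		/* nothing else queued: stop the channel */
		tegra_dma_stop(ch);
	} else {
		/* a request is pending; if the hardware has not been
		 * programmed with it yet, restart the channel on it now */
		next_req = list_entry(req->node.next, typeof(*next_req), node);
		if (next_req->status != TEGRA_DMA_REQ_INFLIGHT) {
			tegra_dma_stop(ch);
			tegra_dma_update_hw(ch, next_req);
		}
	}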

originally fixed by Laxman Dewangan <ldewangan@nvidia.com>

Change-Id: I49c97c96eacdf4060de6b21cec0e71d940d33f00

arch/arm/mach-tegra/dma.c

index 163993bfa816c0d82226b05a24e9db2e86fad1ac..b2f2b0fb681e0f4401337e22bb2f9b89744f332b 100644 (file)
@@ -50,7 +50,6 @@
 #define CSR_FLOW                               (1<<21)
 #define CSR_REQ_SEL_SHIFT                      16
 #define CSR_REQ_SEL_MASK                       (0x1F<<CSR_REQ_SEL_SHIFT)
-#define CSR_REQ_SEL_INVALID                    (31<<CSR_REQ_SEL_SHIFT)
 #define CSR_WCOUNT_SHIFT                       2
 #define CSR_WCOUNT_MASK                                0xFFFC
 
@@ -127,6 +126,7 @@ struct tegra_dma_channel {
 #define  NV_DMA_MAX_CHANNELS  32
 
 static DEFINE_MUTEX(tegra_dma_lock);
+static DEFINE_SPINLOCK(enable_lock);
 
 static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
 static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
@@ -174,36 +174,68 @@ void tegra_dma_stop(struct tegra_dma_channel *ch)
 
 int tegra_dma_cancel(struct tegra_dma_channel *ch)
 {
-       u32 csr;
        unsigned long irq_flags;
 
        spin_lock_irqsave(&ch->lock, irq_flags);
        while (!list_empty(&ch->list))
                list_del(ch->list.next);
 
-       csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-       csr &= ~CSR_REQ_SEL_MASK;
-       csr |= CSR_REQ_SEL_INVALID;
-       writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
        tegra_dma_stop(ch);
 
        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return 0;
 }
 
+/* should be called with the channel lock held */
+static unsigned int dma_active_count(struct tegra_dma_channel *ch,
+       struct tegra_dma_req *req, unsigned int status)
+{
+       unsigned int to_transfer;
+       unsigned int req_transfer_count;
+
+       unsigned int bytes_transferred;
+
+       to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
+       req_transfer_count = ch->req_transfer_count;
+       req_transfer_count += 1;
+       to_transfer += 1;
+
+       bytes_transferred = req_transfer_count;
+
+       if (status & STA_BUSY)
+               bytes_transferred -= to_transfer;
+
+       /* In continuous transfer mode, DMA only tracks the count of the
+        * half DMA buffer. So, if the DMA already finished half the DMA
+        * then add the half buffer to the completed count.
+        */
+       if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS)
+               if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
+                       bytes_transferred += req_transfer_count;
+
+       if (status & STA_ISE_EOC)
+               bytes_transferred += req_transfer_count;
+
+       bytes_transferred *= 4;
+
+       return bytes_transferred;
+}
+
 int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
        struct tegra_dma_req *_req)
 {
-       unsigned int csr;
-       unsigned int status;
        struct tegra_dma_req *req = NULL;
        int found = 0;
+       unsigned int status;
        unsigned long irq_flags;
-       int to_transfer;
-       int req_transfer_count;
+       int stop = 0;
+       void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
 
        spin_lock_irqsave(&ch->lock, irq_flags);
+
+       if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
+               stop = 1;
+
        list_for_each_entry(req, &ch->list, node) {
                if (req == _req) {
                        list_del(&req->node);
@@ -216,47 +248,27 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
                return 0;
        }
 
+       if (!stop)
+               goto skip_status;
+
        /* STOP the DMA and get the transfer count.
-        * Getting the transfer count is tricky.
-        *  - Change the source selector to invalid to stop the DMA from
-        *    FIFO to memory.
-        *  - Read the status register to know the number of pending
+         * Getting the transfer count is tricky.
+        *  - Globally disable DMA on all channels
+        *  - Read the channel's status register to know the number of pending
         *    bytes to be transfered.
-        *  - Finally stop or program the DMA to the next buffer in the
-        *    list.
+        *  - Stop the dma channel
+        *  - Globally re-enable DMA to resume other transfers
         */
-       csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-       csr &= ~CSR_REQ_SEL_MASK;
-       csr |= CSR_REQ_SEL_INVALID;
-       writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
-       /* Get the transfer count */
+       spin_lock(&enable_lock);
+       writel(0, addr + APB_DMA_GEN);
+       udelay(20);
        status = readl(ch->addr + APB_DMA_CHAN_STA);
-       to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
-       req_transfer_count = ch->req_transfer_count;
-       req_transfer_count += 1;
-       to_transfer += 1;
-
-       req->bytes_transferred = req_transfer_count;
-
-       if (status & STA_BUSY)
-               req->bytes_transferred -= to_transfer;
-
-       /* In continuous transfer mode, DMA only tracks the count of the
-        * half DMA buffer. So, if the DMA already finished half the DMA
-        * then add the half buffer to the completed count.
-        *
-        *      FIXME: There can be a race here. What if the req to
-        *      dequue happens at the same time as the DMA just moved to
-        *      the new buffer and SW didn't yet received the interrupt?
-        */
-       if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS)
-               if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
-                       req->bytes_transferred += req_transfer_count;
+       tegra_dma_stop(ch);
+       writel(GEN_ENABLE, addr + APB_DMA_GEN);
+       spin_unlock(&enable_lock);
 
-       req->bytes_transferred *= 4;
+       req->bytes_transferred = dma_active_count(ch, req, status);
 
-       tegra_dma_stop(ch);
        if (!list_empty(&ch->list)) {
                /* if the list is not empty, queue the next request */
                struct tegra_dma_req *next_req;
@@ -264,6 +276,7 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
                        typeof(*next_req), node);
                tegra_dma_update_hw(ch, next_req);
        }
+skip_status:
        req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
 
        spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -548,6 +561,7 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
 static void handle_continuous_dma(struct tegra_dma_channel *ch)
 {
        struct tegra_dma_req *req;
+       struct tegra_dma_req *next_req;
        unsigned long irq_flags;
 
        spin_lock_irqsave(&ch->lock, irq_flags);
@@ -577,8 +591,6 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
                                tegra_dma_stop(ch);
 
                                if (!list_is_last(&req->node, &ch->list)) {
-                                       struct tegra_dma_req *next_req;
-
                                        next_req = list_entry(req->node.next,
                                                typeof(*next_req), node);
                                        tegra_dma_update_hw(ch, next_req);
@@ -594,8 +606,6 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
                        /* Load the next request into the hardware, if available
                         * */
                        if (!list_is_last(&req->node, &ch->list)) {
-                               struct tegra_dma_req *next_req;
-
                                next_req = list_entry(req->node.next,
                                        typeof(*next_req), node);
                                tegra_dma_update_hw_partial(ch, next_req);
@@ -621,6 +631,19 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
                        req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
                        req->bytes_transferred = bytes_transferred;
                        req->status = TEGRA_DMA_REQ_SUCCESS;
+                       if (list_is_last(&req->node, &ch->list))
+                               tegra_dma_stop(ch);
+                       else {
+                                /* The req may have been queued after the
+                                * half-buffer completion, so it needs to
+                                * start immediately */
+                               next_req = list_entry(req->node.next, typeof(*next_req), node);
+                               if (next_req->status != TEGRA_DMA_REQ_INFLIGHT) {
+                                       tegra_dma_stop(ch);
+                                       tegra_dma_update_hw(ch, next_req);
+                               }
+                       }
+
                        list_del(&req->node);
 
                        /* DMA lock is NOT held when callbak is called */
@@ -629,6 +652,8 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
                        return;
 
                } else {
+                       tegra_dma_stop(ch);
+                       /* DMA should have been stopped much earlier */
                        BUG();
                }
        }