From fc84cd7dbaf41b6164aea1554130d0143436340e Mon Sep 17 00:00:00 2001
From: Jay Cheng
Date: Fri, 15 Oct 2010 23:55:02 -0400
Subject: [PATCH] [ARM/tegra] dma: add transfer count query, check interrupt
 pending status, stop DMA after the last req transfer

Add an API to return the completed transfer count of a pending, active
or finished DMA request.

originally fixed by Gary King

It is observed that the DMA interrupt has a lower priority than its
client's interrupt. When the client's ISR queries the transfer count,
the DMA status has not been updated yet because the DMA ISR has not
been served. So before reading the status, explicitly check the
interrupt pending status and handle it accordingly.

Another observed issue: if the DMA has transferred (requested - 4)
bytes and the channel is moved to the invalid requestor before being
stopped, the status resets and the transferred byte count reads as 0.
This appears to be APB DMA hardware behavior. The suggested workaround
is:
- Disable the global DMA enable bit.
- Read the channel status.
- Stop the DMA channel.
- Re-enable the global DMA enable bit.
This workaround was added and works fine.

originally fixed by Laxman Dewangan

In continuous mode, the DMA should stop after the last transfer has
completed if no more requests are pending. If a request is pending, the
driver should check whether it has already been loaded into the
hardware for the next transfer; if it has not started yet, stop the DMA
and start the new request immediately.

originally fixed by Laxman Dewangan

Change-Id: I49c97c96eacdf4060de6b21cec0e71d940d33f00
---
 arch/arm/mach-tegra/dma.c | 125 +++++++++++++++++++++++---------------
 1 file changed, 75 insertions(+), 50 deletions(-)

diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index 163993bfa816..b2f2b0fb681e 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -50,7 +50,6 @@
 #define CSR_FLOW			(1<<21)
 #define CSR_REQ_SEL_SHIFT		16
 #define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
-#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
@@ ... @@ int tegra_dma_cancel(struct tegra_dma_channel *ch)
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	while (!list_empty(&ch->list))
 		list_del(ch->list.next);
 
-	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-	csr &= ~CSR_REQ_SEL_MASK;
-	csr |= CSR_REQ_SEL_INVALID;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
 	tegra_dma_stop(ch);
 
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
 	return 0;
 }
 
+/* should be called with the channel lock held */
+static unsigned int dma_active_count(struct tegra_dma_channel *ch,
+	struct tegra_dma_req *req, unsigned int status)
+{
+	unsigned int to_transfer;
+	unsigned int req_transfer_count;
+	unsigned int bytes_transferred;
+
+	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
+	req_transfer_count = ch->req_transfer_count;
+	req_transfer_count += 1;
+	to_transfer += 1;
+
+	bytes_transferred = req_transfer_count;
+
+	if (status & STA_BUSY)
+		bytes_transferred -= to_transfer;
+
+	/* In continuous transfer mode, DMA only tracks the count of the
+	 * half DMA buffer. So, if the DMA already finished half the DMA
+	 * then add the half buffer to the completed count.
+	 */
+	if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS)
+		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
+			bytes_transferred += req_transfer_count;
+
+	if (status & STA_ISE_EOC)
+		bytes_transferred += req_transfer_count;
+
+	bytes_transferred *= 4;
+
+	return bytes_transferred;
+}
+
 int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 	struct tegra_dma_req *_req)
 {
-	unsigned int csr;
-	unsigned int status;
 	struct tegra_dma_req *req = NULL;
 	int found = 0;
+	unsigned int status;
 	unsigned long irq_flags;
-	int to_transfer;
-	int req_transfer_count;
+	int stop = 0;
+	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
+
+	if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
+		stop = 1;
+
 	list_for_each_entry(req, &ch->list, node) {
 		if (req == _req) {
 			list_del(&req->node);
@@ -216,47 +248,27 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 		return 0;
 	}
 
+	if (!stop)
+		goto skip_status;
+
 	/* STOP the DMA and get the transfer count.
-	 * Getting the transfer count is tricky.
-	 *  - Change the source selector to invalid to stop the DMA from
-	 *    FIFO to memory.
-	 *  - Read the status register to know the number of pending
+	 * Getting the transfer count is tricky.
+	 *  - Globally disable DMA on all channels
+	 *  - Read the channel's status register to know the number of pending
 	 *    bytes to be transfered.
-	 *  - Finally stop or program the DMA to the next buffer in the
-	 *    list.
+	 *  - Stop the DMA channel
+	 *  - Globally re-enable DMA to resume other transfers
 	 */
-	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
-	csr &= ~CSR_REQ_SEL_MASK;
-	csr |= CSR_REQ_SEL_INVALID;
-	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
-	/* Get the transfer count */
+	spin_lock(&enable_lock);
+	writel(0, addr + APB_DMA_GEN);
+	udelay(20);
 	status = readl(ch->addr + APB_DMA_CHAN_STA);
-	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
-	req_transfer_count = ch->req_transfer_count;
-	req_transfer_count += 1;
-	to_transfer += 1;
-
-	req->bytes_transferred = req_transfer_count;
-
-	if (status & STA_BUSY)
-		req->bytes_transferred -= to_transfer;
-
-	/* In continuous transfer mode, DMA only tracks the count of the
-	 * half DMA buffer. So, if the DMA already finished half the DMA
-	 * then add the half buffer to the completed count.
-	 *
-	 * FIXME: There can be a race here. What if the req to
-	 * dequue happens at the same time as the DMA just moved to
-	 * the new buffer and SW didn't yet received the interrupt?
-	 */
-	if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS)
-		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
-			req->bytes_transferred += req_transfer_count;
+	tegra_dma_stop(ch);
+	writel(GEN_ENABLE, addr + APB_DMA_GEN);
+	spin_unlock(&enable_lock);
 
-	req->bytes_transferred *= 4;
+	req->bytes_transferred = dma_active_count(ch, req, status);
 
-	tegra_dma_stop(ch);
 	if (!list_empty(&ch->list)) {
 		/* if the list is not empty, queue the next request */
 		struct tegra_dma_req *next_req;
@@ -264,6 +276,7 @@ int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
 			typeof(*next_req), node);
 		tegra_dma_update_hw(ch, next_req);
 	}
+skip_status:
 	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
 
@@ -548,6 +561,7 @@ static void handle_oneshot_dma(struct tegra_dma_channel *ch)
 static void handle_continuous_dma(struct tegra_dma_channel *ch)
 {
 	struct tegra_dma_req *req;
+	struct tegra_dma_req *next_req;
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
@@ -577,8 +591,6 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 			tegra_dma_stop(ch);
 
 			if (!list_is_last(&req->node, &ch->list)) {
-				struct tegra_dma_req *next_req;
-
 				next_req = list_entry(req->node.next,
 					typeof(*next_req), node);
 				tegra_dma_update_hw(ch, next_req);
@@ -594,8 +606,6 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 			/* Load the next request into the hardware, if available
 			 * */
 			if (!list_is_last(&req->node, &ch->list)) {
-				struct tegra_dma_req *next_req;
-
 				next_req = list_entry(req->node.next,
 					typeof(*next_req), node);
 				tegra_dma_update_hw_partial(ch, next_req);
@@ -621,6 +631,19 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
 			req->bytes_transferred = bytes_transferred;
 			req->status = TEGRA_DMA_REQ_SUCCESS;
+			if (list_is_last(&req->node, &ch->list))
+				tegra_dma_stop(ch);
+			else {
+				/* The req may have come in after the
+				 * half-buffer completion, so it needs
+				 * to start immediately */
+				next_req = list_entry(req->node.next, typeof(*next_req), node);
+				if (next_req->status != TEGRA_DMA_REQ_INFLIGHT) {
+					tegra_dma_stop(ch);
+					tegra_dma_update_hw(ch, next_req);
+				}
+			}
+
 			list_del(&req->node);
 
 			/* DMA lock is NOT held when callbak is called */
@@ -629,6 +652,8 @@ static void handle_continuous_dma(struct tegra_dma_channel *ch)
 
 		return;
 	} else {
+		tegra_dma_stop(ch);
+		/* DMA should have been stopped much earlier */
 		BUG();
 	}
 }
-- 
2.34.1
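
Note on the arithmetic in dma_active_count(): both the programmed word
count and the status COUNT field are stored as N-1, the status counts
32-bit words rather than bytes, and in continuous mode the hardware
only counts one half buffer at a time. The stand-alone sketch below
(plain user-space C, not driver code) replays that math with a stubbed
status word; the STA_* values follow the driver's defines, the sample
request size is hypothetical, and the driver's mode/buffer_status
checks are collapsed into a single half_buffer_done flag. Per the
workaround in the commit message, the status snapshot fed into this
math must be taken while the controller is globally disabled, otherwise
the count can read back as zero.

#include <stdio.h>

#define STA_BUSY        (1u << 31)      /* channel still transferring */
#define STA_ISE_EOC     (1u << 30)      /* end-of-count interrupt pending */
#define STA_COUNT_SHIFT 2
#define STA_COUNT_MASK  0xFFFCu

static unsigned int bytes_done(unsigned int status,
                               unsigned int req_transfer_count,
                               int half_buffer_done)
{
        /* both counts are programmed as N-1, hence the +1 */
        unsigned int to_transfer =
                ((status & STA_COUNT_MASK) >> STA_COUNT_SHIFT) + 1;
        unsigned int words = req_transfer_count + 1;
        unsigned int done = words;

        if (status & STA_BUSY)          /* mid-transfer: subtract what is left */
                done -= to_transfer;

        if (half_buffer_done)           /* continuous mode counts half buffers */
                done += words;

        if (status & STA_ISE_EOC)       /* unserviced EOC: one more full count */
                done += words;

        return done * 4;                /* 32-bit words to bytes */
}

int main(void)
{
        /* sample: channel busy, 16 of 64 words still pending,
         * first half of the buffer not yet completed */
        unsigned int status = STA_BUSY | (15u << STA_COUNT_SHIFT);

        printf("%u bytes completed\n", bytes_done(status, 63, 0));
        return 0;
}

With the sample values above this prints "192 bytes completed": 64
words requested, 16 still pending, so 48 words (192 bytes) are done.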