#define CSR_FLOW (1<<21)
#define CSR_REQ_SEL_SHIFT 16
#define CSR_REQ_SEL_MASK (0x1F<<CSR_REQ_SEL_SHIFT)
-#define CSR_REQ_SEL_INVALID (31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT 2
#define CSR_WCOUNT_MASK 0xFFFC
#define NV_DMA_MAX_CHANNELS 32
static DEFINE_MUTEX(tegra_dma_lock);
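+/* Guards the global enable bit (APB_DMA_GEN): dequeue briefly disables
+ * all channels to sample a stable transfer count, and this lock keeps
+ * those pause/resume sequences from racing across channels. */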
+static DEFINE_SPINLOCK(enable_lock);
static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
- u32 csr;
unsigned long irq_flags;
spin_lock_irqsave(&ch->lock, irq_flags);
while (!list_empty(&ch->list))
list_del(ch->list.next);
- csr = readl(ch->addr + APB_DMA_CHAN_CSR);
- csr &= ~CSR_REQ_SEL_MASK;
- csr |= CSR_REQ_SEL_INVALID;
- writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
tegra_dma_stop(ch);
spin_unlock_irqrestore(&ch->lock, irq_flags);
return 0;
}
+/* Must be called with the channel lock held */
+static unsigned int dma_active_count(struct tegra_dma_channel *ch,
+ struct tegra_dma_req *req, unsigned int status)
+{
+ unsigned int to_transfer;
+ unsigned int req_transfer_count;
+ unsigned int bytes_transferred;
+
+ to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
+ req_transfer_count = ch->req_transfer_count;
+ req_transfer_count += 1;
+ to_transfer += 1;
+
+ bytes_transferred = req_transfer_count;
+
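+ /* STA_COUNT gives the words still outstanding; while the channel is
+ * busy, subtract them from the programmed total to get the words
+ * completed so far. */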
+ if (status & STA_BUSY)
+ bytes_transferred -= to_transfer;
+
+ /* In continuous transfer mode, the hardware only tracks the count
+ * for one half of the double buffer. So if the first half has
+ * already completed, add that half buffer to the completed count.
+ */
+ if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS)
+ if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
+ bytes_transferred += req_transfer_count;
+
+ if (status & STA_ISE_EOC)
+ bytes_transferred += req_transfer_count;
+
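+ /* Counts above are in 32-bit words; the multiply by 4 below converts
+ * them to bytes. Illustrative (hypothetical) numbers: a 64-word
+ * half-buffer (req_transfer_count == 64), STA_BUSY set with 16 words
+ * pending (to_transfer == 16), and the half-buffer interrupt already
+ * seen (HALF_FULL, no EOC) gives (64 - 16 + 64) * 4 = 448 bytes. */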
+ bytes_transferred *= 4;
+
+ return bytes_transferred;
+}
+
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
struct tegra_dma_req *_req)
{
- unsigned int csr;
- unsigned int status;
struct tegra_dma_req *req = NULL;
int found = 0;
+ unsigned int status;
unsigned long irq_flags;
- int to_transfer;
- int req_transfer_count;
+ int stop = 0;
+ void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
spin_lock_irqsave(&ch->lock, irq_flags);
+
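+ /* Only the request at the head of the list is active in hardware;
+ * dequeueing that one requires stopping the channel and reading
+ * back the transfer count below. */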
+ if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
+ stop = 1;
+
list_for_each_entry(req, &ch->list, node) {
if (req == _req) {
list_del(&req->node);
found = 1;
break;
}
}
if (!found) {
spin_unlock_irqrestore(&ch->lock, irq_flags);
return 0;
}
+ if (!stop)
+ goto skip_status;
+
/* STOP the DMA and get the transfer count.
- * Getting the transfer count is tricky.
- * - Change the source selector to invalid to stop the DMA from
- * FIFO to memory.
- * - Read the status register to know the number of pending
+ * Getting the transfer count is tricky.
+ * - Globally disable DMA on all channels
+ * - Read the channel's status register to know the number of pending
*   bytes to be transferred.
- * - Finally stop or program the DMA to the next buffer in the
- * list.
+ * - Stop the DMA channel
+ * - Globally re-enable DMA to resume other transfers
*/
- csr = readl(ch->addr + APB_DMA_CHAN_CSR);
- csr &= ~CSR_REQ_SEL_MASK;
- csr |= CSR_REQ_SEL_INVALID;
- writel(csr, ch->addr + APB_DMA_CHAN_CSR);
-
- /* Get the transfer count */
+ spin_lock(&enable_lock);
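+ /* Clearing the global enable bit pauses all channels so the status
+ * register below can be read while no words are moving; the brief
+ * udelay() gives the hardware time to quiesce. */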
+ writel(0, addr + APB_DMA_GEN);
+ udelay(20);
status = readl(ch->addr + APB_DMA_CHAN_STA);
- to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
- req_transfer_count = ch->req_transfer_count;
- req_transfer_count += 1;
- to_transfer += 1;
-
- req->bytes_transferred = req_transfer_count;
-
- if (status & STA_BUSY)
- req->bytes_transferred -= to_transfer;
-
- /* In continuous transfer mode, DMA only tracks the count of the
- * half DMA buffer. So, if the DMA already finished half the DMA
- * then add the half buffer to the completed count.
- *
- * FIXME: There can be a race here. What if the req to
- * dequue happens at the same time as the DMA just moved to
- * the new buffer and SW didn't yet received the interrupt?
- */
- if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS)
- if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
- req->bytes_transferred += req_transfer_count;
+ tegra_dma_stop(ch);
+ writel(GEN_ENABLE, addr + APB_DMA_GEN);
+ spin_unlock(&enable_lock);
- req->bytes_transferred *= 4;
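+ /* Convert the status snapshot taken under enable_lock into the
+ * number of bytes actually transferred. */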
+ req->bytes_transferred = dma_active_count(ch, req, status);
- tegra_dma_stop(ch);
if (!list_empty(&ch->list)) {
/* if the list is not empty, queue the next request */
struct tegra_dma_req *next_req;
next_req = list_entry(ch->list.next,
typeof(*next_req), node);
tegra_dma_update_hw(ch, next_req);
}
+skip_status:
req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;
spin_unlock_irqrestore(&ch->lock, irq_flags);
return 0;
}

static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
struct tegra_dma_req *req;
+ struct tegra_dma_req *next_req;
unsigned long irq_flags;
spin_lock_irqsave(&ch->lock, irq_flags);
tegra_dma_stop(ch);
if (!list_is_last(&req->node, &ch->list)) {
- struct tegra_dma_req *next_req;
-
next_req = list_entry(req->node.next,
typeof(*next_req), node);
tegra_dma_update_hw(ch, next_req);
/* Load the next request into the hardware, if available */
if (!list_is_last(&req->node, &ch->list)) {
- struct tegra_dma_req *next_req;
-
next_req = list_entry(req->node.next,
typeof(*next_req), node);
tegra_dma_update_hw_partial(ch, next_req);
req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
req->bytes_transferred = bytes_transferred;
req->status = TEGRA_DMA_REQ_SUCCESS;
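+ /* Whole buffer consumed: if nothing else is queued, stop the
+ * channel instead of letting continuous mode wrap around. */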
+ if (list_is_last(&req->node, &ch->list)) {
+ tegra_dma_stop(ch);
+ } else {
+ /* The next req may have been queued after the half-buffer
+ * interrupt fired, so it needs to be started immediately. */
+ next_req = list_entry(req->node.next,
+ typeof(*next_req), node);
+ if (next_req->status != TEGRA_DMA_REQ_INFLIGHT) {
+ tegra_dma_stop(ch);
+ tegra_dma_update_hw(ch, next_req);
+ }
+ }
+
list_del(&req->node);
/* DMA lock is NOT held when the callback is called */
return;
} else {
+ tegra_dma_stop(ch);
+ /* DMA should have been stopped much earlier */
BUG();
}
}