if (req->status == -TEGRA_DMA_REQ_ERROR_ABORTED)
return;
- spin_unlock(&u->lock);
tty_flip_buffer_push(u->state->port.tty);
- spin_lock(&u->lock);
+
+ if (t->rx_done_cb)
+ t->rx_done_cb(u);
}
+ /*
+ * DMA-complete callback for the RX channel. In the expected flow the RX
+ * request is dequeued from the threshold path before the transfer runs to
+ * completion, so this callback firing at all suggests data arrived faster
+ * than it was drained — hence the "possible rx overflow" warning.
+ * NOTE(review): assumes req->dev was set to the tegra_uart_port when the
+ * request was enqueued — confirm against tegra_start_dma_rx.
+ */
+ static void tegra_rx_dma_complete_callback(struct tegra_dma_req *req)
+ {
+ struct tegra_uart_port *t = req->dev;
+ unsigned long flags;
+
+ /*
+ * should never get called, dma should be dequeued during threshold
+ * callback
+ */
+
+ dev_warn(t->uport.dev, "possible rx overflow\n");
+
+ /* Complete the stale request under the port lock; irqsave form because
+ * this callback may run outside the UART interrupt context. */
+ spin_lock_irqsave(&t->uport.lock, flags);
+ tegra_rx_dma_complete_req(t, req);
+ spin_unlock_irqrestore(&t->uport.lock, flags);
+ }
+
/* Lock already taken */
static void do_handle_rx_dma(struct tegra_uart_port *t)
{
struct uart_port *u = &t->uport;
if (t->rts_active)
set_rts(t, false);
- tegra_dma_dequeue(t->rx_dma);
+ if (!tegra_dma_dequeue_req(t->rx_dma, &t->rx_dma_req))
+ tegra_rx_dma_complete_req(t, &t->rx_dma_req);
+
tty_flip_buffer_push(u->state->port.tty);
+ if (t->rx_done_cb)
+ t->rx_done_cb(u);
/* enqueue the request again */
tegra_start_dma_rx(t);
if (t->rts_active)
t->ier_shadow = ier;
uart_writeb(t, ier, UART_IER);
t->rx_in_progress = 0;
- }
- if (t->use_rx_dma && t->rx_dma) {
- tegra_dma_dequeue(t->rx_dma);
+
+ if (t->use_rx_dma && t->rx_dma) {
+ if (!tegra_dma_dequeue_req(t->rx_dma, &t->rx_dma_req))
+ tegra_rx_dma_complete_req(t, &t->rx_dma_req);
+ } else {
+ do_handle_rx_pio(t);
+ }
tty_flip_buffer_push(u->state->port.tty);
+ if (t->rx_done_cb)
+ t->rx_done_cb(u);
}
return;