2 * Driver for AMBA serial ports
4 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
6 * Copyright 1999 ARM Limited
7 * Copyright (C) 2000 Deep Blue Solutions Ltd.
8 * Copyright (C) 2010 ST-Ericsson SA
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 * This is a generic driver for ARM AMBA-type serial ports. They
25 * have a lot of 16550-like features, but are not register compatible.
26 * Note that although they do have CTS, DCD and DSR inputs, they do
27 * not have an RI input, nor do they have DTR or RTS outputs. If
28 * required, these have to be supplied via some other means (eg, GPIO)
29 * and hooked into this driver.
33 #if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
37 #include <linux/module.h>
38 #include <linux/ioport.h>
39 #include <linux/init.h>
40 #include <linux/console.h>
41 #include <linux/sysrq.h>
42 #include <linux/device.h>
43 #include <linux/tty.h>
44 #include <linux/tty_flip.h>
45 #include <linux/serial_core.h>
46 #include <linux/serial.h>
47 #include <linux/amba/bus.h>
48 #include <linux/amba/serial.h>
49 #include <linux/clk.h>
50 #include <linux/slab.h>
51 #include <linux/dmaengine.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/scatterlist.h>
54 #include <linux/delay.h>
55 #include <linux/types.h>
57 #include <linux/of_device.h>
58 #include <linux/pinctrl/consumer.h>
59 #include <linux/sizes.h>
61 #include <linux/workqueue.h>
65 #define SERIAL_AMBA_MAJOR 204
66 #define SERIAL_AMBA_MINOR 64
67 #define SERIAL_AMBA_NR UART_NR
69 #define AMBA_ISR_PASS_LIMIT 256
71 #define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
72 #define UART_DUMMY_DR_RX (1 << 16)
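/*
 * Note: bit 16 lies above every real UART011_DR bit (received data in
 * bits [7:0], error flags in bits [11:8]), so UART_DUMMY_DR_RX can be
 * OR'd into each received word and added to ignore_status_mask to
 * discard all characters when CREAD is not set.
 */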
74 /* There is by now at least one vendor with differing details, so handle it */
81 bool cts_event_workaround;
83 unsigned int (*get_fifosize)(struct amba_device *dev);
86 static unsigned int get_fifosize_arm(struct amba_device *dev)
88 return amba_rev(dev) < 3 ? 16 : 32;
91 static struct vendor_data vendor_arm = {
92 .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
93 .lcrh_tx = UART011_LCRH,
94 .lcrh_rx = UART011_LCRH,
95 .oversampling = false,
96 .dma_threshold = false,
97 .cts_event_workaround = false,
98 .get_fifosize = get_fifosize_arm,
101 static unsigned int get_fifosize_st(struct amba_device *dev)
106 static struct vendor_data vendor_st = {
107 .ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
108 .lcrh_tx = ST_UART011_LCRH_TX,
109 .lcrh_rx = ST_UART011_LCRH_RX,
110 .oversampling = true,
111 .dma_threshold = true,
112 .cts_event_workaround = true,
113 .get_fifosize = get_fifosize_st,
116 /* Deals with DMA transactions */
119 struct scatterlist sg;
123 struct pl011_dmarx_data {
124 struct dma_chan *chan;
125 struct completion complete;
127 struct pl011_sgbuf sgbuf_a;
128 struct pl011_sgbuf sgbuf_b;
131 struct timer_list timer;
132 unsigned int last_residue;
133 unsigned long last_jiffies;
135 unsigned int poll_rate;
136 unsigned int poll_timeout;
139 struct pl011_dmatx_data {
140 struct dma_chan *chan;
141 struct scatterlist sg;
147 * We wrap our port structure around the generic uart_port.
149 struct uart_amba_port {
150 struct uart_port port;
152 const struct vendor_data *vendor;
153 unsigned int dmacr; /* dma control reg */
154 unsigned int im; /* interrupt mask */
155 unsigned int old_status;
156 unsigned int fifosize; /* vendor-specific */
157 unsigned int lcrh_tx; /* vendor-specific */
158 unsigned int lcrh_rx; /* vendor-specific */
159 unsigned int old_cr; /* state during shutdown */
160 struct delayed_work tx_softirq_work;
162 unsigned int tx_irq_seen; /* 0=none, 1=1, 2=2 or more */
164 #ifdef CONFIG_DMA_ENGINE
168 struct pl011_dmarx_data dmarx;
169 struct pl011_dmatx_data dmatx;
175 * Reads up to 256 characters from the FIFO or until it's empty and
176 * inserts them into the TTY layer. Returns the number of characters
177 * read from the FIFO.
179 static int pl011_fifo_to_tty(struct uart_amba_port *uap)
182 unsigned int flag, max_count = 256;
185 while (max_count--) {
186 status = readw(uap->port.membase + UART01x_FR);
187 if (status & UART01x_FR_RXFE)
190 /* Take chars from the FIFO and update status */
191 ch = readw(uap->port.membase + UART01x_DR) |
194 uap->port.icount.rx++;
197 if (unlikely(ch & UART_DR_ERROR)) {
198 if (ch & UART011_DR_BE) {
199 ch &= ~(UART011_DR_FE | UART011_DR_PE);
200 uap->port.icount.brk++;
201 if (uart_handle_break(&uap->port))
203 } else if (ch & UART011_DR_PE)
204 uap->port.icount.parity++;
205 else if (ch & UART011_DR_FE)
206 uap->port.icount.frame++;
207 if (ch & UART011_DR_OE)
208 uap->port.icount.overrun++;
210 ch &= uap->port.read_status_mask;
212 if (ch & UART011_DR_BE)
214 else if (ch & UART011_DR_PE)
216 else if (ch & UART011_DR_FE)
220 if (uart_handle_sysrq_char(&uap->port, ch & 255))
223 uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
231 * All the DMA operation mode stuff goes inside this ifdef.
232 * This assumes that you have a generic DMA device interface;
233 * no custom DMA interfaces are supported.
235 #ifdef CONFIG_DMA_ENGINE
237 #define PL011_DMA_BUFFER_SIZE PAGE_SIZE
239 static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
240 enum dma_data_direction dir)
244 sg->buf = dma_alloc_coherent(chan->device->dev,
245 PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
249 sg_init_table(&sg->sg, 1);
250 sg_set_page(&sg->sg, phys_to_page(dma_addr),
251 PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
252 sg_dma_address(&sg->sg) = dma_addr;
253 sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
258 static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
259 enum dma_data_direction dir)
262 dma_free_coherent(chan->device->dev,
263 PL011_DMA_BUFFER_SIZE, sg->buf,
264 sg_dma_address(&sg->sg));
268 static void pl011_dma_probe(struct uart_amba_port *uap)
270 /* DMA is the sole user of the platform data right now */
271 struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
272 struct device *dev = uap->port.dev;
273 struct dma_slave_config tx_conf = {
274 .dst_addr = uap->port.mapbase + UART01x_DR,
275 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
276 .direction = DMA_MEM_TO_DEV,
277 .dst_maxburst = uap->fifosize >> 1,
280 struct dma_chan *chan;
283 uap->dma_probed = true;
284 chan = dma_request_slave_channel_reason(dev, "tx");
286 if (PTR_ERR(chan) == -EPROBE_DEFER) {
287 dev_info(uap->port.dev, "DMA driver not ready\n");
288 uap->dma_probed = false;
292 /* We need platform data */
293 if (!plat || !plat->dma_filter) {
294 dev_info(uap->port.dev, "no DMA platform data\n");
298 /* Try to acquire a generic DMA engine slave TX channel */
300 dma_cap_set(DMA_SLAVE, mask);
302 chan = dma_request_channel(mask, plat->dma_filter,
305 dev_err(uap->port.dev, "no TX DMA channel!\n");
310 dmaengine_slave_config(chan, &tx_conf);
311 uap->dmatx.chan = chan;
313 dev_info(uap->port.dev, "DMA channel TX %s\n",
314 dma_chan_name(uap->dmatx.chan));
316 /* Optionally make use of an RX channel as well */
317 chan = dma_request_slave_channel(dev, "rx");
319 if (!chan && plat->dma_rx_param) {
320 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
323 dev_err(uap->port.dev, "no RX DMA channel!\n");
329 struct dma_slave_config rx_conf = {
330 .src_addr = uap->port.mapbase + UART01x_DR,
331 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
332 .direction = DMA_DEV_TO_MEM,
333 .src_maxburst = uap->fifosize >> 2,
336 struct dma_slave_caps caps;
339 * Some DMA controllers provide information on their capabilities.
340 * If the controller does, check for suitable residue processing;
341 * otherwise assume all is well.
343 if (0 == dma_get_slave_caps(chan, &caps)) {
344 if (caps.residue_granularity ==
345 DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
346 dma_release_channel(chan);
347 dev_info(uap->port.dev,
348 "RX DMA disabled - no residue processing\n");
352 dmaengine_slave_config(chan, &rx_conf);
353 uap->dmarx.chan = chan;
355 uap->dmarx.auto_poll_rate = false;
356 if (plat && plat->dma_rx_poll_enable) {
357 /* Set poll rate if specified. */
358 if (plat->dma_rx_poll_rate) {
359 uap->dmarx.auto_poll_rate = false;
360 uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
363 * Poll rate defaults to 100 ms if not
364 * specified. This will be adjusted with
365 * the baud rate at set_termios.
367 uap->dmarx.auto_poll_rate = true;
368 uap->dmarx.poll_rate = 100;
370 /* poll_timeout defaults to 3 secs if not specified. */
371 if (plat->dma_rx_poll_timeout)
372 uap->dmarx.poll_timeout =
373 plat->dma_rx_poll_timeout;
375 uap->dmarx.poll_timeout = 3000;
376 } else if (!plat && dev->of_node) {
377 uap->dmarx.auto_poll_rate = of_property_read_bool(
378 dev->of_node, "auto-poll");
379 if (uap->dmarx.auto_poll_rate) {
382 if (0 == of_property_read_u32(dev->of_node,
384 uap->dmarx.poll_rate = x;
386 uap->dmarx.poll_rate = 100;
387 if (0 == of_property_read_u32(dev->of_node,
388 "poll-timeout-ms", &x))
389 uap->dmarx.poll_timeout = x;
391 uap->dmarx.poll_timeout = 3000;
394 dev_info(uap->port.dev, "DMA channel RX %s\n",
395 dma_chan_name(uap->dmarx.chan));
399 static void pl011_dma_remove(struct uart_amba_port *uap)
402 dma_release_channel(uap->dmatx.chan);
404 dma_release_channel(uap->dmarx.chan);
407 /* Forward declare these for the refill routine */
408 static int pl011_dma_tx_refill(struct uart_amba_port *uap);
409 static void pl011_start_tx_pio(struct uart_amba_port *uap);
412 * The current DMA TX buffer has been sent.
413 * Try to queue up another DMA buffer.
415 static void pl011_dma_tx_callback(void *data)
417 struct uart_amba_port *uap = data;
418 struct pl011_dmatx_data *dmatx = &uap->dmatx;
422 spin_lock_irqsave(&uap->port.lock, flags);
423 if (uap->dmatx.queued)
424 dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
428 uap->dmacr = dmacr & ~UART011_TXDMAE;
429 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
432 * If TX DMA was disabled, it means that we've stopped the DMA for
433 * some reason (eg, XOFF received, or we want to send an X-char.)
435 * Note: we need to be careful here of a potential race between DMA
436 * and the rest of the driver - if the driver disables TX DMA while
437 * a TX buffer is completing, we must update the tx queued status to
438 * get further refills (hence we check dmacr).
440 if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
441 uart_circ_empty(&uap->port.state->xmit)) {
442 uap->dmatx.queued = false;
443 spin_unlock_irqrestore(&uap->port.lock, flags);
447 if (pl011_dma_tx_refill(uap) <= 0)
449 * We didn't queue a DMA buffer for some reason, but we
450 * have data pending to be sent. Re-enable the TX IRQ.
452 pl011_start_tx_pio(uap);
454 spin_unlock_irqrestore(&uap->port.lock, flags);
458 * Try to refill the TX DMA buffer.
459 * Locking: called with port lock held and IRQs disabled.
461 * 1 if we queued up a TX DMA buffer.
462 * 0 if we didn't want to handle this by DMA
465 static int pl011_dma_tx_refill(struct uart_amba_port *uap)
467 struct pl011_dmatx_data *dmatx = &uap->dmatx;
468 struct dma_chan *chan = dmatx->chan;
469 struct dma_device *dma_dev = chan->device;
470 struct dma_async_tx_descriptor *desc;
471 struct circ_buf *xmit = &uap->port.state->xmit;
475 * Try to avoid the overhead involved in using DMA if the
476 * transaction fits in the first half of the FIFO, by using
477 * the standard interrupt handling. This ensures that we
478 * issue a uart_write_wakeup() at the appropriate time.
480 count = uart_circ_chars_pending(xmit);
481 if (count < (uap->fifosize >> 1)) {
482 uap->dmatx.queued = false;
487 * Bodge: don't send the last character by DMA, as this
488 * will prevent XON from notifying us to restart DMA.
492 /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
493 if (count > PL011_DMA_BUFFER_SIZE)
494 count = PL011_DMA_BUFFER_SIZE;
496 if (xmit->tail < xmit->head)
497 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
499 size_t first = UART_XMIT_SIZE - xmit->tail;
504 second = count - first;
506 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
508 memcpy(&dmatx->buf[first], &xmit->buf[0], second);
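/*
 * Illustrative sketch only (not part of this driver, never compiled):
 * the copy above pulls 'count' bytes out of the circular xmit buffer,
 * which may wrap around the end of its backing array, hence the two
 * memcpy() calls. A self-contained version of the same idea, using
 * hypothetical names (ring_copy_out, RING_SIZE):
 */
#if 0
static size_t ring_copy_out(char *dst, const char *ring,
			    size_t tail, size_t count)
{
	size_t first = RING_SIZE - tail;	/* bytes until the wrap point */

	if (first > count)
		first = count;
	memcpy(dst, ring + tail, first);		/* chunk up to the wrap */
	memcpy(dst + first, ring, count - first);	/* remainder from index 0 */
	return count;
}
#endif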
511 dmatx->sg.length = count;
513 if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
514 uap->dmatx.queued = false;
515 dev_dbg(uap->port.dev, "unable to map TX DMA\n");
519 desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
520 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
522 dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
523 uap->dmatx.queued = false;
525 * If DMA cannot be used right now, we complete this
526 * transaction via IRQ and let the TTY layer retry.
528 dev_dbg(uap->port.dev, "TX DMA busy\n");
532 /* Some data to go along to the callback */
533 desc->callback = pl011_dma_tx_callback;
534 desc->callback_param = uap;
536 /* All errors should happen at prepare time */
537 dmaengine_submit(desc);
539 /* Fire the DMA transaction */
540 dma_dev->device_issue_pending(chan);
542 uap->dmacr |= UART011_TXDMAE;
543 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
544 uap->dmatx.queued = true;
547 * Now we know that DMA will fire, so advance the ring buffer
548 * with the stuff we just dispatched.
550 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
551 uap->port.icount.tx += count;
553 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
554 uart_write_wakeup(&uap->port);
560 * We received a transmit interrupt without a pending X-char but with
561 * pending characters.
562 * Locking: called with port lock held and IRQs disabled.
564 * false if we want to use PIO to transmit
565 * true if we queued a DMA buffer
567 static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
569 if (!uap->using_tx_dma)
573 * If we already have a TX buffer queued, but received a
574 * TX interrupt, it will be because we've just sent an X-char.
575 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
577 if (uap->dmatx.queued) {
578 uap->dmacr |= UART011_TXDMAE;
579 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
580 uap->im &= ~UART011_TXIM;
581 writew(uap->im, uap->port.membase + UART011_IMSC);
586 * We don't have a TX buffer queued, so try to queue one.
587 * If we successfully queued a buffer, mask the TX IRQ.
589 if (pl011_dma_tx_refill(uap) > 0) {
590 uap->im &= ~UART011_TXIM;
591 writew(uap->im, uap->port.membase + UART011_IMSC);
598 * Stop the DMA transmit (eg, due to received XOFF).
599 * Locking: called with port lock held and IRQs disabled.
601 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
603 if (uap->dmatx.queued) {
604 uap->dmacr &= ~UART011_TXDMAE;
605 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
610 * Try to start a DMA transmit, or in the case of an XON/OFF
611 * character queued for send, try to get that character out ASAP.
612 * Locking: called with port lock held and IRQs disabled.
614 * false if we want the TX IRQ to be enabled
615 * true if we have a buffer queued
617 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
621 if (!uap->using_tx_dma)
624 if (!uap->port.x_char) {
625 /* no X-char, try to push chars out in DMA mode */
628 if (!uap->dmatx.queued) {
629 if (pl011_dma_tx_refill(uap) > 0) {
630 uap->im &= ~UART011_TXIM;
631 writew(uap->im, uap->port.membase +
635 } else if (!(uap->dmacr & UART011_TXDMAE)) {
636 uap->dmacr |= UART011_TXDMAE;
638 uap->port.membase + UART011_DMACR);
644 * We have an X-char to send. Disable DMA to prevent it loading
645 * the TX fifo, and then see if we can stuff it into the FIFO.
648 uap->dmacr &= ~UART011_TXDMAE;
649 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
651 if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
653 * No space in the FIFO, so enable the transmit interrupt
654 * so we know when there is space. Note that once we've
655 * loaded the character, we should just re-enable DMA.
660 writew(uap->port.x_char, uap->port.membase + UART01x_DR);
661 uap->port.icount.tx++;
662 uap->port.x_char = 0;
664 /* Success - restore the DMA state */
666 writew(dmacr, uap->port.membase + UART011_DMACR);
672 * Flush the transmit buffer.
673 * Locking: called with port lock held and IRQs disabled.
675 static void pl011_dma_flush_buffer(struct uart_port *port)
676 __releases(&uap->port.lock)
677 __acquires(&uap->port.lock)
679 struct uart_amba_port *uap =
680 container_of(port, struct uart_amba_port, port);
682 if (!uap->using_tx_dma)
685 /* Avoid deadlock with the DMA engine callback */
686 spin_unlock(&uap->port.lock);
687 dmaengine_terminate_all(uap->dmatx.chan);
688 spin_lock(&uap->port.lock);
689 if (uap->dmatx.queued) {
690 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
692 uap->dmatx.queued = false;
693 uap->dmacr &= ~UART011_TXDMAE;
694 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
698 static void pl011_dma_rx_callback(void *data);
700 static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
702 struct dma_chan *rxchan = uap->dmarx.chan;
703 struct pl011_dmarx_data *dmarx = &uap->dmarx;
704 struct dma_async_tx_descriptor *desc;
705 struct pl011_sgbuf *sgbuf;
710 /* Start the RX DMA job */
711 sgbuf = uap->dmarx.use_buf_b ?
712 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
713 desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
715 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
717 * If the DMA engine is busy and cannot prepare a
718 * channel, no big deal, the driver will fall back
719 * to interrupt mode as a result of this error code.
722 uap->dmarx.running = false;
723 dmaengine_terminate_all(rxchan);
727 /* Some data to go along to the callback */
728 desc->callback = pl011_dma_rx_callback;
729 desc->callback_param = uap;
730 dmarx->cookie = dmaengine_submit(desc);
731 dma_async_issue_pending(rxchan);
733 uap->dmacr |= UART011_RXDMAE;
734 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
735 uap->dmarx.running = true;
737 uap->im &= ~UART011_RXIM;
738 writew(uap->im, uap->port.membase + UART011_IMSC);
744 * This is called when either the DMA job is complete, or
745 * the FIFO timeout interrupt occurred. This must be called
746 * with the port spinlock uap->port.lock held.
748 static void pl011_dma_rx_chars(struct uart_amba_port *uap,
749 u32 pending, bool use_buf_b,
752 struct tty_port *port = &uap->port.state->port;
753 struct pl011_sgbuf *sgbuf = use_buf_b ?
754 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
756 u32 fifotaken = 0; /* only used for vdbg() */
758 struct pl011_dmarx_data *dmarx = &uap->dmarx;
761 if (uap->dmarx.poll_rate) {
762 /* The data can be taken by polling */
763 dmataken = sgbuf->sg.length - dmarx->last_residue;
764 /* Recalculate the pending size */
765 if (pending >= dmataken)
769 /* Pick the remaining data from the DMA */
773 * First take all chars in the DMA pipe, then look in the FIFO.
774 * Note that tty_insert_flip_string() tries to take as many chars as it can.
777 dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
780 uap->port.icount.rx += dma_count;
781 if (dma_count < pending)
782 dev_warn(uap->port.dev,
783 "couldn't insert all characters (TTY is full?)\n");
786 /* Reset the last_residue for Rx DMA poll */
787 if (uap->dmarx.poll_rate)
788 dmarx->last_residue = sgbuf->sg.length;
791 * Only continue with trying to read the FIFO if all DMA chars have
794 if (dma_count == pending && readfifo) {
795 /* Clear any error flags */
796 writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
797 uap->port.membase + UART011_ICR);
800 * If we read all the DMA'd characters, and we had an
801 * incomplete buffer, that could be due to an rx error, or
802 * maybe we just timed out. Read any pending chars and check the error.
805 * Error conditions will only occur in the FIFO; these will
806 * trigger an immediate interrupt and stop the DMA job, so we
807 * will always find the error in the FIFO, never in the DMA buffer.
810 fifotaken = pl011_fifo_to_tty(uap);
813 spin_unlock(&uap->port.lock);
814 dev_vdbg(uap->port.dev,
815 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
816 dma_count, fifotaken);
817 tty_flip_buffer_push(port);
818 spin_lock(&uap->port.lock);
821 static void pl011_dma_rx_irq(struct uart_amba_port *uap)
823 struct pl011_dmarx_data *dmarx = &uap->dmarx;
824 struct dma_chan *rxchan = dmarx->chan;
825 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
826 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
828 struct dma_tx_state state;
829 enum dma_status dmastat;
832 * Pause the transfer so we can trust the current counter,
833 * do this before we pause the PL011 block, else we may overflow the FIFO.
836 if (dmaengine_pause(rxchan))
837 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
838 dmastat = rxchan->device->device_tx_status(rxchan,
839 dmarx->cookie, &state);
840 if (dmastat != DMA_PAUSED)
841 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
843 /* Disable RX DMA - incoming data will wait in the FIFO */
844 uap->dmacr &= ~UART011_RXDMAE;
845 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
846 uap->dmarx.running = false;
848 pending = sgbuf->sg.length - state.residue;
849 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
850 /* Then we terminate the transfer - we now know our residue */
851 dmaengine_terminate_all(rxchan);
854 * This will take the chars we have so far and insert
855 * into the framework.
857 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
859 /* Switch buffer & re-trigger DMA job */
860 dmarx->use_buf_b = !dmarx->use_buf_b;
861 if (pl011_dma_rx_trigger_dma(uap)) {
862 dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
863 "fall back to interrupt mode\n");
864 uap->im |= UART011_RXIM;
865 writew(uap->im, uap->port.membase + UART011_IMSC);
869 static void pl011_dma_rx_callback(void *data)
871 struct uart_amba_port *uap = data;
872 struct pl011_dmarx_data *dmarx = &uap->dmarx;
873 struct dma_chan *rxchan = dmarx->chan;
874 bool lastbuf = dmarx->use_buf_b;
875 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
876 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
878 struct dma_tx_state state;
882 * This completion interrupt occurs typically when the
883 * RX buffer is totally stuffed but no timeout has yet
884 * occurred. When that happens, we just want the RX
885 * routine to flush out the secondary DMA buffer while
886 * we immediately trigger the next DMA job.
888 spin_lock_irq(&uap->port.lock);
890 * Rx data can be taken by the UART interrupts during
891 * the DMA irq handler. So we check the residue here.
893 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
894 pending = sgbuf->sg.length - state.residue;
895 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
896 /* Then we terminate the transfer - we now know our residue */
897 dmaengine_terminate_all(rxchan);
899 uap->dmarx.running = false;
900 dmarx->use_buf_b = !lastbuf;
901 ret = pl011_dma_rx_trigger_dma(uap);
903 pl011_dma_rx_chars(uap, pending, lastbuf, false);
904 spin_unlock_irq(&uap->port.lock);
906 * Do this check after we picked the DMA chars so we don't
907 * get some IRQ immediately from RX.
910 dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
911 "fall back to interrupt mode\n");
912 uap->im |= UART011_RXIM;
913 writew(uap->im, uap->port.membase + UART011_IMSC);
918 * Stop accepting received characters, when we're shutting down or
919 * suspending this port.
920 * Locking: called with port lock held and IRQs disabled.
922 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
924 /* FIXME. Just disable the DMA enable */
925 uap->dmacr &= ~UART011_RXDMAE;
926 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
930 * Timer handler for Rx DMA polling.
931 * On each poll, it checks the residue in the DMA buffer and transfers
932 * data to the tty. Also, last_residue is updated for the next poll.
934 static void pl011_dma_rx_poll(unsigned long args)
936 struct uart_amba_port *uap = (struct uart_amba_port *)args;
937 struct tty_port *port = &uap->port.state->port;
938 struct pl011_dmarx_data *dmarx = &uap->dmarx;
939 struct dma_chan *rxchan = uap->dmarx.chan;
940 unsigned long flags = 0;
941 unsigned int dmataken = 0;
942 unsigned int size = 0;
943 struct pl011_sgbuf *sgbuf;
945 struct dma_tx_state state;
947 sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
948 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
949 if (likely(state.residue < dmarx->last_residue)) {
950 dmataken = sgbuf->sg.length - dmarx->last_residue;
951 size = dmarx->last_residue - state.residue;
952 dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
954 if (dma_count == size)
955 dmarx->last_residue = state.residue;
956 dmarx->last_jiffies = jiffies;
958 tty_flip_buffer_push(port);
961 * If no data is received in poll_timeout, the driver will fall back
962 * to interrupt mode. We will retrigger DMA at the first interrupt.
964 if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
965 > uap->dmarx.poll_timeout) {
967 spin_lock_irqsave(&uap->port.lock, flags);
968 pl011_dma_rx_stop(uap);
969 uap->im |= UART011_RXIM;
970 writew(uap->im, uap->port.membase + UART011_IMSC);
971 spin_unlock_irqrestore(&uap->port.lock, flags);
973 uap->dmarx.running = false;
974 dmaengine_terminate_all(rxchan);
975 del_timer(&uap->dmarx.timer);
977 mod_timer(&uap->dmarx.timer,
978 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
982 static void pl011_dma_startup(struct uart_amba_port *uap)
986 if (!uap->dma_probed)
987 pl011_dma_probe(uap);
989 if (!uap->dmatx.chan)
992 uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
993 if (!uap->dmatx.buf) {
994 dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
995 uap->port.fifosize = uap->fifosize;
999 sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);
1001 /* The DMA buffer is now the FIFO the TTY subsystem can use */
1002 uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
1003 uap->using_tx_dma = true;
1005 if (!uap->dmarx.chan)
1008 /* Allocate and map DMA RX buffers */
1009 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
1012 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1013 "RX buffer A", ret);
1017 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
1020 dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
1021 "RX buffer B", ret);
1022 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
1027 uap->using_rx_dma = true;
1030 /* Turn on DMA error (RX/TX will be enabled on demand) */
1031 uap->dmacr |= UART011_DMAONERR;
1032 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
1035 * ST Micro variants have some specific DMA burst threshold
1036 * compensation. Set this to 16 bytes, so bursts will only
1037 * be issued above/below 16 bytes.
1039 if (uap->vendor->dma_threshold)
1040 writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
1041 uap->port.membase + ST_UART011_DMAWM);
1043 if (uap->using_rx_dma) {
1044 if (pl011_dma_rx_trigger_dma(uap))
1045 dev_dbg(uap->port.dev, "could not trigger initial "
1046 "RX DMA job, fall back to interrupt mode\n");
1047 if (uap->dmarx.poll_rate) {
1048 init_timer(&(uap->dmarx.timer));
1049 uap->dmarx.timer.function = pl011_dma_rx_poll;
1050 uap->dmarx.timer.data = (unsigned long)uap;
1051 mod_timer(&uap->dmarx.timer,
1053 msecs_to_jiffies(uap->dmarx.poll_rate));
1054 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1055 uap->dmarx.last_jiffies = jiffies;
1060 static void pl011_dma_shutdown(struct uart_amba_port *uap)
1062 if (!(uap->using_tx_dma || uap->using_rx_dma))
1065 /* Disable RX and TX DMA */
1066 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
1069 spin_lock_irq(&uap->port.lock);
1070 uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
1071 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
1072 spin_unlock_irq(&uap->port.lock);
1074 if (uap->using_tx_dma) {
1075 /* In theory, this should already be done by pl011_dma_flush_buffer */
1076 dmaengine_terminate_all(uap->dmatx.chan);
1077 if (uap->dmatx.queued) {
1078 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
1080 uap->dmatx.queued = false;
1083 kfree(uap->dmatx.buf);
1084 uap->using_tx_dma = false;
1087 if (uap->using_rx_dma) {
1088 dmaengine_terminate_all(uap->dmarx.chan);
1089 /* Clean up the RX DMA */
1090 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
1091 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
1092 if (uap->dmarx.poll_rate)
1093 del_timer_sync(&uap->dmarx.timer);
1094 uap->using_rx_dma = false;
1098 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1100 return uap->using_rx_dma;
1103 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1105 return uap->using_rx_dma && uap->dmarx.running;
1109 /* Blank functions if the DMA engine is not available */
1110 static inline void pl011_dma_probe(struct uart_amba_port *uap)
1114 static inline void pl011_dma_remove(struct uart_amba_port *uap)
1118 static inline void pl011_dma_startup(struct uart_amba_port *uap)
1122 static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
1126 static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
1131 static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
1135 static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
1140 static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
1144 static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1148 static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
1153 static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1158 static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1163 #define pl011_dma_flush_buffer NULL
1166 static void pl011_stop_tx(struct uart_port *port)
1168 struct uart_amba_port *uap =
1169 container_of(port, struct uart_amba_port, port);
1171 uap->im &= ~UART011_TXIM;
1172 writew(uap->im, uap->port.membase + UART011_IMSC);
1173 pl011_dma_tx_stop(uap);
1176 static bool pl011_tx_chars(struct uart_amba_port *uap);
1178 /* Start TX with programmed I/O only (no DMA) */
1179 static void pl011_start_tx_pio(struct uart_amba_port *uap)
1181 uap->im |= UART011_TXIM;
1182 writew(uap->im, uap->port.membase + UART011_IMSC);
1183 if (!uap->tx_irq_seen)
1184 pl011_tx_chars(uap);
1187 static void pl011_start_tx(struct uart_port *port)
1189 struct uart_amba_port *uap =
1190 container_of(port, struct uart_amba_port, port);
1192 if (!pl011_dma_tx_start(uap))
1193 pl011_start_tx_pio(uap);
1196 static void pl011_stop_rx(struct uart_port *port)
1198 struct uart_amba_port *uap =
1199 container_of(port, struct uart_amba_port, port);
1201 uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
1202 UART011_PEIM|UART011_BEIM|UART011_OEIM);
1203 writew(uap->im, uap->port.membase + UART011_IMSC);
1205 pl011_dma_rx_stop(uap);
1208 static void pl011_enable_ms(struct uart_port *port)
1210 struct uart_amba_port *uap =
1211 container_of(port, struct uart_amba_port, port);
1213 uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
1214 writew(uap->im, uap->port.membase + UART011_IMSC);
1217 static void pl011_rx_chars(struct uart_amba_port *uap)
1218 __releases(&uap->port.lock)
1219 __acquires(&uap->port.lock)
1221 pl011_fifo_to_tty(uap);
1223 spin_unlock(&uap->port.lock);
1224 tty_flip_buffer_push(&uap->port.state->port);
1226 * If we were temporarily out of DMA mode for a while,
1227 * attempt to switch back to DMA mode again.
1229 if (pl011_dma_rx_available(uap)) {
1230 if (pl011_dma_rx_trigger_dma(uap)) {
1231 dev_dbg(uap->port.dev, "could not trigger RX DMA job "
1232 "fall back to interrupt mode again\n");
1233 uap->im |= UART011_RXIM;
1234 writew(uap->im, uap->port.membase + UART011_IMSC);
1236 #ifdef CONFIG_DMA_ENGINE
1237 /* Start Rx DMA poll */
1238 if (uap->dmarx.poll_rate) {
1239 uap->dmarx.last_jiffies = jiffies;
1240 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
1241 mod_timer(&uap->dmarx.timer,
1243 msecs_to_jiffies(uap->dmarx.poll_rate));
1248 spin_lock(&uap->port.lock);
1252 * Transmit a character
1253 * There must be at least one free entry in the TX FIFO to accept the char.
1255 * Returns true if the FIFO might have space in it afterwards;
1256 * returns false if the FIFO definitely became full.
1258 static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c)
1260 writew(c, uap->port.membase + UART01x_DR);
1261 uap->port.icount.tx++;
1263 if (likely(uap->tx_irq_seen > 1))
1266 return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF);
1269 static bool pl011_tx_chars(struct uart_amba_port *uap)
1271 struct circ_buf *xmit = &uap->port.state->xmit;
1274 if (unlikely(uap->tx_irq_seen < 2))
1276 * Initial FIFO fill level unknown: we must check TXFF
1277 * after each write, so just try to fill up the FIFO.
1279 count = uap->fifosize;
1280 else /* tx_irq_seen >= 2 */
1282 * FIFO initially at least half-empty, so we can simply
1283 * write half the FIFO without polling TXFF.
1285 * Note: the *first* TX IRQ can still race with
1286 * pl011_start_tx_pio(), which can result in the FIFO
1287 * being fuller than expected in that case.
1289 count = uap->fifosize >> 1;
1292 * If the FIFO is full we're guaranteed a TX IRQ at some later point,
1293 * and can't transmit immediately in any case:
1295 if (unlikely(uap->tx_irq_seen < 2 &&
1296 readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF))
1299 if (uap->port.x_char) {
1300 pl011_tx_char(uap, uap->port.x_char);
1301 uap->port.x_char = 0;
1304 if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
1305 pl011_stop_tx(&uap->port);
1309 /* If we are using DMA mode, try to send some characters. */
1310 if (pl011_dma_tx_irq(uap))
1313 while (count-- > 0 && pl011_tx_char(uap, xmit->buf[xmit->tail])) {
1314 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
1315 if (uart_circ_empty(xmit))
1319 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1320 uart_write_wakeup(&uap->port);
1322 if (uart_circ_empty(xmit)) {
1323 pl011_stop_tx(&uap->port);
1327 if (unlikely(!uap->tx_irq_seen))
1328 schedule_delayed_work(&uap->tx_softirq_work, uap->port.timeout);
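/*
 * The delayed work scheduled above is a polling fallback: until a real
 * TX interrupt has been observed (tx_irq_seen == 0), pl011_tx_softirq()
 * keeps calling pl011_tx_chars() so transmission still makes progress
 * even if the controller raises no TX IRQ before the FIFO has first
 * been filled past its trigger level.
 */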
1334 static void pl011_modem_status(struct uart_amba_port *uap)
1336 unsigned int status, delta;
1338 status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
1340 delta = status ^ uap->old_status;
1341 uap->old_status = status;
1346 if (delta & UART01x_FR_DCD)
1347 uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
1349 if (delta & UART01x_FR_DSR)
1350 uap->port.icount.dsr++;
1352 if (delta & UART01x_FR_CTS)
1353 uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);
1355 wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
1358 static void pl011_tx_softirq(struct work_struct *work)
1360 struct delayed_work *dwork = to_delayed_work(work);
1361 struct uart_amba_port *uap =
1362 container_of(dwork, struct uart_amba_port, tx_softirq_work);
1364 spin_lock(&uap->port.lock);
1365 while (pl011_tx_chars(uap)) ;
1366 spin_unlock(&uap->port.lock);
1369 static void pl011_tx_irq_seen(struct uart_amba_port *uap)
1371 if (likely(uap->tx_irq_seen > 1))
1375 if (uap->tx_irq_seen < 2)
1377 cancel_delayed_work(&uap->tx_softirq_work);
1380 static irqreturn_t pl011_int(int irq, void *dev_id)
1382 struct uart_amba_port *uap = dev_id;
1383 unsigned long flags;
1384 unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
1386 unsigned int dummy_read;
1388 spin_lock_irqsave(&uap->port.lock, flags);
1389 status = readw(uap->port.membase + UART011_MIS);
1392 if (uap->vendor->cts_event_workaround) {
1393 /* workaround to make sure that all bits are unlocked.. */
1394 writew(0x00, uap->port.membase + UART011_ICR);
1397 * WA: introduce a 26 ns (1 uart clk) delay before W1C;
1398 * a single APB access will incur a 2 pclk (133.12 MHz) delay,
1399 * so add 2 dummy reads
1401 dummy_read = readw(uap->port.membase + UART011_ICR);
1402 dummy_read = readw(uap->port.membase + UART011_ICR);
1405 writew(status & ~(UART011_TXIS|UART011_RTIS|
1407 uap->port.membase + UART011_ICR);
1409 if (status & (UART011_RTIS|UART011_RXIS)) {
1410 if (pl011_dma_rx_running(uap))
1411 pl011_dma_rx_irq(uap);
1413 pl011_rx_chars(uap);
1415 if (status & (UART011_DSRMIS|UART011_DCDMIS|
1416 UART011_CTSMIS|UART011_RIMIS))
1417 pl011_modem_status(uap);
1418 if (status & UART011_TXIS) {
1419 pl011_tx_irq_seen(uap);
1420 pl011_tx_chars(uap);
1423 if (pass_counter-- == 0)
1426 status = readw(uap->port.membase + UART011_MIS);
1427 } while (status != 0);
1431 spin_unlock_irqrestore(&uap->port.lock, flags);
1433 return IRQ_RETVAL(handled);
1436 static unsigned int pl011_tx_empty(struct uart_port *port)
1438 struct uart_amba_port *uap =
1439 container_of(port, struct uart_amba_port, port);
1440 unsigned int status = readw(uap->port.membase + UART01x_FR);
1441 return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
1444 static unsigned int pl011_get_mctrl(struct uart_port *port)
1446 struct uart_amba_port *uap =
1447 container_of(port, struct uart_amba_port, port);
1448 unsigned int result = 0;
1449 unsigned int status = readw(uap->port.membase + UART01x_FR);
1451 #define TIOCMBIT(uartbit, tiocmbit) \
1452 if (status & uartbit) \
1455 TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1456 TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
1457 TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
1458 TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
1463 static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1465 struct uart_amba_port *uap =
1466 container_of(port, struct uart_amba_port, port);
1469 cr = readw(uap->port.membase + UART011_CR);
1471 #define TIOCMBIT(tiocmbit, uartbit) \
1472 if (mctrl & tiocmbit) \
1477 TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1478 TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1479 TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1480 TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1481 TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
1484 /* We need to disable auto-RTS if we want to turn RTS off */
1485 TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1489 writew(cr, uap->port.membase + UART011_CR);
1492 static void pl011_break_ctl(struct uart_port *port, int break_state)
1494 struct uart_amba_port *uap =
1495 container_of(port, struct uart_amba_port, port);
1496 unsigned long flags;
1499 spin_lock_irqsave(&uap->port.lock, flags);
1500 lcr_h = readw(uap->port.membase + uap->lcrh_tx);
1501 if (break_state == -1)
1502 lcr_h |= UART01x_LCRH_BRK;
1504 lcr_h &= ~UART01x_LCRH_BRK;
1505 writew(lcr_h, uap->port.membase + uap->lcrh_tx);
1506 spin_unlock_irqrestore(&uap->port.lock, flags);
1509 #ifdef CONFIG_CONSOLE_POLL
1511 static void pl011_quiesce_irqs(struct uart_port *port)
1513 struct uart_amba_port *uap =
1514 container_of(port, struct uart_amba_port, port);
1515 unsigned char __iomem *regs = uap->port.membase;
1517 writew(readw(regs + UART011_MIS), regs + UART011_ICR);
1519 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
1520 * we simply mask it. start_tx() will unmask it.
1522 * Note we can race with start_tx(), and if the race happens, the
1523 * polling user might get another interrupt just after we clear it.
1524 * But it should be OK and can happen even w/o the race, e.g.
1525 * controller immediately got some new data and raised the IRQ.
1527 * And whoever uses polling routines assumes that it manages the device
1528 * (including tx queue), so we're also fine with start_tx()'s caller side.
1531 writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
1534 static int pl011_get_poll_char(struct uart_port *port)
1536 struct uart_amba_port *uap =
1537 container_of(port, struct uart_amba_port, port);
1538 unsigned int status;
2541 * The caller might need IRQs lowered, e.g. if used with the KDB NMI debugger.
1544 pl011_quiesce_irqs(port);
1546 status = readw(uap->port.membase + UART01x_FR);
1547 if (status & UART01x_FR_RXFE)
1548 return NO_POLL_CHAR;
1550 return readw(uap->port.membase + UART01x_DR);
1553 static void pl011_put_poll_char(struct uart_port *port,
1556 struct uart_amba_port *uap =
1557 container_of(port, struct uart_amba_port, port);
1559 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1562 writew(ch, uap->port.membase + UART01x_DR);
1565 #endif /* CONFIG_CONSOLE_POLL */
1567 static int pl011_hwinit(struct uart_port *port)
1569 struct uart_amba_port *uap =
1570 container_of(port, struct uart_amba_port, port);
1573 /* Optionally enable pins to be muxed in and configured */
1574 pinctrl_pm_select_default_state(port->dev);
1577 * Try to enable the clock producer.
1579 retval = clk_prepare_enable(uap->clk);
1583 uap->port.uartclk = clk_get_rate(uap->clk);
1585 /* Clear pending error and receive interrupts */
1586 writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
1587 UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);
1590 * Save interrupts enable mask, and enable RX interrupts in case
1591 * the interrupt is used for NMI entry.
1593 uap->im = readw(uap->port.membase + UART011_IMSC);
1594 writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);
1596 if (dev_get_platdata(uap->port.dev)) {
1597 struct amba_pl011_data *plat;
1599 plat = dev_get_platdata(uap->port.dev);
1606 static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
1608 writew(lcr_h, uap->port.membase + uap->lcrh_rx);
1609 if (uap->lcrh_rx != uap->lcrh_tx) {
1612 * Wait 10 PCLKs before writing the LCRH_TX register;
1613 * to get this delay, write to a read-only register 10 times
1615 for (i = 0; i < 10; ++i)
1616 writew(0xff, uap->port.membase + UART011_MIS);
1617 writew(lcr_h, uap->port.membase + uap->lcrh_tx);
1621 static int pl011_startup(struct uart_port *port)
1623 struct uart_amba_port *uap =
1624 container_of(port, struct uart_amba_port, port);
1628 retval = pl011_hwinit(port);
1632 writew(uap->im, uap->port.membase + UART011_IMSC);
1637 retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
1641 writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
1643 spin_lock_irq(&uap->port.lock);
1645 /* restore RTS and DTR */
1646 cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
1647 cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
1648 writew(cr, uap->port.membase + UART011_CR);
1650 spin_unlock_irq(&uap->port.lock);
1653 * initialise the old status of the modem signals
1655 uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
1658 pl011_dma_startup(uap);
1661 * Finally, enable interrupts; only timeouts when using DMA.
1662 * If the initial RX DMA job failed, start in interrupt mode as well.
1665 spin_lock_irq(&uap->port.lock);
1666 /* Clear out any spuriously appearing RX interrupts */
1667 writew(UART011_RTIS | UART011_RXIS,
1668 uap->port.membase + UART011_ICR);
1669 uap->im = UART011_RTIM;
1670 if (!pl011_dma_rx_running(uap))
1671 uap->im |= UART011_RXIM;
1672 writew(uap->im, uap->port.membase + UART011_IMSC);
1673 spin_unlock_irq(&uap->port.lock);
1678 clk_disable_unprepare(uap->clk);
1682 static void pl011_shutdown_channel(struct uart_amba_port *uap,
1687 val = readw(uap->port.membase + lcrh);
1688 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
1689 writew(val, uap->port.membase + lcrh);
1692 static void pl011_shutdown(struct uart_port *port)
1694 struct uart_amba_port *uap =
1695 container_of(port, struct uart_amba_port, port);
1698 cancel_delayed_work_sync(&uap->tx_softirq_work);
1701 * disable all interrupts
1703 spin_lock_irq(&uap->port.lock);
1705 writew(uap->im, uap->port.membase + UART011_IMSC);
1706 writew(0xffff & ~UART011_TXIS, uap->port.membase + UART011_ICR);
1707 spin_unlock_irq(&uap->port.lock);
1709 pl011_dma_shutdown(uap);
1712 * Free the interrupt
1714 free_irq(uap->port.irq, uap);
1718 * disable the port. It should not disable RTS and DTR.
1719 * Also RTS and DTR state should be preserved to restore
1720 * it during startup().
1722 uap->autorts = false;
1723 spin_lock_irq(&uap->port.lock);
1724 cr = readw(uap->port.membase + UART011_CR);
1726 cr &= UART011_CR_RTS | UART011_CR_DTR;
1727 cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
1728 writew(cr, uap->port.membase + UART011_CR);
1729 spin_unlock_irq(&uap->port.lock);
1732 * disable break condition and fifos
1734 pl011_shutdown_channel(uap, uap->lcrh_rx);
1735 if (uap->lcrh_rx != uap->lcrh_tx)
1736 pl011_shutdown_channel(uap, uap->lcrh_tx);
1739 * Shut down the clock producer
1741 clk_disable_unprepare(uap->clk);
1742 /* Optionally let pins go into sleep states */
1743 pinctrl_pm_select_sleep_state(port->dev);
1745 if (dev_get_platdata(uap->port.dev)) {
1746 struct amba_pl011_data *plat;
1748 plat = dev_get_platdata(uap->port.dev);
1753 if (uap->port.ops->flush_buffer)
1754 uap->port.ops->flush_buffer(port);
1758 pl011_set_termios(struct uart_port *port, struct ktermios *termios,
1759 struct ktermios *old)
1761 struct uart_amba_port *uap =
1762 container_of(port, struct uart_amba_port, port);
1763 unsigned int lcr_h, old_cr;
1764 unsigned long flags;
1765 unsigned int baud, quot, clkdiv;
1767 if (uap->vendor->oversampling)
1773 * Ask the core to calculate the divisor for us.
1775 baud = uart_get_baud_rate(port, termios, old, 0,
1776 port->uartclk / clkdiv);
1777 #ifdef CONFIG_DMA_ENGINE
1779 * Adjust RX DMA polling rate with baud rate if not specified.
1781 if (uap->dmarx.auto_poll_rate)
1782 uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
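/*
 * For example, at 115200 baud this gives DIV_ROUND_UP(10000000, 115200)
 * = 87 ms, and at 9600 baud 1042 ms, i.e. roughly the time needed to
 * receive 1000 characters at 10 bits per character.
 */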
1785 if (baud > port->uartclk/16)
1786 quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
1788 quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
1790 switch (termios->c_cflag & CSIZE) {
1792 lcr_h = UART01x_LCRH_WLEN_5;
1795 lcr_h = UART01x_LCRH_WLEN_6;
1798 lcr_h = UART01x_LCRH_WLEN_7;
1801 lcr_h = UART01x_LCRH_WLEN_8;
1804 if (termios->c_cflag & CSTOPB)
1805 lcr_h |= UART01x_LCRH_STP2;
1806 if (termios->c_cflag & PARENB) {
1807 lcr_h |= UART01x_LCRH_PEN;
1808 if (!(termios->c_cflag & PARODD))
1809 lcr_h |= UART01x_LCRH_EPS;
1811 if (uap->fifosize > 1)
1812 lcr_h |= UART01x_LCRH_FEN;
1814 spin_lock_irqsave(&port->lock, flags);
1817 * Update the per-port timeout.
1819 uart_update_timeout(port, termios->c_cflag, baud);
1821 port->read_status_mask = UART011_DR_OE | 255;
1822 if (termios->c_iflag & INPCK)
1823 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
1824 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1825 port->read_status_mask |= UART011_DR_BE;
1828 * Characters to ignore
1830 port->ignore_status_mask = 0;
1831 if (termios->c_iflag & IGNPAR)
1832 port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
1833 if (termios->c_iflag & IGNBRK) {
1834 port->ignore_status_mask |= UART011_DR_BE;
1836 * If we're ignoring parity and break indicators,
1837 * ignore overruns too (for real raw support).
1839 if (termios->c_iflag & IGNPAR)
1840 port->ignore_status_mask |= UART011_DR_OE;
1844 * Ignore all characters if CREAD is not set.
1846 if ((termios->c_cflag & CREAD) == 0)
1847 port->ignore_status_mask |= UART_DUMMY_DR_RX;
1849 if (UART_ENABLE_MS(port, termios->c_cflag))
1850 pl011_enable_ms(port);
1852 /* first, disable everything */
1853 old_cr = readw(port->membase + UART011_CR);
1854 writew(0, port->membase + UART011_CR);
1856 if (termios->c_cflag & CRTSCTS) {
1857 if (old_cr & UART011_CR_RTS)
1858 old_cr |= UART011_CR_RTSEN;
1860 old_cr |= UART011_CR_CTSEN;
1861 uap->autorts = true;
1863 old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
1864 uap->autorts = false;
1867 if (uap->vendor->oversampling) {
1868 if (baud > port->uartclk / 16)
1869 old_cr |= ST_UART011_CR_OVSFACT;
1871 old_cr &= ~ST_UART011_CR_OVSFACT;
1875 * Workaround for the ST Micro oversampling variants to
1876 * increase the bitrate slightly, by lowering the divisor,
1877 * to avoid delayed sampling of start bit at high speeds,
1878 * else we see data corruption.
1880 if (uap->vendor->oversampling) {
1881 if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
1883 else if ((baud > 3250000) && (quot > 2))
1887 writew(quot & 0x3f, port->membase + UART011_FBRD);
1888 writew(quot >> 6, port->membase + UART011_IBRD);
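/*
 * Worked example (assuming a 24 MHz uartclk, 115200 baud, no
 * oversampling): quot = DIV_ROUND_CLOSEST(24000000 * 4, 115200) = 833,
 * so FBRD = 833 & 0x3f = 1 and IBRD = 833 >> 6 = 13, giving an actual
 * rate of 24000000 / (16 * (13 + 1/64)) = ~115246 baud.
 */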
1891 * ----------v----------v----------v----------v-----
1892 * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
1893 * UART011_FBRD & UART011_IBRD.
1894 * ----------^----------^----------^----------^-----
1896 pl011_write_lcr_h(uap, lcr_h);
1897 writew(old_cr, port->membase + UART011_CR);
1899 spin_unlock_irqrestore(&port->lock, flags);
1902 static const char *pl011_type(struct uart_port *port)
1904 struct uart_amba_port *uap =
1905 container_of(port, struct uart_amba_port, port);
1906 return uap->port.type == PORT_AMBA ? uap->type : NULL;
1910 * Release the memory region(s) being used by 'port'
1912 static void pl011_release_port(struct uart_port *port)
1914 release_mem_region(port->mapbase, SZ_4K);
1918 * Request the memory region(s) being used by 'port'
1920 static int pl011_request_port(struct uart_port *port)
1922 return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
1923 != NULL ? 0 : -EBUSY;
1927 * Configure/autoconfigure the port.
1929 static void pl011_config_port(struct uart_port *port, int flags)
1931 if (flags & UART_CONFIG_TYPE) {
1932 port->type = PORT_AMBA;
1933 pl011_request_port(port);
1938 * verify the new serial_struct (for TIOCSSERIAL).
1940 static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
1943 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
1945 if (ser->irq < 0 || ser->irq >= nr_irqs)
1947 if (ser->baud_base < 9600)
1952 static struct uart_ops amba_pl011_pops = {
1953 .tx_empty = pl011_tx_empty,
1954 .set_mctrl = pl011_set_mctrl,
1955 .get_mctrl = pl011_get_mctrl,
1956 .stop_tx = pl011_stop_tx,
1957 .start_tx = pl011_start_tx,
1958 .stop_rx = pl011_stop_rx,
1959 .enable_ms = pl011_enable_ms,
1960 .break_ctl = pl011_break_ctl,
1961 .startup = pl011_startup,
1962 .shutdown = pl011_shutdown,
1963 .flush_buffer = pl011_dma_flush_buffer,
1964 .set_termios = pl011_set_termios,
1966 .release_port = pl011_release_port,
1967 .request_port = pl011_request_port,
1968 .config_port = pl011_config_port,
1969 .verify_port = pl011_verify_port,
1970 #ifdef CONFIG_CONSOLE_POLL
1971 .poll_init = pl011_hwinit,
1972 .poll_get_char = pl011_get_poll_char,
1973 .poll_put_char = pl011_put_poll_char,
1977 static struct uart_amba_port *amba_ports[UART_NR];
1979 #ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
1981 static void pl011_console_putchar(struct uart_port *port, int ch)
1983 struct uart_amba_port *uap =
1984 container_of(port, struct uart_amba_port, port);
1986 while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
1988 writew(ch, uap->port.membase + UART01x_DR);
1992 pl011_console_write(struct console *co, const char *s, unsigned int count)
1994 struct uart_amba_port *uap = amba_ports[co->index];
1995 unsigned int status, old_cr, new_cr;
1996 unsigned long flags;
1999 clk_enable(uap->clk);
2001 local_irq_save(flags);
2002 if (uap->port.sysrq)
2004 else if (oops_in_progress)
2005 locked = spin_trylock(&uap->port.lock);
2007 spin_lock(&uap->port.lock);
2010 * First save the CR then disable the interrupts
2012 old_cr = readw(uap->port.membase + UART011_CR);
2013 new_cr = old_cr & ~UART011_CR_CTSEN;
2014 new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
2015 writew(new_cr, uap->port.membase + UART011_CR);
2017 uart_console_write(&uap->port, s, count, pl011_console_putchar);
2020 * Finally, wait for transmitter to become empty
2021 * and restore the CR
2024 status = readw(uap->port.membase + UART01x_FR);
2025 } while (status & UART01x_FR_BUSY);
2026 writew(old_cr, uap->port.membase + UART011_CR);
2029 spin_unlock(&uap->port.lock);
2030 local_irq_restore(flags);
2032 clk_disable(uap->clk);
2036 pl011_console_get_options(struct uart_amba_port *uap, int *baud,
2037 int *parity, int *bits)
2039 if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
2040 unsigned int lcr_h, ibrd, fbrd;
2042 lcr_h = readw(uap->port.membase + uap->lcrh_tx);
2045 if (lcr_h & UART01x_LCRH_PEN) {
2046 if (lcr_h & UART01x_LCRH_EPS)
2052 if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
2057 ibrd = readw(uap->port.membase + UART011_IBRD);
2058 fbrd = readw(uap->port.membase + UART011_FBRD);
2060 *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
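/*
 * This is the inverse of the divisor calculation in pl011_set_termios();
 * e.g. with a 24 MHz uartclk, IBRD = 13 and FBRD = 1 this yields
 * 96000000 / 833 = ~115246, i.e. a configured rate of 115200 baud.
 */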
2062 if (uap->vendor->oversampling) {
2063 if (readw(uap->port.membase + UART011_CR)
2064 & ST_UART011_CR_OVSFACT)
2070 static int __init pl011_console_setup(struct console *co, char *options)
2072 struct uart_amba_port *uap;
2080 * Check whether an invalid uart number has been specified, and
2081 * if so, search for the first available port that does have console support.
2084 if (co->index >= UART_NR)
2086 uap = amba_ports[co->index];
2090 /* Allow pins to be muxed in and configured */
2091 pinctrl_pm_select_default_state(uap->port.dev);
2093 ret = clk_prepare(uap->clk);
2097 if (dev_get_platdata(uap->port.dev)) {
2098 struct amba_pl011_data *plat;
2100 plat = dev_get_platdata(uap->port.dev);
2105 uap->port.uartclk = clk_get_rate(uap->clk);
2108 uart_parse_options(options, &baud, &parity, &bits, &flow);
2110 pl011_console_get_options(uap, &baud, &parity, &bits);
2112 return uart_set_options(&uap->port, co, baud, parity, bits, flow);
2115 static struct uart_driver amba_reg;
2116 static struct console amba_console = {
2118 .write = pl011_console_write,
2119 .device = uart_console_device,
2120 .setup = pl011_console_setup,
2121 .flags = CON_PRINTBUFFER,
2126 #define AMBA_CONSOLE (&amba_console)
2128 static void pl011_putc(struct uart_port *port, int c)
2130 while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
2132 writeb(c, port->membase + UART01x_DR);
2133 while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
2137 static void pl011_early_write(struct console *con, const char *s, unsigned n)
2139 struct earlycon_device *dev = con->data;
2141 uart_console_write(&dev->port, s, n, pl011_putc);
2144 static int __init pl011_early_console_setup(struct earlycon_device *device,
2147 if (!device->port.membase)
2150 device->con->write = pl011_early_write;
2153 EARLYCON_DECLARE(pl011, pl011_early_console_setup);
2154 OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
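/*
 * Usage example (illustrative): the early console can be requested on
 * the kernel command line as "earlycon=pl011,<mmio-base>", e.g.
 * "earlycon=pl011,0x10009000" (address shown is only an example); on DT
 * systems a bare "earlycon" may also work if the chosen stdout-path
 * points at an "arm,pl011" compatible node, depending on configuration.
 */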
2157 #define AMBA_CONSOLE NULL
2160 static struct uart_driver amba_reg = {
2161 .owner = THIS_MODULE,
2162 .driver_name = "ttyAMA",
2163 .dev_name = "ttyAMA",
2164 .major = SERIAL_AMBA_MAJOR,
2165 .minor = SERIAL_AMBA_MINOR,
2167 .cons = AMBA_CONSOLE,
2170 static int pl011_probe_dt_alias(int index, struct device *dev)
2172 struct device_node *np;
2173 static bool seen_dev_with_alias = false;
2174 static bool seen_dev_without_alias = false;
2177 if (!IS_ENABLED(CONFIG_OF))
2184 ret = of_alias_get_id(np, "serial");
2185 if (IS_ERR_VALUE(ret)) {
2186 seen_dev_without_alias = true;
2189 seen_dev_with_alias = true;
2190 if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
2191 dev_warn(dev, "requested serial port %d not available.\n", ret);
2196 if (seen_dev_with_alias && seen_dev_without_alias)
2197 dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
2202 static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
2204 struct uart_amba_port *uap;
2205 struct vendor_data *vendor = id->data;
2209 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2210 if (amba_ports[i] == NULL)
2213 if (i == ARRAY_SIZE(amba_ports))
2216 uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
2221 i = pl011_probe_dt_alias(i, &dev->dev);
2223 base = devm_ioremap(&dev->dev, dev->res.start,
2224 resource_size(&dev->res));
2228 uap->clk = devm_clk_get(&dev->dev, NULL);
2229 if (IS_ERR(uap->clk))
2230 return PTR_ERR(uap->clk);
2232 uap->vendor = vendor;
2233 uap->lcrh_rx = vendor->lcrh_rx;
2234 uap->lcrh_tx = vendor->lcrh_tx;
2236 uap->fifosize = vendor->get_fifosize(dev);
2237 uap->port.dev = &dev->dev;
2238 uap->port.mapbase = dev->res.start;
2239 uap->port.membase = base;
2240 uap->port.iotype = UPIO_MEM;
2241 uap->port.irq = dev->irq[0];
2242 uap->port.fifosize = uap->fifosize;
2243 uap->port.ops = &amba_pl011_pops;
2244 uap->port.flags = UPF_BOOT_AUTOCONF;
2246 INIT_DELAYED_WORK(&uap->tx_softirq_work, pl011_tx_softirq);
2248 /* Ensure interrupts from this UART are masked and cleared */
2249 writew(0, uap->port.membase + UART011_IMSC);
2250 writew(0xffff, uap->port.membase + UART011_ICR);
2252 snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
2254 amba_ports[i] = uap;
2256 amba_set_drvdata(dev, uap);
2258 if (!amba_reg.state) {
2259 ret = uart_register_driver(&amba_reg);
2262 "Failed to register AMBA-PL011 driver\n");
2267 ret = uart_add_one_port(&amba_reg, &uap->port);
2269 amba_ports[i] = NULL;
2270 uart_unregister_driver(&amba_reg);
2276 static int pl011_remove(struct amba_device *dev)
2278 struct uart_amba_port *uap = amba_get_drvdata(dev);
2282 uart_remove_one_port(&amba_reg, &uap->port);
2284 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2285 if (amba_ports[i] == uap)
2286 amba_ports[i] = NULL;
2287 else if (amba_ports[i])
2290 pl011_dma_remove(uap);
2292 uart_unregister_driver(&amba_reg);
2296 #ifdef CONFIG_PM_SLEEP
2297 static int pl011_suspend(struct device *dev)
2299 struct uart_amba_port *uap = dev_get_drvdata(dev);
2304 return uart_suspend_port(&amba_reg, &uap->port);
2307 static int pl011_resume(struct device *dev)
2309 struct uart_amba_port *uap = dev_get_drvdata(dev);
2314 return uart_resume_port(&amba_reg, &uap->port);
2318 static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
2320 static struct amba_id pl011_ids[] = {
2324 .data = &vendor_arm,
2334 MODULE_DEVICE_TABLE(amba, pl011_ids);
2336 static struct amba_driver pl011_driver = {
2338 .name = "uart-pl011",
2339 .pm = &pl011_dev_pm_ops,
2341 .id_table = pl011_ids,
2342 .probe = pl011_probe,
2343 .remove = pl011_remove,
2346 static int __init pl011_init(void)
2348 printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
2350 return amba_driver_register(&pl011_driver);
2353 static void __exit pl011_exit(void)
2355 amba_driver_unregister(&pl011_driver);
2359 * While this can be a module, if builtin it's most likely the console.
2360 * So let's leave module_exit but move module_init to an earlier place.
2362 arch_initcall(pl011_init);
2363 module_exit(pl011_exit);
2365 MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
2366 MODULE_DESCRIPTION("ARM AMBA serial port driver");
2367 MODULE_LICENSE("GPL");