/*
 * Driver for CSR SiRFprimaII onboard UARTs.
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>

#include "sirfsoc_uart.h"

static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
static struct uart_driver sirfsoc_uart_drv;

static void sirfsoc_uart_tx_dma_complete_callback(void *param);
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port);
static void sirfsoc_uart_rx_dma_complete_callback(void *param);
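
/*
 * Pre-computed divisor register values for common baud rates, used by
 * sirfsoc_uart_set_termios() when the UART input clock runs at 150 MHz.
 */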
static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
        {4000000, 2359296},
        {3500000, 1310721},
        {3000000, 1572865},
        {2500000, 1245186},
        {2000000, 1572866},
        {1500000, 1245188},
        {1152000, 1638404},
        {1000000, 1572869},
        {921600, 1114120},
        {576000, 1245196},
        {500000, 1245198},
        {460800, 1572876},
        {230400, 1310750},
        {115200, 1310781},
        {57600, 1310843},
        {38400, 1114328},
        {19200, 1114545},
        {9600, 1114979},
};

static struct sirfsoc_uart_port *sirf_ports[SIRFSOC_UART_NR];

static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
{
        return container_of(port, struct sirfsoc_uart_port, port);
}

static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
{
        unsigned long reg;
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
        reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status);
        return (reg & ufifo_st->ff_empty(port)) ? TIOCSER_TEMT : 0;
}

static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
                goto cts_asserted;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) &
                                                SIRFUART_AFC_CTS_STATUS))
                        goto cts_asserted;
                else
                        goto cts_deasserted;
        } else {
                if (!gpio_get_value(sirfport->cts_gpio))
                        goto cts_asserted;
                else
                        goto cts_deasserted;
        }
cts_deasserted:
        return TIOCM_CAR | TIOCM_DSR;
cts_asserted:
        return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}

static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        unsigned int assert = mctrl & TIOCM_RTS;
        unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
        unsigned int current_val;

        if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
                return;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF;
                val |= current_val;
                wr_regl(port, ureg->sirfsoc_afc_ctrl, val);
        } else {
                if (!val)
                        gpio_set_value(sirfport->rts_gpio, 1);
                else
                        gpio_set_value(sirfport->rts_gpio, 0);
        }
}

static void sirfsoc_uart_stop_tx(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        if (sirfport->tx_dma_chan) {
                if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
                        dmaengine_pause(sirfport->tx_dma_chan);
                        sirfport->tx_dma_state = TX_DMA_PAUSE;
                } else {
                        if (!sirfport->is_atlas7)
                                wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~uint_en->sirfsoc_txfifo_empty_en);
                        else
                                wr_regl(port, SIRFUART_INT_EN_CLR,
                                uint_en->sirfsoc_txfifo_empty_en);
                }
        } else {
                if (!sirfport->is_atlas7)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~uint_en->sirfsoc_txfifo_empty_en);
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                uint_en->sirfsoc_txfifo_empty_en);
        }
}

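/*
 * Start a TX transfer: unaligned leading bytes and very short transfers go
 * out by PIO, the 4-byte-aligned remainder through a single DMA descriptor.
 */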
static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
{
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct circ_buf *xmit = &port->state->xmit;
        unsigned long tran_size;
        unsigned long tran_start;
        unsigned long pio_tx_size;

        tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
        tran_start = (unsigned long)(xmit->buf + xmit->tail);
        if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
                        !tran_size)
                return;
        if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
                dmaengine_resume(sirfport->tx_dma_chan);
                return;
        }
        if (sirfport->tx_dma_state == TX_DMA_RUNNING)
                return;
        if (!sirfport->is_atlas7)
                wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg)&
                                ~(uint_en->sirfsoc_txfifo_empty_en));
        else
                wr_regl(port, SIRFUART_INT_EN_CLR,
                                uint_en->sirfsoc_txfifo_empty_en);
        /*
         * DMA requires both the buffer address and the buffer length to be
         * 4-byte aligned, so fall back to PIO for the unaligned parts:
         * 1. if the address is not 4-byte aligned, send the first 1~3 bytes
         *    by PIO, then switch to DMA for the aligned remainder
         * 2. if the buffer length is not 4-byte aligned, send the aligned
         *    part by DMA first, then the remaining 1~3 bytes by PIO
         */
        if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
                wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
                        rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
                        SIRFUART_IO_MODE);
                if (BYTES_TO_ALIGN(tran_start)) {
                        pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
                                BYTES_TO_ALIGN(tran_start));
                        tran_size -= pio_tx_size;
                }
                if (tran_size < 4)
                        sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
                if (!sirfport->is_atlas7)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg)|
                                uint_en->sirfsoc_txfifo_empty_en);
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                uint_en->sirfsoc_txfifo_empty_en);
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
        } else {
                /* tx transfer mode switch into dma mode */
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
                wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
                        rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
                        ~SIRFUART_IO_MODE);
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
                tran_size &= ~(0x3);

                sirfport->tx_dma_addr = dma_map_single(port->dev,
                        xmit->buf + xmit->tail,
                        tran_size, DMA_TO_DEVICE);
                sirfport->tx_dma_desc = dmaengine_prep_slave_single(
                        sirfport->tx_dma_chan, sirfport->tx_dma_addr,
                        tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
                if (!sirfport->tx_dma_desc) {
                        dev_err(port->dev, "DMA prep slave single fail\n");
                        return;
                }
                sirfport->tx_dma_desc->callback =
                        sirfsoc_uart_tx_dma_complete_callback;
                sirfport->tx_dma_desc->callback_param = (void *)sirfport;
                sirfport->transfer_size = tran_size;

                dmaengine_submit(sirfport->tx_dma_desc);
                dma_async_issue_pending(sirfport->tx_dma_chan);
                sirfport->tx_dma_state = TX_DMA_RUNNING;
        }
}

static void sirfsoc_uart_start_tx(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        if (sirfport->tx_dma_chan)
                sirfsoc_uart_tx_with_dma(sirfport);
        else {
                sirfsoc_uart_pio_tx_chars(sirfport, port->fifosize);
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
                if (!sirfport->is_atlas7)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        rd_regl(port, ureg->sirfsoc_int_en_reg)|
                                        uint_en->sirfsoc_txfifo_empty_en);
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        uint_en->sirfsoc_txfifo_empty_en);
        }
}

static void sirfsoc_uart_stop_rx(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
        if (sirfport->rx_dma_chan) {
                if (!sirfport->is_atlas7)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~(SIRFUART_RX_DMA_INT_EN(port, uint_en) |
                                uint_en->sirfsoc_rx_done_en));
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        SIRFUART_RX_DMA_INT_EN(port, uint_en)|
                                        uint_en->sirfsoc_rx_done_en);
                dmaengine_terminate_all(sirfport->rx_dma_chan);
        } else {
                if (!sirfport->is_atlas7)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg)&
                                ~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        SIRFUART_RX_IO_INT_EN(port, uint_en));
        }
}

static void sirfsoc_uart_disable_ms(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        if (!sirfport->hw_flow_ctrl)
                return;
        sirfport->ms_enabled = false;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                wr_regl(port, ureg->sirfsoc_afc_ctrl,
                                rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF);
                if (!sirfport->is_atlas7)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        rd_regl(port, ureg->sirfsoc_int_en_reg)&
                                        ~uint_en->sirfsoc_cts_en);
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        uint_en->sirfsoc_cts_en);
        } else
                disable_irq(gpio_to_irq(sirfport->cts_gpio));
}

static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
        struct uart_port *port = &sirfport->port;
        spin_lock(&port->lock);
        if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
                uart_handle_cts_change(port,
                                !gpio_get_value(sirfport->cts_gpio));
        spin_unlock(&port->lock);
        return IRQ_HANDLED;
}

static void sirfsoc_uart_enable_ms(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        if (!sirfport->hw_flow_ctrl)
                return;
        sirfport->ms_enabled = true;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                wr_regl(port, ureg->sirfsoc_afc_ctrl,
                                rd_regl(port, ureg->sirfsoc_afc_ctrl) |
                                SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN);
                if (!sirfport->is_atlas7)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        rd_regl(port, ureg->sirfsoc_int_en_reg)
                                        | uint_en->sirfsoc_cts_en);
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        uint_en->sirfsoc_cts_en);
        } else
                enable_irq(gpio_to_irq(sirfport->cts_gpio));
}

static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl);
                if (break_state)
                        ulcon |= SIRFUART_SET_BREAK;
                else
                        ulcon &= ~SIRFUART_SET_BREAK;
                wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon);
        }
}

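/* Drain the RX FIFO in PIO mode, pushing at most max_rx_count chars to the tty. */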
static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
        unsigned int ch, rx_count = 0;
        struct tty_struct *tty;
        tty = tty_port_tty_get(&port->state->port);
        if (!tty)
                return -ENODEV;
        while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
                                        ufifo_st->ff_empty(port))) {
                ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) |
                        SIRFUART_DUMMY_READ;
                if (unlikely(uart_handle_sysrq_char(port, ch)))
                        continue;
                uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
                rx_count++;
                if (rx_count >= max_rx_count)
                        break;
        }

        sirfport->rx_io_count += rx_count;
        port->icount.rx += rx_count;

        return rx_count;
}

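/* Feed the TX FIFO from the circular buffer in PIO mode; returns chars written. */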
static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
{
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
        struct circ_buf *xmit = &port->state->xmit;
        unsigned int num_tx = 0;
        while (!uart_circ_empty(xmit) &&
                !(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
                                        ufifo_st->ff_full(port)) &&
                count--) {
                wr_regl(port, ureg->sirfsoc_tx_fifo_data,
                                xmit->buf[xmit->tail]);
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
                port->icount.tx++;
                num_tx++;
        }
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
        return num_tx;
}

static void sirfsoc_uart_tx_dma_complete_callback(void *param)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
        struct uart_port *port = &sirfport->port;
        struct circ_buf *xmit = &port->state->xmit;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        xmit->tail = (xmit->tail + sirfport->transfer_size) &
                                (UART_XMIT_SIZE - 1);
        port->icount.tx += sirfport->transfer_size;
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
        if (sirfport->tx_dma_addr)
                dma_unmap_single(port->dev, sirfport->tx_dma_addr,
                                sirfport->transfer_size, DMA_TO_DEVICE);
        sirfport->tx_dma_state = TX_DMA_IDLE;
        sirfsoc_uart_tx_with_dma(sirfport);
        spin_unlock_irqrestore(&port->lock, flags);
}

static void sirfsoc_uart_insert_rx_buf_to_tty(
                struct sirfsoc_uart_port *sirfport, int count)
{
        struct uart_port *port = &sirfport->port;
        struct tty_port *tport = &port->state->port;
        int inserted;

        inserted = tty_insert_flip_string(tport,
                sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
        port->icount.rx += inserted;
}

static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);

        sirfport->rx_dma_items[index].xmit.tail =
                sirfport->rx_dma_items[index].xmit.head = 0;
        sirfport->rx_dma_items[index].desc =
                dmaengine_prep_slave_single(sirfport->rx_dma_chan,
                sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
                DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
        if (!sirfport->rx_dma_items[index].desc) {
                dev_err(port->dev, "DMA slave single fail\n");
                return;
        }
        sirfport->rx_dma_items[index].desc->callback =
                sirfsoc_uart_rx_dma_complete_callback;
        sirfport->rx_dma_items[index].desc->callback_param = sirfport;
        sirfport->rx_dma_items[index].cookie =
                dmaengine_submit(sirfport->rx_dma_items[index].desc);
        dma_async_issue_pending(sirfport->rx_dma_chan);
}

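/*
 * Tasklet run after an RX timeout: push completed DMA buffers to the tty
 * layer, then drain the remaining bytes from the FIFO in PIO mode.
 */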
static void sirfsoc_rx_tmo_process_tl(unsigned long param)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
        unsigned int count;
        unsigned long flags;
        struct dma_tx_state tx_state;

        spin_lock_irqsave(&port->lock, flags);
        while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
                sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
                sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
                                        SIRFSOC_RX_DMA_BUF_SIZE);
                sirfport->rx_completed++;
                sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
        }
        count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
                sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
                SIRFSOC_RX_DMA_BUF_SIZE);
        if (count > 0)
                sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
        wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
                        rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
                        SIRFUART_IO_MODE);
        sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
        if (sirfport->rx_io_count == 4) {
                sirfport->rx_io_count = 0;
                wr_regl(port, ureg->sirfsoc_int_st_reg,
                                uint_st->sirfsoc_rx_done);
                if (!sirfport->is_atlas7)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~(uint_en->sirfsoc_rx_done_en));
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        uint_en->sirfsoc_rx_done_en);
                sirfsoc_uart_start_next_rx_dma(port);
        } else {
                wr_regl(port, ureg->sirfsoc_int_st_reg,
                                uint_st->sirfsoc_rx_done);
                if (!sirfport->is_atlas7)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) |
                                (uint_en->sirfsoc_rx_done_en));
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        uint_en->sirfsoc_rx_done_en);
        }
        spin_unlock_irqrestore(&port->lock, flags);
        tty_flip_buffer_push(&port->state->port);
}

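/*
 * RX timeout interrupt: stop the DMA channel, record how much data it
 * transferred, and defer the rest of the work to the timeout tasklet.
 */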
static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
{
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct dma_tx_state tx_state;
        dmaengine_tx_status(sirfport->rx_dma_chan,
                sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
        dmaengine_terminate_all(sirfport->rx_dma_chan);
        sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
                SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
        if (!sirfport->is_atlas7)
                wr_regl(port, ureg->sirfsoc_int_en_reg,
                        rd_regl(port, ureg->sirfsoc_int_en_reg) &
                        ~(uint_en->sirfsoc_rx_timeout_en));
        else
                wr_regl(port, SIRFUART_INT_EN_CLR,
                                uint_en->sirfsoc_rx_timeout_en);
        tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
}

static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
{
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;

        sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
        if (sirfport->rx_io_count == 4) {
                sirfport->rx_io_count = 0;
                if (!sirfport->is_atlas7)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~(uint_en->sirfsoc_rx_done_en));
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        uint_en->sirfsoc_rx_done_en);
                wr_regl(port, ureg->sirfsoc_int_st_reg,
                                uint_st->sirfsoc_rx_timeout);
                sirfsoc_uart_start_next_rx_dma(port);
        }
}

static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
{
        unsigned long intr_status;
        unsigned long cts_status;
        unsigned long flag = TTY_NORMAL;
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
        struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct uart_state *state = port->state;
        struct circ_buf *xmit = &port->state->xmit;
        spin_lock(&port->lock);
        intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
        wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
        intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
        if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(port, uint_st)))) {
                if (intr_status & uint_st->sirfsoc_rxd_brk) {
                        port->icount.brk++;
                        if (uart_handle_break(port))
                                goto recv_char;
                }
                if (intr_status & uint_st->sirfsoc_rx_oflow)
                        port->icount.overrun++;
                if (intr_status & uint_st->sirfsoc_frm_err) {
                        port->icount.frame++;
                        flag = TTY_FRAME;
                }
                if (intr_status & uint_st->sirfsoc_parity_err)
                        flag = TTY_PARITY;
                wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
                wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
                wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
                intr_status &= port->read_status_mask;
                uart_insert_char(port, intr_status,
                                        uint_en->sirfsoc_rx_oflow_en, 0, flag);
        }
recv_char:
        if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
                        (intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
                        !sirfport->tx_dma_state) {
                cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
                                        SIRFUART_AFC_CTS_STATUS;
                if (cts_status != 0)
                        cts_status = 0;
                else
                        cts_status = 1;
                uart_handle_cts_change(port, cts_status);
                wake_up_interruptible(&state->port.delta_msr_wait);
        }
        if (sirfport->rx_dma_chan) {
                if (intr_status & uint_st->sirfsoc_rx_timeout)
                        sirfsoc_uart_handle_rx_tmo(sirfport);
                if (intr_status & uint_st->sirfsoc_rx_done)
                        sirfsoc_uart_handle_rx_done(sirfport);
        } else {
                if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))
                        sirfsoc_uart_pio_rx_chars(port, port->fifosize);
        }
        spin_unlock(&port->lock);
        tty_flip_buffer_push(&state->port);
        spin_lock(&port->lock);
        if (intr_status & uint_st->sirfsoc_txfifo_empty) {
                if (sirfport->tx_dma_chan)
                        sirfsoc_uart_tx_with_dma(sirfport);
                else {
                        if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
                                spin_unlock(&port->lock);
                                return IRQ_HANDLED;
                        } else {
                                sirfsoc_uart_pio_tx_chars(sirfport,
                                                port->fifosize);
                                if ((uart_circ_empty(xmit)) &&
                                (rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
                                ufifo_st->ff_empty(port)))
                                        sirfsoc_uart_stop_tx(port);
                        }
                }
        }
        spin_unlock(&port->lock);

        return IRQ_HANDLED;
}

static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        unsigned long flags;
        struct dma_tx_state tx_state;
        spin_lock_irqsave(&port->lock, flags);
        while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
                        sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
                sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
                                        SIRFSOC_RX_DMA_BUF_SIZE);
                if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                uint_en->sirfsoc_rx_timeout_en)
                        sirfsoc_rx_submit_one_dma_desc(port,
                                        sirfport->rx_completed++);
                else
                        sirfport->rx_completed++;
                sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
        }
        spin_unlock_irqrestore(&port->lock, flags);
        tty_flip_buffer_push(&port->state->port);
}

static void sirfsoc_uart_rx_dma_complete_callback(void *param)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
        unsigned long flags;

        spin_lock_irqsave(&sirfport->port.lock, flags);
        sirfport->rx_issued++;
        sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
        tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
        spin_unlock_irqrestore(&sirfport->port.lock, flags);
}

/* submit rx dma task into dmaengine */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        int i;
        sirfport->rx_io_count = 0;
        wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
                rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
                ~SIRFUART_IO_MODE);
        for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
                sirfsoc_rx_submit_one_dma_desc(port, i);
        sirfport->rx_completed = sirfport->rx_issued = 0;
        if (!sirfport->is_atlas7)
                wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) |
                                SIRFUART_RX_DMA_INT_EN(port, uint_en));
        else
                wr_regl(port, ureg->sirfsoc_int_en_reg,
                        SIRFUART_RX_DMA_INT_EN(port, uint_en));
}

static void sirfsoc_uart_start_rx(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        sirfport->rx_io_count = 0;
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
        if (sirfport->rx_dma_chan)
                sirfsoc_uart_start_next_rx_dma(port);
        else {
                if (!sirfport->is_atlas7)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) |
                                SIRFUART_RX_IO_INT_EN(port, uint_en));
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                SIRFUART_RX_IO_INT_EN(port, uint_en));
        }
}

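/*
 * Find the sample divider / ioclk divider pair that gives the smallest
 * deviation from the requested rate on USP-based ports.
 */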
static unsigned int
sirfsoc_usp_calc_sample_div(unsigned long set_rate,
                unsigned long ioclk_rate, unsigned long *sample_reg)
{
        unsigned long min_delta = ~0UL;
        unsigned short sample_div;
        unsigned long ioclk_div = 0;
        unsigned long temp_delta;

        for (sample_div = SIRF_USP_MIN_SAMPLE_DIV;
                        sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
                temp_delta = ioclk_rate -
                (ioclk_rate + (set_rate * sample_div) / 2)
                / (set_rate * sample_div) * set_rate * sample_div;

                temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
                if (temp_delta < min_delta) {
                        ioclk_div = (2 * ioclk_rate /
                                (set_rate * sample_div) + 1) / 2 - 1;
                        if (ioclk_div > SIRF_IOCLK_DIV_MAX)
                                continue;
                        min_delta = temp_delta;
                        *sample_reg = sample_div;
                        if (!temp_delta)
                                break;
                }
        }
        return ioclk_div;
}

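/*
 * Equivalent divider search for the real UART: returns the combined divisor
 * register value and reports the achievable baud rate through *set_baud.
 */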
static unsigned int
sirfsoc_uart_calc_sample_div(unsigned long baud_rate,
                        unsigned long ioclk_rate, unsigned long *set_baud)
{
        unsigned long min_delta = ~0UL;
        unsigned short sample_div;
        unsigned int regv = 0;
        unsigned long ioclk_div;
        unsigned long baud_tmp;
        int temp_delta;

        for (sample_div = SIRF_MIN_SAMPLE_DIV;
                        sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
                ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
                if (ioclk_div > SIRF_IOCLK_DIV_MAX)
                        continue;
                baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
                temp_delta = baud_tmp - baud_rate;
                temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
                if (temp_delta < min_delta) {
                        regv = regv & (~SIRF_IOCLK_DIV_MASK);
                        regv = regv | ioclk_div;
                        regv = regv & (~SIRF_SAMPLE_DIV_MASK);
                        regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
                        min_delta = temp_delta;
                        *set_baud = baud_tmp;
                }
        }
        return regv;
}

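/*
 * Apply termios settings: frame format, parity and flow control, the baud
 * divisors (table lookup for the 150 MHz ioclk case, computed otherwise),
 * the receive timeout and the FIFO thresholds.
 */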
static void sirfsoc_uart_set_termios(struct uart_port *port,
                                       struct ktermios *termios,
                                       struct ktermios *old)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        unsigned long   config_reg = 0;
        unsigned long   baud_rate;
        unsigned long   set_baud;
        unsigned long   flags;
        unsigned long   ic;
        unsigned int    clk_div_reg = 0;
        unsigned long   txfifo_op_reg, ioclk_rate;
        unsigned long   rx_time_out;
        int             threshold_div;
        u32             data_bit_len, stop_bit_len, len_val;
        unsigned long   sample_div_reg = 0xf;
        ioclk_rate      = port->uartclk;

        switch (termios->c_cflag & CSIZE) {
        default:
        case CS8:
                data_bit_len = 8;
                config_reg |= SIRFUART_DATA_BIT_LEN_8;
                break;
        case CS7:
                data_bit_len = 7;
                config_reg |= SIRFUART_DATA_BIT_LEN_7;
                break;
        case CS6:
                data_bit_len = 6;
                config_reg |= SIRFUART_DATA_BIT_LEN_6;
                break;
        case CS5:
                data_bit_len = 5;
                config_reg |= SIRFUART_DATA_BIT_LEN_5;
                break;
        }
        if (termios->c_cflag & CSTOPB) {
                config_reg |= SIRFUART_STOP_BIT_LEN_2;
                stop_bit_len = 2;
        } else
                stop_bit_len = 1;

        spin_lock_irqsave(&port->lock, flags);
        port->read_status_mask = uint_en->sirfsoc_rx_oflow_en;
        port->ignore_status_mask = 0;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (termios->c_iflag & INPCK)
                        port->read_status_mask |= uint_en->sirfsoc_frm_err_en |
                                uint_en->sirfsoc_parity_err_en;
        } else {
                if (termios->c_iflag & INPCK)
                        port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
        }
        if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                        port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (termios->c_iflag & IGNPAR)
                        port->ignore_status_mask |=
                                uint_en->sirfsoc_frm_err_en |
                                uint_en->sirfsoc_parity_err_en;
                if (termios->c_cflag & PARENB) {
                        if (termios->c_cflag & CMSPAR) {
                                if (termios->c_cflag & PARODD)
                                        config_reg |= SIRFUART_STICK_BIT_MARK;
                                else
                                        config_reg |= SIRFUART_STICK_BIT_SPACE;
                        } else if (termios->c_cflag & PARODD) {
                                config_reg |= SIRFUART_STICK_BIT_ODD;
                        } else {
                                config_reg |= SIRFUART_STICK_BIT_EVEN;
                        }
                }
        } else {
                if (termios->c_iflag & IGNPAR)
                        port->ignore_status_mask |=
                                uint_en->sirfsoc_frm_err_en;
                if (termios->c_cflag & PARENB)
                        dev_warn(port->dev,
                                        "USP-UART not support parity err\n");
        }
        if (termios->c_iflag & IGNBRK) {
                port->ignore_status_mask |=
                        uint_en->sirfsoc_rxd_brk_en;
                if (termios->c_iflag & IGNPAR)
                        port->ignore_status_mask |=
                                uint_en->sirfsoc_rx_oflow_en;
        }
        if ((termios->c_cflag & CREAD) == 0)
                port->ignore_status_mask |= SIRFUART_DUMMY_READ;
        /* Hardware Flow Control Settings */
        if (UART_ENABLE_MS(port, termios->c_cflag)) {
                if (!sirfport->ms_enabled)
                        sirfsoc_uart_enable_ms(port);
        } else {
                if (sirfport->ms_enabled)
                        sirfsoc_uart_disable_ms(port);
        }
        baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
        if (ioclk_rate == 150000000) {
                for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
                        if (baud_rate == baudrate_to_regv[ic].baud_rate)
                                clk_div_reg = baudrate_to_regv[ic].reg_val;
        }
        set_baud = baud_rate;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (unlikely(clk_div_reg == 0))
                        clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate,
                                        ioclk_rate, &set_baud);
                wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg);
        } else {
                clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate,
                                ioclk_rate, &sample_div_reg);
                sample_div_reg--;
                set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) /
                                (sample_div_reg + 1));
                /* setting usp mode 2 */
                len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) |
                                (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET));
                len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK)
                                << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET);
                wr_regl(port, ureg->sirfsoc_mode2, len_val);
        }
        if (tty_termios_baud_rate(termios))
                tty_termios_encode_baud_rate(termios, set_baud, set_baud);
        /* set receive timeout && data bits len */
        rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
        rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
        txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_STOP);
        wr_regl(port, ureg->sirfsoc_tx_fifo_op,
                        (txfifo_op_reg & ~SIRFUART_FIFO_START));
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                config_reg |= SIRFUART_RECV_TIMEOUT(port, rx_time_out);
                wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
        } else {
                /* tx frame ctrl */
                len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET;
                len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
                                SIRFSOC_USP_TX_FRAME_LEN_OFFSET;
                len_val |= ((data_bit_len - 1) <<
                                SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET);
                len_val |= (((clk_div_reg & 0xc00) >> 10) <<
                                SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET);
                wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val);
                /* rx frame ctrl */
                len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET;
                len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
                                SIRFSOC_USP_RX_FRAME_LEN_OFFSET;
                len_val |= (data_bit_len - 1) <<
                                SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET;
                len_val |= (((clk_div_reg & 0xf000) >> 12) <<
                                SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET);
                wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val);
                /* async param */
                wr_regl(port, ureg->sirfsoc_async_param_reg,
                        (SIRFUART_RECV_TIMEOUT(port, rx_time_out)) |
                        (sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
                        SIRFSOC_USP_ASYNC_DIV2_OFFSET);
        }
        if (sirfport->tx_dma_chan)
                wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
        else
                wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
        if (sirfport->rx_dma_chan)
                wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
        else
                wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
        /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
        if (set_baud < 1000000)
                threshold_div = 1;
        else
                threshold_div = 2;
        wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl,
                                SIRFUART_FIFO_THD(port) / threshold_div);
        wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl,
                                SIRFUART_FIFO_THD(port) / threshold_div);
        txfifo_op_reg |= SIRFUART_FIFO_START;
        wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
        uart_update_timeout(port, termios->c_cflag, set_baud);
        sirfsoc_uart_start_rx(port);
        wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
        spin_unlock_irqrestore(&port->lock, flags);
}

static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
                              unsigned int oldstate)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        if (!state)
                clk_prepare_enable(sirfport->clk);
        else
                clk_disable_unprepare(sirfport->clk);
}

static int sirfsoc_uart_startup(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport      = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        unsigned int index                      = port->line;
        int ret;
        set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
        ret = request_irq(port->irq,
                                sirfsoc_uart_isr,
                                0,
                                SIRFUART_PORT_NAME,
                                sirfport);
        if (ret != 0) {
                dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
                                                        index, port->irq);
                goto irq_err;
        }
        /* initial hardware settings */
        wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
                rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
                SIRFUART_IO_MODE);
        wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
                rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
                SIRFUART_IO_MODE);
        wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
        wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
        wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
        if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
                wr_regl(port, ureg->sirfsoc_mode1,
                        SIRFSOC_USP_ENDIAN_CTRL_LSBF |
                        SIRFSOC_USP_EN);
        wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
        wr_regl(port, ureg->sirfsoc_tx_fifo_op, 0);
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
        wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
        wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
        if (sirfport->rx_dma_chan)
                wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
                        SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
                        SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
                        SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
        if (sirfport->tx_dma_chan) {
                sirfport->tx_dma_state = TX_DMA_IDLE;
                wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
                                SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
                                SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
                                SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
        }
        sirfport->ms_enabled = false;
        if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
                sirfport->hw_flow_ctrl) {
                set_irq_flags(gpio_to_irq(sirfport->cts_gpio),
                        IRQF_VALID | IRQF_NOAUTOEN);
                ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
                        sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING |
                        IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport);
                if (ret != 0) {
                        dev_err(port->dev, "UART-USP:request gpio irq fail\n");
                        goto init_rx_err;
                }
        }

        enable_irq(port->irq);

        return 0;
init_rx_err:
        free_irq(port->irq, sirfport);
irq_err:
        return ret;
}

static void sirfsoc_uart_shutdown(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        if (!sirfport->is_atlas7)
                wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
        else
                wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL);

        free_irq(port->irq, sirfport);
        if (sirfport->ms_enabled)
                sirfsoc_uart_disable_ms(port);
        if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
                        sirfport->hw_flow_ctrl) {
                gpio_set_value(sirfport->rts_gpio, 1);
                free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
        }
        if (sirfport->tx_dma_chan)
                sirfport->tx_dma_state = TX_DMA_IDLE;
}

static const char *sirfsoc_uart_type(struct uart_port *port)
{
        return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
}

static int sirfsoc_uart_request_port(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
        void *ret;
        ret = request_mem_region(port->mapbase,
                SIRFUART_MAP_SIZE, uart_param->port_name);
        return ret ? 0 : -EBUSY;
}

static void sirfsoc_uart_release_port(struct uart_port *port)
{
        release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
}

static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
{
        if (flags & UART_CONFIG_TYPE) {
                port->type = SIRFSOC_PORT_TYPE;
                sirfsoc_uart_request_port(port);
        }
}

static struct uart_ops sirfsoc_uart_ops = {
        .tx_empty       = sirfsoc_uart_tx_empty,
        .get_mctrl      = sirfsoc_uart_get_mctrl,
        .set_mctrl      = sirfsoc_uart_set_mctrl,
        .stop_tx        = sirfsoc_uart_stop_tx,
        .start_tx       = sirfsoc_uart_start_tx,
        .stop_rx        = sirfsoc_uart_stop_rx,
        .enable_ms      = sirfsoc_uart_enable_ms,
        .break_ctl      = sirfsoc_uart_break_ctl,
        .startup        = sirfsoc_uart_startup,
        .shutdown       = sirfsoc_uart_shutdown,
        .set_termios    = sirfsoc_uart_set_termios,
        .pm             = sirfsoc_uart_pm,
        .type           = sirfsoc_uart_type,
        .release_port   = sirfsoc_uart_release_port,
        .request_port   = sirfsoc_uart_request_port,
        .config_port    = sirfsoc_uart_config_port,
};
1134
1135 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
1136 static int __init
1137 sirfsoc_uart_console_setup(struct console *co, char *options)
1138 {
1139         unsigned int baud = 115200;
1140         unsigned int bits = 8;
1141         unsigned int parity = 'n';
1142         unsigned int flow = 'n';
1143         struct sirfsoc_uart_port *sirfport;
1144         struct sirfsoc_register *ureg;
1145         if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
1146                 return -EINVAL;
1147         sirfport = sirf_ports[co->index];
1148         if (!sirfport)
1149                 return -ENODEV;
1150         ureg = &sirfport->uart_reg->uart_reg;
1151         if (!sirfport->port.mapbase)
1152                 return -ENODEV;
1153
1154         /* enable usp in mode1 register */
1155         if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
1156                 wr_regl(&sirfport->port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
1157                                 SIRFSOC_USP_ENDIAN_CTRL_LSBF);
1158         if (options)
1159                 uart_parse_options(options, &baud, &parity, &bits, &flow);
1160         sirfport->port.cons = co;
1161
1162         /* default console tx/rx transfer using io mode */
1163         sirfport->rx_dma_chan = NULL;
1164         sirfport->tx_dma_chan = NULL;
1165         return uart_set_options(&sirfport->port, co, baud, parity, bits, flow);
1166 }
1167
1168 static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
1169 {
1170         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1171         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1172         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
1173         while (rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
1174                 ufifo_st->ff_full(port))
1175                 cpu_relax();
1176         wr_regl(port, ureg->sirfsoc_tx_fifo_data, ch);
1177 }
1178
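/* Emit a console message one character at a time through the TX FIFO. */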
1179 static void sirfsoc_uart_console_write(struct console *co, const char *s,
1180                                                         unsigned int count)
1181 {
1182         struct sirfsoc_uart_port *sirfport = sirf_ports[co->index];
1183
1184         uart_console_write(&sirfport->port, s, count,
1185                         sirfsoc_uart_console_putchar);
1186 }
1187
1188 static struct console sirfsoc_uart_console = {
1189         .name           = SIRFSOC_UART_NAME,
1190         .device         = uart_console_device,
1191         .flags          = CON_PRINTBUFFER,
1192         .index          = -1,
1193         .write          = sirfsoc_uart_console_write,
1194         .setup          = sirfsoc_uart_console_setup,
1195         .data           = &sirfsoc_uart_drv,
1196 };
1197
1198 static int __init sirfsoc_uart_console_init(void)
1199 {
1200         register_console(&sirfsoc_uart_console);
1201         return 0;
1202 }
1203 console_initcall(sirfsoc_uart_console_init);
1204 #endif
1205
1206 static struct uart_driver sirfsoc_uart_drv = {
1207         .owner          = THIS_MODULE,
1208         .driver_name    = SIRFUART_PORT_NAME,
1209         .nr             = SIRFSOC_UART_NR,
1210         .dev_name       = SIRFSOC_UART_NAME,
1211         .major          = SIRFSOC_UART_MAJOR,
1212         .minor          = SIRFSOC_UART_MINOR,
1213 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
1214         .cons                   = &sirfsoc_uart_console,
1215 #else
1216         .cons                   = NULL,
1217 #endif
1218 };
1219
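/*
 * Device-tree match table.  An illustrative (not board-accurate) node,
 * showing the properties this driver reads at probe time:
 *
 *	serial@... {
 *		compatible = "sirf,prima2-uart";
 *		fifosize = <...>;
 *		sirf,uart-has-rtscts;
 *		dma-names = "rx", "tx";
 *	};
 */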
1220 static const struct of_device_id sirfsoc_uart_ids[] = {
1221         { .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
1222         { .compatible = "sirf,atlas7-uart", .data = &sirfsoc_uart},
1223         { .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
1224         {}
1225 };
1226 MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
1227
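/*
 * Probe: map the register window, fetch the clock and IRQ, register the
 * port with the serial core, then try to set up RX/TX DMA channels with
 * coherent RX loop buffers.  DMA is optional; without channels the port
 * runs in PIO mode.
 */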
1228 static int sirfsoc_uart_probe(struct platform_device *pdev)
1229 {
1230         struct sirfsoc_uart_port *sirfport;
1231         struct uart_port *port;
1232         struct resource *res;
1233         int ret;
1234         int i, j;
1235         struct dma_slave_config slv_cfg = {
1236                 .src_maxburst = 2,
1237         };
1238         struct dma_slave_config tx_slv_cfg = {
1239                 .dst_maxburst = 2,
1240         };
1241         const struct of_device_id *match;
1242
1243         match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
1244         sirfport = devm_kzalloc(&pdev->dev, sizeof(*sirfport), GFP_KERNEL);
1245         if (!sirfport) {
1246                 ret = -ENOMEM;
1247                 goto err;
1248         }
1249         sirfport->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
1250         sirf_ports[sirfport->port.line] = sirfport;
1251         sirfport->port.iotype = UPIO_MEM;
1252         sirfport->port.flags = UPF_BOOT_AUTOCONF;
1253         port = &sirfport->port;
1254         port->dev = &pdev->dev;
1255         port->private_data = sirfport;
1256         sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;
1257
1258         sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
1259                 "sirf,uart-has-rtscts");
1260         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart"))
1261                 sirfport->uart_reg->uart_type = SIRF_REAL_UART;
1262         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
1263                 sirfport->uart_reg->uart_type = SIRF_USP_UART;
1264                 if (!sirfport->hw_flow_ctrl)
1265                         goto usp_no_flow_control;
1266                 if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
1267                         sirfport->cts_gpio = of_get_named_gpio(
1268                                         pdev->dev.of_node, "cts-gpios", 0);
1269                 else
1270                         sirfport->cts_gpio = -1;
1271                 if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL))
1272                         sirfport->rts_gpio = of_get_named_gpio(
1273                                         pdev->dev.of_node, "rts-gpios", 0);
1274                 else
1275                         sirfport->rts_gpio = -1;
1276
1277                 if ((!gpio_is_valid(sirfport->cts_gpio) ||
1278                          !gpio_is_valid(sirfport->rts_gpio))) {
1279                         ret = -EINVAL;
1280                         dev_err(&pdev->dev,
1281                                 "USP flow control requires cts and rts gpios\n");
1282                         goto err;
1283                 }
1284                 ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
1285                                 "usp-cts-gpio");
1286                 if (ret) {
1287                         dev_err(&pdev->dev, "Unable to request cts gpio\n");
1288                         goto err;
1289                 }
1290                 gpio_direction_input(sirfport->cts_gpio);
1291                 ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
1292                                 "usp-rts-gpio");
1293                 if (ret) {
1294                         dev_err(&pdev->dev, "Unable to request rts gpio\n");
1295                         goto err;
1296                 }
1297                 gpio_direction_output(sirfport->rts_gpio, 1);
1298         }
1299 usp_no_flow_control:
1300         if (of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-uart"))
1301                 sirfport->is_atlas7 = true;
1302
1303         if (of_property_read_u32(pdev->dev.of_node,
1304                         "fifosize",
1305                         &port->fifosize)) {
1306                 dev_err(&pdev->dev,
1307                         "Unable to find fifosize in uart node.\n");
1308                 ret = -EFAULT;
1309                 goto err;
1310         }
1311
1312         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1313         if (res == NULL) {
1314                 dev_err(&pdev->dev, "Cannot get memory resource.\n");
1315                 ret = -EFAULT;
1316                 goto err;
1317         }
1318         tasklet_init(&sirfport->rx_dma_complete_tasklet,
1319                         sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
1320         tasklet_init(&sirfport->rx_tmo_process_tasklet,
1321                         sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
1322         port->mapbase = res->start;
1323         port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1324         if (!port->membase) {
1325                 dev_err(&pdev->dev, "Cannot remap resource.\n");
1326                 ret = -ENOMEM;
1327                 goto err;
1328         }
1329         res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1330         if (res == NULL) {
1331                 dev_err(&pdev->dev, "Cannot get IRQ resource.\n");
1332                 ret = -EFAULT;
1333                 goto err;
1334         }
1335         port->irq = res->start;
1336
1337         sirfport->clk = devm_clk_get(&pdev->dev, NULL);
1338         if (IS_ERR(sirfport->clk)) {
1339                 ret = PTR_ERR(sirfport->clk);
1340                 goto err;
1341         }
1342         port->uartclk = clk_get_rate(sirfport->clk);
1343
1344         port->ops = &sirfsoc_uart_ops;
1345         spin_lock_init(&port->lock);
1346
1347         platform_set_drvdata(pdev, sirfport);
1348         ret = uart_add_one_port(&sirfsoc_uart_drv, port);
1349         if (ret != 0) {
1350                 dev_err(&pdev->dev, "Cannot add UART port (%d).\n", port->line);
1351                 goto err;
1352         }
1353
1354         sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
1355         for (i = 0; sirfport->rx_dma_chan && i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
1356                 sirfport->rx_dma_items[i].xmit.buf =
1357                         dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
1358                         &sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
1359                 if (!sirfport->rx_dma_items[i].xmit.buf) {
1360                         dev_err(port->dev, "RX DMA buffer alloc failed\n");
1361                         ret = -ENOMEM;
1362                         goto alloc_coherent_err;
1363                 }
1364                 sirfport->rx_dma_items[i].xmit.head =
1365                         sirfport->rx_dma_items[i].xmit.tail = 0;
1366         }
1367         if (sirfport->rx_dma_chan)
1368                 dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
1369         sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
1370         if (sirfport->tx_dma_chan)
1371                 dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
1372
1373         return 0;
1374 alloc_coherent_err:
1375         for (j = 0; j < i; j++)
1376                 dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
1377                                 sirfport->rx_dma_items[j].xmit.buf,
1378                                 sirfport->rx_dma_items[j].dma_addr);
1379         dma_release_channel(sirfport->rx_dma_chan);
1380 err:
1381         return ret;
1382 }
1383
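/* Unregister the port and release any DMA channels and RX DMA buffers. */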
1384 static int sirfsoc_uart_remove(struct platform_device *pdev)
1385 {
1386         struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
1387         struct uart_port *port = &sirfport->port;
1388         uart_remove_one_port(&sirfsoc_uart_drv, port);
1389         if (sirfport->rx_dma_chan) {
1390                 int i;
1391                 dmaengine_terminate_all(sirfport->rx_dma_chan);
1392                 dma_release_channel(sirfport->rx_dma_chan);
1393                 for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
1394                         dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
1395                                         sirfport->rx_dma_items[i].xmit.buf,
1396                                         sirfport->rx_dma_items[i].dma_addr);
1397         }
1398         if (sirfport->tx_dma_chan) {
1399                 dmaengine_terminate_all(sirfport->tx_dma_chan);
1400                 dma_release_channel(sirfport->tx_dma_chan);
1401         }
1402         return 0;
1403 }
1404
1405 #ifdef CONFIG_PM_SLEEP
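/* System sleep hooks simply delegate to the serial core. */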
1406 static int
1407 sirfsoc_uart_suspend(struct device *pdev)
1408 {
1409         struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
1410         struct uart_port *port = &sirfport->port;
1411         uart_suspend_port(&sirfsoc_uart_drv, port);
1412         return 0;
1413 }
1414
1415 static int sirfsoc_uart_resume(struct device *pdev)
1416 {
1417         struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
1418         struct uart_port *port = &sirfport->port;
1419         uart_resume_port(&sirfsoc_uart_drv, port);
1420         return 0;
1421 }
1422 #endif
1423
1424 static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
1425         SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
1426 };
1427
1428 static struct platform_driver sirfsoc_uart_driver = {
1429         .probe          = sirfsoc_uart_probe,
1430         .remove         = sirfsoc_uart_remove,
1431         .driver         = {
1432                 .name   = SIRFUART_PORT_NAME,
1433                 .of_match_table = sirfsoc_uart_ids,
1434                 .pm     = &sirfsoc_uart_pm_ops,
1435         },
1436 };
1437
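/* Register the uart_driver first, then the platform driver that adds ports. */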
1438 static int __init sirfsoc_uart_init(void)
1439 {
1440         int ret = 0;
1441
1442         ret = uart_register_driver(&sirfsoc_uart_drv);
1443         if (ret)
1444                 goto out;
1445
1446         ret = platform_driver_register(&sirfsoc_uart_driver);
1447         if (ret)
1448                 uart_unregister_driver(&sirfsoc_uart_drv);
1449 out:
1450         return ret;
1451 }
1452 module_init(sirfsoc_uart_init);
1453
1454 static void __exit sirfsoc_uart_exit(void)
1455 {
1456         platform_driver_unregister(&sirfsoc_uart_driver);
1457         uart_unregister_driver(&sirfsoc_uart_drv);
1458 }
1459 module_exit(sirfsoc_uart_exit);
1460
1461 MODULE_LICENSE("GPL v2");
1462 MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang <Rong.Wang@csr.com>");
1463 MODULE_DESCRIPTION("CSR SiRFprimaII UART driver");