drivers/tty/serial/sirfsoc_uart.c
1 /*
2  * Driver for CSR SiRFprimaII onboard UARTs.
3  *
4  * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
5  *
6  * Licensed under GPLv2 or later.
7  */
8
9 #include <linux/module.h>
10 #include <linux/ioport.h>
11 #include <linux/platform_device.h>
12 #include <linux/init.h>
13 #include <linux/sysrq.h>
14 #include <linux/console.h>
15 #include <linux/tty.h>
16 #include <linux/tty_flip.h>
17 #include <linux/serial_core.h>
18 #include <linux/serial.h>
19 #include <linux/clk.h>
20 #include <linux/of.h>
21 #include <linux/slab.h>
22 #include <linux/io.h>
23 #include <linux/of_gpio.h>
24 #include <linux/dmaengine.h>
25 #include <linux/dma-direction.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/sirfsoc_dma.h>
28 #include <asm/irq.h>
29 #include <asm/mach/irq.h>
30
31 #include "sirfsoc_uart.h"
32
33 static unsigned int
34 sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
35 static unsigned int
36 sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
37 static struct uart_driver sirfsoc_uart_drv;
38
39 static void sirfsoc_uart_tx_dma_complete_callback(void *param);
40 static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port);
41 static void sirfsoc_uart_rx_dma_complete_callback(void *param);
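/*
 * Pre-computed divisor register values for common baud rates, used by
 * sirfsoc_uart_set_termios() when the UART I/O clock runs at 150MHz.
 */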
42 static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
43         {4000000, 2359296},
44         {3500000, 1310721},
45         {3000000, 1572865},
46         {2500000, 1245186},
47         {2000000, 1572866},
48         {1500000, 1245188},
49         {1152000, 1638404},
50         {1000000, 1572869},
51         {921600, 1114120},
52         {576000, 1245196},
53         {500000, 1245198},
54         {460800, 1572876},
55         {230400, 1310750},
56         {115200, 1310781},
57         {57600, 1310843},
58         {38400, 1114328},
59         {19200, 1114545},
60         {9600, 1114979},
61 };
62
63 static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
64         [0] = {
65                 .port = {
66                         .iotype         = UPIO_MEM,
67                         .flags          = UPF_BOOT_AUTOCONF,
68                         .line           = 0,
69                 },
70         },
71         [1] = {
72                 .port = {
73                         .iotype         = UPIO_MEM,
74                         .flags          = UPF_BOOT_AUTOCONF,
75                         .line           = 1,
76                 },
77         },
78         [2] = {
79                 .port = {
80                         .iotype         = UPIO_MEM,
81                         .flags          = UPF_BOOT_AUTOCONF,
82                         .line           = 2,
83                 },
84         },
85         [3] = {
86                 .port = {
87                         .iotype         = UPIO_MEM,
88                         .flags          = UPF_BOOT_AUTOCONF,
89                         .line           = 3,
90                 },
91         },
92         [4] = {
93                 .port = {
94                         .iotype         = UPIO_MEM,
95                         .flags          = UPF_BOOT_AUTOCONF,
96                         .line           = 4,
97                 },
98         },
99         [5] = {
100                 .port = {
101                         .iotype         = UPIO_MEM,
102                         .flags          = UPF_BOOT_AUTOCONF,
103                         .line           = 5,
104                 },
105         },
106 };
107
108 static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
109 {
110         return container_of(port, struct sirfsoc_uart_port, port);
111 }
112
113 static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
114 {
115         unsigned long reg;
116         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
117         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
118         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
119         reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status);
120
121         return (reg & ufifo_st->ff_empty(port->line)) ? TIOCSER_TEMT : 0;
122 }
123
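/*
 * Modem status: real UART ports report CTS from the automatic flow
 * control (AFC) status register, while USP-based ports sample a
 * dedicated CTS GPIO.
 */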
124 static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
125 {
126         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
127         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
128         if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
129                 goto cts_asserted;
130         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
131                 if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) &
132                                                 SIRFUART_AFC_CTS_STATUS))
133                         goto cts_asserted;
134                 else
135                         goto cts_deasserted;
136         } else {
137                 if (!gpio_get_value(sirfport->cts_gpio))
138                         goto cts_asserted;
139                 else
140                         goto cts_deasserted;
141         }
142 cts_deasserted:
143         return TIOCM_CAR | TIOCM_DSR;
144 cts_asserted:
145         return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
146 }
147
148 static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
149 {
150         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
151         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
152         unsigned int assert = mctrl & TIOCM_RTS;
153         unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
154         unsigned int current_val;
155
156         if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
157                 return;
158         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
159                 current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF;
160                 val |= current_val;
161                 wr_regl(port, ureg->sirfsoc_afc_ctrl, val);
162         } else {
163                 if (!val)
164                         gpio_set_value(sirfport->rts_gpio, 1);
165                 else
166                         gpio_set_value(sirfport->rts_gpio, 0);
167         }
168 }
169
170 static void sirfsoc_uart_stop_tx(struct uart_port *port)
171 {
172         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
173         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
174         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
175
176         if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
177                 if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
178                         dmaengine_pause(sirfport->tx_dma_chan);
179                         sirfport->tx_dma_state = TX_DMA_PAUSE;
180                 } else {
181                         if (!sirfport->is_marco)
182                                 wr_regl(port, ureg->sirfsoc_int_en_reg,
183                                 rd_regl(port, ureg->sirfsoc_int_en_reg) &
184                                 ~uint_en->sirfsoc_txfifo_empty_en);
185                         else
186                                 wr_regl(port, SIRFUART_INT_EN_CLR,
187                                 uint_en->sirfsoc_txfifo_empty_en);
188                 }
189         } else {
190                 if (!sirfport->is_marco)
191                         wr_regl(port, ureg->sirfsoc_int_en_reg,
192                                 rd_regl(port, ureg->sirfsoc_int_en_reg) &
193                                 ~uint_en->sirfsoc_txfifo_empty_en);
194                 else
195                         wr_regl(port, SIRFUART_INT_EN_CLR,
196                                 uint_en->sirfsoc_txfifo_empty_en);
197         }
198 }
199
200 static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
201 {
202         struct uart_port *port = &sirfport->port;
203         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
204         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
205         struct circ_buf *xmit = &port->state->xmit;
206         unsigned long tran_size;
207         unsigned long tran_start;
208         unsigned long pio_tx_size;
209
210         tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
211         tran_start = (unsigned long)(xmit->buf + xmit->tail);
212         if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
213                         !tran_size)
214                 return;
215         if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
216                 dmaengine_resume(sirfport->tx_dma_chan);
217                 return;
218         }
219         if (sirfport->tx_dma_state == TX_DMA_RUNNING)
220                 return;
221         if (!sirfport->is_marco)
222                 wr_regl(port, ureg->sirfsoc_int_en_reg,
223                                 rd_regl(port, ureg->sirfsoc_int_en_reg)&
224                                 ~(uint_en->sirfsoc_txfifo_empty_en));
225         else
226                 wr_regl(port, SIRFUART_INT_EN_CLR,
227                                 uint_en->sirfsoc_txfifo_empty_en);
228         /*
229          * DMA requires that both the buffer address and the length be
230          * 4-byte aligned, so use PIO in two cases:
231          * 1. if the address is not 4-byte aligned, send the first 1~3
232          *    bytes by PIO, then switch to DMA for the remaining aligned part
233          * 2. if the length is not 4-byte aligned, send the aligned part by
234          *    DMA first, then the trailing 1~3 bytes by PIO
235          */
236         if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
237                 wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
238                 wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
239                         rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
240                         SIRFUART_IO_MODE);
241                 if (BYTES_TO_ALIGN(tran_start)) {
242                         pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
243                                 BYTES_TO_ALIGN(tran_start));
244                         tran_size -= pio_tx_size;
245                 }
246                 if (tran_size < 4)
247                         sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
248                 if (!sirfport->is_marco)
249                         wr_regl(port, ureg->sirfsoc_int_en_reg,
250                                 rd_regl(port, ureg->sirfsoc_int_en_reg)|
251                                 uint_en->sirfsoc_txfifo_empty_en);
252                 else
253                         wr_regl(port, ureg->sirfsoc_int_en_reg,
254                                 uint_en->sirfsoc_txfifo_empty_en);
255                 wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
256         } else {
257                 /* tx transfer mode switch into dma mode */
258                 wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
259                 wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
260                         rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
261                         ~SIRFUART_IO_MODE);
262                 wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
263                 tran_size &= ~(0x3);
264
265                 sirfport->tx_dma_addr = dma_map_single(port->dev,
266                         xmit->buf + xmit->tail,
267                         tran_size, DMA_TO_DEVICE);
268                 sirfport->tx_dma_desc = dmaengine_prep_slave_single(
269                         sirfport->tx_dma_chan, sirfport->tx_dma_addr,
270                         tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
271                 if (!sirfport->tx_dma_desc) {
272                         dev_err(port->dev, "DMA prep slave single fail\n");
273                         return;
274                 }
275                 sirfport->tx_dma_desc->callback =
276                         sirfsoc_uart_tx_dma_complete_callback;
277                 sirfport->tx_dma_desc->callback_param = (void *)sirfport;
278                 sirfport->transfer_size = tran_size;
279
280                 dmaengine_submit(sirfport->tx_dma_desc);
281                 dma_async_issue_pending(sirfport->tx_dma_chan);
282                 sirfport->tx_dma_state = TX_DMA_RUNNING;
283         }
284 }
285
286 static void sirfsoc_uart_start_tx(struct uart_port *port)
287 {
288         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
289         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
290         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
291         if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
292                 sirfsoc_uart_tx_with_dma(sirfport);
293         else {
294                 sirfsoc_uart_pio_tx_chars(sirfport, 1);
295                 wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
296                 if (!sirfport->is_marco)
297                         wr_regl(port, ureg->sirfsoc_int_en_reg,
298                                         rd_regl(port, ureg->sirfsoc_int_en_reg)|
299                                         uint_en->sirfsoc_txfifo_empty_en);
300                 else
301                         wr_regl(port, ureg->sirfsoc_int_en_reg,
302                                         uint_en->sirfsoc_txfifo_empty_en);
303         }
304 }
305
306 static void sirfsoc_uart_stop_rx(struct uart_port *port)
307 {
308         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
309         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
310         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
311
312         wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
313         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
314                 if (!sirfport->is_marco)
315                         wr_regl(port, ureg->sirfsoc_int_en_reg,
316                                 rd_regl(port, ureg->sirfsoc_int_en_reg) &
317                                 ~(SIRFUART_RX_DMA_INT_EN(port, uint_en) |
318                                 uint_en->sirfsoc_rx_done_en));
319                 else
320                         wr_regl(port, SIRFUART_INT_EN_CLR,
321                                         SIRFUART_RX_DMA_INT_EN(port, uint_en)|
322                                         uint_en->sirfsoc_rx_done_en);
323                 dmaengine_terminate_all(sirfport->rx_dma_chan);
324         } else {
325                 if (!sirfport->is_marco)
326                         wr_regl(port, ureg->sirfsoc_int_en_reg,
327                                 rd_regl(port, ureg->sirfsoc_int_en_reg)&
328                                 ~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
329                 else
330                         wr_regl(port, SIRFUART_INT_EN_CLR,
331                                         SIRFUART_RX_IO_INT_EN(port, uint_en));
332         }
333 }
334
335 static void sirfsoc_uart_disable_ms(struct uart_port *port)
336 {
337         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
338         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
339         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
340
341         if (!sirfport->hw_flow_ctrl)
342                 return;
343         sirfport->ms_enabled = false;
344         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
345                 wr_regl(port, ureg->sirfsoc_afc_ctrl,
346                                 rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF);
347                 if (!sirfport->is_marco)
348                         wr_regl(port, ureg->sirfsoc_int_en_reg,
349                                         rd_regl(port, ureg->sirfsoc_int_en_reg)&
350                                         ~uint_en->sirfsoc_cts_en);
351                 else
352                         wr_regl(port, SIRFUART_INT_EN_CLR,
353                                         uint_en->sirfsoc_cts_en);
354         } else
355                 disable_irq(gpio_to_irq(sirfport->cts_gpio));
356 }
357
358 static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
359 {
360         struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
361         struct uart_port *port = &sirfport->port;
362         if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
363                 uart_handle_cts_change(port,
364                                 !gpio_get_value(sirfport->cts_gpio));
365         return IRQ_HANDLED;
366 }
367
368 static void sirfsoc_uart_enable_ms(struct uart_port *port)
369 {
370         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
371         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
372         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
373
374         if (!sirfport->hw_flow_ctrl)
375                 return;
376         sirfport->ms_enabled = true;
377         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
378                 wr_regl(port, ureg->sirfsoc_afc_ctrl,
379                                 rd_regl(port, ureg->sirfsoc_afc_ctrl) |
380                                 SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN);
381                 if (!sirfport->is_marco)
382                         wr_regl(port, ureg->sirfsoc_int_en_reg,
383                                         rd_regl(port, ureg->sirfsoc_int_en_reg)
384                                         | uint_en->sirfsoc_cts_en);
385                 else
386                         wr_regl(port, ureg->sirfsoc_int_en_reg,
387                                         uint_en->sirfsoc_cts_en);
388         } else
389                 enable_irq(gpio_to_irq(sirfport->cts_gpio));
390 }
391
392 static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
393 {
394         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
395         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
396         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
397                 unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl);
398                 if (break_state)
399                         ulcon |= SIRFUART_SET_BREAK;
400                 else
401                         ulcon &= ~SIRFUART_SET_BREAK;
402                 wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon);
403         }
404 }
405
406 static unsigned int
407 sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
408 {
409         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
410         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
411         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
412         unsigned int ch, rx_count = 0;
413         struct tty_struct *tty;
414         tty = tty_port_tty_get(&port->state->port);
415         if (!tty)
416                 return 0;
417         while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
418                                         ufifo_st->ff_empty(port->line))) {
419                 ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) |
420                         SIRFUART_DUMMY_READ;
421                 if (unlikely(uart_handle_sysrq_char(port, ch)))
422                         continue;
423                 uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
424                 rx_count++;
425                 if (rx_count >= max_rx_count)
426                         break;
427         }
428
429         sirfport->rx_io_count += rx_count;
430         port->icount.rx += rx_count;
431
432         spin_unlock(&port->lock);
433         tty_flip_buffer_push(&port->state->port);
434         spin_lock(&port->lock);
435         tty_kref_put(tty);
436         return rx_count;
437 }
438
439 static unsigned int
440 sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
441 {
442         struct uart_port *port = &sirfport->port;
443         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
444         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
445         struct circ_buf *xmit = &port->state->xmit;
446         unsigned int num_tx = 0;
447         while (!uart_circ_empty(xmit) &&
448                 !(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
449                                         ufifo_st->ff_full(port->line)) &&
450                 count--) {
451                 wr_regl(port, ureg->sirfsoc_tx_fifo_data,
452                                 xmit->buf[xmit->tail]);
453                 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
454                 port->icount.tx++;
455                 num_tx++;
456         }
457         if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
458                 uart_write_wakeup(port);
459         return num_tx;
460 }
461
462 static void sirfsoc_uart_tx_dma_complete_callback(void *param)
463 {
464         struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
465         struct uart_port *port = &sirfport->port;
466         struct circ_buf *xmit = &port->state->xmit;
467         unsigned long flags;
468
469         xmit->tail = (xmit->tail + sirfport->transfer_size) &
470                                 (UART_XMIT_SIZE - 1);
471         port->icount.tx += sirfport->transfer_size;
472         if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
473                 uart_write_wakeup(port);
474         if (sirfport->tx_dma_addr)
475                 dma_unmap_single(port->dev, sirfport->tx_dma_addr,
476                                 sirfport->transfer_size, DMA_TO_DEVICE);
477         spin_lock_irqsave(&sirfport->tx_lock, flags);
478         sirfport->tx_dma_state = TX_DMA_IDLE;
479         sirfsoc_uart_tx_with_dma(sirfport);
480         spin_unlock_irqrestore(&sirfport->tx_lock, flags);
481 }
482
483 static void sirfsoc_uart_insert_rx_buf_to_tty(
484                 struct sirfsoc_uart_port *sirfport, int count)
485 {
486         struct uart_port *port = &sirfport->port;
487         struct tty_port *tport = &port->state->port;
488         int inserted;
489
490         inserted = tty_insert_flip_string(tport,
491                 sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
492         port->icount.rx += inserted;
493         tty_flip_buffer_push(tport);
494 }
495
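/*
 * Reset one RX loop buffer and queue a DMA descriptor for it; the
 * completion callback advances rx_issued and schedules the RX tasklet.
 */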
496 static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
497 {
498         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
499
500         sirfport->rx_dma_items[index].xmit.tail =
501                 sirfport->rx_dma_items[index].xmit.head = 0;
502         sirfport->rx_dma_items[index].desc =
503                 dmaengine_prep_slave_single(sirfport->rx_dma_chan,
504                 sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
505                 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
506         if (!sirfport->rx_dma_items[index].desc) {
507                 dev_err(port->dev, "DMA slave single fail\n");
508                 return;
509         }
510         sirfport->rx_dma_items[index].desc->callback =
511                 sirfsoc_uart_rx_dma_complete_callback;
512         sirfport->rx_dma_items[index].desc->callback_param = sirfport;
513         sirfport->rx_dma_items[index].cookie =
514                 dmaengine_submit(sirfport->rx_dma_items[index].desc);
515         dma_async_issue_pending(sirfport->rx_dma_chan);
516 }
517
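/*
 * Tasklet run after an RX timeout interrupt: push the data already
 * received by DMA to the tty layer, switch the RX FIFO to I/O mode and
 * drain the last few bytes by PIO, then either restart DMA reception or
 * keep waiting for the rx-done interrupt.
 */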
518 static void sirfsoc_rx_tmo_process_tl(unsigned long param)
519 {
520         struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
521         struct uart_port *port = &sirfport->port;
522         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
523         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
524         struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
525         unsigned int count;
526         unsigned long flags;
527
528         spin_lock_irqsave(&sirfport->rx_lock, flags);
529         while (sirfport->rx_completed != sirfport->rx_issued) {
530                 sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
531                                         SIRFSOC_RX_DMA_BUF_SIZE);
532                 sirfport->rx_completed++;
533                 sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
534         }
535         count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
536                 sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
537                 SIRFSOC_RX_DMA_BUF_SIZE);
538         if (count > 0)
539                 sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
540         wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
541                         rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
542                         SIRFUART_IO_MODE);
543         sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
544         spin_unlock_irqrestore(&sirfport->rx_lock, flags);
545         if (sirfport->rx_io_count == 4) {
546                 spin_lock_irqsave(&sirfport->rx_lock, flags);
547                 sirfport->rx_io_count = 0;
548                 wr_regl(port, ureg->sirfsoc_int_st_reg,
549                                 uint_st->sirfsoc_rx_done);
550                 if (!sirfport->is_marco)
551                         wr_regl(port, ureg->sirfsoc_int_en_reg,
552                                 rd_regl(port, ureg->sirfsoc_int_en_reg) &
553                                 ~(uint_en->sirfsoc_rx_done_en));
554                 else
555                         wr_regl(port, SIRFUART_INT_EN_CLR,
556                                         uint_en->sirfsoc_rx_done_en);
557                 spin_unlock_irqrestore(&sirfport->rx_lock, flags);
558
559                 sirfsoc_uart_start_next_rx_dma(port);
560         } else {
561                 spin_lock_irqsave(&sirfport->rx_lock, flags);
562                 wr_regl(port, ureg->sirfsoc_int_st_reg,
563                                 uint_st->sirfsoc_rx_done);
564                 if (!sirfport->is_marco)
565                         wr_regl(port, ureg->sirfsoc_int_en_reg,
566                                 rd_regl(port, ureg->sirfsoc_int_en_reg) |
567                                 (uint_en->sirfsoc_rx_done_en));
568                 else
569                         wr_regl(port, ureg->sirfsoc_int_en_reg,
570                                         uint_en->sirfsoc_rx_done_en);
571                 spin_unlock_irqrestore(&sirfport->rx_lock, flags);
572         }
573 }
574
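/*
 * RX timeout seen in the ISR: use the DMA residue to work out how much
 * of the in-flight buffer was filled, stop the RX DMA channel and let
 * sirfsoc_rx_tmo_process_tl() finish the work in tasklet context.
 */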
575 static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
576 {
577         struct uart_port *port = &sirfport->port;
578         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
579         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
580         struct dma_tx_state tx_state;
581         spin_lock(&sirfport->rx_lock);
582
583         dmaengine_tx_status(sirfport->rx_dma_chan,
584                 sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
585         dmaengine_terminate_all(sirfport->rx_dma_chan);
586         sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
587                 SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
588         if (!sirfport->is_marco)
589                 wr_regl(port, ureg->sirfsoc_int_en_reg,
590                         rd_regl(port, ureg->sirfsoc_int_en_reg) &
591                         ~(uint_en->sirfsoc_rx_timeout_en));
592         else
593                 wr_regl(port, SIRFUART_INT_EN_CLR,
594                                 uint_en->sirfsoc_rx_timeout_en);
595         spin_unlock(&sirfport->rx_lock);
596         tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
597 }
598
599 static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
600 {
601         struct uart_port *port = &sirfport->port;
602         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
603         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
604         struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
605
606         sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
607         if (sirfport->rx_io_count == 4) {
608                 sirfport->rx_io_count = 0;
609                 if (!sirfport->is_marco)
610                         wr_regl(port, ureg->sirfsoc_int_en_reg,
611                                 rd_regl(port, ureg->sirfsoc_int_en_reg) &
612                                 ~(uint_en->sirfsoc_rx_done_en));
613                 else
614                         wr_regl(port, SIRFUART_INT_EN_CLR,
615                                         uint_en->sirfsoc_rx_done_en);
616                 wr_regl(port, ureg->sirfsoc_int_st_reg,
617                                 uint_st->sirfsoc_rx_timeout);
618                 sirfsoc_uart_start_next_rx_dma(port);
619         }
620 }
621
622 static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
623 {
624         unsigned long intr_status;
625         unsigned long cts_status;
626         unsigned long flag = TTY_NORMAL;
627         struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
628         struct uart_port *port = &sirfport->port;
629         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
630         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
631         struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
632         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
633         struct uart_state *state = port->state;
634         struct circ_buf *xmit = &port->state->xmit;
635         spin_lock(&port->lock);
636         intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
637         wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
638         intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
639         if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(port, uint_st)))) {
640                 if (intr_status & uint_st->sirfsoc_rxd_brk) {
641                         port->icount.brk++;
642                         if (uart_handle_break(port))
643                                 goto recv_char;
644                 }
645                 if (intr_status & uint_st->sirfsoc_rx_oflow)
646                         port->icount.overrun++;
647                 if (intr_status & uint_st->sirfsoc_frm_err) {
648                         port->icount.frame++;
649                         flag = TTY_FRAME;
650                 }
651                 if (intr_status & uint_st->sirfsoc_parity_err)
652                         flag = TTY_PARITY;
653                 wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
654                 wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
655                 wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
656                 intr_status &= port->read_status_mask;
657                 uart_insert_char(port, intr_status,
658                                         uint_en->sirfsoc_rx_oflow_en, 0, flag);
659                 tty_flip_buffer_push(&state->port);
660         }
661 recv_char:
662         if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
663                         (intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
664                         !sirfport->tx_dma_state) {
665                 cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
666                                         SIRFUART_AFC_CTS_STATUS;
667                 if (cts_status != 0)
668                         cts_status = 0;
669                 else
670                         cts_status = 1;
671                 uart_handle_cts_change(port, cts_status);
672                 wake_up_interruptible(&state->port.delta_msr_wait);
673         }
674         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
675                 if (intr_status & uint_st->sirfsoc_rx_timeout)
676                         sirfsoc_uart_handle_rx_tmo(sirfport);
677                 if (intr_status & uint_st->sirfsoc_rx_done)
678                         sirfsoc_uart_handle_rx_done(sirfport);
679         } else {
680                 if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))
681                         sirfsoc_uart_pio_rx_chars(port,
682                                         SIRFSOC_UART_IO_RX_MAX_CNT);
683         }
684         if (intr_status & uint_st->sirfsoc_txfifo_empty) {
685                 if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
686                         sirfsoc_uart_tx_with_dma(sirfport);
687                 else {
688                         if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
689                                 spin_unlock(&port->lock);
690                                 return IRQ_HANDLED;
691                         } else {
692                                 sirfsoc_uart_pio_tx_chars(sirfport,
693                                         SIRFSOC_UART_IO_TX_REASONABLE_CNT);
694                                 if ((uart_circ_empty(xmit)) &&
695                                 (rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
696                                 ufifo_st->ff_empty(port->line)))
697                                         sirfsoc_uart_stop_tx(port);
698                         }
699                 }
700         }
701         spin_unlock(&port->lock);
702         return IRQ_HANDLED;
703 }
704
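/*
 * Tasklet run after each completed RX DMA descriptor: push the filled
 * loop buffers to the tty layer and, while DMA reception is still
 * enabled (RX timeout interrupt on), immediately resubmit each drained
 * buffer to the dmaengine.
 */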
705 static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
706 {
707         struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
708         struct uart_port *port = &sirfport->port;
709         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
710         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
711         unsigned long flags;
712         spin_lock_irqsave(&sirfport->rx_lock, flags);
713         while (sirfport->rx_completed != sirfport->rx_issued) {
714                 sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
715                                         SIRFSOC_RX_DMA_BUF_SIZE);
716                 if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
717                                 uint_en->sirfsoc_rx_timeout_en)
718                         sirfsoc_rx_submit_one_dma_desc(port,
719                                         sirfport->rx_completed++);
720                 else
721                         sirfport->rx_completed++;
722                 sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
723         }
724         spin_unlock_irqrestore(&sirfport->rx_lock, flags);
725 }
726
727 static void sirfsoc_uart_rx_dma_complete_callback(void *param)
728 {
729         struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
730         spin_lock(&sirfport->rx_lock);
731         sirfport->rx_issued++;
732         sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
733         spin_unlock(&sirfport->rx_lock);
734         tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
735 }
736
737 /* submit rx dma descriptors to the dmaengine and restart DMA reception */
738 static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
739 {
740         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
741         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
742         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
743         unsigned long flags;
744         int i;
745         spin_lock_irqsave(&sirfport->rx_lock, flags);
746         sirfport->rx_io_count = 0;
747         wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
748                 rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
749                 ~SIRFUART_IO_MODE);
750         spin_unlock_irqrestore(&sirfport->rx_lock, flags);
751         for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
752                 sirfsoc_rx_submit_one_dma_desc(port, i);
753         sirfport->rx_completed = sirfport->rx_issued = 0;
754         spin_lock_irqsave(&sirfport->rx_lock, flags);
755         if (!sirfport->is_marco)
756                 wr_regl(port, ureg->sirfsoc_int_en_reg,
757                                 rd_regl(port, ureg->sirfsoc_int_en_reg) |
758                                 SIRFUART_RX_DMA_INT_EN(port, uint_en));
759         else
760                 wr_regl(port, ureg->sirfsoc_int_en_reg,
761                         SIRFUART_RX_DMA_INT_EN(port, uint_en));
762         spin_unlock_irqrestore(&sirfport->rx_lock, flags);
763 }
764
765 static void sirfsoc_uart_start_rx(struct uart_port *port)
766 {
767         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
768         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
769         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
770
771         sirfport->rx_io_count = 0;
772         wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
773         wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
774         wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
775         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
776                 sirfsoc_uart_start_next_rx_dma(port);
777         else {
778                 if (!sirfport->is_marco)
779                         wr_regl(port, ureg->sirfsoc_int_en_reg,
780                                 rd_regl(port, ureg->sirfsoc_int_en_reg) |
781                                 SIRFUART_RX_IO_INT_EN(port, uint_en));
782                 else
783                         wr_regl(port, ureg->sirfsoc_int_en_reg,
784                                 SIRFUART_RX_IO_INT_EN(port, uint_en));
785         }
786 }
787
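/*
 * For USP-based ports: choose the sample divisor (and derived I/O clock
 * divisor) that best approximates the requested rate as
 * ioclk_rate / ((ioclk_div + 1) * sample_div).
 */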
788 static unsigned int
789 sirfsoc_usp_calc_sample_div(unsigned long set_rate,
790                 unsigned long ioclk_rate, unsigned long *sample_reg)
791 {
792         unsigned long min_delta = ~0UL;
793         unsigned short sample_div;
794         unsigned long ioclk_div = 0;
795         long temp_delta;
796
797         for (sample_div = SIRF_MIN_SAMPLE_DIV;
798                         sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
799                 temp_delta = ioclk_rate -
800                 (ioclk_rate + (set_rate * sample_div) / 2)
801                 / (set_rate * sample_div) * set_rate * sample_div;
802
803                 temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
804                 if (temp_delta < min_delta) {
805                         ioclk_div = (2 * ioclk_rate /
806                                 (set_rate * sample_div) + 1) / 2 - 1;
807                         if (ioclk_div > SIRF_IOCLK_DIV_MAX)
808                                 continue;
809                         min_delta = temp_delta;
810                         *sample_reg = sample_div;
811                         if (!temp_delta)
812                                 break;
813                 }
814         }
815         return ioclk_div;
816 }
817
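/*
 * For real UART ports: find the divisor pair giving the baud rate
 * closest to the request, pack both divisors into one register value
 * and report the actually achievable rate through *set_baud.
 */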
818 static unsigned int
819 sirfsoc_uart_calc_sample_div(unsigned long baud_rate,
820                         unsigned long ioclk_rate, unsigned long *set_baud)
821 {
822         unsigned long min_delta = ~0UL;
823         unsigned short sample_div;
824         unsigned int regv = 0;
825         unsigned long ioclk_div;
826         unsigned long baud_tmp;
827         int temp_delta;
828
829         for (sample_div = SIRF_MIN_SAMPLE_DIV;
830                         sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
831                 ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
832                 if (ioclk_div > SIRF_IOCLK_DIV_MAX)
833                         continue;
834                 baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
835                 temp_delta = baud_tmp - baud_rate;
836                 temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
837                 if (temp_delta < min_delta) {
838                         regv = regv & (~SIRF_IOCLK_DIV_MASK);
839                         regv = regv | ioclk_div;
840                         regv = regv & (~SIRF_SAMPLE_DIV_MASK);
841                         regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
842                         min_delta = temp_delta;
843                         *set_baud = baud_tmp;
844                 }
845         }
846         return regv;
847 }
848
849 static void sirfsoc_uart_set_termios(struct uart_port *port,
850                                        struct ktermios *termios,
851                                        struct ktermios *old)
852 {
853         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
854         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
855         struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
856         unsigned long   config_reg = 0;
857         unsigned long   baud_rate;
858         unsigned long   set_baud;
859         unsigned long   flags;
860         unsigned long   ic;
861         unsigned int    clk_div_reg = 0;
862         unsigned long   txfifo_op_reg, ioclk_rate;
863         unsigned long   rx_time_out;
864         int             threshold_div;
865         u32             data_bit_len, stop_bit_len, len_val;
866         unsigned long   sample_div_reg = 0xf;
867         ioclk_rate      = port->uartclk;
868
869         switch (termios->c_cflag & CSIZE) {
870         default:
871         case CS8:
872                 data_bit_len = 8;
873                 config_reg |= SIRFUART_DATA_BIT_LEN_8;
874                 break;
875         case CS7:
876                 data_bit_len = 7;
877                 config_reg |= SIRFUART_DATA_BIT_LEN_7;
878                 break;
879         case CS6:
880                 data_bit_len = 6;
881                 config_reg |= SIRFUART_DATA_BIT_LEN_6;
882                 break;
883         case CS5:
884                 data_bit_len = 5;
885                 config_reg |= SIRFUART_DATA_BIT_LEN_5;
886                 break;
887         }
888         if (termios->c_cflag & CSTOPB) {
889                 config_reg |= SIRFUART_STOP_BIT_LEN_2;
890                 stop_bit_len = 2;
891         } else
892                 stop_bit_len = 1;
893
894         spin_lock_irqsave(&port->lock, flags);
895         port->read_status_mask = uint_en->sirfsoc_rx_oflow_en;
896         port->ignore_status_mask = 0;
897         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
898                 if (termios->c_iflag & INPCK)
899                         port->read_status_mask |= uint_en->sirfsoc_frm_err_en |
900                                 uint_en->sirfsoc_parity_err_en;
901         } else {
902                 if (termios->c_iflag & INPCK)
903                         port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
904         }
905         if (termios->c_iflag & (BRKINT | PARMRK))
906                         port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
907         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
908                 if (termios->c_iflag & IGNPAR)
909                         port->ignore_status_mask |=
910                                 uint_en->sirfsoc_frm_err_en |
911                                 uint_en->sirfsoc_parity_err_en;
912                 if (termios->c_cflag & PARENB) {
913                         if (termios->c_cflag & CMSPAR) {
914                                 if (termios->c_cflag & PARODD)
915                                         config_reg |= SIRFUART_STICK_BIT_MARK;
916                                 else
917                                         config_reg |= SIRFUART_STICK_BIT_SPACE;
918                         } else if (termios->c_cflag & PARODD) {
919                                 config_reg |= SIRFUART_STICK_BIT_ODD;
920                         } else {
921                                 config_reg |= SIRFUART_STICK_BIT_EVEN;
922                         }
923                 }
924         } else {
925                 if (termios->c_iflag & IGNPAR)
926                         port->ignore_status_mask |=
927                                 uint_en->sirfsoc_frm_err_en;
928                 if (termios->c_cflag & PARENB)
929                         dev_warn(port->dev,
930                                         "USP-UART does not support parity\n");
931         }
932         if (termios->c_iflag & IGNBRK) {
933                 port->ignore_status_mask |=
934                         uint_en->sirfsoc_rxd_brk_en;
935                 if (termios->c_iflag & IGNPAR)
936                         port->ignore_status_mask |=
937                                 uint_en->sirfsoc_rx_oflow_en;
938         }
939         if ((termios->c_cflag & CREAD) == 0)
940                 port->ignore_status_mask |= SIRFUART_DUMMY_READ;
941         /* Hardware Flow Control Settings */
942         if (UART_ENABLE_MS(port, termios->c_cflag)) {
943                 if (!sirfport->ms_enabled)
944                         sirfsoc_uart_enable_ms(port);
945         } else {
946                 if (sirfport->ms_enabled)
947                         sirfsoc_uart_disable_ms(port);
948         }
949         baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
950         if (ioclk_rate == 150000000) {
951                 for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
952                         if (baud_rate == baudrate_to_regv[ic].baud_rate)
953                                 clk_div_reg = baudrate_to_regv[ic].reg_val;
954         }
955         set_baud = baud_rate;
956         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
957                 if (unlikely(clk_div_reg == 0))
958                         clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate,
959                                         ioclk_rate, &set_baud);
960                 wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg);
961         } else {
962                 clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate,
963                                 ioclk_rate, &sample_div_reg);
964                 sample_div_reg--;
965                 set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) /
966                                 (sample_div_reg + 1));
967                 /* setting usp mode 2 */
968                 len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) |
969                                 (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET));
970                 len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK)
971                                 << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET);
972                 wr_regl(port, ureg->sirfsoc_mode2, len_val);
973         }
974         if (tty_termios_baud_rate(termios))
975                 tty_termios_encode_baud_rate(termios, set_baud, set_baud);
976         /* set receive timeout && data bits len */
977         rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
978         rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
979         txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
980         wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_STOP);
981         wr_regl(port, ureg->sirfsoc_tx_fifo_op,
982                         (txfifo_op_reg & ~SIRFUART_FIFO_START));
983         if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
984                 config_reg |= SIRFUART_RECV_TIMEOUT(port, rx_time_out);
985                 wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
986         } else {
987                 /*tx frame ctrl*/
988                 len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET;
989                 len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
990                                 SIRFSOC_USP_TX_FRAME_LEN_OFFSET;
991                 len_val |= ((data_bit_len - 1) <<
992                                 SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET);
993                 len_val |= (((clk_div_reg & 0xc00) >> 10) <<
994                                 SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET);
995                 wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val);
996                 /*rx frame ctrl*/
997                 len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET;
998                 len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
999                                 SIRFSOC_USP_RX_FRAME_LEN_OFFSET;
1000                 len_val |= (data_bit_len - 1) <<
1001                                 SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET;
1002                 len_val |= (((clk_div_reg & 0xf000) >> 12) <<
1003                                 SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET);
1004                 wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val);
1005                 /*async param*/
1006                 wr_regl(port, ureg->sirfsoc_async_param_reg,
1007                         (SIRFUART_RECV_TIMEOUT(port, rx_time_out)) |
1008                         (sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
1009                         SIRFSOC_USP_ASYNC_DIV2_OFFSET);
1010         }
1011         if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
1012                 wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
1013         else
1014                 wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
1015         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
1016                 wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
1017         else
1018                 wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
1019         /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
1020         if (set_baud < 1000000)
1021                 threshold_div = 1;
1022         else
1023                 threshold_div = 2;
1024         wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl,
1025                                 SIRFUART_FIFO_THD(port) / threshold_div);
1026         wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl,
1027                                 SIRFUART_FIFO_THD(port) / threshold_div);
1028         txfifo_op_reg |= SIRFUART_FIFO_START;
1029         wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
1030         uart_update_timeout(port, termios->c_cflag, set_baud);
1031         sirfsoc_uart_start_rx(port);
1032         wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
1033         spin_unlock_irqrestore(&port->lock, flags);
1034 }
1035
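/*
 * Request the TX slave DMA channel matching sirfport->tx_dma_no and
 * apply the slave configuration.
 */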
1036 static int sirfsoc_uart_init_tx_dma(struct uart_port *port)
1037 {
1038         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1039         dma_cap_mask_t dma_mask;
1040         struct dma_slave_config tx_slv_cfg = {
1041                 .dst_maxburst = 2,
1042         };
1043
1044         dma_cap_zero(dma_mask);
1045         dma_cap_set(DMA_SLAVE, dma_mask);
1046         sirfport->tx_dma_chan = dma_request_channel(dma_mask,
1047                 (dma_filter_fn)sirfsoc_dma_filter_id,
1048                 (void *)sirfport->tx_dma_no);
1049         if (!sirfport->tx_dma_chan) {
1050                 dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
1051                                         sirfport->tx_dma_no);
1052                 return  -EPROBE_DEFER;
1053         }
1054         dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
1055
1056         return 0;
1057 }
1058
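/*
 * Request the RX slave DMA channel and allocate the coherent loop
 * buffers (SIRFSOC_RX_LOOP_BUF_CNT buffers of SIRFSOC_RX_DMA_BUF_SIZE
 * bytes each) that DMA reception cycles through.
 */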
1059 static int sirfsoc_uart_init_rx_dma(struct uart_port *port)
1060 {
1061         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1062         dma_cap_mask_t dma_mask;
1063         int ret;
1064         int i, j;
1065         struct dma_slave_config slv_cfg = {
1066                 .src_maxburst = 2,
1067         };
1068
1069         dma_cap_zero(dma_mask);
1070         dma_cap_set(DMA_SLAVE, dma_mask);
1071         sirfport->rx_dma_chan = dma_request_channel(dma_mask,
1072                                         (dma_filter_fn)sirfsoc_dma_filter_id,
1073                                         (void *)sirfport->rx_dma_no);
1074         if (!sirfport->rx_dma_chan) {
1075                 dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
1076                                 sirfport->rx_dma_no);
1077                 ret = -EPROBE_DEFER;
1078                 goto request_err;
1079         }
1080         for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
1081                 sirfport->rx_dma_items[i].xmit.buf =
1082                         dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
1083                         &sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
1084                 if (!sirfport->rx_dma_items[i].xmit.buf) {
1085                         dev_err(port->dev, "Uart alloc buffer failed\n");
1086                         ret = -ENOMEM;
1087                         goto alloc_coherent_err;
1088                 }
1089                 sirfport->rx_dma_items[i].xmit.head =
1090                         sirfport->rx_dma_items[i].xmit.tail = 0;
1091         }
1092         dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
1093
1094         return 0;
1095 alloc_coherent_err:
1096         for (j = 0; j < i; j++)
1097                 dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
1098                                 sirfport->rx_dma_items[j].xmit.buf,
1099                                 sirfport->rx_dma_items[j].dma_addr);
1100         dma_release_channel(sirfport->rx_dma_chan);
1101 request_err:
1102         return ret;
1103 }
1104
1105 static void sirfsoc_uart_uninit_tx_dma(struct sirfsoc_uart_port *sirfport)
1106 {
1107         dmaengine_terminate_all(sirfport->tx_dma_chan);
1108         dma_release_channel(sirfport->tx_dma_chan);
1109 }
1110
1111 static void sirfsoc_uart_uninit_rx_dma(struct sirfsoc_uart_port *sirfport)
1112 {
1113         int i;
1114         struct uart_port *port = &sirfport->port;
1115         dmaengine_terminate_all(sirfport->rx_dma_chan);
1116         dma_release_channel(sirfport->rx_dma_chan);
1117         for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
1118                 dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
1119                                 sirfport->rx_dma_items[i].xmit.buf,
1120                                 sirfport->rx_dma_items[i].dma_addr);
1121 }
1122
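/*
 * Bring the port up: claim the IRQ (kept disabled until setup is done),
 * put both FIFOs into I/O mode with default thresholds, then set up the
 * RX/TX DMA channels and, for USP ports with hardware flow control, the
 * CTS GPIO interrupt, before finally enabling the port IRQ.
 */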
1123 static int sirfsoc_uart_startup(struct uart_port *port)
1124 {
1125         struct sirfsoc_uart_port *sirfport      = to_sirfport(port);
1126         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1127         unsigned int index                      = port->line;
1128         int ret;
1129         set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
1130         ret = request_irq(port->irq,
1131                                 sirfsoc_uart_isr,
1132                                 0,
1133                                 SIRFUART_PORT_NAME,
1134                                 sirfport);
1135         if (ret != 0) {
1136                 dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
1137                                                         index, port->irq);
1138                 goto irq_err;
1139         }
1140
1141         /* initial hardware settings */
1142         wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
1143                 rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
1144                 SIRFUART_IO_MODE);
1145         wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
1146                 rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
1147                 SIRFUART_IO_MODE);
1148         wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
1149         wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
1150         wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
1151         if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
1152                 wr_regl(port, ureg->sirfsoc_mode1,
1153                         SIRFSOC_USP_ENDIAN_CTRL_LSBF |
1154                         SIRFSOC_USP_EN);
1155         wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
1156         wr_regl(port, ureg->sirfsoc_tx_fifo_op, 0);
1157         wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
1158         wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
1159         wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
1160         wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
1161
1162         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
1163                 ret = sirfsoc_uart_init_rx_dma(port);
1164                 if (ret)
1165                         goto init_rx_err;
1166                 wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
1167                                 SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
1168                                 SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
1169                                 SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
1170         }
1171         if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
1172                 sirfsoc_uart_init_tx_dma(port);
1173                 sirfport->tx_dma_state = TX_DMA_IDLE;
1174                 wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
1175                                 SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
1176                                 SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
1177                                 SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
1178         }
1179         sirfport->ms_enabled = false;
1180         if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
1181                 sirfport->hw_flow_ctrl) {
1182                 set_irq_flags(gpio_to_irq(sirfport->cts_gpio),
1183                         IRQF_VALID | IRQF_NOAUTOEN);
1184                 ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
1185                         sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING |
1186                         IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport);
1187                 if (ret != 0) {
1188                         dev_err(port->dev, "UART-USP: request cts gpio irq failed\n");
1189                         goto init_rx_err;
1190                 }
1191         }
1192
1193         enable_irq(port->irq);
1194
1195         return 0;
1196 init_rx_err:
1197         free_irq(port->irq, sirfport);
1198 irq_err:
1199         return ret;
1200 }
1201
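     /*
      * Port shutdown: mask all UART interrupts, free the UART (and CTS
      * GPIO) interrupts and tear down any DMA channels set up at startup.
      */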
1202 static void sirfsoc_uart_shutdown(struct uart_port *port)
1203 {
1204         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1205         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1206         if (!sirfport->is_marco)
1207                 wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
1208         else
1209                 wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL);
1210
1211         free_irq(port->irq, sirfport);
1212         if (sirfport->ms_enabled)
1213                 sirfsoc_uart_disable_ms(port);
1214         if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
1215                         sirfport->hw_flow_ctrl) {
1216                 gpio_set_value(sirfport->rts_gpio, 1);
1217                 free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
1218         }
1219         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
1220                 sirfsoc_uart_uninit_rx_dma(sirfport);
1221         if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
1222                 sirfsoc_uart_uninit_tx_dma(sirfport);
1223                 sirfport->tx_dma_state = TX_DMA_IDLE;
1224         }
1225 }
1226
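     /* Port type name reported back to the serial core. */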
1227 static const char *sirfsoc_uart_type(struct uart_port *port)
1228 {
1229         return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
1230 }
1231
1232 static int sirfsoc_uart_request_port(struct uart_port *port)
1233 {
1234         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1235         struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
1236         void *ret;
1237         ret = request_mem_region(port->mapbase,
1238                 SIRFUART_MAP_SIZE, uart_param->port_name);
1239         return ret ? 0 : -EBUSY;
1240 }
1241
1242 static void sirfsoc_uart_release_port(struct uart_port *port)
1243 {
1244         release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
1245 }
1246
1247 static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
1248 {
1249         if (flags & UART_CONFIG_TYPE) {
1250                 port->type = SIRFSOC_PORT_TYPE;
1251                 sirfsoc_uart_request_port(port);
1252         }
1253 }
1254
1255 static struct uart_ops sirfsoc_uart_ops = {
1256         .tx_empty       = sirfsoc_uart_tx_empty,
1257         .get_mctrl      = sirfsoc_uart_get_mctrl,
1258         .set_mctrl      = sirfsoc_uart_set_mctrl,
1259         .stop_tx        = sirfsoc_uart_stop_tx,
1260         .start_tx       = sirfsoc_uart_start_tx,
1261         .stop_rx        = sirfsoc_uart_stop_rx,
1262         .enable_ms      = sirfsoc_uart_enable_ms,
1263         .break_ctl      = sirfsoc_uart_break_ctl,
1264         .startup        = sirfsoc_uart_startup,
1265         .shutdown       = sirfsoc_uart_shutdown,
1266         .set_termios    = sirfsoc_uart_set_termios,
1267         .type           = sirfsoc_uart_type,
1268         .release_port   = sirfsoc_uart_release_port,
1269         .request_port   = sirfsoc_uart_request_port,
1270         .config_port    = sirfsoc_uart_config_port,
1271 };
1272
1273 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
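     /*
      * Console setup: validate the console index and apply any options
      * from the command line; the console path always uses PIO, so both
      * DMA channel numbers are forced to UNVALID_DMA_CHAN.
      */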
1274 static int __init
1275 sirfsoc_uart_console_setup(struct console *co, char *options)
1276 {
1277         unsigned int baud = 115200;
1278         unsigned int bits = 8;
1279         unsigned int parity = 'n';
1280         unsigned int flow = 'n';
1281         struct uart_port *port;
1282         struct sirfsoc_uart_port *sirfport;
1283         struct sirfsoc_register *ureg;
1284
1285         if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
                     return -EINVAL;
             port = &sirfsoc_uart_ports[co->index].port;
             sirfport = to_sirfport(port);
             ureg = &sirfport->uart_reg->uart_reg;
1286
1287         if (!port->mapbase)
1288                 return -ENODEV;
1289
1290         /* enable usp in mode1 register */
1291         if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
1292                 wr_regl(port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
1293                                 SIRFSOC_USP_ENDIAN_CTRL_LSBF);
1294         if (options)
1295                 uart_parse_options(options, &baud, &parity, &bits, &flow);
1296         port->cons = co;
1297
1298         /* default console tx/rx transfer using io mode */
1299         sirfport->rx_dma_no = UNVALID_DMA_CHAN;
1300         sirfport->tx_dma_no = UNVALID_DMA_CHAN;
1301         return uart_set_options(port, co, baud, parity, bits, flow);
1302 }
1303
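     /* Busy-wait for space in the TX FIFO, then write a single character. */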
1304 static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
1305 {
1306         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1307         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1308         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
1309         while (rd_regl(port,
1310                 ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_full(port->line))
1311                 cpu_relax();
1312         wr_regb(port, ureg->sirfsoc_tx_fifo_data, ch);
1313 }
1314
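     /* Write a console string one character at a time via the TX FIFO. */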
1315 static void sirfsoc_uart_console_write(struct console *co, const char *s,
1316                                                         unsigned int count)
1317 {
1318         struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
1319         uart_console_write(port, s, count, sirfsoc_uart_console_putchar);
1320 }
1321
1322 static struct console sirfsoc_uart_console = {
1323         .name           = SIRFSOC_UART_NAME,
1324         .device         = uart_console_device,
1325         .flags          = CON_PRINTBUFFER,
1326         .index          = -1,
1327         .write          = sirfsoc_uart_console_write,
1328         .setup          = sirfsoc_uart_console_setup,
1329         .data           = &sirfsoc_uart_drv,
1330 };
1331
1332 static int __init sirfsoc_uart_console_init(void)
1333 {
1334         register_console(&sirfsoc_uart_console);
1335         return 0;
1336 }
1337 console_initcall(sirfsoc_uart_console_init);
1338 #endif
1339
1340 static struct uart_driver sirfsoc_uart_drv = {
1341         .owner          = THIS_MODULE,
1342         .driver_name    = SIRFUART_PORT_NAME,
1343         .nr             = SIRFSOC_UART_NR,
1344         .dev_name       = SIRFSOC_UART_NAME,
1345         .major          = SIRFSOC_UART_MAJOR,
1346         .minor          = SIRFSOC_UART_MINOR,
1347 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
1348         .cons                   = &sirfsoc_uart_console,
1349 #else
1350         .cons                   = NULL,
1351 #endif
1352 };
1353
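     /* DT match table: .data selects the register layout for each compatible. */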
1354 static const struct of_device_id sirfsoc_uart_ids[] = {
1355         { .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
1356         { .compatible = "sirf,marco-uart", .data = &sirfsoc_uart},
1357         { .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
1358         {}
1359 };
1360 MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
1361
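     /*
      * Probe: select the port slot from the "cell-index" property, parse
      * the optional DMA channel and flow-control GPIO properties, map the
      * register window, enable the interface clock and register the port
      * with the serial core.
      */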
1362 static int sirfsoc_uart_probe(struct platform_device *pdev)
1363 {
1364         struct sirfsoc_uart_port *sirfport;
1365         struct uart_port *port;
1366         struct resource *res;
1367         int ret;
1368         const struct of_device_id *match;
1369
1370         match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
1371         if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
1372                 dev_err(&pdev->dev,
1373                         "Unable to find cell-index in uart node.\n");
1374                 ret = -EFAULT;
1375                 goto err;
1376         }
1377         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart"))
1378                 pdev->id += ((struct sirfsoc_uart_register *)
1379                                 match->data)->uart_param.register_uart_nr;
1380         sirfport = &sirfsoc_uart_ports[pdev->id];
1381         port = &sirfport->port;
1382         port->dev = &pdev->dev;
1383         port->private_data = sirfport;
1384         sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;
1385
1386         sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
1387                 "sirf,uart-has-rtscts");
1388         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart")) {
1389                 sirfport->uart_reg->uart_type = SIRF_REAL_UART;
1390                 if (of_property_read_u32(pdev->dev.of_node,
1391                                 "sirf,uart-dma-rx-channel",
1392                                 &sirfport->rx_dma_no))
1393                         sirfport->rx_dma_no = UNVALID_DMA_CHAN;
1394                 if (of_property_read_u32(pdev->dev.of_node,
1395                                 "sirf,uart-dma-tx-channel",
1396                                 &sirfport->tx_dma_no))
1397                         sirfport->tx_dma_no = UNVALID_DMA_CHAN;
1398         }
1399         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
1400                 sirfport->uart_reg->uart_type = SIRF_USP_UART;
1401                 if (of_property_read_u32(pdev->dev.of_node,
1402                                 "sirf,usp-dma-rx-channel",
1403                                 &sirfport->rx_dma_no))
1404                         sirfport->rx_dma_no = UNVALID_DMA_CHAN;
1405                 if (of_property_read_u32(pdev->dev.of_node,
1406                                 "sirf,usp-dma-tx-channel",
1407                                 &sirfport->tx_dma_no))
1408                         sirfport->tx_dma_no = UNVALID_DMA_CHAN;
1409                 if (!sirfport->hw_flow_ctrl)
1410                         goto usp_no_flow_control;
1411                 if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
1412                         sirfport->cts_gpio = of_get_named_gpio(
1413                                         pdev->dev.of_node, "cts-gpios", 0);
1414                 else
1415                         sirfport->cts_gpio = -1;
1416                 if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL))
1417                         sirfport->rts_gpio = of_get_named_gpio(
1418                                         pdev->dev.of_node, "rts-gpios", 0);
1419                 else
1420                         sirfport->rts_gpio = -1;
1421
1422                 if ((!gpio_is_valid(sirfport->cts_gpio) ||
1423                          !gpio_is_valid(sirfport->rts_gpio))) {
1424                         ret = -EINVAL;
1425                         dev_err(&pdev->dev,
1426                                 "USP flow control requires both cts and rts gpios\n");
1427                         goto err;
1428                 }
1429                 ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
1430                                 "usp-cts-gpio");
1431                 if (ret) {
1432                         dev_err(&pdev->dev, "Unable to request cts gpio\n");
1433                         goto err;
1434                 }
1435                 gpio_direction_input(sirfport->cts_gpio);
1436                 ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
1437                                 "usp-rts-gpio");
1438                 if (ret) {
1439                         dev_err(&pdev->dev, "Unable to request rts gpio\n");
1440                         goto err;
1441                 }
1442                 gpio_direction_output(sirfport->rts_gpio, 1);
1443         }
1444 usp_no_flow_control:
1445         if (of_device_is_compatible(pdev->dev.of_node, "sirf,marco-uart"))
1446                 sirfport->is_marco = true;
1447
1448         if (of_property_read_u32(pdev->dev.of_node,
1449                         "fifosize",
1450                         &port->fifosize)) {
1451                 dev_err(&pdev->dev,
1452                         "Unable to find fifosize in uart node.\n");
1453                 ret = -EFAULT;
1454                 goto err;
1455         }
1456
1457         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1458         if (res == NULL) {
1459                 dev_err(&pdev->dev, "Unable to get memory resource.\n");
1460                 ret = -EFAULT;
1461                 goto err;
1462         }
1463         spin_lock_init(&sirfport->rx_lock);
1464         spin_lock_init(&sirfport->tx_lock);
1465         tasklet_init(&sirfport->rx_dma_complete_tasklet,
1466                         sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
1467         tasklet_init(&sirfport->rx_tmo_process_tasklet,
1468                         sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
1469         port->mapbase = res->start;
1470         port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1471         if (!port->membase) {
1472                 dev_err(&pdev->dev, "Cannot remap resource.\n");
1473                 ret = -ENOMEM;
1474                 goto err;
1475         }
1476         res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1477         if (res == NULL) {
1478                 dev_err(&pdev->dev, "Unable to get IRQ resource.\n");
1479                 ret = -EFAULT;
1480                 goto err;
1481         }
1482         port->irq = res->start;
1483
1484         sirfport->clk = clk_get(&pdev->dev, NULL);
1485         if (IS_ERR(sirfport->clk)) {
1486                 ret = PTR_ERR(sirfport->clk);
1487                 goto err;
1488         }
1489         clk_prepare_enable(sirfport->clk);
1490         port->uartclk = clk_get_rate(sirfport->clk);
1491
1492         port->ops = &sirfsoc_uart_ops;
1493         spin_lock_init(&port->lock);
1494
1495         platform_set_drvdata(pdev, sirfport);
1496         ret = uart_add_one_port(&sirfsoc_uart_drv, port);
1497         if (ret != 0) {
1498                 dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
1499                 goto port_err;
1500         }
1501
1502         return 0;
1503
1504 port_err:
1505         clk_disable_unprepare(sirfport->clk);
1506         clk_put(sirfport->clk);
1507 err:
1508         return ret;
1509 }
1510
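     /*
      * Remove: unregister the port from the serial core before its clock
      * is disabled and released.
      */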
1511 static int sirfsoc_uart_remove(struct platform_device *pdev)
1512 {
1513         struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
1514         struct uart_port *port = &sirfport->port;
1515         uart_remove_one_port(&sirfsoc_uart_drv, port);
1516         clk_disable_unprepare(sirfport->clk);
1517         clk_put(sirfport->clk);
1518         return 0;
1519 }
1520
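     /* Legacy platform PM callbacks: defer to the serial core helpers. */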
1521 static int
1522 sirfsoc_uart_suspend(struct platform_device *pdev, pm_message_t state)
1523 {
1524         struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
1525         struct uart_port *port = &sirfport->port;
1526         uart_suspend_port(&sirfsoc_uart_drv, port);
1527         return 0;
1528 }
1529
1530 static int sirfsoc_uart_resume(struct platform_device *pdev)
1531 {
1532         struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
1533         struct uart_port *port = &sirfport->port;
1534         uart_resume_port(&sirfsoc_uart_drv, port);
1535         return 0;
1536 }
1537
1538 static struct platform_driver sirfsoc_uart_driver = {
1539         .probe          = sirfsoc_uart_probe,
1540         .remove         = sirfsoc_uart_remove,
1541         .suspend        = sirfsoc_uart_suspend,
1542         .resume         = sirfsoc_uart_resume,
1543         .driver         = {
1544                 .name   = SIRFUART_PORT_NAME,
1545                 .owner  = THIS_MODULE,
1546                 .of_match_table = sirfsoc_uart_ids,
1547         },
1548 };
1549
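     /*
      * Register the uart_driver with the serial core first, then the
      * platform driver; unregister the former if the latter fails.
      */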
1550 static int __init sirfsoc_uart_init(void)
1551 {
1552         int ret = 0;
1553
1554         ret = uart_register_driver(&sirfsoc_uart_drv);
1555         if (ret)
1556                 goto out;
1557
1558         ret = platform_driver_register(&sirfsoc_uart_driver);
1559         if (ret)
1560                 uart_unregister_driver(&sirfsoc_uart_drv);
1561 out:
1562         return ret;
1563 }
1564 module_init(sirfsoc_uart_init);
1565
1566 static void __exit sirfsoc_uart_exit(void)
1567 {
1568         platform_driver_unregister(&sirfsoc_uart_driver);
1569         uart_unregister_driver(&sirfsoc_uart_drv);
1570 }
1571 module_exit(sirfsoc_uart_exit);
1572
1573 MODULE_LICENSE("GPL v2");
1574 MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang <Rong.Wang@csr.com>");
1575 MODULE_DESCRIPTION("CSR SiRFprimaII UART driver");