/* drivers/spi/rk29_spim.c - driver for rk29xx spim device
2  *\r
3  * Copyright (C) 2010 ROCKCHIP, Inc.\r
4  *\r
5  * This software is licensed under the terms of the GNU General Public\r
6  * License version 2, as published by the Free Software Foundation, and\r
7  * may be copied, distributed, and modified under those terms.\r
8  *\r
9  * This program is distributed in the hope that it will be useful,\r
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of\r
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r
12  * GNU General Public License for more details.\r
13  */\r
14 \r
15 #include <linux/dma-mapping.h>\r
16 #include <linux/interrupt.h>\r
17 #include <linux/highmem.h>\r
18 #include <linux/delay.h>\r
19 #include <linux/slab.h>\r
20 #include <linux/platform_device.h>\r
21 #include <linux/clk.h>\r
22 #include <linux/cpufreq.h>\r
23 #include <mach/gpio.h>\r
25 #include <asm/dma.h>\r
26 \r
27 #include "rk29xx_spim.h"\r
28 #include <linux/spi/spi.h>\r
29 #include <mach/board.h>\r
30 \r
31 #ifdef CONFIG_DEBUG_FS\r
32 #include <linux/debugfs.h>\r
33 #endif\r
34 \r
/*
 * The original SPI driver is fairly inefficient and cannot keep up with
 * large transfers.  QUICK_TRANSFER enables a fast-transfer path and lets
 * the caller choose half-duplex or full-duplex operation; half-duplex is
 * the default.
 */
40 \r
41 //#define QUICK_TRANSFER         \r
42 \r
43 #if 1\r
44 #define DBG   printk\r
45 #else\r
46 #define DBG(x...)\r
47 #endif\r
48 \r
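/* Transfers must be longer than this (and already DMA-mapped) to take the
 * DMA path; anything shorter is serviced by PIO/interrupt mode. */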
49 #define DMA_MIN_BYTES 8\r
50 \r
51 \r
52 #define START_STATE     ((void *)0)\r
53 #define RUNNING_STATE   ((void *)1)\r
54 #define DONE_STATE      ((void *)2)\r
55 #define ERROR_STATE     ((void *)-1)\r
56 \r
57 #define QUEUE_RUNNING   0\r
58 #define QUEUE_STOPPED   1\r
59 \r
60 #define MRST_SPI_DEASSERT       0\r
61 #define MRST_SPI_ASSERT         1  ///CS0\r
62 #define MRST_SPI_ASSERT1        2  ///CS1\r
63 \r
64 /* Slave spi_dev related */\r
65 struct chip_data {\r
66         u16 cr0;\r
67         u8 cs;                  /* chip select pin */\r
68         u8 n_bytes;             /* current is a 1/2/4 byte op */\r
69         u8 tmode;               /* TR/TO/RO/EEPROM */\r
70         u8 type;                /* SPI/SSP/MicroWire */\r
71 \r
72         u8 poll_mode;           /* 1 means use poll mode */\r
73 \r
74         u32 dma_width;\r
75         u32 rx_threshold;\r
76         u32 tx_threshold;\r
77         u8 enable_dma:1;\r
78         u8 bits_per_word;\r
79         u16 clk_div;            /* baud rate divider */\r
80         u32 speed_hz;           /* baud rate */\r
81         int (*write)(struct rk29xx_spi *dws);\r
82         int (*read)(struct rk29xx_spi *dws);\r
83         void (*cs_control)(struct rk29xx_spi *dws, u32 cs, u8 flag);\r
84 };\r
85 \r
86 #define SUSPND    (1<<0)\r
87 #define SPIBUSY   (1<<1)\r
88 #define RXBUSY    (1<<2)\r
89 #define TXBUSY    (1<<3)\r
90 \r
91 #ifdef CONFIG_DEBUG_FS\r
92 static int spi_show_regs_open(struct inode *inode, struct file *file)\r
93 {\r
94         file->private_data = inode->i_private;\r
95         return 0;\r
96 }\r
97 \r
98 #define SPI_REGS_BUFSIZE        1024\r
99 static ssize_t  spi_show_regs(struct file *file, char __user *user_buf,\r
100                                 size_t count, loff_t *ppos)\r
101 {\r
102         struct rk29xx_spi *dws;\r
103         char *buf;\r
104         u32 len = 0;\r
105         ssize_t ret;\r
106 \r
107         dws = file->private_data;\r
108 \r
109         buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);\r
110         if (!buf)\r
111                 return 0;\r
112 \r
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "MRST SPI0 registers:\n");
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "=================================\n");
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "CTRL0: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_CTRLR0));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "CTRL1: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_CTRLR1));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "SSIENR: \t0x%08x\n", rk29xx_readl(dws, SPIM_ENR));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "SER: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_SER));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "BAUDR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_BAUDR));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "TXFTLR: \t0x%08x\n", rk29xx_readl(dws, SPIM_TXFTLR));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "RXFTLR: \t0x%08x\n", rk29xx_readl(dws, SPIM_RXFTLR));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "TXFLR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_TXFLR));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "RXFLR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_RXFLR));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "SR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_SR));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "IMR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_IMR));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "ISR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_ISR));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "DMACR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_DMACR));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "DMATDLR: \t0x%08x\n", rk29xx_readl(dws, SPIM_DMATDLR));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "DMARDLR: \t0x%08x\n", rk29xx_readl(dws, SPIM_DMARDLR));
        len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
                        "=================================\n");
149 \r
150         ret =  simple_read_from_buffer(user_buf, count, ppos, buf, len);\r
151         kfree(buf);\r
152         return ret;\r
153 }\r
154 \r
155 static const struct file_operations mrst_spi_regs_ops = {\r
156         .owner          = THIS_MODULE,\r
157         .open           = spi_show_regs_open,\r
158         .read           = spi_show_regs,\r
159 };\r
160 \r
161 static int mrst_spi_debugfs_init(struct rk29xx_spi *dws)\r
162 {\r
163         dws->debugfs = debugfs_create_dir("mrst_spi", NULL);\r
164         if (!dws->debugfs)\r
165                 return -ENOMEM;\r
166 \r
167         debugfs_create_file("registers", S_IFREG | S_IRUGO,\r
168                 dws->debugfs, (void *)dws, &mrst_spi_regs_ops);\r
169         return 0;\r
170 }\r
171 \r
172 static void mrst_spi_debugfs_remove(struct rk29xx_spi *dws)\r
173 {\r
174         if (dws->debugfs)\r
175                 debugfs_remove_recursive(dws->debugfs);\r
176 }\r
177 \r
178 #else\r
179 static inline int mrst_spi_debugfs_init(struct rk29xx_spi *dws)\r
180 {\r
181         return 0;\r
182 }\r
183 \r
184 static inline void mrst_spi_debugfs_remove(struct rk29xx_spi *dws)\r
185 {\r
186 }\r
187 #endif /* CONFIG_DEBUG_FS */\r
188 \r
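/*
 * Spin until the controller clears its BUSY flag, giving up (with an
 * error message) after roughly 1 ms.
 */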
189 static void wait_till_not_busy(struct rk29xx_spi *dws)\r
190 {\r
191         unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);\r
192 \r
193         while (time_before(jiffies, end)) {\r
194                 if (!(rk29xx_readw(dws, SPIM_SR) & SR_BUSY))\r
195                         return;\r
196         }\r
197         dev_err(&dws->master->dev,\r
198                 "DW SPI: Status keeps busy for 1000us after a read/write!\n");\r
199 }\r
200 \r
201 #if defined(QUICK_TRANSFER)\r
202 static void wait_till_tf_empty(struct rk29xx_spi *dws)\r
203 {\r
204         unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);\r
205 \r
206         while (time_before(jiffies, end)) {\r
207                 if (rk29xx_readw(dws, SPIM_SR) & SR_TF_EMPT)\r
208                         return;\r
209         }\r
        dev_err(&dws->master->dev,
                "DW SPI: TX FIFO still not empty 1000us after a read/write!\n");
212 }\r
213 #endif\r
214 \r
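/* Drain anything left in the RX FIFO and wait for the controller to go idle. */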
215 static void flush(struct rk29xx_spi *dws)\r
216 {\r
217         while (!(rk29xx_readw(dws, SPIM_SR) & SR_RF_EMPT))\r
218                 rk29xx_readw(dws, SPIM_RXDR);\r
219 \r
220         wait_till_not_busy(dws);\r
221 }\r
222 \r
223 #if 0\r
224 static void spi_cs_control(struct rk29xx_spi *dws, u32 cs, u8 flag)\r
225 {\r
226         struct rk29xx_spi_platform_data *pdata = dws->master->dev.platform_data;\r
227         struct spi_cs_gpio *cs_gpios = pdata->chipselect_gpios;\r
228 \r
229         if (flag == 0)\r
230                 gpio_direction_output(cs_gpios[cs].cs_gpio, GPIO_HIGH);\r
231         else\r
232                 gpio_direction_output(cs_gpios[cs].cs_gpio, GPIO_LOW);\r
233 }\r
234 #endif\r
235 \r
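/*
 * null_writer/null_reader service transfers that have no TX or no RX
 * buffer: dummy zero frames are clocked out, and unwanted RX data is
 * simply discarded.
 */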
236 static int null_writer(struct rk29xx_spi *dws)\r
237 {\r
238         u8 n_bytes = dws->n_bytes;\r
239 \r
240         if ((rk29xx_readw(dws, SPIM_SR) & SR_TF_FULL)\r
241                 || (dws->tx == dws->tx_end))\r
242                 return 0;\r
243         rk29xx_writew(dws, SPIM_TXDR, 0);\r
244         dws->tx += n_bytes;\r
245         //wait_till_not_busy(dws);\r
246 \r
247         return 1;\r
248 }\r
249 \r
250 static int null_reader(struct rk29xx_spi *dws)\r
251 {\r
252         u8 n_bytes = dws->n_bytes;\r
253         while ((!(rk29xx_readw(dws, SPIM_SR) & SR_RF_EMPT))\r
254                 && (dws->rx < dws->rx_end)) {\r
255                 rk29xx_readw(dws, SPIM_RXDR);\r
256                 dws->rx += n_bytes;\r
257         }\r
258         wait_till_not_busy(dws);\r
259         return dws->rx == dws->rx_end;\r
260 }\r
261 \r
262 static int u8_writer(struct rk29xx_spi *dws)\r
263 {       \r
264         if ((rk29xx_readw(dws, SPIM_SR) & SR_TF_FULL)\r
265                 || (dws->tx == dws->tx_end))\r
266                 return 0;\r
267         rk29xx_writew(dws, SPIM_TXDR, *(u8 *)(dws->tx));\r
268         ++dws->tx;\r
269         //wait_till_not_busy(dws);\r
270 \r
271         return 1;\r
272 }\r
273 \r
274 static int u8_reader(struct rk29xx_spi *dws)\r
275 {\r
276         while (!(rk29xx_readw(dws, SPIM_SR) & SR_RF_EMPT)\r
277                 && (dws->rx < dws->rx_end)) {\r
278                 *(u8 *)(dws->rx) = rk29xx_readw(dws, SPIM_RXDR) & 0xFFU;\r
279                 ++dws->rx;\r
280         }\r
281 \r
282         wait_till_not_busy(dws);\r
283         return dws->rx == dws->rx_end;\r
284 }\r
285 \r
286 static int u16_writer(struct rk29xx_spi *dws)\r
287 {\r
288         if ((rk29xx_readw(dws, SPIM_SR) & SR_TF_FULL)\r
289                 || (dws->tx == dws->tx_end))\r
290                 return 0;\r
291 \r
292         rk29xx_writew(dws, SPIM_TXDR, *(u16 *)(dws->tx));\r
293         dws->tx += 2;\r
294         //wait_till_not_busy(dws);\r
295 \r
296         return 1;\r
297 }\r
298 \r
299 static int u16_reader(struct rk29xx_spi *dws)\r
300 {\r
301         u16 temp;\r
302 \r
303         while (!(rk29xx_readw(dws, SPIM_SR) & SR_RF_EMPT)\r
304                 && (dws->rx < dws->rx_end)) {\r
305                 temp = rk29xx_readw(dws, SPIM_RXDR);\r
306                 *(u16 *)(dws->rx) = temp;\r
307                 dws->rx += 2;\r
308         }\r
309 \r
310         wait_till_not_busy(dws);\r
311         return dws->rx == dws->rx_end;\r
312 }\r
313 \r
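/*
 * Advance to the next spi_transfer in the current message; returns
 * RUNNING_STATE if another transfer follows, DONE_STATE otherwise.
 */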
314 static void *next_transfer(struct rk29xx_spi *dws)\r
315 {\r
316         struct spi_message *msg = dws->cur_msg;\r
317         struct spi_transfer *trans = dws->cur_transfer;\r
318 \r
319         /* Move to next transfer */\r
320         if (trans->transfer_list.next != &msg->transfers) {\r
321                 dws->cur_transfer =\r
322                         list_entry(trans->transfer_list.next,\r
323                                         struct spi_transfer,\r
324                                         transfer_list);\r
325                 return RUNNING_STATE;\r
326         } else\r
327                 return DONE_STATE;\r
328 }\r
329 \r
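/*
 * DMA buffer-done callbacks.  Each one clears its direction's BUSY flag
 * and completes xfer_completion once the other direction has finished too.
 */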
330 static void rk29_spi_dma_rxcb(void *buf_id,\r
331                                  int size, enum rk29_dma_buffresult res)\r
332 {\r
333         struct rk29xx_spi *dws = buf_id;\r
334         unsigned long flags;\r
335 \r
336         spin_lock_irqsave(&dws->lock, flags);\r
337 \r
338         if (res == RK29_RES_OK)\r
339                 dws->state &= ~RXBUSY;\r
340         else\r
341                 dev_err(&dws->master->dev, "DmaAbrtRx-%d, size: %d\n", res, size);\r
342 \r
343         /* If the other done */\r
344         if (!(dws->state & TXBUSY))\r
345                 complete(&dws->xfer_completion);\r
346 \r
347         spin_unlock_irqrestore(&dws->lock, flags);\r
348 }\r
349 \r
350 static void rk29_spi_dma_txcb(void *buf_id,\r
351                                  int size, enum rk29_dma_buffresult res)\r
352 {\r
353         struct rk29xx_spi *dws = buf_id;\r
354         unsigned long flags;\r
355 \r
356         spin_lock_irqsave(&dws->lock, flags);\r
357 \r
358         if (res == RK29_RES_OK)\r
359                 dws->state &= ~TXBUSY;\r
360         else\r
                dev_err(&dws->master->dev, "DmaAbrtTx-%d, size: %d\n", res, size);
362 \r
363         /* If the other done */\r
364         if (!(dws->state & RXBUSY)) \r
365                 complete(&dws->xfer_completion);\r
366 \r
367         spin_unlock_irqrestore(&dws->lock, flags);\r
368 }\r
369 \r
370 \r
371 static struct rk29_dma_client rk29_spi_dma_client = {\r
372         .name = "rk29xx-spi-dma",\r
373 };\r
374 \r
375 static int acquire_dma(struct rk29xx_spi *dws)\r
376 {       \r
377         if (dws->dma_inited) {\r
378                 return 0;\r
379         }\r
380 \r
381         if(rk29_dma_request(dws->rx_dmach, \r
382                 &rk29_spi_dma_client, NULL) < 0) {\r
383                 dev_err(&dws->master->dev, "dws->rx_dmach : %d, cannot get RxDMA\n", dws->rx_dmach);\r
384                 return -1;\r
385         }\r
386 \r
387         if (rk29_dma_request(dws->tx_dmach,\r
388                                         &rk29_spi_dma_client, NULL) < 0) {\r
389                 dev_err(&dws->master->dev, "dws->tx_dmach : %d, cannot get TxDMA\n", dws->tx_dmach);\r
390                 rk29_dma_free(dws->rx_dmach, &rk29_spi_dma_client);\r
391                 return -1;\r
392         }\r
393         \r
394     dws->dma_inited = 1;\r
395         return 0;\r
396 }\r
397 \r
398 static void release_dma(struct rk29xx_spi *dws)\r
399 {\r
        if (dws && dws->dma_inited) {
401                 rk29_dma_free(dws->rx_dmach, &rk29_spi_dma_client);\r
402                 rk29_dma_free(dws->tx_dmach, &rk29_spi_dma_client);\r
403         }\r
404 }\r
405 \r
/*
 * Note: the protocol driver must already have prepared DMA-capable
 * buffers; this function only picks up the bus addresses supplied in the
 * spi_transfer and configures the DMA channels accordingly.
 */
411 static int map_dma_buffers(struct rk29xx_spi *dws)\r
412 {\r
413         if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited\r
414                 || !dws->cur_chip->enable_dma)\r
415                 return -1;\r
416 \r
417         if (dws->cur_transfer->tx_dma) {\r
418                 dws->tx_dma = dws->cur_transfer->tx_dma;\r
419                 if (rk29_dma_set_buffdone_fn(dws->tx_dmach, rk29_spi_dma_txcb)) {\r
420                         dev_err(&dws->master->dev, "rk29_dma_set_buffdone_fn fail\n");\r
421                         return -1;\r
422                 }\r
423                 if (rk29_dma_devconfig(dws->tx_dmach, RK29_DMASRC_MEM,\r
424                                         (unsigned long)dws->sfr_start + SPIM_TXDR)) {\r
425                         dev_err(&dws->master->dev, "rk29_dma_devconfig fail\n");\r
426                         return -1;\r
427                 }\r
428         }\r
429 \r
430         if (dws->cur_transfer->rx_dma) {\r
431                 dws->rx_dma = dws->cur_transfer->rx_dma;\r
432                 if (rk29_dma_set_buffdone_fn(dws->rx_dmach, rk29_spi_dma_rxcb)) {\r
433                         dev_err(&dws->master->dev, "rk29_dma_set_buffdone_fn fail\n");\r
434                         return -1;\r
435                 }\r
436                 if (rk29_dma_devconfig(dws->rx_dmach, RK29_DMASRC_HW,\r
437                                         (unsigned long)dws->sfr_start + SPIM_RXDR)) {\r
438                         dev_err(&dws->master->dev, "rk29_dma_devconfig fail\n");\r
439                         return -1;\r
440                 }\r
441         }\r
442 \r
443         return 0;\r
444 }\r
445 \r
446 /* Caller already set message->status; dma and pio irqs are blocked */\r
447 static void giveback(struct rk29xx_spi *dws)\r
448 {\r
449         struct spi_transfer *last_transfer;\r
450         unsigned long flags;\r
451         struct spi_message *msg;\r
452 \r
453         spin_lock_irqsave(&dws->lock, flags);\r
454         msg = dws->cur_msg;\r
455         dws->cur_msg = NULL;\r
456         dws->cur_transfer = NULL;\r
457         dws->prev_chip = dws->cur_chip;\r
458         dws->cur_chip = NULL;\r
459         dws->dma_mapped = 0;\r
460         queue_work(dws->workqueue, &dws->pump_messages);\r
461         spin_unlock_irqrestore(&dws->lock, flags);\r
462 \r
463         last_transfer = list_entry(msg->transfers.prev,\r
464                                         struct spi_transfer,\r
465                                         transfer_list);\r
466 \r
467         if (!last_transfer->cs_change)\r
468                 dws->cs_control(dws,msg->spi->chip_select, MRST_SPI_DEASSERT);\r
469 \r
470         msg->state = NULL;\r
471         if (msg->complete)\r
472                 msg->complete(msg->context);\r
473 }\r
474 \r
475 static void int_error_stop(struct rk29xx_spi *dws, const char *msg)\r
476 {\r
477         /* Stop and reset hw */\r
478         flush(dws);\r
479         spi_enable_chip(dws, 0);\r
480 \r
481         dev_err(&dws->master->dev, "%s\n", msg);\r
482         dws->cur_msg->state = ERROR_STATE;\r
483         tasklet_schedule(&dws->pump_transfers);\r
484 }\r
485 \r
486 static void transfer_complete(struct rk29xx_spi *dws)\r
487 {\r
        /* Update the total number of bytes transferred for this message */
489         dws->cur_msg->actual_length += dws->len;\r
490 \r
491         /* Move to next transfer */\r
492         dws->cur_msg->state = next_transfer(dws);\r
493 \r
494         /* Handle end of message */\r
495         if (dws->cur_msg->state == DONE_STATE) {\r
496                 dws->cur_msg->status = 0;\r
497                 giveback(dws);\r
498         } else\r
499                 tasklet_schedule(&dws->pump_transfers);\r
500 }\r
501 \r
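/*
 * TXEI-driven PIO path: on each TX-FIFO-empty interrupt refill up to half
 * the FIFO, drain whatever has arrived on the RX side, then either re-arm
 * the interrupt or complete the transfer.  FIFO overrun/underrun aborts
 * the current message.
 */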
502 static irqreturn_t interrupt_transfer(struct rk29xx_spi *dws)\r
503 {\r
504         u16 irq_status, irq_mask = 0x3f;\r
505         u32 int_level = dws->fifo_len / 2;\r
506         u32 left;\r
507         \r
508         irq_status = rk29xx_readw(dws, SPIM_ISR) & irq_mask;\r
509         /* Error handling */\r
510         if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {\r
511                 rk29xx_writew(dws, SPIM_ICR, SPI_CLEAR_INT_TXOI | SPI_CLEAR_INT_RXOI | SPI_CLEAR_INT_RXUI);\r
512                 int_error_stop(dws, "interrupt_transfer: fifo overrun");\r
513                 return IRQ_HANDLED;\r
514         }\r
515 \r
516         if (irq_status & SPI_INT_TXEI) {\r
517                 spi_mask_intr(dws, SPI_INT_TXEI);\r
518 \r
519                 left = (dws->tx_end - dws->tx) / dws->n_bytes;\r
520                 left = (left > int_level) ? int_level : left;\r
521 \r
522                 while (left--) {\r
523                         dws->write(dws);\r
524                         wait_till_not_busy(dws);\r
525                 }\r
526                 dws->read(dws);\r
527 \r
528                 /* Re-enable the IRQ if there is still data left to tx */\r
529                 if (dws->tx_end > dws->tx)\r
530                         spi_umask_intr(dws, SPI_INT_TXEI);\r
531                 else\r
532                         transfer_complete(dws);\r
533         }\r
534 \r
535         return IRQ_HANDLED;\r
536 }\r
537 \r
538 static irqreturn_t rk29xx_spi_irq(int irq, void *dev_id)\r
539 {\r
540         struct rk29xx_spi *dws = dev_id;\r
541 \r
542         if (!dws->cur_msg) {\r
543                 spi_mask_intr(dws, SPI_INT_TXEI);\r
544                 /* Never fail */\r
545                 return IRQ_HANDLED;\r
546         }\r
547 \r
548         return dws->transfer_handler(dws);\r
549 }\r
550 \r
551 /* Must be called inside pump_transfers() */\r
552 static void poll_transfer(struct rk29xx_spi *dws)\r
553 {\r
554         while (dws->write(dws)) {\r
555                 wait_till_not_busy(dws);\r
556                 dws->read(dws);\r
557         }\r
558         transfer_complete(dws);\r
559 }\r
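
/*
 * Assert the requested chip select: first via the optional GPIO
 * cs_control() hook, then through the controller's slave-enable register.
 */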
560 static void spi_chip_sel(struct rk29xx_spi *dws, u16 cs)\r
561 {\r
562     if(cs >= dws->master->num_chipselect)\r
563                 return;\r
564 \r
565         if (dws->cs_control){\r
566             dws->cs_control(dws, cs, 1);\r
567         }\r
568         rk29xx_writel(dws, SPIM_SER, 1 << cs);\r
569 }\r
570 \r
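/*
 * Tasklet body for the PIO/interrupt path: set up the current transfer,
 * reprogram CTRLR0, clock divider and chip select when needed, then either
 * poll the FIFOs directly or arm the TXEI interrupt.
 */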
571 static void pump_transfers(unsigned long data)\r
572 {\r
573         struct rk29xx_spi *dws = (struct rk29xx_spi *)data;\r
574         struct spi_message *message = NULL;\r
575         struct spi_transfer *transfer = NULL;\r
576         struct spi_transfer *previous = NULL;\r
577         struct spi_device *spi = NULL;\r
578         struct chip_data *chip = NULL;\r
579         u8 bits = 0;\r
580         u8 spi_dfs = 0;\r
581         u8 imask = 0;\r
582         u8 cs_change = 0;\r
583         u16 txint_level = 0;\r
584         u16 clk_div = 0;\r
585         u32 speed = 0;\r
586         u32 cr0 = 0;\r
587 \r
        DBG(KERN_INFO "pump_transfers\n");
589 \r
590         /* Get current state information */\r
591         message = dws->cur_msg;\r
592         transfer = dws->cur_transfer;\r
593         chip = dws->cur_chip;\r
594         spi = message->spi;     \r
595         if (unlikely(!chip->clk_div))\r
596                 //chip->clk_div = clk_get_rate(dws->clock_spim) / chip->speed_hz;       \r
597             chip->clk_div = 40000000 / chip->speed_hz;  \r
598         if (message->state == ERROR_STATE) {\r
599                 message->status = -EIO;\r
600                 goto early_exit;\r
601         }\r
602 \r
603         /* Handle end of message */\r
604         if (message->state == DONE_STATE) {\r
605                 message->status = 0;\r
606                 goto early_exit;\r
607         }\r
608 \r
609         /* Delay if requested at end of transfer*/\r
610         if (message->state == RUNNING_STATE) {\r
611                 previous = list_entry(transfer->transfer_list.prev,\r
612                                         struct spi_transfer,\r
613                                         transfer_list);\r
614                 if (previous->delay_usecs)\r
615                         udelay(previous->delay_usecs);\r
616         }\r
617 \r
618         dws->n_bytes = chip->n_bytes;\r
619         dws->dma_width = chip->dma_width;\r
620         dws->cs_control = chip->cs_control;\r
621 \r
622         dws->rx_dma = transfer->rx_dma;\r
623         dws->tx_dma = transfer->tx_dma;\r
624         dws->tx = (void *)transfer->tx_buf;\r
625         dws->tx_end = dws->tx + transfer->len;\r
626         dws->rx = transfer->rx_buf;\r
627         dws->rx_end = dws->rx + transfer->len;\r
628         dws->write = dws->tx ? chip->write : null_writer;\r
629         dws->read = dws->rx ? chip->read : null_reader;\r
630         dws->cs_change = transfer->cs_change;\r
631         dws->len = dws->cur_transfer->len;\r
632         if (chip != dws->prev_chip)\r
633                 cs_change = 1;\r
634 \r
635         cr0 = chip->cr0;\r
636 \r
637         /* Handle per transfer options for bpw and speed */\r
638         if (transfer->speed_hz) {\r
639                 speed = chip->speed_hz;\r
640 \r
641                 if (transfer->speed_hz != speed) {\r
642                         speed = transfer->speed_hz;\r
643                         if (speed > clk_get_rate(dws->clock_spim)) {\r
                                dev_err(&dws->master->dev,
                                        "MRST SPI0: unsupported freq: %dHz\n", speed);
646                                 message->status = -EIO;\r
647                                 goto early_exit;\r
648                         }\r
649 \r
650                         /* clk_div doesn't support odd number */\r
651                         clk_div = clk_get_rate(dws->clock_spim) / speed;\r
652                         clk_div = (clk_div + 1) & 0xfffe;\r
653 \r
654                         chip->speed_hz = speed;\r
655                         chip->clk_div = clk_div;\r
656                 }\r
657         }\r
658         \r
659         if (transfer->bits_per_word) {\r
660                 bits = transfer->bits_per_word;\r
661 \r
662                 switch (bits) {\r
663                 case 8:\r
664                         dws->n_bytes = 1;\r
665                         dws->dma_width = 1;\r
666                         dws->read = (dws->read != null_reader) ?\r
667                                         u8_reader : null_reader;\r
668                         dws->write = (dws->write != null_writer) ?\r
669                                         u8_writer : null_writer;\r
670                         spi_dfs = SPI_DFS_8BIT;\r
671                         break;\r
672                 case 16:\r
673                         dws->n_bytes = 2;\r
674                         dws->dma_width = 2;\r
675                         dws->read = (dws->read != null_reader) ?\r
676                                         u16_reader : null_reader;\r
677                         dws->write = (dws->write != null_writer) ?\r
678                                         u16_writer : null_writer;\r
679                         spi_dfs = SPI_DFS_16BIT;\r
680                         break;\r
681                 default:\r
                        dev_err(&dws->master->dev,
                                "MRST SPI0: unsupported bits: %db\n", bits);
684                         message->status = -EIO;\r
685                         goto early_exit;\r
686                 }\r
687 \r
688                 cr0 = (spi_dfs << SPI_DFS_OFFSET)\r
689                         | (SPI_HALF_WORLD_OFF << SPI_HALF_WORLD_TX_OFFSET)\r
690                         | (SPI_SSN_DELAY_ONE << SPI_SSN_DELAY_OFFSET)\r
691                         | (chip->type << SPI_FRF_OFFSET)\r
692                         | (spi->mode << SPI_MODE_OFFSET)\r
693                         | (chip->tmode << SPI_TMOD_OFFSET);\r
694         }\r
695         message->state = RUNNING_STATE;\r
696  \r
697         /*\r
698          * Adjust transfer mode if necessary. Requires platform dependent\r
699          * chipselect mechanism.\r
700          */\r
701         if (dws->cs_control) {\r
702                 if (dws->rx && dws->tx)\r
703                         chip->tmode = SPI_TMOD_TR;\r
704                 else if (dws->rx)\r
705                         chip->tmode = SPI_TMOD_RO;\r
706                 else\r
707                         chip->tmode = SPI_TMOD_TO;\r
708 \r
709                 cr0 &= ~(0x3 << SPI_MODE_OFFSET);\r
710                 cr0 |= (chip->tmode << SPI_TMOD_OFFSET);\r
711         } \r
712 \r
713         /*\r
714          * Interrupt mode\r
         * we only need to set the TXEI IRQ, as TX and RX always happen synchronously
716          */\r
717         if (!dws->dma_mapped && !chip->poll_mode) {\r
718                 int templen = dws->len / dws->n_bytes;\r
719                 txint_level = dws->fifo_len / 2;\r
720                 txint_level = (templen > txint_level) ? txint_level : templen;\r
721 \r
722                 imask |= SPI_INT_TXEI;\r
723                 dws->transfer_handler = interrupt_transfer;\r
724         }\r
725 \r
726         /*\r
727          * Reprogram registers only if\r
728          *      1. chip select changes\r
729          *      2. clk_div is changed\r
730          *      3. control value changes\r
731          */\r
732         if (rk29xx_readw(dws, SPIM_CTRLR0) != cr0 || cs_change || clk_div || imask) {\r
733                 spi_enable_chip(dws, 0);\r
734                 if (rk29xx_readw(dws, SPIM_CTRLR0) != cr0)\r
735                         rk29xx_writew(dws, SPIM_CTRLR0, cr0);\r
736 \r
737                 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);            \r
738                 spi_chip_sel(dws, spi->chip_select);\r
                /* Set the interrupt mask; for poll mode just disable all interrupts */
740                 spi_mask_intr(dws, 0xff);\r
741                 if (imask)\r
742                         spi_umask_intr(dws, imask);\r
743                 if (txint_level)\r
744                         rk29xx_writew(dws, SPIM_TXFTLR, txint_level);\r
745 \r
746         rk29xx_writew(dws, SPIM_CTRLR1, dws->len-1);\r
747                 spi_enable_chip(dws, 1);\r
748                 if (cs_change)\r
749                         dws->prev_chip = chip;\r
750         } \r
751 \r
752         if (chip->poll_mode)\r
753                 poll_transfer(dws);\r
754 \r
755         return;\r
756 \r
757 early_exit:\r
758         giveback(dws);\r
759         return;\r
760 }\r
761 \r
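/*
 * DMA path: configure and start the TX/RX DMA channels for the current
 * transfer, then wait (with a timeout derived from the transfer length and
 * clock rate) for the buffer-done callbacks before moving on.
 */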
static void dma_transfer(struct rk29xx_spi *dws)
763 {\r
764         struct spi_message *message = NULL;\r
765         struct spi_transfer *transfer = NULL;\r
766         struct spi_transfer *previous = NULL;\r
767         struct spi_device *spi = NULL;\r
768         struct chip_data *chip = NULL;\r
769         unsigned long val;\r
770         int ms;\r
771         int iRet;\r
772         u8 bits = 0;\r
773         u8 spi_dfs = 0;\r
774         u8 cs_change = 0;\r
775         u16 clk_div = 0;\r
776         u32 speed = 0;\r
777         u32 cr0 = 0;\r
778         u32 dmacr = 0;\r
779 \r
780         DBG(KERN_INFO "dma_transfer\n");\r
781 \r
782         if (acquire_dma(dws)) {\r
783                 dev_err(&dws->master->dev, "acquire dma failed\n");\r
784                 goto err_out;\r
785         }\r
786 \r
        if (map_dma_buffers(dws)) {
                dev_err(&dws->master->dev, "map dma buffers failed\n");
                goto err_out;
        }
791 \r
792         /* Get current state information */\r
793         message = dws->cur_msg;\r
794         transfer = dws->cur_transfer;\r
795         chip = dws->cur_chip;\r
796         spi = message->spi;     \r
797         if (unlikely(!chip->clk_div))\r
798                 chip->clk_div = 40000000 / chip->speed_hz;      \r
799         if (message->state == ERROR_STATE) {\r
800                 message->status = -EIO;\r
801                 goto err_out;\r
802         }\r
803 \r
804         /* Handle end of message */\r
805         if (message->state == DONE_STATE) {\r
806                 message->status = 0;\r
807                 goto err_out;\r
808         }\r
809 \r
810         /* Delay if requested at end of transfer*/\r
811         if (message->state == RUNNING_STATE) {\r
812                 previous = list_entry(transfer->transfer_list.prev,\r
813                                         struct spi_transfer,\r
814                                         transfer_list);\r
815                 if (previous->delay_usecs)\r
816                         udelay(previous->delay_usecs);\r
817         }\r
818 \r
819         dws->n_bytes = chip->n_bytes;\r
820         dws->dma_width = chip->dma_width;\r
821         dws->cs_control = chip->cs_control;\r
822 \r
823         dws->rx_dma = transfer->rx_dma;\r
824         dws->tx_dma = transfer->tx_dma;\r
825         dws->tx = (void *)transfer->tx_buf;\r
826         dws->tx_end = dws->tx + transfer->len;\r
827         dws->rx = transfer->rx_buf;\r
828         dws->rx_end = dws->rx + transfer->len;\r
829         dws->write = dws->tx ? chip->write : null_writer;\r
830         dws->read = dws->rx ? chip->read : null_reader;\r
831         dws->cs_change = transfer->cs_change;\r
832         dws->len = dws->cur_transfer->len;\r
833         if (chip != dws->prev_chip)\r
834                 cs_change = 1;\r
835 \r
836         cr0 = chip->cr0;\r
837 \r
838         /* Handle per transfer options for bpw and speed */\r
839         if (transfer->speed_hz) {\r
840                 speed = chip->speed_hz;\r
841 \r
842                 if (transfer->speed_hz != speed) {\r
843                         speed = transfer->speed_hz;\r
844                         if (speed > clk_get_rate(dws->clock_spim)) {\r
                                dev_err(&dws->master->dev,
                                        "MRST SPI0: unsupported freq: %dHz\n", speed);
847                                 message->status = -EIO;\r
848                                 goto err_out;\r
849                         }\r
850 \r
851                         /* clk_div doesn't support odd number */\r
852                         clk_div = clk_get_rate(dws->clock_spim) / speed;\r
853                         clk_div = (clk_div + 1) & 0xfffe;\r
854 \r
855                         chip->speed_hz = speed;\r
856                         chip->clk_div = clk_div;\r
857                 }\r
858         }\r
859 \r
860         if (transfer->bits_per_word) {\r
861                 bits = transfer->bits_per_word;\r
862 \r
863                 switch (bits) {\r
864                 case 8:\r
865                         dws->n_bytes = 1;\r
866                         dws->dma_width = 1;\r
867                         spi_dfs = SPI_DFS_8BIT;\r
868                         break;\r
869                 case 16:\r
870                         dws->n_bytes = 2;\r
871                         dws->dma_width = 2;\r
872                         spi_dfs = SPI_DFS_16BIT;\r
873                         break;\r
874                 default:\r
                        dev_err(&dws->master->dev,
                                "MRST SPI0: unsupported bits: %db\n", bits);
877                         message->status = -EIO;\r
878                         goto err_out;\r
879                 }\r
880 \r
881                 cr0 = (spi_dfs << SPI_DFS_OFFSET)\r
882                         | (SPI_HALF_WORLD_OFF << SPI_HALF_WORLD_TX_OFFSET)\r
883                         | (SPI_SSN_DELAY_ONE << SPI_SSN_DELAY_OFFSET)\r
884                         | (chip->type << SPI_FRF_OFFSET)\r
885                         | (spi->mode << SPI_MODE_OFFSET)\r
886                         | (chip->tmode << SPI_TMOD_OFFSET);\r
887         }\r
888         message->state = RUNNING_STATE;\r
889  \r
890         /*\r
891          * Adjust transfer mode if necessary. Requires platform dependent\r
892          * chipselect mechanism.\r
893          */\r
894         if (dws->cs_control) {\r
895                 if (dws->rx && dws->tx)\r
896                         chip->tmode = SPI_TMOD_TR;\r
897                 else if (dws->rx)\r
898                         chip->tmode = SPI_TMOD_RO;\r
899                 else\r
900                         chip->tmode = SPI_TMOD_TO;\r
901 \r
902                 cr0 &= ~(0x3 << SPI_MODE_OFFSET);\r
903                 cr0 |= (chip->tmode << SPI_TMOD_OFFSET);\r
904         }\r
905 \r
906         /*\r
907          * Reprogram registers only if\r
908          *      1. chip select changes\r
909          *      2. clk_div is changed\r
910          *      3. control value changes\r
911          */\r
912         if (rk29xx_readw(dws, SPIM_CTRLR0) != cr0 || cs_change || clk_div) {\r
913                 spi_enable_chip(dws, 0);\r
914                 if (rk29xx_readw(dws, SPIM_CTRLR0) != cr0) {\r
915                         rk29xx_writew(dws, SPIM_CTRLR0, cr0);\r
916                 }\r
917 \r
918                 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);            \r
919                 spi_chip_sel(dws, spi->chip_select);\r
                /* Set the interrupt mask; for poll mode just disable all interrupts */
921                 spi_mask_intr(dws, 0xff);\r
922                 \r
923                 if (transfer->tx_buf != NULL) {\r
924                         dmacr |= SPI_DMACR_TX_ENABLE;\r
925                         rk29xx_writew(dws, SPIM_DMATDLR, 0);\r
926                 }\r
927                 if (transfer->rx_buf != NULL) {\r
928                         dmacr |= SPI_DMACR_RX_ENABLE;\r
929                         rk29xx_writew(dws, SPIM_DMARDLR, 0);\r
930                         rk29xx_writew(dws, SPIM_CTRLR1, transfer->len-1);\r
931                 }\r
932                 rk29xx_writew(dws, SPIM_DMACR, dmacr);\r
933                 spi_enable_chip(dws, 1);\r
934                 if (cs_change)\r
935                         dws->prev_chip = chip;\r
936         } \r
937 \r
938         INIT_COMPLETION(dws->xfer_completion);\r
939         \r
940         if (transfer->tx_buf != NULL) {\r
941                 dws->state |= TXBUSY;\r
942                 if (rk29_dma_config(dws->tx_dmach, 1)) {\r
943                         dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);\r
944                         goto err_out;\r
945                 }\r
946                 \r
947                 iRet = rk29_dma_enqueue(dws->tx_dmach, (void *)dws,\r
948                                         transfer->tx_dma, transfer->len);\r
949                 if (iRet) {\r
950                         dev_err(&dws->master->dev, "function: %s, line: %d, iRet: %d(dws->tx_dmach: %d, transfer->tx_dma: 0x%x)\n", __FUNCTION__, __LINE__, iRet, \r
951                                 dws->tx_dmach, (unsigned int)transfer->tx_dma);\r
952                         goto err_out;\r
953                 }\r
954                 \r
955                 if (rk29_dma_ctrl(dws->tx_dmach, RK29_DMAOP_START)) {\r
956                         dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);\r
957                         goto err_out;\r
958                 }\r
959         }\r
960 \r
961         if (transfer->rx_buf != NULL) {\r
962                 dws->state |= RXBUSY;\r
963                 if (rk29_dma_config(dws->rx_dmach, 1)) {\r
964                         dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);\r
965                         goto err_out;\r
966                 }\r
967                 \r
968                 iRet = rk29_dma_enqueue(dws->rx_dmach, (void *)dws,\r
969                                         transfer->rx_dma, transfer->len);\r
970                 if (iRet) {\r
971                         dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);\r
972                         goto err_out;\r
973                 }\r
974                 \r
975                 if (rk29_dma_ctrl(dws->rx_dmach, RK29_DMAOP_START)) {\r
976                         dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);\r
977                         goto err_out;\r
978                 }\r
979         }\r
980 \r
981         /* millisecs to xfer 'len' bytes @ 'cur_speed' */\r
982         ms = transfer->len * 8 * 1000 / dws->cur_chip->speed_hz;\r
983         ms += 10; \r
984 \r
985         val = msecs_to_jiffies(ms) + 500;\r
986         if (!wait_for_completion_timeout(&dws->xfer_completion, val)) {\r
987                 if (transfer->rx_buf != NULL && (dws->state & RXBUSY)) {\r
988                         rk29_dma_ctrl(dws->rx_dmach, RK29_DMAOP_FLUSH);\r
989                         dws->state &= ~RXBUSY;\r
990                         dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);\r
991                         goto NEXT_TRANSFER;\r
992                 }\r
993                 if (transfer->tx_buf != NULL && (dws->state & TXBUSY)) {\r
994                         rk29_dma_ctrl(dws->tx_dmach, RK29_DMAOP_FLUSH);\r
995                         dws->state &= ~TXBUSY;\r
996                         dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);\r
997                         goto NEXT_TRANSFER;\r
998                 }\r
999         }\r
1000 \r
1001         wait_till_not_busy(dws);\r
1002 \r
1003 NEXT_TRANSFER:\r
        /* Update the total number of bytes transferred for this message */
1005         dws->cur_msg->actual_length += dws->len;\r
1006 \r
1007         /* Move to next transfer */\r
1008         dws->cur_msg->state = next_transfer(dws);\r
1009 \r
1010         /* Handle end of message */\r
1011         if (dws->cur_msg->state == DONE_STATE) {\r
1012                 dws->cur_msg->status = 0;\r
1013                 giveback(dws);\r
1014         } else\r
1015                 dma_transfer(dws);\r
1016         \r
1017         return;\r
1018 \r
1019 err_out:\r
1020         giveback(dws);\r
1021         return;\r
1022 \r
1023 }\r
1024 \r
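/*
 * Workqueue handler: pop the next spi_message off the queue and hand it to
 * the DMA path (for pre-mapped buffers longer than DMA_MIN_BYTES) or to the
 * pump_transfers tasklet.
 */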
1025 static void pump_messages(struct work_struct *work)\r
1026 {\r
1027         struct rk29xx_spi *dws =\r
1028                 container_of(work, struct rk29xx_spi, pump_messages);\r
1029         unsigned long flags;\r
1030 \r
1031         DBG(KERN_INFO "pump_messages\n");\r
1032 \r
1033         /* Lock queue and check for queue work */\r
1034         spin_lock_irqsave(&dws->lock, flags);\r
1035         if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {\r
1036                 dws->busy = 0;\r
1037                 spin_unlock_irqrestore(&dws->lock, flags);\r
1038                 return;\r
1039         }\r
1040 \r
1041         /* Make sure we are not already running a message */\r
1042         if (dws->cur_msg) {\r
1043                 spin_unlock_irqrestore(&dws->lock, flags);\r
1044                 return;\r
1045         }\r
1046 \r
1047         /* Extract head of queue */\r
1048         dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);\r
1049         list_del_init(&dws->cur_msg->queue);\r
1050 \r
1051         /* Initial message state*/\r
1052         dws->cur_msg->state = START_STATE;\r
1053         dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,\r
1054                                                 struct spi_transfer,\r
1055                                                 transfer_list);\r
1056         dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);\r
        /* Force a chip-select update for every pumped message -- dxj */
        dws->prev_chip = NULL;
1058     \r
1059         /* Mark as busy and launch transfers */\r
1060         if(dws->cur_msg->is_dma_mapped && dws->cur_transfer->len > DMA_MIN_BYTES) {\r
1061                 dws->busy = 1;\r
1062             spin_unlock_irqrestore(&dws->lock, flags);\r
1063                 dma_transfer(dws);\r
1064                 return;\r
1065         }\r
1066         else {\r
1067                 tasklet_schedule(&dws->pump_transfers);\r
1068         }\r
1069 \r
1070         dws->busy = 1;\r
1071         spin_unlock_irqrestore(&dws->lock, flags);\r
1072 }\r
1073 \r
1074 #if defined(QUICK_TRANSFER)\r
1075 static void do_read(struct rk29xx_spi *dws)\r
1076 {\r
1077         int count = 0;\r
1078 \r
1079         spi_enable_chip(dws, 0);\r
1080         rk29xx_writew(dws, SPIM_CTRLR1, dws->rx_end-dws->rx-1);\r
1081         spi_enable_chip(dws, 1);                \r
1082         rk29xx_writew(dws, SPIM_TXDR, 0);\r
1083         while (1) {\r
1084                 if (dws->read(dws))\r
1085                         break;\r
1086                 if (count++ == 0x20) {\r
1087                         dev_err(&dws->master->dev, "+++++++++++spi receive data time out+++++++++++++\n");\r
1088                         break;\r
1089                 }\r
1090                 \r
1091         }\r
1092 }\r
1093 \r
1094 static void do_write(struct rk29xx_spi *dws)\r
1095 {\r
1096         while (dws->tx<dws->tx_end) {\r
1097                 dws->write(dws);\r
1098         }\r
1099 }\r
1100 \r
1101 /* Caller already set message->status; dma and pio irqs are blocked */\r
1102 static void msg_giveback(struct rk29xx_spi *dws)\r
1103 {\r
1104         struct spi_transfer *last_transfer;\r
1105         struct spi_message *msg;\r
1106 \r
1107         DBG("+++++++++++++++enter %s++++++++++++++++++\n", __func__);\r
1108 \r
1109         msg = dws->cur_msg;\r
1110         dws->cur_msg = NULL;\r
1111         dws->cur_transfer = NULL;\r
1112         dws->prev_chip = dws->cur_chip;\r
1113         dws->cur_chip = NULL;\r
1114         dws->dma_mapped = 0;\r
1115         dws->busy = 0;\r
1116 \r
1117         last_transfer = list_entry(msg->transfers.prev,\r
1118                                         struct spi_transfer,\r
1119                                         transfer_list);\r
1120 \r
1121         if (!last_transfer->cs_change)\r
1122                 dws->cs_control(dws,msg->spi->chip_select,MRST_SPI_DEASSERT);\r
1123 \r
1124         msg->state = NULL;      \r
1125 }\r
1126 \r
1127 /* Must be called inside pump_transfers() */\r
1128 static int do_full_transfer(struct rk29xx_spi *dws)\r
1129 {\r
1130         if ((dws->read(dws))) {\r
1131                 goto comple;\r
1132         }\r
1133         \r
1134         while (dws->tx<dws->tx_end){\r
1135                 dws->write(dws);                \r
1136                 dws->read(dws);\r
1137         }\r
1138         \r
1139         if (dws->rx < dws->rx_end) {\r
1140                 dws->read(dws);\r
1141         }\r
1142 \r
1143 comple:\r
1144         \r
1145         dws->cur_msg->actual_length += dws->len;\r
1146         \r
1147         /* Move to next transfer */\r
1148         dws->cur_msg->state = next_transfer(dws);\r
1149                                         \r
1150         if (dws->cur_msg->state == DONE_STATE) {\r
1151                 dws->cur_msg->status = 0;\r
1152                 //msg_giveback(dws);\r
1153                 return 0;\r
1154         }\r
1155         else {\r
1156                 return -1;\r
1157         }\r
1158         \r
1159 }\r
1160 \r
1161 \r
1162 /* Must be called inside pump_transfers() */\r
1163 static int do_half_transfer(struct rk29xx_spi *dws)\r
1164 {\r
1165         if (dws->rx) {\r
1166                 if (dws->tx) {\r
1167                         do_write(dws);\r
1168                 }\r
1169                 wait_till_tf_empty(dws);\r
1170                 wait_till_not_busy(dws);\r
1171                 do_read(dws);\r
1172         }\r
1173         else {\r
1174                 do_write(dws);\r
1175                 wait_till_tf_empty(dws);\r
1176                 wait_till_not_busy(dws);\r
1177         }\r
1178         \r
1179         dws->cur_msg->actual_length += dws->len;\r
1180         \r
1181         /* Move to next transfer */\r
1182         dws->cur_msg->state = next_transfer(dws);\r
1183                                         \r
1184         if (dws->cur_msg->state == DONE_STATE) {\r
1185                 dws->cur_msg->status = 0;\r
1186                 //msg_giveback(dws);\r
1187                 return 0;\r
1188         }\r
1189         else {\r
1190                 return -1;\r
1191         }\r
1192 }\r
1193 \r
1194 \r
1195 static int rk29xx_pump_transfers(struct rk29xx_spi *dws, int mode)\r
1196 {\r
1197         struct spi_message *message = NULL;\r
1198         struct spi_transfer *transfer = NULL;\r
1199         struct spi_transfer *previous = NULL;\r
1200         struct spi_device *spi = NULL;\r
1201         struct chip_data *chip = NULL;\r
1202         u8 bits = 0;\r
1203         u8 spi_dfs = 0;\r
1204         u8 cs_change = 0;\r
1205         u16 clk_div = 0;\r
1206         u32 speed = 0;\r
1207         u32 cr0 = 0;\r
1208         u32 dmacr = 0;\r
1209         \r
1210         DBG(KERN_INFO "+++++++++++++++enter %s++++++++++++++++++\n", __func__);\r
1211 \r
1212         /* Get current state information */\r
1213         message = dws->cur_msg;\r
1214         transfer = dws->cur_transfer;\r
1215         chip = dws->cur_chip;\r
1216         spi = message->spi;     \r
1217 \r
1218         if (unlikely(!chip->clk_div))\r
1219                 chip->clk_div = clk_get_rate(dws->clock_spim) / chip->speed_hz;\r
1220         if (message->state == ERROR_STATE) {\r
1221                 message->status = -EIO;\r
1222                 goto early_exit;\r
1223         }\r
1224 \r
1225         /* Handle end of message */\r
1226         if (message->state == DONE_STATE) {\r
1227                 message->status = 0;\r
1228                 goto early_exit;\r
1229         }\r
1230 \r
1231         /* Delay if requested at end of transfer*/\r
1232         if (message->state == RUNNING_STATE) {\r
1233                 previous = list_entry(transfer->transfer_list.prev,\r
1234                                         struct spi_transfer,\r
1235                                         transfer_list);\r
1236                 if (previous->delay_usecs)\r
1237                         udelay(previous->delay_usecs);\r
1238         }\r
1239 \r
1240         dws->n_bytes = chip->n_bytes;\r
1241         dws->dma_width = chip->dma_width;\r
1242         dws->cs_control = chip->cs_control;\r
1243 \r
1244         dws->rx_dma = transfer->rx_dma;\r
1245         dws->tx_dma = transfer->tx_dma;\r
1246         dws->tx = (void *)transfer->tx_buf;\r
1247         dws->tx_end = dws->tx + transfer->len;\r
1248         dws->rx = transfer->rx_buf;\r
1249         dws->rx_end = dws->rx + transfer->len;\r
1250         dws->write = dws->tx ? chip->write : null_writer;\r
1251         dws->read = dws->rx ? chip->read : null_reader;\r
        if (dws->rx && dws->tx) {
                int temp_len = transfer->len;
                int len;
                const unsigned char *tx_buf = dws->tx;

                /* The TX part of the combined buffer is NUL-terminated;
                 * everything after it belongs to the RX phase. */
                for (len = 0; *tx_buf++ != 0; len++)
                        ;
                dws->tx_end = dws->tx + len;
                dws->rx_end = dws->rx + temp_len - len;
        }
1260         dws->cs_change = transfer->cs_change;\r
1261         dws->len = dws->cur_transfer->len;\r
1262         if (chip != dws->prev_chip)\r
1263                 cs_change = 1;\r
1264 \r
1265         cr0 = chip->cr0;\r
1266 \r
1267         /* Handle per transfer options for bpw and speed */\r
1268         if (transfer->speed_hz) {\r
1269                 speed = chip->speed_hz;\r
1270 \r
1271                 if (transfer->speed_hz != speed) {\r
1272                         speed = transfer->speed_hz;\r
1273                         if (speed > clk_get_rate(dws->clock_spim)) {\r
                                dev_err(&dws->master->dev,
                                        "MRST SPI0: unsupported freq: %dHz\n", speed);
1276                                 message->status = -EIO;\r
1277                                 goto early_exit;\r
1278                         }\r
1279 \r
1280                         /* clk_div doesn't support odd number */\r
1281                         clk_div = clk_get_rate(dws->clock_spim) / speed;\r
1282                         clk_div = (clk_div + 1) & 0xfffe;\r
1283 \r
1284                         chip->speed_hz = speed;\r
1285                         chip->clk_div = clk_div;\r
1286                 }\r
1287         }\r
1288         if (transfer->bits_per_word) {\r
1289                 bits = transfer->bits_per_word;\r
1290 \r
1291                 switch (bits) {\r
1292                 case 8:\r
1293                         dws->n_bytes = 1;\r
1294                         dws->dma_width = 1;\r
1295                         dws->read = (dws->read != null_reader) ?\r
1296                                         u8_reader : null_reader;\r
1297                         dws->write = (dws->write != null_writer) ?\r
1298                                         u8_writer : null_writer;\r
1299                         spi_dfs = SPI_DFS_8BIT;\r
1300                         break;\r
1301                 case 16:\r
1302                         dws->n_bytes = 2;\r
1303                         dws->dma_width = 2;\r
1304                         dws->read = (dws->read != null_reader) ?\r
1305                                         u16_reader : null_reader;\r
1306                         dws->write = (dws->write != null_writer) ?\r
1307                                         u16_writer : null_writer;\r
1308                         spi_dfs = SPI_DFS_16BIT;\r
1309                         break;\r
1310                 default:\r
                        dev_err(&dws->master->dev,
                                "MRST SPI0: unsupported bits: %db\n", bits);
1313                         message->status = -EIO;\r
1314                         goto early_exit;\r
1315                 }\r
1316 \r
1317                 cr0 = (spi_dfs << SPI_DFS_OFFSET)\r
1318                         | (chip->type << SPI_FRF_OFFSET)\r
1319                         | (spi->mode << SPI_MODE_OFFSET)\r
1320                         | (chip->tmode << SPI_TMOD_OFFSET);\r
1321         }\r
1322         message->state = RUNNING_STATE;\r
1323  \r
1324         /*\r
1325          * Adjust transfer mode if necessary. Requires platform dependent\r
1326          * chipselect mechanism.\r
1327          */\r
1328         if (dws->cs_control) {\r
1329                 if (dws->rx && dws->tx)\r
1330                         chip->tmode = SPI_TMOD_TR;\r
1331                 else if (dws->rx)\r
1332                         chip->tmode = SPI_TMOD_RO;\r
1333                 else\r
1334                         chip->tmode = SPI_TMOD_TO;\r
1335 \r
1336                 cr0 &= ~(0x3 << SPI_MODE_OFFSET);\r
1337                 cr0 |= (chip->tmode << SPI_TMOD_OFFSET);\r
1338         }\r
1339         \r
1340         /* Check if current transfer is a DMA transaction */\r
1341         dws->dma_mapped = map_dma_buffers(dws);\r
1342 \r
1343         /*\r
1344          * Reprogram registers only if\r
1345          *      1. chip select changes\r
1346          *      2. clk_div is changed\r
1347          *      3. control value changes\r
1348          */\r
1349         spi_enable_chip(dws, 0);\r
1350         if (rk29xx_readw(dws, SPIM_CTRLR0) != cr0)\r
1351                 rk29xx_writew(dws, SPIM_CTRLR0, cr0);\r
1352 \r
1353     DBG(KERN_INFO "clk_div: 0x%x, chip->clk_div: 0x%x\n", clk_div, chip->clk_div);\r
1354         spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);            \r
1355         spi_chip_sel(dws, spi->chip_select);            \r
1356         rk29xx_writew(dws, SPIM_CTRLR1, 0);//add by lyx\r
1357         if(dws->dma_mapped ) {\r
1358                 dmacr = rk29xx_readw(dws, SPIM_DMACR);\r
1359                 dmacr = dmacr | SPI_DMACR_TX_ENABLE;\r
1360                 if (mode) \r
1361                         dmacr = dmacr | SPI_DMACR_RX_ENABLE;\r
1362                 rk29xx_writew(dws, SPIM_DMACR, dmacr);\r
1363         }\r
1364         spi_enable_chip(dws, 1);\r
1365         if (cs_change)\r
1366                 dws->prev_chip = chip;\r
1367         \r
1368         if (mode)\r
1369                 return do_full_transfer(dws);\r
1370         else\r
1371                 return do_half_transfer(dws);   \r
1372         \r
1373 early_exit:\r
1374         \r
1375         //msg_giveback(dws);\r
1376         \r
1377         return 0;\r
1378 }\r
1379 \r
1380 static void rk29xx_pump_messages(struct rk29xx_spi *dws, int mode)\r
1381 {\r
1382         DBG(KERN_INFO "+++++++++++++++enter %s++++++++++++++++++\n", __func__);\r
1383         \r
        while (acquire_dma(dws))
                msleep(10);
1386 \r
1387         if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {\r
1388                 dws->busy = 0;\r
1389                 return;\r
1390         }\r
1391 \r
1392         /* Make sure we are not already running a message */\r
1393         if (dws->cur_msg) {\r
1394                 return;\r
1395         }\r
1396 \r
1397         /* Extract head of queue */\r
1398         dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);\r
1399         list_del_init(&dws->cur_msg->queue);\r
1400 \r
1401         /* Initial message state*/\r
1402         dws->cur_msg->state = START_STATE;\r
1403         dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,\r
1404                                                 struct spi_transfer,\r
1405                                                 transfer_list);\r
1406         dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);\r
        /* Force a chip-select update for every pumped message -- dxj */
        dws->prev_chip = NULL;
1408     \r
1409         /* Mark as busy and launch transfers */\r
1410         dws->busy = 1;\r
1411 \r
1412         while (rk29xx_pump_transfers(dws, mode)) ;\r
1413 }\r
1414 \r
1415 /* spi_device use this to queue in their spi_msg */\r
1416 static int rk29xx_spi_quick_transfer(struct spi_device *spi, struct spi_message *msg)\r
1417 {\r
1418         struct rk29xx_spi *dws = spi_master_get_devdata(spi->master);\r
1419         unsigned long flags;\r
1420         struct rk29xx_spi_chip *chip_info = spi->controller_data;\r
1421         struct spi_message *mmsg;\r
1422         \r
1423         DBG(KERN_INFO "+++++++++++++++enter %s++++++++++++++++++\n", __func__);\r
1424         \r
1425         spin_lock_irqsave(&dws->lock, flags);\r
1426 \r
1427         if (dws->run == QUEUE_STOPPED) {\r
1428                 spin_unlock_irqrestore(&dws->lock, flags);\r
1429                 return -ESHUTDOWN;\r
1430         }\r
1431 \r
1432         msg->actual_length = 0;\r
1433         msg->status = -EINPROGRESS;\r
1434         msg->state = START_STATE;\r
1435 \r
1436         list_add_tail(&msg->queue, &dws->queue);\r
1437 \r
1438         if (chip_info && (chip_info->transfer_mode == rk29xx_SPI_FULL_DUPLEX)) {
1439                 rk29xx_pump_messages(dws, 1);
1440         } else {
1441                 rk29xx_pump_messages(dws, 0);
1442         }
1444 \r
1445         mmsg = dws->cur_msg;\r
1446         msg_giveback(dws);\r
1447         \r
1448         spin_unlock_irqrestore(&dws->lock, flags);\r
1449 \r
1450         if (mmsg && mmsg->complete)
1451                 mmsg->complete(mmsg->context);\r
1452         \r
1453         return 0;\r
1454 }\r
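/*
 * Illustrative sketch only (not driver code): one way a protocol driver
 * might exercise the full-duplex quick path.  The tx/rx/len names are
 * hypothetical; struct rk29xx_spi_chip, its transfer_mode field and
 * rk29xx_SPI_FULL_DUPLEX come from the checks above, the rest is the
 * generic spi_message API.
 *
 *      static struct rk29xx_spi_chip quick_chip = {
 *              .transfer_mode = rk29xx_SPI_FULL_DUPLEX,
 *      };
 *      struct spi_transfer t = { .tx_buf = tx, .rx_buf = rx, .len = len };
 *      struct spi_message m;
 *
 *      spi_message_init(&m);
 *      spi_message_add_tail(&t, &m);
 *      spi->controller_data = &quick_chip;
 *      spi_sync(spi, &m);
 */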
1455 \r
1456 #else\r
1457 \r
1458 /* spi_device use this to queue in their spi_msg */\r
1459 static int rk29xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)\r
1460 {\r
1461         struct rk29xx_spi *dws = spi_master_get_devdata(spi->master);\r
1462         unsigned long flags;\r
1463 \r
1464         spin_lock_irqsave(&dws->lock, flags);\r
1465 \r
1466         if (dws->run == QUEUE_STOPPED) {\r
1467                 spin_unlock_irqrestore(&dws->lock, flags);\r
1468                 return -ESHUTDOWN;\r
1469         }\r
1470 \r
1471         msg->actual_length = 0;\r
1472         msg->status = -EINPROGRESS;\r
1473         msg->state = START_STATE;\r
1474 \r
1475         list_add_tail(&msg->queue, &dws->queue);\r
1476 \r
1477         if (dws->run == QUEUE_RUNNING && !dws->busy) {\r
1478 \r
1479                 if (dws->cur_transfer || dws->cur_msg)\r
1480                         queue_work(dws->workqueue,\r
1481                                         &dws->pump_messages);\r
1482                 else {\r
1483                         /* If no other data transaction in air, just go */\r
1484                         spin_unlock_irqrestore(&dws->lock, flags);\r
1485                         pump_messages(&dws->pump_messages);\r
1486                         return 0;\r
1487                 }\r
1488         }\r
1489 \r
1490         spin_unlock_irqrestore(&dws->lock, flags);\r
1491         \r
1492         return 0;\r
1493 }\r
1494 \r
1495 #endif\r
1496 \r
1497 /* This may be called twice for each spi dev */\r
1498 static int rk29xx_spi_setup(struct spi_device *spi)\r
1499 {\r
1500         struct rk29xx_spi_chip *chip_info = NULL;\r
1501         struct chip_data *chip;\r
1502         u8 spi_dfs = 0;\r
1503 \r
1504         if (spi->bits_per_word != 8 && spi->bits_per_word != 16)\r
1505                 return -EINVAL;\r
1506 \r
1507         /* Only alloc on first setup */\r
1508         chip = spi_get_ctldata(spi);\r
1509         if (!chip) {\r
1510                 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);\r
1511                 if (!chip)\r
1512                         return -ENOMEM;\r
1513 \r
1514                 chip->cs_control = NULL;\r
1515                 chip->enable_dma = 1;
1516         }\r
1517 \r
1518         /*\r
1519          * Protocol drivers may change the chip settings, so...\r
1520          * if chip_info exists, use it\r
1521          */\r
1522         chip_info = spi->controller_data;\r
1523 \r
1524         /* chip_info doesn't always exist */\r
1525         if (chip_info) {\r
1526                 if (chip_info->cs_control)\r
1527                         chip->cs_control = chip_info->cs_control;\r
1528 \r
1529                 chip->poll_mode = chip_info->poll_mode;\r
1530                 chip->type = chip_info->type;\r
1531 \r
1532                 chip->rx_threshold = 0;\r
1533                 chip->tx_threshold = 0;\r
1534 \r
1535                 chip->enable_dma = chip_info->enable_dma;\r
1536         }\r
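        /*
         * The chip_info fields copied above are supplied by the protocol or
         * board driver through spi->controller_data: cs_control overrides the
         * default chip-select handling, poll_mode requests polled (PIO)
         * transfers and enable_dma gates the per-device DMA path.
         */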
1537 \r
1538         if (spi->bits_per_word == 8) {\r
1539                 chip->n_bytes = 1;\r
1540                 chip->dma_width = 1;\r
1541                 chip->read = u8_reader;\r
1542                 chip->write = u8_writer;\r
1543                 spi_dfs = SPI_DFS_8BIT;\r
1544         } else if (spi->bits_per_word == 16) {\r
1545                 chip->n_bytes = 2;\r
1546                 chip->dma_width = 2;\r
1547                 chip->read = u16_reader;\r
1548                 chip->write = u16_writer;\r
1549                 spi_dfs = SPI_DFS_16BIT;\r
1550         } else {\r
1551                 /* Word sizes above 16 bits are not supported by this controller */
1552                 dev_err(&spi->dev, "invalid wordsize\n");\r
1553                 return -EINVAL;\r
1554         }\r
1555         chip->bits_per_word = spi->bits_per_word;\r
1556 \r
1557         if (!spi->max_speed_hz) {\r
1558                 dev_err(&spi->dev, "No max speed HZ parameter\n");\r
1559                 return -EINVAL;\r
1560         }\r
1561         chip->speed_hz = spi->max_speed_hz;\r
1562 \r
1563         chip->tmode = 0; /* Tx & Rx */\r
1564         /* Default SPI mode is SCPOL = 0, SCPH = 0 */\r
1565         chip->cr0 = (spi_dfs << SPI_DFS_OFFSET)\r
1566                 | (SPI_HALF_WORLD_OFF << SPI_HALF_WORLD_TX_OFFSET)\r
1567                         | (SPI_SSN_DELAY_ONE << SPI_SSN_DELAY_OFFSET)\r
1568                         | (chip->type << SPI_FRF_OFFSET)\r
1569                         | (spi->mode  << SPI_MODE_OFFSET)\r
1570                         | (chip->tmode << SPI_TMOD_OFFSET);\r
1571 \r
1572         spi_set_ctldata(spi, chip);\r
1573         return 0;\r
1574 }\r
1575 \r
1576 static void rk29xx_spi_cleanup(struct spi_device *spi)\r
1577 {\r
1578         struct chip_data *chip = spi_get_ctldata(spi);\r
1579         kfree(chip);\r
1580 }\r
1581 \r
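/*
 * Message-queue machinery: init_queue() sets up the message list, lock,
 * completion, the pump_transfers tasklet and a single-threaded workqueue
 * that runs pump_messages().
 */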
1582 static int __devinit init_queue(struct rk29xx_spi *dws)\r
1583 {\r
1584         INIT_LIST_HEAD(&dws->queue);\r
1585         spin_lock_init(&dws->lock);\r
1586 \r
1587         dws->run = QUEUE_STOPPED;\r
1588         dws->busy = 0;\r
1589 \r
1590         init_completion(&dws->xfer_completion);\r
1591 \r
1592         tasklet_init(&dws->pump_transfers,\r
1593                         pump_transfers, (unsigned long)dws);\r
1594 \r
1595         INIT_WORK(&dws->pump_messages, pump_messages);\r
1596         dws->workqueue = create_singlethread_workqueue(\r
1597                                         dev_name(dws->master->dev.parent));\r
1598         if (dws->workqueue == NULL)\r
1599                 return -EBUSY;\r
1600 \r
1601         return 0;\r
1602 }\r
1603 \r
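/*
 * start_queue()/stop_queue()/destroy_queue() manage the pump's lifetime.
 * stop_queue() polls for up to ~500 ms (50 x 10 ms) for the queue to drain
 * before giving up with -EBUSY.
 */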
1604 static int start_queue(struct rk29xx_spi *dws)\r
1605 {\r
1606         unsigned long flags;\r
1607 \r
1608         spin_lock_irqsave(&dws->lock, flags);\r
1609 \r
1610         if (dws->run == QUEUE_RUNNING || dws->busy) {\r
1611                 spin_unlock_irqrestore(&dws->lock, flags);\r
1612                 return -EBUSY;\r
1613         }\r
1614 \r
1615         dws->run = QUEUE_RUNNING;\r
1616         dws->cur_msg = NULL;\r
1617         dws->cur_transfer = NULL;\r
1618         dws->cur_chip = NULL;\r
1619         dws->prev_chip = NULL;\r
1620         spin_unlock_irqrestore(&dws->lock, flags);\r
1621 \r
1622         queue_work(dws->workqueue, &dws->pump_messages);\r
1623 \r
1624         return 0;\r
1625 }\r
1626 \r
1627 static int stop_queue(struct rk29xx_spi *dws)\r
1628 {\r
1629         unsigned long flags;\r
1630         unsigned limit = 50;\r
1631         int status = 0;\r
1632 \r
1633         spin_lock_irqsave(&dws->lock, flags);\r
1634         dws->run = QUEUE_STOPPED;\r
1635         while (!list_empty(&dws->queue) && dws->busy && limit--) {\r
1636                 spin_unlock_irqrestore(&dws->lock, flags);\r
1637                 msleep(10);\r
1638                 spin_lock_irqsave(&dws->lock, flags);\r
1639         }\r
1640 \r
1641         if (!list_empty(&dws->queue) || dws->busy)\r
1642                 status = -EBUSY;\r
1643         spin_unlock_irqrestore(&dws->lock, flags);\r
1644 \r
1645         return status;\r
1646 }\r
1647 \r
1648 static int destroy_queue(struct rk29xx_spi *dws)\r
1649 {\r
1650         int status;\r
1651 \r
1652         status = stop_queue(dws);\r
1653         if (status != 0)\r
1654                 return status;\r
1655         destroy_workqueue(dws->workqueue);\r
1656         return 0;\r
1657 }\r
1658 \r
1659 /* Restart the controller, disable all interrupts, clean rx fifo */\r
1660 static void spi_hw_init(struct rk29xx_spi *dws)\r
1661 {\r
1662         spi_enable_chip(dws, 0);\r
1663         spi_mask_intr(dws, 0xff);\r
1664         spi_enable_chip(dws, 1);\r
1665         flush(dws);\r
1666 \r
1667         /*\r
1668          * Try to detect the FIFO depth if not set by interface driver,\r
1669          * the depth could be from 2 to 32 from HW spec\r
1670          */\r
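        /*
         * The probe below assumes SPIM_TXFTLR only latches threshold values
         * below the FIFO depth, so the first write that does not read back
         * marks the depth.
         */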
1671         if (!dws->fifo_len) {\r
1672                 u32 fifo;\r
1673                 for (fifo = 2; fifo <= 31; fifo++) {\r
1674                         rk29xx_writew(dws, SPIM_TXFTLR, fifo);\r
1675                         if (fifo != rk29xx_readw(dws, SPIM_TXFTLR))\r
1676                                 break;\r
1677                 }\r
1678 \r
1679                 dws->fifo_len = (fifo == 31) ? 0 : fifo;\r
1680                 rk29xx_writew(dws, SPIM_TXFTLR, 0);\r
1681         }\r
1682 }\r
1683 \r
1684 /* cpufreq driver support */\r
1685 #ifdef CONFIG_CPU_FREQ\r
1686 \r
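/*
 * cpufreq transition hook: at present it only re-reads the controller clock
 * rate; the SPI divider itself is not re-programmed here.
 */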
1687 static int rk29xx_spim_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data)\r
1688 {\r
1689         struct rk29xx_spi *info;\r
1690         unsigned long newclk;\r
1691 \r
1692         info = container_of(nb, struct rk29xx_spi, freq_transition);\r
1693         newclk = clk_get_rate(info->clock_spim);\r
1694 \r
1695         return 0;\r
1696 }\r
1697 \r
1698 static inline int rk29xx_spim_cpufreq_register(struct rk29xx_spi *info)\r
1699 {\r
1700         info->freq_transition.notifier_call = rk29xx_spim_cpufreq_transition;\r
1701 \r
1702         return cpufreq_register_notifier(&info->freq_transition, CPUFREQ_TRANSITION_NOTIFIER);\r
1703 }\r
1704 \r
1705 static inline void rk29xx_spim_cpufreq_deregister(struct rk29xx_spi *info)\r
1706 {\r
1707         cpufreq_unregister_notifier(&info->freq_transition, CPUFREQ_TRANSITION_NOTIFIER);\r
1708 }\r
1709 \r
1710 #else\r
1711 static inline int rk29xx_spim_cpufreq_register(struct rk29xx_spi *info)\r
1712 {\r
1713         return 0;\r
1714 }\r
1715 \r
1716 static inline void rk29xx_spim_cpufreq_deregister(struct rk29xx_spi *info)\r
1717 {\r
1718 }\r
1719 #endif\r
1720 static int __init rk29xx_spim_probe(struct platform_device *pdev)\r
1721 {\r
1722         struct resource         *regs, *dmatx_res, *dmarx_res;\r
1723         struct rk29xx_spi   *dws;\r
1724         struct spi_master   *master;\r
1725         int                     irq; \r
1726         int ret;\r
1727         struct rk29xx_spi_platform_data *pdata = pdev->dev.platform_data;\r
1728         char szBuf[8];\r
1729 \r
1730         if (pdata && pdata->io_init) {\r
1731                 ret = pdata->io_init(pdata->chipselect_gpios, pdata->num_chipselect);\r
1732                 if (ret) {                      \r
1733                         return -ENXIO;  \r
1734                 }\r
1735         }\r
1736         \r
1737         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);\r
1738         if (!regs)\r
1739                 return -ENXIO;\r
1740         dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);\r
1741         if (dmatx_res == NULL) {\r
1742                 dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");\r
1743                 return -ENXIO;\r
1744         }\r
1745 \r
1746         dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);\r
1747         if (dmarx_res == NULL) {\r
1748                 dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");\r
1749                 return -ENXIO;\r
1750         }\r
1751         irq = platform_get_irq(pdev, 0);\r
1752         if (irq < 0)\r
1753                 return irq;                     \r
1754         /* setup spi core then rk29xx-specific driver state */
1755         ret = -ENOMEM;  \r
1756         master = spi_alloc_master(&pdev->dev, sizeof *dws);\r
1757         if (!master) {\r
1758                 ret = -ENOMEM;\r
1759                 goto exit;\r
1760         }\r
1761 \r
1762         platform_set_drvdata(pdev, master);\r
1763         dws = spi_master_get_devdata(master);\r
1764         memset(szBuf, 0, sizeof(szBuf));\r
1765         sprintf(szBuf, "%s%d", "spi", pdev->id);\r
1766         dws->clock_spim = clk_get(&pdev->dev, szBuf);
1767         if (IS_ERR(dws->clock_spim)) {
1768                 dev_err(&pdev->dev, "clk_get for %s fail(%p)\n", szBuf, dws->clock_spim);
1769                 return PTR_ERR(dws->clock_spim);
1770         }
1771         clk_enable(dws->clock_spim);
1772         \r
1773         dws->regs = ioremap(regs->start, (regs->end - regs->start) + 1);
1774         if (!dws->regs) {
1775                 dev_err(&pdev->dev, "ioremap of SPI registers failed\n");
1776                 return -EBUSY;
1777         }
1778         DBG(KERN_INFO "dws->regs: %p\n", dws->regs);\r
1779         dws->irq = irq;
1780         dws->irq_polarity = IRQF_TRIGGER_NONE;\r
1781         dws->master = master;\r
1782         dws->type = SSI_MOTO_SPI;\r
1783         dws->prev_chip = NULL;\r
1784         dws->sfr_start = regs->start;\r
1785         dws->tx_dmach = dmatx_res->start;\r
1786         dws->rx_dmach = dmarx_res->start;\r
1787         dws->dma_inited = 0;
1789         ret = request_irq(dws->irq, rk29xx_spi_irq, dws->irq_polarity,\r
1790                         "rk29xx_spim", dws);\r
1791         if (ret < 0) {\r
1792                 dev_err(&master->dev, "can not get IRQ\n");\r
1793                 goto err_free_master;\r
1794         }\r
1795 \r
1796         master->mode_bits = SPI_CPOL | SPI_CPHA;\r
1797         master->bus_num = pdev->id;\r
1798         master->num_chipselect = pdata->num_chipselect;\r
1799         master->dev.platform_data = pdata;\r
1800         master->cleanup = rk29xx_spi_cleanup;\r
1801         master->setup = rk29xx_spi_setup;\r
1802         #if defined(QUICK_TRANSFER)\r
1803         master->transfer = rk29xx_spi_quick_transfer;\r
1804         #else\r
1805         master->transfer = rk29xx_spi_transfer;\r
1806         #endif\r
1807         \r
1808         dws->pdev = pdev;\r
1809         /* Basic HW init */\r
1810         spi_hw_init(dws);\r
1811         /* Initial and start queue */\r
1812         ret = init_queue(dws);\r
1813         if (ret) {\r
1814                 dev_err(&master->dev, "problem initializing queue\n");\r
1815                 goto err_disable_hw;
1816         }\r
1817 \r
1818         ret = start_queue(dws);\r
1819         if (ret) {\r
1820                 dev_err(&master->dev, "problem starting queue\n");\r
1821                 goto err_disable_hw;
1822         }\r
1823 \r
1824         spi_master_set_devdata(master, dws);\r
1825         ret = spi_register_master(master);\r
1826         if (ret) {\r
1827                 dev_err(&master->dev, "problem registering spi master\n");\r
1828                 goto err_queue_alloc;\r
1829         }\r
1830 \r
1831         ret = rk29xx_spim_cpufreq_register(dws);
1832         if (ret < 0) {
1833                 dev_err(&master->dev, "rk29xx spim failed to init cpufreq support\n");
1834                 goto err_queue_alloc;
1835         }
1836         DBG(KERN_INFO "rk29xx_spim: driver initialized\n");\r
1837         mrst_spi_debugfs_init(dws);\r
1838         return 0;\r
1839 \r
1840 err_queue_alloc:\r
1841         destroy_queue(dws);\r
1842 err_disable_hw:
1843         spi_enable_chip(dws, 0);\r
1844         free_irq(dws->irq, dws);\r
1845 err_free_master:\r
1846         spi_master_put(master);\r
1847         iounmap(dws->regs);\r
1848 exit:\r
1849         return ret;\r
1850 }\r
1851 \r
1852 static int __exit rk29xx_spim_remove(struct platform_device *pdev)
1853 {\r
1854         struct spi_master *master = platform_get_drvdata(pdev);\r
1855         struct rk29xx_spi *dws = spi_master_get_devdata(master);\r
1856         int status = 0;\r
1857 \r
1858         if (!dws)\r
1859                 return 0;
1860         rk29xx_spim_cpufreq_deregister(dws);\r
1861         mrst_spi_debugfs_remove(dws);\r
1862 \r
1863         release_dma(dws);\r
1864 \r
1865         /* Remove the queue */\r
1866         status = destroy_queue(dws);\r
1867         if (status != 0)\r
1868                 dev_err(&dws->master->dev, "rk29xx_spi_remove: workqueue will not "\r
1869                         "complete, message memory not freed\n");\r
1870         clk_disable(dws->clock_spim);
1871         clk_put(dws->clock_spim);
1872         spi_enable_chip(dws, 0);\r
1873         /* Disable clk */\r
1874         spi_set_clk(dws, 0);\r
1875         free_irq(dws->irq, dws);\r
1876 \r
1877         /* Disconnect from the SPI framework */\r
1878         spi_unregister_master(dws->master);\r
1879         iounmap(dws->regs);

        return 0;
1880 }
1881 \r
1882 \r
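/*
 * PM hooks: suspend drains the message queue and gates the SPI clock;
 * resume re-enables the clock, re-runs spi_hw_init() and restarts the queue.
 */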
1883 #ifdef  CONFIG_PM\r
1884 \r
1885 static int rk29xx_spim_suspend(struct platform_device *pdev, pm_message_t mesg)\r
1886 {\r
1887         struct spi_master *master = platform_get_drvdata(pdev);\r
1888         struct rk29xx_spi *dws = spi_master_get_devdata(master);\r
1889         struct rk29xx_spi_platform_data *pdata = pdev->dev.platform_data;\r
1890         int status;\r
1891 \r
1892         status = stop_queue(dws);\r
1893         if (status != 0)\r
1894                 return status;\r
1895         clk_disable(dws->clock_spim);\r
1896         if (pdata && pdata->io_fix_leakage_bug)\r
1897         {\r
1898                 pdata->io_fix_leakage_bug();
1899         }\r
1900         return 0;\r
1901 }\r
1902 \r
1903 static int rk29xx_spim_resume(struct platform_device *pdev)\r
1904 {\r
1905         struct spi_master *master = platform_get_drvdata(pdev);\r
1906         struct rk29xx_spi *dws = spi_master_get_devdata(master);\r
1907         struct rk29xx_spi_platform_data *pdata = pdev->dev.platform_data;\r
1908         int ret;\r
1909         \r
1910         clk_enable(dws->clock_spim);    \r
1911         spi_hw_init(dws);\r
1912         ret = start_queue(dws);\r
1913         if (ret)\r
1914                 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);\r
1915         if (pdata && pdata->io_resume_leakage_bug)\r
1916         {\r
1917                 pdata->io_resume_leakage_bug();
1918         }\r
1919         return ret;\r
1920 }\r
1921 \r
1922 #else\r
1923 #define rk29xx_spim_suspend     NULL\r
1924 #define rk29xx_spim_resume      NULL\r
1925 #endif\r
1926 \r
1927 static struct platform_driver rk29xx_platform_spim_driver = {\r
1928         .remove         = __exit_p(rk29xx_spim_remove),\r
1929         .driver         = {\r
1930                 .name   = "rk29xx_spim",\r
1931                 .owner  = THIS_MODULE,\r
1932         },\r
1933         .suspend        = rk29xx_spim_suspend,\r
1934         .resume         = rk29xx_spim_resume,\r
1935 };\r
1936 \r
1937 static int __init rk29xx_spim_init(void)\r
1938 {\r
1939         int ret;\r
1940         ret = platform_driver_probe(&rk29xx_platform_spim_driver, rk29xx_spim_probe);   \r
1941         return ret;\r
1942 }\r
1943 \r
1944 static void __exit rk29xx_spim_exit(void)\r
1945 {\r
1946         platform_driver_unregister(&rk29xx_platform_spim_driver);\r
1947 }\r
1948 \r
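/*
 * Register at subsys_initcall time so the SPI master is available before
 * ordinary device drivers (module_init level) probe their SPI slaves.
 */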
1949 subsys_initcall(rk29xx_spim_init);\r
1950 module_exit(rk29xx_spim_exit);\r
1951 \r
1952 MODULE_AUTHOR("www.rock-chips.com");\r
1953 MODULE_DESCRIPTION("Rockchip RK29xx spim port driver");\r
1954 MODULE_LICENSE("GPL");
1955 \r