1 /*drivers/serial/rk29xx_spim.c - driver for rk29xx spim device
\r
3 * Copyright (C) 2010 ROCKCHIP, Inc.
\r
5 * This software is licensed under the terms of the GNU General Public
\r
6 * License version 2, as published by the Free Software Foundation, and
\r
7 * may be copied, distributed, and modified under those terms.
\r
9 * This program is distributed in the hope that it will be useful,
\r
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
\r
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
\r
12 * GNU General Public License for more details.
\r
15 #include <linux/dma-mapping.h>
\r
16 #include <linux/interrupt.h>
\r
17 #include <linux/highmem.h>
\r
18 #include <linux/delay.h>
\r
19 #include <linux/slab.h>
\r
20 #include <linux/platform_device.h>
\r
21 #include <linux/clk.h>
\r
22 #include <linux/cpufreq.h>
\r
23 #include <mach/gpio.h>
\r
24 #include <mach/irqs.h>
\r
25 #include <linux/dma-mapping.h>
\r
26 #include <asm/dma.h>
\r
27 #include <linux/preempt.h>
\r
28 #include "rk29_spim.h"
\r
29 #include <linux/spi/spi.h>
\r
30 #include <mach/board.h>
\r
32 #ifdef CONFIG_DEBUG_FS
\r
33 #include <linux/debugfs.h>
\r
/*
 * The stock interrupt-driven SPI path is comparatively inefficient and
 * cannot keep up with large transfers.  QUICK_TRANSFER selects a fast
 * polled transfer path and allows either half-duplex or full-duplex
 * operation to be specified.
 */
\r
42 //#define QUICK_TRANSFER
\r
45 //#define PRINT_TRANS_DATA
\r
50 #define DMA_BUFFER_SIZE PAGE_SIZE
\r
51 #define DMA_MIN_BYTES 32 //>32x16bits FIFO
\r
54 #define START_STATE ((void *)0)
\r
55 #define RUNNING_STATE ((void *)1)
\r
56 #define DONE_STATE ((void *)2)
\r
57 #define ERROR_STATE ((void *)-1)
\r
59 #define QUEUE_RUNNING 0
\r
60 #define QUEUE_STOPPED 1
\r
62 #define MRST_SPI_DEASSERT 0
\r
63 #define MRST_SPI_ASSERT 1 ///CS0
\r
64 #define MRST_SPI_ASSERT1 2 ///CS1
\r
66 /* Slave spi_dev related */
\r
69 u8 cs; /* chip select pin */
\r
70 u8 n_bytes; /* current is a 1/2/4 byte op */
\r
71 u8 tmode; /* TR/TO/RO/EEPROM */
\r
73 u8 type; /* SPI/SSP/MicroWire */
\r
75 u8 poll_mode; /* 1 means use poll mode */
\r
82 u16 clk_div; /* baud rate divider */
\r
83 u32 speed_hz; /* baud rate */
\r
84 int (*write)(struct rk29xx_spi *dws);
\r
85 int (*read)(struct rk29xx_spi *dws);
\r
86 void (*cs_control)(struct rk29xx_spi *dws, u32 cs, u8 flag);
\r
89 #define SUSPND (1<<0)
\r
90 #define SPIBUSY (1<<1)
\r
91 #define RXBUSY (1<<2)
\r
92 #define TXBUSY (1<<3)
\r
#ifdef CONFIG_LCD_USE_SPIM_CONTROL
/*
 * Serialize LCD access to a shared SPI controller by masking the SPI IRQ
 * while the LCD code owns the bus.  Which controller is shared is chosen
 * at build time.
 */
void rk29_lcd_spim_spin_lock(void)
{
#ifdef CONFIG_LCD_USE_SPI0
	disable_irq(IRQ_SPI0);
#endif

#ifdef CONFIG_LCD_USE_SPI1
	disable_irq(IRQ_SPI1);
#endif
}

/* Re-enable the SPI controller IRQ released by rk29_lcd_spim_spin_lock(). */
void rk29_lcd_spim_spin_unlock(void)
{
#ifdef CONFIG_LCD_USE_SPI0
	enable_irq(IRQ_SPI0);
#endif

#ifdef CONFIG_LCD_USE_SPI1
	enable_irq(IRQ_SPI1);
#endif
}
#else
/* LCD does not share an SPI controller: locking is a no-op. */
void rk29_lcd_spim_spin_lock(void)
{
}

void rk29_lcd_spim_spin_unlock(void)
{
}
#endif
\r
133 #if defined(PRINT_TRANS_DATA)
\r
/* Debug helper: dump @len bytes of @buf as comma-separated hex values. */
static void printk_transfer_data(unsigned char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		printk("0x%x,", *buf++);

	printk("\n");
}
\r
146 static void spi_dump_regs(struct rk29xx_spi *dws) {
\r
147 DBG("MRST SPI0 registers:\n");
\r
148 DBG("=================================\n");
\r
149 DBG("CTRL0: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_CTRLR0));
\r
150 DBG("CTRL1: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_CTRLR1));
\r
151 DBG("SSIENR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_ENR));
\r
152 DBG("SER: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_SER));
\r
153 DBG("BAUDR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_BAUDR));
\r
154 DBG("TXFTLR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_TXFTLR));
\r
155 DBG("RXFTLR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_RXFTLR));
\r
156 DBG("TXFLR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_TXFLR));
\r
157 DBG("RXFLR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_RXFLR));
\r
158 DBG("SR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_SR));
\r
159 DBG("IMR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_IMR));
\r
160 DBG("ISR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_ISR));
\r
161 DBG("DMACR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_DMACR));
\r
162 DBG("DMATDLR: \t0x%08x\n", rk29xx_readl(dws, SPIM_DMATDLR));
\r
163 DBG("DMARDLR: \t0x%08x\n", rk29xx_readl(dws, SPIM_DMARDLR));
\r
164 DBG("=================================\n");
\r
169 #ifdef CONFIG_DEBUG_FS
\r
170 static int spi_show_regs_open(struct inode *inode, struct file *file)
\r
172 file->private_data = inode->i_private;
\r
176 #define SPI_REGS_BUFSIZE 1024
\r
177 static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
\r
178 size_t count, loff_t *ppos)
\r
180 struct rk29xx_spi *dws;
\r
185 dws = file->private_data;
\r
187 buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
\r
191 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
192 "MRST SPI0 registers:\n");
\r
193 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
194 "=================================\n");
\r
195 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
196 "CTRL0: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_CTRLR0));
\r
197 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
198 "CTRL1: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_CTRLR1));
\r
199 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
200 "SSIENR: \t0x%08x\n", rk29xx_readl(dws, SPIM_ENR));
\r
201 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
202 "SER: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_SER));
\r
203 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
204 "BAUDR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_BAUDR));
\r
205 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
206 "TXFTLR: \t0x%08x\n", rk29xx_readl(dws, SPIM_TXFTLR));
\r
207 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
208 "RXFTLR: \t0x%08x\n", rk29xx_readl(dws, SPIM_RXFTLR));
\r
209 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
210 "TXFLR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_TXFLR));
\r
211 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
212 "RXFLR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_RXFLR));
\r
213 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
214 "SR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_SR));
\r
215 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
216 "IMR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_IMR));
\r
217 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
218 "ISR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_ISR));
\r
219 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
220 "DMACR: \t\t0x%08x\n", rk29xx_readl(dws, SPIM_DMACR));
\r
221 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
222 "DMATDLR: \t0x%08x\n", rk29xx_readl(dws, SPIM_DMATDLR));
\r
223 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
224 "DMARDLR: \t0x%08x\n", rk29xx_readl(dws, SPIM_DMARDLR));
\r
225 len += printk(buf + len, SPI_REGS_BUFSIZE - len,
\r
226 "=================================\n");
\r
228 ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
\r
233 static const struct file_operations mrst_spi_regs_ops = {
\r
234 .owner = THIS_MODULE,
\r
235 .open = spi_show_regs_open,
\r
236 .read = spi_show_regs,
\r
239 static int mrst_spi_debugfs_init(struct rk29xx_spi *dws)
\r
241 dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
\r
245 debugfs_create_file("registers", S_IFREG | S_IRUGO,
\r
246 dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
\r
250 static void mrst_spi_debugfs_remove(struct rk29xx_spi *dws)
\r
253 debugfs_remove_recursive(dws->debugfs);
\r
/* CONFIG_DEBUG_FS disabled: debugfs hooks compile away to no-ops. */
static inline int mrst_spi_debugfs_init(struct rk29xx_spi *dws)
{
	return 0;
}

static inline void mrst_spi_debugfs_remove(struct rk29xx_spi *dws)
{
}
\r
265 #endif /* CONFIG_DEBUG_FS */
\r
267 static void dma_transfer(struct rk29xx_spi *dws) ;
\r
268 static void transfer_complete(struct rk29xx_spi *dws);
\r
270 static void wait_till_not_busy(struct rk29xx_spi *dws)
\r
272 unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);
\r
273 //if spi was slave, it is SR_BUSY always.
\r
274 if(dws->cur_chip) {
\r
275 if(dws->cur_chip->slave_enable == 1)
\r
279 while (time_before(jiffies, end)) {
\r
280 if (!(rk29xx_readw(dws, SPIM_SR) & SR_BUSY))
\r
283 dev_err(&dws->master->dev,
\r
284 "DW SPI: Status keeps busy for 1000us after a read/write!\n");
\r
287 #if defined(QUICK_TRANSFER)
\r
288 static void wait_till_tf_empty(struct rk29xx_spi *dws)
\r
290 unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);
\r
292 while (time_before(jiffies, end)) {
\r
293 if (rk29xx_readw(dws, SPIM_SR) & SR_TF_EMPT)
\r
296 dev_err(&dws->master->dev,
\r
297 "DW SPI: Status keeps busy for 1000us after a read/write!\n");
\r
301 static void flush(struct rk29xx_spi *dws)
\r
303 while (!(rk29xx_readw(dws, SPIM_SR) & SR_RF_EMPT))
\r
304 rk29xx_readw(dws, SPIM_RXDR);
\r
306 wait_till_not_busy(dws);
\r
309 static void spi_cs_control(struct rk29xx_spi *dws, u32 cs, u8 flag)
\r
313 rk29xx_writel(dws, SPIM_SER, 1 << cs);
\r
315 rk29xx_writel(dws, SPIM_SER, 0);
\r
319 #error "Warning: not support"
\r
320 struct rk29xx_spi_platform_data *pdata = dws->master->dev.platform_data;
\r
321 struct spi_cs_gpio *cs_gpios = pdata->chipselect_gpios;
\r
324 gpio_direction_output(cs_gpios[cs].cs_gpio, GPIO_HIGH);
\r
327 gpio_direction_output(cs_gpios[cs].cs_gpio, GPIO_LOW);
\r
332 static int null_writer(struct rk29xx_spi *dws)
\r
334 u8 n_bytes = dws->n_bytes;
\r
336 if ((rk29xx_readw(dws, SPIM_SR) & SR_TF_FULL)
\r
337 || (dws->tx == dws->tx_end))
\r
339 rk29xx_writew(dws, SPIM_TXDR, 0);
\r
340 dws->tx += n_bytes;
\r
341 //wait_till_not_busy(dws);
\r
346 static int null_reader(struct rk29xx_spi *dws)
\r
348 u8 n_bytes = dws->n_bytes;
\r
349 DBG("func: %s, line: %d\n", __FUNCTION__, __LINE__);
\r
350 while ((!(rk29xx_readw(dws, SPIM_SR) & SR_RF_EMPT))
\r
351 && (dws->rx < dws->rx_end)) {
\r
352 rk29xx_readw(dws, SPIM_RXDR);
\r
353 dws->rx += n_bytes;
\r
355 wait_till_not_busy(dws);
\r
356 return dws->rx == dws->rx_end;
\r
359 static int u8_writer(struct rk29xx_spi *dws)
\r
361 //spi_dump_regs(dws)
\r
362 #if defined(PRINT_TRANS_DATA)
\r
363 DBG("tx: 0x%02x\n", *(u8 *)(dws->tx));
\r
365 if ((rk29xx_readw(dws, SPIM_SR) & SR_TF_FULL)
\r
366 || (dws->tx == dws->tx_end))
\r
368 rk29xx_writew(dws, SPIM_TXDR, *(u8 *)(dws->tx));
\r
370 //wait_till_not_busy(dws);
\r
375 static int u8_reader(struct rk29xx_spi *dws)
\r
377 //spi_dump_regs(dws);
\r
378 while (!(rk29xx_readw(dws, SPIM_SR) & SR_RF_EMPT)
\r
379 && (dws->rx < dws->rx_end)) {
\r
380 *(u8 *)(dws->rx) = rk29xx_readw(dws, SPIM_RXDR) & 0xFFU;
\r
381 #if defined(PRINT_TRANS_DATA)
\r
382 DBG("rx: 0x%02x\n", *(u8 *)(dws->rx));
\r
387 wait_till_not_busy(dws);
\r
388 return dws->rx == dws->rx_end;
\r
391 static int u16_writer(struct rk29xx_spi *dws)
\r
393 #if defined(PRINT_TRANS_DATA)
\r
394 DBG("tx: 0x%04x\n", *(u16 *)(dws->tx));
\r
396 if ((rk29xx_readw(dws, SPIM_SR) & SR_TF_FULL)
\r
397 || (dws->tx == dws->tx_end))
\r
400 rk29xx_writew(dws, SPIM_TXDR, *(u16 *)(dws->tx));
\r
402 //wait_till_not_busy(dws);
\r
407 static int u16_reader(struct rk29xx_spi *dws)
\r
411 while (!(rk29xx_readw(dws, SPIM_SR) & SR_RF_EMPT)
\r
412 && (dws->rx < dws->rx_end)) {
\r
413 temp = rk29xx_readw(dws, SPIM_RXDR);
\r
414 *(u16 *)(dws->rx) = temp;
\r
415 #if defined(PRINT_TRANS_DATA)
\r
416 DBG("rx: 0x%04x\n", *(u16 *)(dws->rx));
\r
421 wait_till_not_busy(dws);
\r
422 return dws->rx == dws->rx_end;
\r
425 static void *next_transfer(struct rk29xx_spi *dws)
\r
427 struct spi_message *msg = dws->cur_msg;
\r
428 struct spi_transfer *trans = dws->cur_transfer;
\r
430 /* Move to next transfer */
\r
431 if (trans->transfer_list.next != &msg->transfers) {
\r
432 dws->cur_transfer =
\r
433 list_entry(trans->transfer_list.next,
\r
434 struct spi_transfer,
\r
436 return RUNNING_STATE;
\r
441 static void rk29_spi_dma_rxcb(void *buf_id,
\r
442 int size, enum rk29_dma_buffresult res)
\r
444 struct rk29xx_spi *dws = buf_id;
\r
445 unsigned long flags;
\r
447 DBG("func: %s, line: %d\n", __FUNCTION__, __LINE__);
\r
449 spin_lock_irqsave(&dws->lock, flags);
\r
451 if (res == RK29_RES_OK)
\r
452 dws->state &= ~RXBUSY;
\r
454 dev_err(&dws->master->dev, "error:DmaAbrtRx-%d, size: %d,res=%d\n", res, size,res);
\r
456 //copy data from dma to transfer buf
\r
457 if(dws->cur_transfer && (dws->cur_transfer->rx_buf != NULL))
\r
459 memcpy(dws->cur_transfer->rx_buf, dws->buffer_rx_dma, dws->cur_transfer->len);
\r
461 #if defined(PRINT_TRANS_DATA)
\r
463 printk_transfer_data(dws->cur_transfer->rx_buf, dws->cur_transfer->len);
\r
467 spin_unlock_irqrestore(&dws->lock, flags);
\r
469 /* If the other done */
\r
470 if (!(dws->state & TXBUSY))
\r
472 //complete(&dws->xfer_completion);
\r
473 DBG("func: %s, line: %d,dma transfer complete\n", __FUNCTION__, __LINE__);
\r
474 //DMA could not lose intterupt
\r
475 transfer_complete(dws);
\r
480 static void rk29_spi_dma_txcb(void *buf_id,
\r
481 int size, enum rk29_dma_buffresult res)
\r
483 struct rk29xx_spi *dws = buf_id;
\r
484 unsigned long flags;
\r
486 DBG("func: %s, line: %d\n", __FUNCTION__, __LINE__);
\r
488 spin_lock_irqsave(&dws->lock, flags);
\r
490 if (res == RK29_RES_OK)
\r
491 dws->state &= ~TXBUSY;
\r
493 dev_err(&dws->master->dev, "error:DmaAbrtTx-%d, size: %d,res=%d \n", res, size,res);
\r
495 spin_unlock_irqrestore(&dws->lock, flags);
\r
497 /* If the other done */
\r
498 if (!(dws->state & RXBUSY))
\r
500 //complete(&dws->xfer_completion);
\r
502 DBG("func: %s, line: %d,dma transfer complete\n", __FUNCTION__, __LINE__);
\r
503 //DMA could not lose intterupt
\r
504 transfer_complete(dws);
\r
510 static struct rk29_dma_client rk29_spi_dma_client = {
\r
511 .name = "rk29xx-spi-dma",
\r
514 static int acquire_dma(struct rk29xx_spi *dws)
\r
516 if (dws->dma_inited) {
\r
520 dws->buffer_tx_dma = dma_alloc_coherent(&dws->pdev->dev, DMA_BUFFER_SIZE, &dws->tx_dma, GFP_KERNEL | GFP_DMA);
\r
521 if (!dws->buffer_tx_dma)
\r
523 dev_err(&dws->pdev->dev, "fail to dma tx buffer alloc\n");
\r
527 dws->buffer_rx_dma = dma_alloc_coherent(&dws->pdev->dev, DMA_BUFFER_SIZE, &dws->rx_dma, GFP_KERNEL | GFP_DMA);
\r
528 if (!dws->buffer_rx_dma)
\r
530 dev_err(&dws->pdev->dev, "fail to dma rx buffer alloc\n");
\r
534 if(rk29_dma_request(dws->rx_dmach,
\r
535 &rk29_spi_dma_client, NULL) < 0) {
\r
536 dev_err(&dws->master->dev, "dws->rx_dmach : %d, cannot get RxDMA\n", dws->rx_dmach);
\r
540 if (rk29_dma_request(dws->tx_dmach,
\r
541 &rk29_spi_dma_client, NULL) < 0) {
\r
542 dev_err(&dws->master->dev, "dws->tx_dmach : %d, cannot get TxDMA\n", dws->tx_dmach);
\r
543 rk29_dma_free(dws->rx_dmach, &rk29_spi_dma_client);
\r
548 if (rk29_dma_set_buffdone_fn(dws->tx_dmach, rk29_spi_dma_txcb)) {
\r
549 dev_err(&dws->master->dev, "rk29_dma_set_buffdone_fn fail\n");
\r
552 if (rk29_dma_devconfig(dws->tx_dmach, RK29_DMASRC_MEM,
\r
553 dws->sfr_start + SPIM_TXDR)) {
\r
554 dev_err(&dws->master->dev, "rk29_dma_devconfig fail\n");
\r
560 if (rk29_dma_set_buffdone_fn(dws->rx_dmach, rk29_spi_dma_rxcb)) {
\r
561 dev_err(&dws->master->dev, "rk29_dma_set_buffdone_fn fail\n");
\r
564 if (rk29_dma_devconfig(dws->rx_dmach, RK29_DMASRC_HW,
\r
565 dws->sfr_start + SPIM_RXDR)) {
\r
566 dev_err(&dws->master->dev, "rk29_dma_devconfig fail\n");
\r
571 dws->dma_inited = 1;
\r
575 static void release_dma(struct rk29xx_spi *dws)
\r
577 if(!dws && dws->dma_inited) {
\r
578 rk29_dma_free(dws->rx_dmach, &rk29_spi_dma_client);
\r
579 rk29_dma_free(dws->tx_dmach, &rk29_spi_dma_client);
\r
584 * Note: first step is the protocol driver prepares
\r
585 * a dma-capable memory, and this func just need translate
\r
586 * the virt addr to physical
\r
588 static int map_dma_buffers(struct rk29xx_spi *dws)
\r
590 if (!dws->dma_inited || !dws->cur_chip->enable_dma)
\r
592 printk("%s:error\n",__func__);
\r
596 if(dws->cur_transfer->tx_buf)
\r
598 memcpy(dws->buffer_tx_dma,dws->cur_transfer->tx_buf,dws->cur_transfer->len);
\r
601 dws->cur_transfer->tx_dma = dws->tx_dma;
\r
602 dws->cur_transfer->rx_dma = dws->rx_dma;
\r
607 /* Caller already set message->status; dma and pio irqs are blocked */
\r
608 static void giveback(struct rk29xx_spi *dws)
\r
610 struct spi_transfer *last_transfer;
\r
611 unsigned long flags;
\r
612 struct spi_message *msg;
\r
614 spin_lock_irqsave(&dws->lock, flags);
\r
615 msg = dws->cur_msg;
\r
616 dws->cur_msg = NULL;
\r
617 dws->cur_transfer = NULL;
\r
618 dws->prev_chip = dws->cur_chip;
\r
619 dws->cur_chip = NULL;
\r
620 dws->dma_mapped = 0;
\r
623 /*it is important to close intterrupt*/
\r
624 spi_mask_intr(dws, 0xff);
\r
625 rk29xx_writew(dws, SPIM_DMACR, 0);
\r
627 queue_work(dws->workqueue, &dws->pump_messages);
\r
628 spin_unlock_irqrestore(&dws->lock, flags);
\r
630 last_transfer = list_entry(msg->transfers.prev,
\r
631 struct spi_transfer,
\r
634 if (!last_transfer->cs_change && dws->cs_control)
\r
635 dws->cs_control(dws,msg->spi->chip_select, MRST_SPI_DEASSERT);
\r
639 msg->complete(msg->context);
\r
641 DBG("%s ok\n",__func__);
\r
645 static void int_error_stop(struct rk29xx_spi *dws, const char *msg)
\r
647 /* Stop and reset hw */
\r
649 spi_enable_chip(dws, 0);
\r
651 dev_err(&dws->master->dev, "%s\n", msg);
\r
652 dws->cur_msg->state = ERROR_STATE;
\r
653 tasklet_schedule(&dws->pump_transfers);
\r
656 static void transfer_complete(struct rk29xx_spi *dws)
\r
658 /* Update total byte transfered return count actual bytes read */
\r
659 dws->cur_msg->actual_length += dws->len;
\r
661 /* Move to next transfer */
\r
662 dws->cur_msg->state = next_transfer(dws);
\r
664 /* Handle end of message */
\r
665 if (dws->cur_msg->state == DONE_STATE) {
\r
666 dws->cur_msg->status = 0;
\r
669 tasklet_schedule(&dws->pump_transfers);
\r
672 static irqreturn_t interrupt_transfer(struct rk29xx_spi *dws)
\r
674 u16 irq_status, irq_mask = 0x1f;
\r
675 u32 int_level = dws->fifo_len / 2;
\r
678 irq_status = rk29xx_readw(dws, SPIM_ISR) & irq_mask;
\r
679 /* Error handling */
\r
680 if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
\r
681 rk29xx_writew(dws, SPIM_ICR, SPI_CLEAR_INT_TXOI | SPI_CLEAR_INT_RXOI | SPI_CLEAR_INT_RXUI);
\r
682 int_error_stop(dws, "interrupt_transfer: fifo overrun");
\r
683 mutex_unlock(&dws->dma_lock);
\r
684 return IRQ_HANDLED;
\r
687 if (irq_status & SPI_INT_TXEI) {
\r
688 spi_mask_intr(dws, SPI_INT_TXEI);
\r
690 left = (dws->tx_end - dws->tx) / dws->n_bytes;
\r
691 left = (left > int_level) ? int_level : left;
\r
695 wait_till_not_busy(dws);
\r
701 /* Re-enable the IRQ if there is still data left to tx */
\r
702 if (dws->tx_end > dws->tx)
\r
703 spi_umask_intr(dws, SPI_INT_TXEI);
\r
705 transfer_complete(dws);
\r
708 if (irq_status & SPI_INT_RXFI) {
\r
709 spi_mask_intr(dws, SPI_INT_RXFI);
\r
713 /* Re-enable the IRQ if there is still data left to rx */
\r
714 if (dws->rx_end > dws->rx) {
\r
715 left = ((dws->rx_end - dws->rx) / dws->n_bytes) - 1;
\r
716 left = (left > int_level) ? int_level : left;
\r
718 rk29xx_writew(dws, SPIM_RXFTLR, left);
\r
719 spi_umask_intr(dws, SPI_INT_RXFI);
\r
722 transfer_complete(dws);
\r
727 return IRQ_HANDLED;
\r
730 static irqreturn_t rk29xx_spi_irq(int irq, void *dev_id)
\r
732 struct rk29xx_spi *dws = dev_id;
\r
734 if (!dws->cur_msg) {
\r
735 spi_mask_intr(dws, SPI_INT_TXEI);
\r
737 return IRQ_HANDLED;
\r
740 return dws->transfer_handler(dws);
\r
743 /* Must be called inside pump_transfers() */
\r
744 static void poll_transfer(struct rk29xx_spi *dws)
\r
746 #if defined(PRINT_TRANS_DATA)
\r
747 DBG("%s\n",__func__);
\r
749 while (dws->write(dws)) {
\r
750 wait_till_not_busy(dws);
\r
753 transfer_complete(dws);
\r
755 static void spi_chip_sel(struct rk29xx_spi *dws, u16 cs)
\r
757 if(cs >= dws->master->num_chipselect)
\r
760 if (dws->cs_control){
\r
761 dws->cs_control(dws, cs, MRST_SPI_ASSERT);
\r
763 rk29xx_writel(dws, SPIM_SER, 1 << cs);
\r
766 static void pump_transfers(unsigned long data)
\r
768 struct rk29xx_spi *dws = (struct rk29xx_spi *)data;
\r
769 struct spi_message *message = NULL;
\r
770 struct spi_transfer *transfer = NULL;
\r
771 struct spi_transfer *previous = NULL;
\r
772 struct spi_device *spi = NULL;
\r
773 struct chip_data *chip = NULL;
\r
778 u16 txint_level = 0;
\r
779 u16 rxint_level = 0;
\r
784 if((dws->cur_chip->enable_dma) && (dws->cur_transfer->len > DMA_MIN_BYTES) && (dws->cur_transfer->len < DMA_BUFFER_SIZE)){
\r
789 DBG(KERN_INFO "pump_transfers,len=%d\n",dws->cur_transfer->len);
\r
791 /* Get current state information */
\r
792 message = dws->cur_msg;
\r
793 transfer = dws->cur_transfer;
\r
794 chip = dws->cur_chip;
\r
795 spi = message->spi;
\r
796 if (unlikely(!chip->clk_div))
\r
797 chip->clk_div = clk_get_rate(dws->clock_spim) / chip->speed_hz;
\r
798 if (message->state == ERROR_STATE) {
\r
799 message->status = -EIO;
\r
803 /* Handle end of message */
\r
804 if (message->state == DONE_STATE) {
\r
805 message->status = 0;
\r
809 /* Delay if requested at end of transfer*/
\r
810 if (message->state == RUNNING_STATE) {
\r
811 previous = list_entry(transfer->transfer_list.prev,
\r
812 struct spi_transfer,
\r
814 if (previous->delay_usecs)
\r
815 udelay(previous->delay_usecs);
\r
818 dws->n_bytes = chip->n_bytes;
\r
819 dws->dma_width = chip->dma_width;
\r
820 dws->cs_control = chip->cs_control;
\r
822 //dws->rx_dma = transfer->rx_dma;
\r
823 //dws->tx_dma = transfer->tx_dma;
\r
824 dws->tx = (void *)transfer->tx_buf;
\r
825 dws->tx_end = dws->tx + transfer->len;
\r
826 dws->rx = (void *)transfer->rx_buf;
\r
827 dws->rx_end = dws->rx + transfer->len;
\r
828 dws->write = dws->tx ? chip->write : null_writer;
\r
829 dws->read = dws->rx ? chip->read : null_reader;
\r
830 dws->cs_change = transfer->cs_change;
\r
831 dws->len = dws->cur_transfer->len;
\r
832 if (chip != dws->prev_chip)
\r
837 /* Handle per transfer options for bpw and speed */
\r
838 if (transfer->speed_hz) {
\r
839 speed = chip->speed_hz;
\r
841 if (transfer->speed_hz != speed) {
\r
842 speed = transfer->speed_hz;
\r
843 if (speed > clk_get_rate(dws->clock_spim)) {
\r
844 dev_err(&dws->master->dev, "MRST SPI0: unsupported "
\r
845 "freq: %dHz\n", speed);
\r
846 message->status = -EIO;
\r
850 /* clk_div doesn't support odd number */
\r
851 clk_div = clk_get_rate(dws->clock_spim) / speed;
\r
852 clk_div = (clk_div + 1) & 0xfffe;
\r
854 chip->speed_hz = speed;
\r
855 chip->clk_div = clk_div;
\r
859 if (transfer->bits_per_word) {
\r
860 bits = transfer->bits_per_word;
\r
865 dws->dma_width = 1;
\r
866 dws->read = (dws->read != null_reader) ?
\r
867 u8_reader : null_reader;
\r
868 dws->write = (dws->write != null_writer) ?
\r
869 u8_writer : null_writer;
\r
870 spi_dfs = SPI_DFS_8BIT;
\r
874 dws->dma_width = 2;
\r
875 dws->read = (dws->read != null_reader) ?
\r
876 u16_reader : null_reader;
\r
877 dws->write = (dws->write != null_writer) ?
\r
878 u16_writer : null_writer;
\r
879 spi_dfs = SPI_DFS_16BIT;
\r
882 dev_err(&dws->master->dev, "MRST SPI0: unsupported bits:"
\r
884 message->status = -EIO;
\r
888 cr0 = (spi_dfs << SPI_DFS_OFFSET)
\r
889 | (SPI_HALF_WORLD_OFF << SPI_HALF_WORLD_TX_OFFSET)
\r
890 | (SPI_SSN_DELAY_ONE << SPI_SSN_DELAY_OFFSET)
\r
891 | (chip->type << SPI_FRF_OFFSET)
\r
892 | (spi->mode << SPI_MODE_OFFSET)
\r
893 | (chip->tmode << SPI_TMOD_OFFSET);
\r
895 message->state = RUNNING_STATE;
\r
898 * Adjust transfer mode if necessary. Requires platform dependent
\r
899 * chipselect mechanism.
\r
901 if (dws->cs_control) {
\r
902 if (dws->rx && dws->tx)
\r
903 chip->tmode = SPI_TMOD_TR;
\r
905 chip->tmode = SPI_TMOD_RO;
\r
907 chip->tmode = SPI_TMOD_TO;
\r
909 cr0 &= ~(0x3 << SPI_MODE_OFFSET);
\r
910 cr0 &= ~(0x3 << SPI_TMOD_OFFSET);
\r
911 cr0 &= ~(0x1 << SPI_OPMOD_OFFSET);
\r
912 cr0 |= (spi->mode << SPI_MODE_OFFSET);
\r
913 cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
\r
914 cr0 |= ((chip->slave_enable & 1) << SPI_OPMOD_OFFSET);
\r
919 * we only need set the TXEI IRQ, as TX/RX always happen syncronizely
\r
921 if (!dws->dma_mapped && !chip->poll_mode) {
\r
924 if (chip->tmode == SPI_TMOD_RO) {
\r
925 templen = dws->len / dws->n_bytes - 1;
\r
926 rxint_level = dws->fifo_len / 2;
\r
927 rxint_level = (templen > rxint_level) ? rxint_level : templen;
\r
928 imask |= SPI_INT_RXFI;
\r
931 templen = dws->len / dws->n_bytes;
\r
932 txint_level = dws->fifo_len / 2;
\r
933 txint_level = (templen > txint_level) ? txint_level : templen;
\r
934 imask |= SPI_INT_TXEI;
\r
936 dws->transfer_handler = interrupt_transfer;
\r
940 * Reprogram registers only if
\r
941 * 1. chip select changes
\r
942 * 2. clk_div is changed
\r
943 * 3. control value changes
\r
945 if ((rk29xx_readl(dws, SPIM_CTRLR0) != cr0) || cs_change || clk_div || imask) {
\r
946 spi_enable_chip(dws, 0);
\r
947 if (rk29xx_readl(dws, SPIM_CTRLR0) != cr0)
\r
948 rk29xx_writel(dws, SPIM_CTRLR0, cr0);
\r
950 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
\r
951 spi_chip_sel(dws, spi->chip_select);
\r
953 rk29xx_writew(dws, SPIM_CTRLR1, dws->len-1);
\r
956 rk29xx_writew(dws, SPIM_TXFTLR, txint_level);
\r
957 spi_enable_chip(dws, 1);
\r
960 rk29xx_writew(dws, SPIM_RXFTLR, rxint_level);
\r
961 /* Set the interrupt mask, for poll mode just diable all int */
\r
962 spi_mask_intr(dws, 0xff);
\r
964 spi_umask_intr(dws, imask);
\r
967 dws->prev_chip = chip;
\r
970 if (chip->poll_mode)
\r
971 poll_transfer(dws);
\r
980 static void dma_transfer(struct rk29xx_spi *dws)
\r
982 struct spi_message *message = NULL;
\r
983 struct spi_transfer *transfer = NULL;
\r
984 struct spi_transfer *previous = NULL;
\r
985 struct spi_device *spi = NULL;
\r
986 struct chip_data *chip = NULL;
\r
987 //unsigned long val;
\r
988 //unsigned long flags;
\r
1000 DBG(KERN_INFO "dma_transfer,len=%d\n",dws->cur_transfer->len);
\r
1002 if (acquire_dma(dws)) {
\r
1003 dev_err(&dws->master->dev, "acquire dma failed\n");
\r
1007 if (map_dma_buffers(dws)) {
\r
1008 dev_err(&dws->master->dev, "acquire dma failed\n");
\r
1012 /* Get current state information */
\r
1013 message = dws->cur_msg;
\r
1014 transfer = dws->cur_transfer;
\r
1015 chip = dws->cur_chip;
\r
1016 spi = message->spi;
\r
1017 if (unlikely(!chip->clk_div))
\r
1018 chip->clk_div = clk_get_rate(dws->clock_spim) / chip->speed_hz;
\r
1019 if (message->state == ERROR_STATE) {
\r
1020 message->status = -EIO;
\r
1024 /* Handle end of message */
\r
1025 if (message->state == DONE_STATE) {
\r
1026 message->status = 0;
\r
1030 /* Delay if requested at end of transfer*/
\r
1031 if (message->state == RUNNING_STATE) {
\r
1032 previous = list_entry(transfer->transfer_list.prev,
\r
1033 struct spi_transfer,
\r
1035 if (previous->delay_usecs)
\r
1036 udelay(previous->delay_usecs);
\r
1039 dws->n_bytes = chip->n_bytes;
\r
1040 dws->dma_width = chip->dma_width;
\r
1041 dws->cs_control = chip->cs_control;
\r
1043 //dws->rx_dma = transfer->rx_dma;
\r
1044 //dws->tx_dma = transfer->tx_dma;
\r
1045 dws->tx = (void *)transfer->tx_buf;
\r
1046 dws->tx_end = dws->tx + transfer->len;
\r
1047 dws->rx = (void *)transfer->rx_buf;
\r
1048 dws->rx_end = dws->rx + transfer->len;
\r
1049 dws->write = dws->tx ? chip->write : null_writer;
\r
1050 dws->read = dws->rx ? chip->read : null_reader;
\r
1051 dws->cs_change = transfer->cs_change;
\r
1052 dws->len = dws->cur_transfer->len;
\r
1053 if (chip != dws->prev_chip)
\r
1058 /* Handle per transfer options for bpw and speed */
\r
1059 if (transfer->speed_hz) {
\r
1060 speed = chip->speed_hz;
\r
1061 if (transfer->speed_hz != speed) {
\r
1062 speed = transfer->speed_hz;
\r
1063 if (speed > clk_get_rate(dws->clock_spim)) {
\r
1064 dev_err(&dws->master->dev, "MRST SPI0: unsupported "
\r
1065 "freq: %dHz\n", speed);
\r
1066 message->status = -EIO;
\r
1070 /* clk_div doesn't support odd number */
\r
1071 clk_div = clk_get_rate(dws->clock_spim) / speed;
\r
1072 clk_div = (clk_div + 1) & 0xfffe;
\r
1074 chip->speed_hz = speed;
\r
1075 chip->clk_div = clk_div;
\r
1080 if (transfer->bits_per_word) {
\r
1081 bits = transfer->bits_per_word;
\r
1086 dws->dma_width = 1;
\r
1087 spi_dfs = SPI_DFS_8BIT;
\r
1091 dws->dma_width = 2;
\r
1092 spi_dfs = SPI_DFS_16BIT;
\r
1095 dev_err(&dws->master->dev, "MRST SPI0: unsupported bits:"
\r
1097 message->status = -EIO;
\r
1101 cr0 = (spi_dfs << SPI_DFS_OFFSET)
\r
1102 | (SPI_HALF_WORLD_OFF << SPI_HALF_WORLD_TX_OFFSET)
\r
1103 | (SPI_SSN_DELAY_ONE << SPI_SSN_DELAY_OFFSET)
\r
1104 | (chip->type << SPI_FRF_OFFSET)
\r
1105 | (spi->mode << SPI_MODE_OFFSET)
\r
1106 | (chip->tmode << SPI_TMOD_OFFSET);
\r
1108 message->state = RUNNING_STATE;
\r
1111 * Adjust transfer mode if necessary. Requires platform dependent
\r
1112 * chipselect mechanism.
\r
1114 if (dws->cs_control) {
\r
1115 if (dws->rx && dws->tx)
\r
1116 chip->tmode = SPI_TMOD_TR;
\r
1118 chip->tmode = SPI_TMOD_RO;
\r
1120 chip->tmode = SPI_TMOD_TO;
\r
1122 cr0 &= ~(0x3 << SPI_MODE_OFFSET);
\r
1123 cr0 &= ~(0x3 << SPI_TMOD_OFFSET);
\r
1124 cr0 &= ~(0x1 << SPI_OPMOD_OFFSET);
\r
1125 cr0 |= (spi->mode << SPI_MODE_OFFSET);
\r
1126 cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
\r
1127 cr0 |= ((chip->slave_enable & 1) << SPI_OPMOD_OFFSET);
\r
1131 * Reprogram registers only if
\r
1132 * 1. chip select changes
\r
1133 * 2. clk_div is changed
\r
1134 * 3. control value changes
\r
1136 if ((rk29xx_readl(dws, SPIM_CTRLR0) != cr0) || cs_change || clk_div) {
\r
1137 spi_enable_chip(dws, 0);
\r
1138 if (rk29xx_readl(dws, SPIM_CTRLR0) != cr0) {
\r
1139 rk29xx_writel(dws, SPIM_CTRLR0, cr0);
\r
1141 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
\r
1142 spi_chip_sel(dws, spi->chip_select);
\r
1143 /* Set the interrupt mask, for poll mode just diable all int */
\r
1144 spi_mask_intr(dws, 0xff);
\r
1146 if (transfer->tx_buf != NULL) {
\r
1147 dmacr |= SPI_DMACR_TX_ENABLE;
\r
1148 rk29xx_writew(dws, SPIM_DMATDLR, 0);
\r
1150 if (transfer->rx_buf != NULL) {
\r
1151 dmacr |= SPI_DMACR_RX_ENABLE;
\r
1152 rk29xx_writew(dws, SPIM_DMARDLR, 0);
\r
1153 rk29xx_writew(dws, SPIM_CTRLR1, transfer->len-1);
\r
1155 rk29xx_writew(dws, SPIM_DMACR, dmacr);
\r
1156 spi_enable_chip(dws, 1);
\r
1158 dws->prev_chip = chip;
\r
1161 //INIT_COMPLETION(dws->xfer_completion);
\r
1163 //spi_dump_regs(dws);
\r
1165 DBG("dws->tx_dmach: %d, dws->rx_dmach: %d, dws->tx_dma: 0x%x,dws->rx_dma: 0x%x\n", dws->tx_dmach, dws->rx_dmach, (unsigned int)dws->tx_dma,(unsigned int)dws->rx_dma);
\r
1166 DBG("dws->buffer_tx_dma: 0x%p, dws->buffer_rx_dma: 0x%p,dws->dma_width=%d\n", dws->buffer_tx_dma, dws->buffer_rx_dma,dws->dma_width);
\r
1168 if (transfer->tx_buf != NULL)
\r
1169 dws->state |= TXBUSY;
\r
1170 if (transfer->rx_buf != NULL)
\r
1171 dws->state |= RXBUSY;
\r
1173 if (transfer->tx_buf != NULL) {
\r
1174 DBG("%s:start dma tx,dws->state=0x%x\n",__func__,dws->state);
\r
1175 #if defined(PRINT_TRANS_DATA)
\r
1176 printk("dma tx:");
\r
1177 printk_transfer_data(dws->buffer_tx_dma, dws->cur_transfer->len);
\r
1179 /*if (transfer->len & 0x3) {
\r
1185 if (rk29_dma_config(dws->tx_dmach, burst)) {*/
\r
1186 if (rk29_dma_config(dws->tx_dmach, dws->dma_width, 1)) {//there is not dma burst but bitwide, set it 1 alwayss
\r
1187 dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);
\r
1191 rk29_dma_ctrl(dws->tx_dmach, RK29_DMAOP_FLUSH);
\r
1193 iRet = rk29_dma_enqueue(dws->tx_dmach, (void *)dws,
\r
1194 dws->tx_dma, transfer->len);
\r
1196 dev_err(&dws->master->dev, "function: %s, line: %d, iRet: %d(dws->tx_dmach: %d, transfer->tx_dma: 0x%x)\n", __FUNCTION__, __LINE__, iRet,
\r
1197 dws->tx_dmach, (unsigned int)transfer->tx_dma);
\r
1201 if (rk29_dma_ctrl(dws->tx_dmach, RK29_DMAOP_START)) {
\r
1202 dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);
\r
1207 //wait_till_not_busy(dws);
\r
1209 if (transfer->rx_buf != NULL) {
\r
1210 DBG("%s:start dma rx,dws->state=0x%x\n",__func__,dws->state);
\r
1211 if (rk29_dma_config(dws->rx_dmach, dws->dma_width, 1)) {
\r
1212 dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);
\r
1216 rk29_dma_ctrl(dws->rx_dmach, RK29_DMAOP_FLUSH);
\r
1218 iRet = rk29_dma_enqueue(dws->rx_dmach, (void *)dws,
\r
1219 dws->rx_dma, transfer->len);
\r
1221 dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);
\r
1225 if (rk29_dma_ctrl(dws->rx_dmach, RK29_DMAOP_START)) {
\r
1226 dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);
\r
1231 //wait_till_not_busy(dws);
\r
1241 static void pump_messages(struct work_struct *work)
\r
1243 struct rk29xx_spi *dws =
\r
1244 container_of(work, struct rk29xx_spi, pump_messages);
\r
1245 unsigned long flags;
\r
1247 DBG(KERN_INFO "pump_messages,line=%d\n",__LINE__);
\r
1249 /* Lock queue and check for queue work */
\r
1250 spin_lock_irqsave(&dws->lock, flags);
\r
1251 if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
\r
1253 spin_unlock_irqrestore(&dws->lock, flags);
\r
1254 DBG("%s:line=%d,list_empty\n",__func__,__LINE__);
\r
1258 /* Make sure we are not already running a message */
\r
1259 if (dws->cur_msg) {
\r
1260 spin_unlock_irqrestore(&dws->lock, flags);
\r
1261 DBG("%s:line=%d,dws->cur_msg\n",__func__,__LINE__);
\r
1265 /* Extract head of queue */
\r
1266 dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
\r
1267 list_del_init(&dws->cur_msg->queue);
\r
1269 /* Initial message state*/
\r
1270 dws->cur_msg->state = START_STATE;
\r
1271 dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
\r
1272 struct spi_transfer,
\r
1274 dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
\r
1275 dws->prev_chip = NULL; //ÿ¸öpump messageʱǿÖƸüÐÂcs dxj
\r
1278 /* Mark as busy and launch transfers */
\r
1279 tasklet_schedule(&dws->pump_transfers);
\r
1281 spin_unlock_irqrestore(&dws->lock, flags);
\r
1285 #if defined(QUICK_TRANSFER)
\r
1286 static void do_read(struct rk29xx_spi *dws)
\r
1290 spi_enable_chip(dws, 0);
\r
1291 rk29xx_writew(dws, SPIM_CTRLR1, dws->rx_end-dws->rx-1);
\r
1292 spi_enable_chip(dws, 1);
\r
1293 rk29xx_writew(dws, SPIM_TXDR, 0);
\r
1295 if (dws->read(dws))
\r
1297 if (count++ == 0x20) {
\r
1298 dev_err(&dws->master->dev, "+++++++++++spi receive data time out+++++++++++++\n");
\r
1305 static void do_write(struct rk29xx_spi *dws)
\r
1307 while (dws->tx<dws->tx_end) {
\r
1312 /* Caller already set message->status; dma and pio irqs are blocked */
\r
1313 static void msg_giveback(struct rk29xx_spi *dws)
\r
1315 struct spi_transfer *last_transfer;
\r
1316 struct spi_message *msg;
\r
1318 DBG("+++++++++++++++enter %s++++++++++++++++++\n", __func__);
\r
1320 msg = dws->cur_msg;
\r
1321 dws->cur_msg = NULL;
\r
1322 dws->cur_transfer = NULL;
\r
1323 dws->prev_chip = dws->cur_chip;
\r
1324 dws->cur_chip = NULL;
\r
1325 dws->dma_mapped = 0;
\r
1328 last_transfer = list_entry(msg->transfers.prev,
\r
1329 struct spi_transfer,
\r
1332 if (!last_transfer->cs_change && dws->cs_control)
\r
1333 dws->cs_control(dws,msg->spi->chip_select,MRST_SPI_DEASSERT);
\r
1335 msg->state = NULL;
\r
1338 /* Must be called inside pump_transfers() */
\r
1339 static int do_full_transfer(struct rk29xx_spi *dws)
\r
1341 if ((dws->read(dws))) {
\r
1345 while (dws->tx<dws->tx_end){
\r
1350 if (dws->rx < dws->rx_end) {
\r
1356 dws->cur_msg->actual_length += dws->len;
\r
1358 /* Move to next transfer */
\r
1359 dws->cur_msg->state = next_transfer(dws);
\r
1361 if (dws->cur_msg->state == DONE_STATE) {
\r
1362 dws->cur_msg->status = 0;
\r
1363 //msg_giveback(dws);
\r
1373 /* Must be called inside pump_transfers() */
\r
1374 static int do_half_transfer(struct rk29xx_spi *dws)
\r
1380 wait_till_tf_empty(dws);
\r
1381 wait_till_not_busy(dws);
\r
1386 wait_till_tf_empty(dws);
\r
1387 wait_till_not_busy(dws);
\r
1390 dws->cur_msg->actual_length += dws->len;
\r
1392 /* Move to next transfer */
\r
1393 dws->cur_msg->state = next_transfer(dws);
\r
1395 if (dws->cur_msg->state == DONE_STATE) {
\r
1396 dws->cur_msg->status = 0;
\r
1397 //msg_giveback(dws);
\r
1406 static int rk29xx_pump_transfers(struct rk29xx_spi *dws, int mode)
\r
1408 struct spi_message *message = NULL;
\r
1409 struct spi_transfer *transfer = NULL;
\r
1410 struct spi_transfer *previous = NULL;
\r
1411 struct spi_device *spi = NULL;
\r
1412 struct chip_data *chip = NULL;
\r
1421 DBG(KERN_INFO "+++++++++++++++enter %s++++++++++++++++++\n", __func__);
\r
1423 /* Get current state information */
\r
1424 message = dws->cur_msg;
\r
1425 transfer = dws->cur_transfer;
\r
1426 chip = dws->cur_chip;
\r
1427 spi = message->spi;
\r
1429 if (unlikely(!chip->clk_div))
\r
1430 chip->clk_div = clk_get_rate(dws->clock_spim) / chip->speed_hz;
\r
1431 if (message->state == ERROR_STATE) {
\r
1432 message->status = -EIO;
\r
1436 /* Handle end of message */
\r
1437 if (message->state == DONE_STATE) {
\r
1438 message->status = 0;
\r
1442 /* Delay if requested at end of transfer*/
\r
1443 if (message->state == RUNNING_STATE) {
\r
1444 previous = list_entry(transfer->transfer_list.prev,
\r
1445 struct spi_transfer,
\r
1447 if (previous->delay_usecs)
\r
1448 udelay(previous->delay_usecs);
\r
1451 dws->n_bytes = chip->n_bytes;
\r
1452 dws->dma_width = chip->dma_width;
\r
1453 dws->cs_control = chip->cs_control;
\r
1455 dws->rx_dma = transfer->rx_dma;
\r
1456 dws->tx_dma = transfer->tx_dma;
\r
1457 dws->tx = (void *)transfer->tx_buf;
\r
1458 dws->tx_end = dws->tx + transfer->len;
\r
1459 dws->rx = transfer->rx_buf;
\r
1460 dws->rx_end = dws->rx + transfer->len;
\r
1461 dws->write = dws->tx ? chip->write : null_writer;
\r
1462 dws->read = dws->rx ? chip->read : null_reader;
\r
1463 if (dws->rx && dws->tx) {
\r
1464 int temp_len = transfer->len;
\r
1466 unsigned char *tx_buf;
\r
1467 for (len=0; *tx_buf++ != 0; len++);
\r
1468 dws->tx_end = dws->tx + len;
\r
1469 dws->rx_end = dws->rx + temp_len - len;
\r
1471 dws->cs_change = transfer->cs_change;
\r
1472 dws->len = dws->cur_transfer->len;
\r
1473 if (chip != dws->prev_chip)
\r
1478 /* Handle per transfer options for bpw and speed */
\r
1479 if (transfer->speed_hz) {
\r
1480 speed = chip->speed_hz;
\r
1482 if (transfer->speed_hz != speed) {
\r
1483 speed = transfer->speed_hz;
\r
1484 if (speed > clk_get_rate(dws->clock_spim)) {
\r
1485 dev_err(&dws->master->dev, "MRST SPI0: unsupported"
\r
1486 "freq: %dHz\n", speed);
\r
1487 message->status = -EIO;
\r
1491 /* clk_div doesn't support odd number */
\r
1492 clk_div = clk_get_rate(dws->clock_spim) / speed;
\r
1493 clk_div = (clk_div + 1) & 0xfffe;
\r
1495 chip->speed_hz = speed;
\r
1496 chip->clk_div = clk_div;
\r
1499 if (transfer->bits_per_word) {
\r
1500 bits = transfer->bits_per_word;
\r
1505 dws->dma_width = 1;
\r
1506 dws->read = (dws->read != null_reader) ?
\r
1507 u8_reader : null_reader;
\r
1508 dws->write = (dws->write != null_writer) ?
\r
1509 u8_writer : null_writer;
\r
1510 spi_dfs = SPI_DFS_8BIT;
\r
1514 dws->dma_width = 2;
\r
1515 dws->read = (dws->read != null_reader) ?
\r
1516 u16_reader : null_reader;
\r
1517 dws->write = (dws->write != null_writer) ?
\r
1518 u16_writer : null_writer;
\r
1519 spi_dfs = SPI_DFS_16BIT;
\r
1522 dev_err(&dws->master->dev, "MRST SPI0: unsupported bits:"
\r
1524 message->status = -EIO;
\r
1528 cr0 = (spi_dfs << SPI_DFS_OFFSET)
\r
1529 | (chip->type << SPI_FRF_OFFSET)
\r
1530 | (spi->mode << SPI_MODE_OFFSET)
\r
1531 | (chip->tmode << SPI_TMOD_OFFSET);
\r
1533 message->state = RUNNING_STATE;
\r
1536 * Adjust transfer mode if necessary. Requires platform dependent
\r
1537 * chipselect mechanism.
\r
1539 if (dws->cs_control) {
\r
1540 if (dws->rx && dws->tx)
\r
1541 chip->tmode = SPI_TMOD_TR;
\r
1543 chip->tmode = SPI_TMOD_RO;
\r
1545 chip->tmode = SPI_TMOD_TO;
\r
1547 cr0 &= ~(0x3 << SPI_MODE_OFFSET);
\r
1548 cr0 &= ~(0x3 << SPI_TMOD_OFFSET);
\r
1549 cr0 |= (spi->mode << SPI_MODE_OFFSET);
\r
1550 cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
\r
1553 /* Check if current transfer is a DMA transaction */
\r
1554 dws->dma_mapped = map_dma_buffers(dws);
\r
1557 * Reprogram registers only if
\r
1558 * 1. chip select changes
\r
1559 * 2. clk_div is changed
\r
1560 * 3. control value changes
\r
1562 spi_enable_chip(dws, 0);
\r
1563 if (rk29xx_readl(dws, SPIM_CTRLR0) != cr0)
\r
1564 rk29xx_writel(dws, SPIM_CTRLR0, cr0);
\r
1566 DBG(KERN_INFO "clk_div: 0x%x, chip->clk_div: 0x%x\n", clk_div, chip->clk_div);
\r
1567 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
\r
1568 spi_chip_sel(dws, spi->chip_select);
\r
1569 rk29xx_writew(dws, SPIM_CTRLR1, 0);//add by lyx
\r
1570 if(dws->dma_mapped ) {
\r
1571 dmacr = rk29xx_readw(dws, SPIM_DMACR);
\r
1572 dmacr = dmacr | SPI_DMACR_TX_ENABLE;
\r
1574 dmacr = dmacr | SPI_DMACR_RX_ENABLE;
\r
1575 rk29xx_writew(dws, SPIM_DMACR, dmacr);
\r
1577 spi_enable_chip(dws, 1);
\r
1579 dws->prev_chip = chip;
\r
1582 return do_full_transfer(dws);
\r
1584 return do_half_transfer(dws);
\r
1588 //msg_giveback(dws);
\r
1593 static void rk29xx_pump_messages(struct rk29xx_spi *dws, int mode)
\r
1595 DBG(KERN_INFO "+++++++++++++++enter %s++++++++++++++++++\n", __func__);
\r
1597 while (!acquire_dma(dws))
\r
1600 if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
\r
1605 /* Make sure we are not already running a message */
\r
1606 if (dws->cur_msg) {
\r
1610 /* Extract head of queue */
\r
1611 dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
\r
1612 list_del_init(&dws->cur_msg->queue);
\r
1614 /* Initial message state*/
\r
1615 dws->cur_msg->state = START_STATE;
\r
1616 dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
\r
1617 struct spi_transfer,
\r
1619 dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
\r
1620 dws->prev_chip = NULL; //ÿ¸öpump messageʱǿÖƸüÐÂcs dxj
\r
1622 /* Mark as busy and launch transfers */
\r
1625 while (rk29xx_pump_transfers(dws, mode)) ;
\r
1628 /* spi_device use this to queue in their spi_msg */
\r
1629 static int rk29xx_spi_quick_transfer(struct spi_device *spi, struct spi_message *msg)
\r
1631 struct rk29xx_spi *dws = spi_master_get_devdata(spi->master);
\r
1632 unsigned long flags;
\r
1633 struct rk29xx_spi_chip *chip_info = spi->controller_data;
\r
1634 struct spi_message *mmsg;
\r
1636 DBG(KERN_INFO "+++++++++++++++enter %s++++++++++++++++++\n", __func__);
\r
1638 spin_lock_irqsave(&dws->lock, flags);
\r
1640 if (dws->run == QUEUE_STOPPED) {
\r
1641 spin_unlock_irqrestore(&dws->lock, flags);
\r
1642 return -ESHUTDOWN;
\r
1645 msg->actual_length = 0;
\r
1646 msg->status = -EINPROGRESS;
\r
1647 msg->state = START_STATE;
\r
1649 list_add_tail(&msg->queue, &dws->queue);
\r
1651 if (chip_info && (chip_info->transfer_mode == rk29xx_SPI_FULL_DUPLEX)) {
\r
1652 rk29xx_pump_messages(dws,1);
\r
1655 rk29xx_pump_messages(dws,0);
\r
1658 mmsg = dws->cur_msg;
\r
1659 msg_giveback(dws);
\r
1661 spin_unlock_irqrestore(&dws->lock, flags);
\r
1663 if (mmsg->complete)
\r
1664 mmsg->complete(mmsg->context);
\r
1671 /* spi_device use this to queue in their spi_msg */
\r
1672 static int rk29xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
\r
1674 struct rk29xx_spi *dws = spi_master_get_devdata(spi->master);
\r
1675 unsigned long flags;
\r
1677 spin_lock_irqsave(&dws->lock, flags);
\r
1679 if (dws->run == QUEUE_STOPPED) {
\r
1680 spin_unlock_irqrestore(&dws->lock, flags);
\r
1681 return -ESHUTDOWN;
\r
1684 msg->actual_length = 0;
\r
1685 msg->status = -EINPROGRESS;
\r
1686 msg->state = START_STATE;
\r
1688 list_add_tail(&msg->queue, &dws->queue);
\r
1690 if (dws->run == QUEUE_RUNNING && !dws->busy) {
\r
1692 if (dws->cur_transfer || dws->cur_msg)
\r
1693 queue_work(dws->workqueue,
\r
1694 &dws->pump_messages);
\r
1696 /* If no other data transaction in air, just go */
\r
1697 spin_unlock_irqrestore(&dws->lock, flags);
\r
1698 pump_messages(&dws->pump_messages);
\r
1703 spin_unlock_irqrestore(&dws->lock, flags);
\r
1710 /* This may be called twice for each spi dev */
\r
1711 static int rk29xx_spi_setup(struct spi_device *spi)
\r
1713 struct rk29xx_spi_chip *chip_info = NULL;
\r
1714 struct chip_data *chip;
\r
1717 if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
\r
1720 /* Only alloc on first setup */
\r
1721 chip = spi_get_ctldata(spi);
\r
1723 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
\r
1727 chip->cs_control = spi_cs_control;
\r
1728 chip->enable_dma = 0; //0;
\r
1732 * Protocol drivers may change the chip settings, so...
\r
1733 * if chip_info exists, use it
\r
1735 chip_info = spi->controller_data;
\r
1737 /* chip_info doesn't always exist */
\r
1739 if (chip_info->cs_control)
\r
1740 chip->cs_control = chip_info->cs_control;
\r
1742 chip->poll_mode = chip_info->poll_mode;
\r
1743 chip->type = chip_info->type;
\r
1744 chip->slave_enable = chip_info->slave_enable;
\r
1745 chip->rx_threshold = 0;
\r
1746 chip->tx_threshold = 0;
\r
1748 chip->enable_dma = chip_info->enable_dma;
\r
1751 if (spi->bits_per_word == 8) {
\r
1752 chip->n_bytes = 1;
\r
1753 chip->dma_width = 1;
\r
1754 chip->read = u8_reader;
\r
1755 chip->write = u8_writer;
\r
1756 spi_dfs = SPI_DFS_8BIT;
\r
1757 } else if (spi->bits_per_word == 16) {
\r
1758 chip->n_bytes = 2;
\r
1759 chip->dma_width = 2;
\r
1760 chip->read = u16_reader;
\r
1761 chip->write = u16_writer;
\r
1762 spi_dfs = SPI_DFS_16BIT;
\r
1764 /* Never take >16b case for MRST SPIC */
\r
1765 dev_err(&spi->dev, "invalid wordsize\n");
\r
1768 chip->bits_per_word = spi->bits_per_word;
\r
1770 if (!spi->max_speed_hz) {
\r
1771 dev_err(&spi->dev, "No max speed HZ parameter\n");
\r
1774 chip->speed_hz = spi->max_speed_hz;
\r
1776 chip->tmode = 0; /* Tx & Rx */
\r
1777 /* Default SPI mode is SCPOL = 0, SCPH = 0 */
\r
1778 chip->cr0 = (spi_dfs << SPI_DFS_OFFSET)
\r
1779 | (SPI_HALF_WORLD_OFF << SPI_HALF_WORLD_TX_OFFSET)
\r
1780 | (SPI_SSN_DELAY_ONE << SPI_SSN_DELAY_OFFSET)
\r
1781 | (chip->type << SPI_FRF_OFFSET)
\r
1782 | (spi->mode << SPI_MODE_OFFSET)
\r
1783 | (chip->tmode << SPI_TMOD_OFFSET);
\r
1785 spi_set_ctldata(spi, chip);
\r
1789 static void rk29xx_spi_cleanup(struct spi_device *spi)
\r
1791 struct chip_data *chip = spi_get_ctldata(spi);
\r
1795 static int __devinit init_queue(struct rk29xx_spi *dws)
\r
1797 INIT_LIST_HEAD(&dws->queue);
\r
1798 spin_lock_init(&dws->lock);
\r
1800 dws->run = QUEUE_STOPPED;
\r
1803 init_completion(&dws->xfer_completion);
\r
1805 tasklet_init(&dws->pump_transfers,
\r
1806 pump_transfers, (unsigned long)dws);
\r
1808 INIT_WORK(&dws->pump_messages, pump_messages);
\r
1809 dws->workqueue = create_singlethread_workqueue(
\r
1810 dev_name(dws->master->dev.parent));
\r
1811 if (dws->workqueue == NULL)
\r
1818 static int start_queue(struct rk29xx_spi *dws)
\r
1820 unsigned long flags;
\r
1822 spin_lock_irqsave(&dws->lock, flags);
\r
1824 if (dws->run == QUEUE_RUNNING || dws->busy) {
\r
1825 spin_unlock_irqrestore(&dws->lock, flags);
\r
1829 dws->run = QUEUE_RUNNING;
\r
1830 dws->cur_msg = NULL;
\r
1831 dws->cur_transfer = NULL;
\r
1832 dws->cur_chip = NULL;
\r
1833 dws->prev_chip = NULL;
\r
1834 spin_unlock_irqrestore(&dws->lock, flags);
\r
1836 queue_work(dws->workqueue, &dws->pump_messages);
\r
1841 static int stop_queue(struct rk29xx_spi *dws)
\r
1843 unsigned long flags;
\r
1844 unsigned limit = 50;
\r
1847 spin_lock_irqsave(&dws->lock, flags);
\r
1848 dws->run = QUEUE_STOPPED;
\r
1849 while (!list_empty(&dws->queue) && dws->busy && limit--) {
\r
1850 spin_unlock_irqrestore(&dws->lock, flags);
\r
1852 spin_lock_irqsave(&dws->lock, flags);
\r
1855 if (!list_empty(&dws->queue) || dws->busy)
\r
1857 spin_unlock_irqrestore(&dws->lock, flags);
\r
1862 static int destroy_queue(struct rk29xx_spi *dws)
\r
1866 status = stop_queue(dws);
\r
1869 destroy_workqueue(dws->workqueue);
\r
1873 /* Restart the controller, disable all interrupts, clean rx fifo */
\r
1874 static void spi_hw_init(struct rk29xx_spi *dws)
\r
1876 spi_enable_chip(dws, 0);
\r
1877 spi_mask_intr(dws, 0xff);
\r
1880 * Try to detect the FIFO depth if not set by interface driver,
\r
1881 * the depth could be from 2 to 32 from HW spec
\r
1883 if (!dws->fifo_len) {
\r
1885 for (fifo = 2; fifo <= 31; fifo++) {
\r
1886 rk29xx_writew(dws, SPIM_TXFTLR, fifo);
\r
1887 if (fifo != rk29xx_readw(dws, SPIM_TXFTLR))
\r
1891 dws->fifo_len = (fifo == 31) ? 0 : fifo;
\r
1892 rk29xx_writew(dws, SPIM_TXFTLR, 0);
\r
1895 spi_enable_chip(dws, 1);
\r
1899 /* cpufreq driver support */
\r
1900 #ifdef CONFIG_CPU_FREQ
\r
1902 static int rk29xx_spim_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data)
\r
1904 struct rk29xx_spi *info;
\r
1905 unsigned long newclk;
\r
1907 info = container_of(nb, struct rk29xx_spi, freq_transition);
\r
1908 newclk = clk_get_rate(info->clock_spim);
\r
1913 static inline int rk29xx_spim_cpufreq_register(struct rk29xx_spi *info)
\r
1915 info->freq_transition.notifier_call = rk29xx_spim_cpufreq_transition;
\r
1917 return cpufreq_register_notifier(&info->freq_transition, CPUFREQ_TRANSITION_NOTIFIER);
\r
1920 static inline void rk29xx_spim_cpufreq_deregister(struct rk29xx_spi *info)
\r
1922 cpufreq_unregister_notifier(&info->freq_transition, CPUFREQ_TRANSITION_NOTIFIER);
\r
1926 static inline int rk29xx_spim_cpufreq_register(struct rk29xx_spi *info)
\r
1931 static inline void rk29xx_spim_cpufreq_deregister(struct rk29xx_spi *info)
\r
1935 static int __init rk29xx_spim_probe(struct platform_device *pdev)
\r
1937 struct resource *regs, *dmatx_res, *dmarx_res;
\r
1938 struct rk29xx_spi *dws;
\r
1939 struct spi_master *master;
\r
1942 struct rk29xx_spi_platform_data *pdata = pdev->dev.platform_data;
\r
1944 if (pdata && pdata->io_init) {
\r
1945 ret = pdata->io_init(pdata->chipselect_gpios, pdata->num_chipselect);
\r
1951 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
\r
1954 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
\r
1955 if (dmatx_res == NULL) {
\r
1956 dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
\r
1960 dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
\r
1961 if (dmarx_res == NULL) {
\r
1962 dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
\r
1965 irq = platform_get_irq(pdev, 0);
\r
1968 /* setup spi core then atmel-specific driver state */
\r
1970 master = spi_alloc_master(&pdev->dev, sizeof *dws);
\r
1976 platform_set_drvdata(pdev, master);
\r
1977 dws = spi_master_get_devdata(master);
\r
1978 dws->clock_spim = clk_get(&pdev->dev, "spi");
\r
1979 clk_enable(dws->clock_spim);
\r
1980 if (IS_ERR(dws->clock_spim)) {
\r
1981 dev_err(&pdev->dev, "clk_get for spi fail(%p)\n", dws->clock_spim);
\r
1982 return PTR_ERR(dws->clock_spim);
\r
1985 dws->pclk = clk_get(&pdev->dev, "pclk_spi");
\r
1986 clk_enable(dws->pclk);
\r
1988 mutex_init(&dws->dma_lock);
\r
1990 dws->regs = ioremap(regs->start, (regs->end - regs->start) + 1);
\r
1992 release_mem_region(regs->start, (regs->end - regs->start) + 1);
\r
1995 DBG(KERN_INFO "dws->regs: %p\n", dws->regs);
\r
1997 dws->irq_polarity = IRQF_TRIGGER_NONE;
\r
1998 dws->master = master;
\r
1999 dws->type = SSI_MOTO_SPI;
\r
2000 dws->prev_chip = NULL;
\r
2001 dws->sfr_start = regs->start;
\r
2002 dws->tx_dmach = dmatx_res->start;
\r
2003 dws->rx_dmach = dmarx_res->start;
\r
2004 dws->dma_inited = 0; ///0;
\r
2005 ///dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
\r
2006 ret = request_irq(dws->irq, rk29xx_spi_irq, dws->irq_polarity,
\r
2007 "rk29xx_spim", dws);
\r
2009 dev_err(&master->dev, "can not get IRQ\n");
\r
2010 goto err_free_master;
\r
2013 master->mode_bits = SPI_CPOL | SPI_CPHA;
\r
2014 master->bus_num = pdev->id;
\r
2015 master->num_chipselect = pdata->num_chipselect;
\r
2016 master->dev.platform_data = pdata;
\r
2017 master->cleanup = rk29xx_spi_cleanup;
\r
2018 master->setup = rk29xx_spi_setup;
\r
2019 #if defined(QUICK_TRANSFER)
\r
2020 master->transfer = rk29xx_spi_quick_transfer;
\r
2022 master->transfer = rk29xx_spi_transfer;
\r
2026 /* Basic HW init */
\r
2029 /* Initial and start queue */
\r
2030 ret = init_queue(dws);
\r
2032 dev_err(&master->dev, "problem initializing queue\n");
\r
2033 goto err_diable_hw;
\r
2036 ret = start_queue(dws);
\r
2038 dev_err(&master->dev, "problem starting queue\n");
\r
2039 goto err_diable_hw;
\r
2042 spi_master_set_devdata(master, dws);
\r
2043 ret = spi_register_master(master);
\r
2045 dev_err(&master->dev, "problem registering spi master\n");
\r
2046 goto err_queue_alloc;
\r
2049 ret =rk29xx_spim_cpufreq_register(dws);
\r
2051 dev_err(&master->dev, "rk29xx spim failed to init cpufreq support\n");
\r
2052 goto err_queue_alloc;
\r
2054 printk(KERN_INFO "rk29xx_spim: driver initialized, fifo_len=%d,bus_num=%d\n", dws->fifo_len,master->bus_num);
\r
2055 mrst_spi_debugfs_init(dws);
\r
2059 destroy_queue(dws);
\r
2061 spi_enable_chip(dws, 0);
\r
2062 free_irq(dws->irq, dws);
\r
2064 spi_master_put(master);
\r
2065 iounmap(dws->regs);
\r
2070 static void __exit rk29xx_spim_remove(struct platform_device *pdev)
\r
2072 struct spi_master *master = platform_get_drvdata(pdev);
\r
2073 struct rk29xx_spi *dws = spi_master_get_devdata(master);
\r
2078 rk29xx_spim_cpufreq_deregister(dws);
\r
2079 mrst_spi_debugfs_remove(dws);
\r
2081 if(dws->buffer_tx_dma)
\r
2082 dma_free_coherent(&pdev->dev, DMA_BUFFER_SIZE, dws->buffer_tx_dma, dws->tx_dma);
\r
2083 if(dws->buffer_rx_dma)
\r
2084 dma_free_coherent(&pdev->dev, DMA_BUFFER_SIZE, dws->buffer_rx_dma, dws->rx_dma);
\r
2087 /* Remove the queue */
\r
2088 status = destroy_queue(dws);
\r
2090 dev_err(&dws->master->dev, "rk29xx_spi_remove: workqueue will not "
\r
2091 "complete, message memory not freed\n");
\r
2093 clk_disable(dws->clock_spim);
\r
2094 clk_put(dws->clock_spim);
\r
2095 clk_disable(dws->pclk);
\r
2096 clk_put(dws->pclk);
\r
2097 spi_enable_chip(dws, 0);
\r
2099 spi_set_clk(dws, 0);
\r
2100 free_irq(dws->irq, dws);
\r
2102 /* Disconnect from the SPI framework */
\r
2103 spi_unregister_master(dws->master);
\r
2104 iounmap(dws->regs);
\r
2110 static int rk29xx_spim_suspend(struct platform_device *pdev, pm_message_t mesg)
\r
2112 struct spi_master *master = platform_get_drvdata(pdev);
\r
2113 struct rk29xx_spi *dws = spi_master_get_devdata(master);
\r
2114 struct rk29xx_spi_platform_data *pdata = pdev->dev.platform_data;
\r
2118 status = stop_queue(dws);
\r
2121 clk_disable(dws->clock_spim);
\r
2122 if (pdata && pdata->io_fix_leakage_bug)
\r
2124 pdata->io_fix_leakage_bug( );
\r
2126 clk_disable(dws->pclk);
\r
2130 static int rk29xx_spim_resume(struct platform_device *pdev)
\r
2132 struct spi_master *master = platform_get_drvdata(pdev);
\r
2133 struct rk29xx_spi *dws = spi_master_get_devdata(master);
\r
2134 struct rk29xx_spi_platform_data *pdata = pdev->dev.platform_data;
\r
2137 clk_enable(dws->pclk);
\r
2138 clk_enable(dws->clock_spim);
\r
2140 ret = start_queue(dws);
\r
2142 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
\r
2143 if (pdata && pdata->io_resume_leakage_bug)
\r
2145 pdata->io_resume_leakage_bug( );
\r
2151 #define rk29xx_spim_suspend NULL
\r
2152 #define rk29xx_spim_resume NULL
\r
2155 static struct platform_driver rk29xx_platform_spim_driver = {
\r
2156 .remove = __exit_p(rk29xx_spim_remove),
\r
2158 .name = "rk29xx_spim",
\r
2159 .owner = THIS_MODULE,
\r
2161 .suspend = rk29xx_spim_suspend,
\r
2162 .resume = rk29xx_spim_resume,
\r
2165 static int __init rk29xx_spim_init(void)
\r
2168 ret = platform_driver_probe(&rk29xx_platform_spim_driver, rk29xx_spim_probe);
\r
2172 static void __exit rk29xx_spim_exit(void)
\r
2174 platform_driver_unregister(&rk29xx_platform_spim_driver);
\r
2177 arch_initcall_sync(rk29xx_spim_init);
\r
2178 module_exit(rk29xx_spim_exit);
\r
2180 MODULE_AUTHOR("www.rock-chips.com");
\r
2181 MODULE_DESCRIPTION("Rockchip RK29xx spim port driver");
\r
2182 MODULE_LICENSE("GPL");;
\r