#define DBG(x...)\r
#endif\r
\r
-#define DMA_MIN_BYTES 8\r
+#define DMA_BUFFER_SIZE	PAGE_SIZE
+#define DMA_MIN_BYTES	32	/* transfers this small fit the 32x16-bit FIFO; use PIO */
\r
\r
#define START_STATE ((void *)0)\r
}\r
#endif /* CONFIG_DEBUG_FS */\r
\r
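+/*
+ * Forward declarations: the DMA buffer-done callbacks hand completion
+ * off to transfer_complete(), and pump_transfers() dispatches into
+ * dma_transfer() for DMA-eligible transfers.
+ */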
+static void dma_transfer(struct rk29xx_spi *dws) ;\r
+static void transfer_complete(struct rk29xx_spi *dws);\r
+\r
static void wait_till_not_busy(struct rk29xx_spi *dws)\r
{\r
unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);\r
struct rk29xx_spi *dws = buf_id;\r
unsigned long flags;\r
\r
+	DBG("func: %s, line: %d\n", __func__, __LINE__);
+
spin_lock_irqsave(&dws->lock, flags);\r
\r
if (res == RK29_RES_OK)\r
dws->state &= ~RXBUSY;\r
else\r
- dev_err(&dws->master->dev, "DmaAbrtRx-%d, size: %d\n", res, size);\r
+		dev_err(&dws->master->dev, "DmaAbrtRx: res=%d, size: %d\n", res, size);
\r
/* If the other done */\r
- if (!(dws->state & TXBUSY))\r
- complete(&dws->xfer_completion);\r
+	/*
+	 * Call transfer_complete() directly from the DMA callback so a
+	 * completion can never be lost while the other channel finishes.
+	 */
+	transfer_complete(dws);
\r
spin_unlock_irqrestore(&dws->lock, flags);\r
}\r
unsigned long flags;\r
\r
DBG("func: %s, line: %d\n", __FUNCTION__, __LINE__);\r
spin_lock_irqsave(&dws->lock, flags);\r
\r
if (res == RK29_RES_OK)\r
dws->state &= ~TXBUSY;\r
else\r
- dev_err(&dws->master->dev, "DmaAbrtTx-%d, size: %d \n", res, size);\r
+		dev_err(&dws->master->dev, "DmaAbrtTx: res=%d, size: %d\n", res, size);
\r
/* If the other done */\r
- if (!(dws->state & RXBUSY)) \r
- complete(&dws->xfer_completion);\r
+	/*
+	 * Call transfer_complete() directly from the DMA callback so a
+	 * completion can never be lost while the other channel finishes.
+	 */
+	transfer_complete(dws);
\r
spin_unlock_irqrestore(&dws->lock, flags);\r
}\r
rk29_dma_free(dws->rx_dmach, &rk29_spi_dma_client);\r
return -1;\r
}\r
+\r
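+	/*
+	 * Configure both channels once at init time: the TX channel moves
+	 * data from memory into SPIM_TXDR, the RX channel drains SPIM_RXDR
+	 * into memory.  Per-transfer setup then only loads the buffers.
+	 */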
+ if (dws->tx_dma) {\r
+ if (rk29_dma_set_buffdone_fn(dws->tx_dmach, rk29_spi_dma_txcb)) {\r
+ dev_err(&dws->master->dev, "rk29_dma_set_buffdone_fn fail\n");\r
+ return -1;\r
+ }\r
+ if (rk29_dma_devconfig(dws->tx_dmach, RK29_DMASRC_MEM,\r
+ dws->sfr_start + SPIM_TXDR)) {\r
+ dev_err(&dws->master->dev, "rk29_dma_devconfig fail\n");\r
+ return -1;\r
+ }\r
+ }\r
+\r
+ if (dws->rx_dma) {\r
+ if (rk29_dma_set_buffdone_fn(dws->rx_dmach, rk29_spi_dma_rxcb)) {\r
+ dev_err(&dws->master->dev, "rk29_dma_set_buffdone_fn fail\n");\r
+ return -1;\r
+ }\r
+ if (rk29_dma_devconfig(dws->rx_dmach, RK29_DMASRC_HW,\r
+ dws->sfr_start + SPIM_RXDR)) {\r
+ dev_err(&dws->master->dev, "rk29_dma_devconfig fail\n");\r
+ return -1;\r
+ }\r
+ }\r
\r
	dws->dma_inited = 1;
return 0;\r
}\r
\r
*/\r
static int map_dma_buffers(struct rk29xx_spi *dws)\r
{\r
- if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited\r
- || !dws->cur_chip->enable_dma)\r
+	if (!dws->dma_inited || !dws->cur_chip->enable_dma) {
+		dev_err(&dws->master->dev, "%s: DMA not initialized or not enabled\n", __func__);
return -1;\r
-\r
- if (dws->cur_transfer->tx_dma) {\r
- dws->tx_dma = dws->cur_transfer->tx_dma;\r
- if (rk29_dma_set_buffdone_fn(dws->tx_dmach, rk29_spi_dma_txcb)) {\r
- dev_err(&dws->master->dev, "rk29_dma_set_buffdone_fn fail\n");\r
- return -1;\r
- }\r
- if (rk29_dma_devconfig(dws->tx_dmach, RK29_DMASRC_MEM,\r
- dws->sfr_start + SPIM_TXDR)) {\r
- dev_err(&dws->master->dev, "rk29_dma_devconfig fail\n");\r
- return -1;\r
- }\r
}\r
\r
- if (dws->cur_transfer->rx_dma) {\r
- dws->rx_dma = dws->cur_transfer->rx_dma;\r
- if (rk29_dma_set_buffdone_fn(dws->rx_dmach, rk29_spi_dma_rxcb)) {\r
- dev_err(&dws->master->dev, "rk29_dma_set_buffdone_fn fail\n");\r
- return -1;\r
- }\r
- if (rk29_dma_devconfig(dws->rx_dmach, RK29_DMASRC_HW,\r
- dws->sfr_start + SPIM_RXDR)) {\r
- dev_err(&dws->master->dev, "rk29_dma_devconfig fail\n");\r
- return -1;\r
- }\r
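+	/*
+	 * Bounce-buffer scheme: tx data is copied into the coherent DMA
+	 * buffer allocated at probe time, and the transfer is pointed at
+	 * the coherent buffers, so callers need no DMA mapping of their own.
+	 */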
+	if (dws->cur_transfer->tx_buf) {
+		memcpy(dws->buffer_tx_dma, dws->cur_transfer->tx_buf,
+		       dws->cur_transfer->len);
+		dws->cur_transfer->tx_buf = dws->buffer_tx_dma;
+	}
\r
+	if (dws->cur_transfer->rx_buf)
+		dws->cur_transfer->rx_buf = dws->buffer_rx_dma;
+
+	dws->cur_transfer->tx_dma = dws->tx_dma;
+	dws->cur_transfer->rx_dma = dws->rx_dma;
+
return 0;\r
}\r
\r
dws->prev_chip = dws->cur_chip;\r
dws->cur_chip = NULL;\r
dws->dma_mapped = 0;\r
+
+	/* Important: mask all SPI interrupts and disable DMA requests
+	 * before the next message is pumped. */
+	spi_umask_intr(dws, 0);
+	rk29xx_writew(dws, SPIM_DMACR, 0);
+
queue_work(dws->workqueue, &dws->pump_messages);\r
spin_unlock_irqrestore(&dws->lock, flags);\r
\r
msg->state = NULL;\r
if (msg->complete)\r
msg->complete(msg->context);\r
}\r
\r
static void int_error_stop(struct rk29xx_spi *dws, const char *msg)\r
u16 irq_status, irq_mask = 0x1f;\r
u32 int_level = dws->fifo_len / 2;\r
u32 left;\r
- \r
+\r
irq_status = rk29xx_readw(dws, SPIM_ISR) & irq_mask;\r
/* Error handling */\r
if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {\r
rk29xx_writew(dws, SPIM_ICR, SPI_CLEAR_INT_TXOI | SPI_CLEAR_INT_RXOI | SPI_CLEAR_INT_RXUI);\r
int_error_stop(dws, "interrupt_transfer: fifo overrun");\r
+		mutex_unlock(&dws->dma_lock);
return IRQ_HANDLED;\r
}\r
\r
else {\r
transfer_complete(dws);\r
}\r
	}
\r
return IRQ_HANDLED;\r
}\r
/* Must be called inside pump_transfers() */\r
static void poll_transfer(struct rk29xx_spi *dws)\r
{\r
+	DBG("%s\n", __func__);
while (dws->write(dws)) {\r
wait_till_not_busy(dws);\r
dws->read(dws);\r
u32 speed = 0;\r
u32 cr0 = 0;\r
\r
-	DBG(KERN_INFO "pump_transfers\n");
+	/* Transfers big enough to beat PIO but small enough to fit the
+	 * bounce buffer take the DMA path and return early. */
+	if (dws->cur_chip->enable_dma && dws->cur_transfer->len > DMA_MIN_BYTES &&
+	    dws->cur_transfer->len < DMA_BUFFER_SIZE) {
+		dma_transfer(dws);
+		return;
+	}
+
+	DBG(KERN_INFO "pump_transfers, len=%d\n", dws->cur_transfer->len);
\r
/* Get current state information */\r
message = dws->cur_msg;\r
dws->dma_width = chip->dma_width;\r
dws->cs_control = chip->cs_control;\r
\r
- dws->rx_dma = transfer->rx_dma;\r
- dws->tx_dma = transfer->tx_dma;\r
dws->tx = (void *)transfer->tx_buf;\r
dws->tx_end = dws->tx + transfer->len;\r
- dws->rx = transfer->rx_buf;\r
+ dws->rx = (void *)transfer->rx_buf;\r
dws->rx_end = dws->rx + transfer->len;\r
dws->write = dws->tx ? chip->write : null_writer;\r
dws->read = dws->rx ? chip->read : null_reader;\r
* Interrupt mode\r
 * we only need to set the TXEI IRQ, as TX/RX always happen synchronously
*/\r
	if (!dws->dma_mapped && !chip->poll_mode) {
int templen ;\r
\r
if (chip->tmode == SPI_TMOD_RO) {\r
return;\r
}\r
\r
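+/*
+ * Start one spi_transfer on the DMA engine.  This returns as soon as the
+ * channels are set up; completion is reported asynchronously by the
+ * rk29_spi_dma_{tx,rx}cb callbacks, which call transfer_complete().
+ */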
-static void dma_transfer(struct rk29xx_spi *dws) //int cs_change)\r
+static void dma_transfer(struct rk29xx_spi *dws)
{\r
struct spi_message *message = NULL;\r
struct spi_transfer *transfer = NULL;\r
struct spi_transfer *previous = NULL;\r
struct spi_device *spi = NULL;\r
struct chip_data *chip = NULL;\r
-	unsigned long val;
-	int ms;
	int iRet;
-	int burst;
u8 bits = 0;\r
u8 spi_dfs = 0;\r
u8 cs_change = 0;\r
u32 speed = 0;\r
u32 cr0 = 0;\r
u32 dmacr = 0;\r
-
-	DBG(KERN_INFO "dma_transfer\n");
-
+
+	DBG(KERN_INFO "dma_transfer, len=%d\n", dws->cur_transfer->len);
+
if (acquire_dma(dws)) {\r
dev_err(&dws->master->dev, "acquire dma failed\n");\r
goto err_out;\r
dws->dma_width = chip->dma_width;\r
dws->cs_control = chip->cs_control;\r
\r
- dws->rx_dma = transfer->rx_dma;\r
- dws->tx_dma = transfer->tx_dma;\r
dws->tx = (void *)transfer->tx_buf;\r
dws->tx_end = dws->tx + transfer->len;\r
- dws->rx = transfer->rx_buf;\r
+ dws->rx = (void *)transfer->rx_buf;\r
dws->rx_end = dws->rx + transfer->len;\r
dws->write = dws->tx ? chip->write : null_writer;\r
dws->read = dws->rx ? chip->read : null_reader;\r
cs_change = 1;\r
\r
cr0 = chip->cr0;\r
/* Handle per transfer options for bpw and speed */\r
if (transfer->speed_hz) {\r
speed = chip->speed_hz;\r
-\r
if (transfer->speed_hz != speed) {\r
speed = transfer->speed_hz;\r
if (speed > clk_get_rate(dws->clock_spim)) {\r
}\r
}\r
\r
if (transfer->bits_per_word) {\r
bits = transfer->bits_per_word;\r
\r
dws->prev_chip = chip;\r
} \r
\r
- INIT_COMPLETION(dws->xfer_completion);\r
\r
spi_dump_regs(dws);\r
DBG("dws->tx_dmach: %d, dws->rx_dmach: %d, transfer->tx_dma: 0x%x\n", dws->tx_dmach, dws->rx_dmach, (unsigned int)transfer->tx_dma);\r
}\r
}\r
\r
- wait_till_not_busy(dws);\r
-\r
if (transfer->rx_buf != NULL) {\r
dws->state |= RXBUSY;\r
if (rk29_dma_config(dws->rx_dmach, 1, 1)) {\r
goto err_out;\r
}\r
}\r
-\r
- /* millisecs to xfer 'len' bytes @ 'cur_speed' */\r
- ms = transfer->len * 8 / dws->cur_chip->speed_hz;\r
- ms += 10; \r
-\r
- val = msecs_to_jiffies(ms) + 10;\r
- if (!wait_for_completion_timeout(&dws->xfer_completion, val)) {\r
- if (transfer->rx_buf != NULL && (dws->state & RXBUSY)) {\r
- rk29_dma_ctrl(dws->rx_dmach, RK29_DMAOP_FLUSH);\r
- dws->state &= ~RXBUSY;\r
- dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);\r
- goto NEXT_TRANSFER;\r
- }\r
- if (transfer->tx_buf != NULL && (dws->state & TXBUSY)) {\r
- rk29_dma_ctrl(dws->tx_dmach, RK29_DMAOP_FLUSH);\r
- dws->state &= ~TXBUSY;\r
- dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);\r
- goto NEXT_TRANSFER;\r
- }\r
- }\r
-\r
- wait_till_not_busy(dws);\r
-\r
-NEXT_TRANSFER:\r
- /* Update total byte transfered return count actual bytes read */\r
- dws->cur_msg->actual_length += dws->len;\r
-\r
- /* Move to next transfer */\r
- dws->cur_msg->state = next_transfer(dws);\r
-\r
- /* Handle end of message */\r
- if (dws->cur_msg->state == DONE_STATE) {\r
- dws->cur_msg->status = 0;\r
- giveback(dws);\r
- } else\r
- dma_transfer(dws);\r
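+	/*
+	 * Completion is asynchronous: the DMA buffer-done callbacks invoke
+	 * transfer_complete(), which advances to the next transfer or gives
+	 * the message back to its caller.
+	 */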
\r
return;\r
\r
unsigned long flags;\r
\r
DBG(KERN_INFO "pump_messages\n");\r
/* Lock queue and check for queue work */\r
spin_lock_irqsave(&dws->lock, flags);\r
if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {\r
dws->busy = 0;\r
spin_unlock_irqrestore(&dws->lock, flags);\r
+ mutex_unlock(&dws->dma_lock);\r
return;\r
}\r
\r
/* Make sure we are not already running a message */\r
if (dws->cur_msg) {\r
spin_unlock_irqrestore(&dws->lock, flags);\r
+ mutex_unlock(&dws->dma_lock);\r
return;\r
}\r
\r
struct spi_transfer,\r
transfer_list);\r
dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);\r
-	dws->prev_chip = NULL;	/* force CS update on each pump message - dxj */
- \r
- /* Mark as busy and launch transfers */\r
- if(dws->cur_msg->is_dma_mapped /*&& dws->cur_transfer->len > DMA_MIN_BYTES*/) {\r
- dws->busy = 1;\r
- spin_unlock_irqrestore(&dws->lock, flags);\r
- dma_transfer(dws);\r
- return;\r
- }\r
- else {\r
- tasklet_schedule(&dws->pump_transfers);\r
- }\r
+	dws->prev_chip = NULL;	/* force CS update on each pump message - dxj */
\r
+	/* Mark as busy and launch transfers; pump_transfers() decides
+	 * per transfer whether to take the DMA or the PIO path. */
+	tasklet_schedule(&dws->pump_transfers);
dws->busy = 1;\r
spin_unlock_irqrestore(&dws->lock, flags);\r
}\r
\r
#if defined(QUICK_TRANSFER)\r
else {\r
/* If no other data transaction in air, just go */\r
spin_unlock_irqrestore(&dws->lock, flags);\r
		pump_messages(&dws->pump_messages);
return 0;\r
}\r
}\r
if (dws->workqueue == NULL)\r
return -EBUSY;\r
\r
return 0;\r
}\r
\r
dev_err(&pdev->dev, "clk_get for spi fail(%p)\n", dws->clock_spim);\r
return PTR_ERR(dws->clock_spim);\r
}\r
+
+	dws->buffer_tx_dma = dma_alloc_coherent(&pdev->dev, DMA_BUFFER_SIZE,
+						&dws->tx_dma, GFP_KERNEL | GFP_DMA);
+	if (!dws->buffer_tx_dma) {
+		dev_err(&pdev->dev, "failed to alloc dma tx buffer\n");
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	dws->buffer_rx_dma = dma_alloc_coherent(&pdev->dev, DMA_BUFFER_SIZE,
+						&dws->rx_dma, GFP_KERNEL | GFP_DMA);
+	if (!dws->buffer_rx_dma) {
+		dev_err(&pdev->dev, "failed to alloc dma rx buffer\n");
+		dma_free_coherent(&pdev->dev, DMA_BUFFER_SIZE,
+				  dws->buffer_tx_dma, dws->tx_dma);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/* dma_lock guards the DMA transfer path */
+	mutex_init(&dws->dma_lock);
\r
dws->regs = ioremap(regs->start, (regs->end - regs->start) + 1);\r
if (!dws->regs){\r
free_irq(dws->irq, dws);\r
err_free_master:\r
spi_master_put(master);\r
+	if (dws->buffer_tx_dma)
+		dma_free_coherent(&pdev->dev, DMA_BUFFER_SIZE,
+				  dws->buffer_tx_dma, dws->tx_dma);
+	if (dws->buffer_rx_dma)
+		dma_free_coherent(&pdev->dev, DMA_BUFFER_SIZE,
+				  dws->buffer_rx_dma, dws->rx_dma);
iounmap(dws->regs);\r
exit:\r
return ret;\r
rk29xx_spim_cpufreq_deregister(dws);\r
mrst_spi_debugfs_remove(dws);\r
\r
+	dma_free_coherent(&pdev->dev, DMA_BUFFER_SIZE,
+			  dws->buffer_tx_dma, dws->tx_dma);
+	dma_free_coherent(&pdev->dev, DMA_BUFFER_SIZE,
+			  dws->buffer_rx_dma, dws->rx_dma);
release_dma(dws);\r
\r
/* Remove the queue */\r