spi: rockchip: add DMA support for the SPI driver
author luowei <lw@rock-chips.com>
Tue, 11 Mar 2014 14:05:04 +0000 (22:05 +0800)
committer luowei <lw@rock-chips.com>
Tue, 11 Mar 2014 14:05:58 +0000 (22:05 +0800)
arch/arm/boot/dts/rk3188.dtsi
drivers/spi/spi-rockchip-core.c
drivers/spi/spi-rockchip-core.h
drivers/spi/spi-rockchip-dma.c
drivers/spi/spi-rockchip-test.c
drivers/spi/spi-rockchip.c

index 612c3c8357e549cf06fc471cf59b1e57530a02c7..c702126c7b4493e6710a6c460cf5098cc924e8c1 100755 (executable)
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+
                };
 
                pdma1: pdma@20078000 {
                        interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+
                };
        };
 
+
        uart0: serial@10124000 {
                compatible = "rockchip,serial";
                reg = <0x10124000 0x100>;
 
        spi0: spi@20070000 {
                compatible = "rockchip,rockchip-spi";
-               reg = <0x20070000 0x500>;
+               reg = <0x20070000 0x1000>;
                interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                num-cs = <2>;
                clocks =<&clk_spi0>, <&clk_gates7 12>;
                clock-names = "spi","pclk_spi0";
-               dmas = <&pdma0 16>, <&pdma0 17>;
+               dmas = <&pdma1 10>, <&pdma1 11>;
                #dma-cells = <2>;
                dma-names = "tx", "rx";
                status = "disabled";
 
        spi1: spi@20074000 {
                compatible = "rockchip,rockchip-spi";
-               reg = <0x20074000 0x500>;
+               reg = <0x20074000 0x1000>;
                interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                num-cs = <2>;
                clocks = <&clk_spi1>, <&clk_gates7 13>;
                clock-names = "spi","pclk_spi1";
-               dmas = <&pdma0 16>, <&pdma0 17>;
+               dmas = <&pdma1 12>, <&pdma1 13>;
                #dma-cells = <2>;
                dma-names = "tx", "rx";
                status = "disabled";
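The dmas entries above hand each SPI controller a pair of pdma1 request lines, and the dma-names strings "tx" and "rx" are what the driver uses to look the channels up at probe time. A minimal consumer-side sketch of that lookup follows; the helper name and error handling are illustrative only, but the dmaengine call is the same one the driver code further down relies on.

/* illustrative helper, not part of the patch */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int spi_request_dma_channels(struct device *dev,
                                    struct dma_chan **tx, struct dma_chan **rx)
{
        /* the names must match the dma-names property of the SPI node */
        *tx = dma_request_slave_channel(dev, "tx");
        *rx = dma_request_slave_channel(dev, "rx");
        if (!*tx || !*rx) {
                if (*tx)
                        dma_release_channel(*tx);
                if (*rx)
                        dma_release_channel(*rx);
                return -ENODEV;
        }
        return 0;
}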
index 05051a1fbc6d84a889e03b57b671e62c69dec40e..71b34aeca78cc26276c21bbf87910ac80e13f233 100755 (executable)
@@ -217,6 +217,7 @@ static inline void spi_debugfs_remove(struct dw_spi *dws)
 }
 #endif /* CONFIG_DEBUG_FS */
 
+
 static void wait_till_not_busy(struct dw_spi *dws)
 {
        unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);
@@ -369,8 +370,7 @@ static void *next_transfer(struct dw_spi *dws)
  */
 static int map_dma_buffers(struct dw_spi *dws)
 {
-       if (!dws->cur_msg->is_dma_mapped
-               || !dws->dma_inited
+       if (!dws->dma_inited
                || !dws->cur_chip->enable_dma
                || !dws->dma_ops)
                return 0;
@@ -400,6 +400,7 @@ static void giveback(struct dw_spi *dws)
        dws->prev_chip = dws->cur_chip;
        dws->cur_chip = NULL;
        dws->dma_mapped = 0;
+       dws->state = 0;
        //queue_work(dws->workqueue, &dws->pump_messages);
 
        /*it is important to close intterrupt*/
@@ -442,8 +443,7 @@ static void int_error_stop(struct dw_spi *dws, const char *msg)
 
        dev_err(&dws->master->dev, "%s\n", msg);
        dws->cur_msg->state = ERROR_STATE;
-       tasklet_schedule(&dws->pump_transfers);
-       
+       tasklet_schedule(&dws->pump_transfers); 
        DBG_SPI("%s:line=%d\n",__func__,__LINE__);
 }
 
@@ -570,7 +570,9 @@ static void pump_transfers(unsigned long data)
        u16 rxint_level = 0;
        u16 clk_div = 0;
        u32 speed = 0;
-       u32 cr0 = 0;
+       u32 cr0 = 0;    
+       u16 dma_ctrl = 0;
+       int ret = 0;
 
 
        /* Get current state information */
@@ -679,8 +681,6 @@ static void pump_transfers(unsigned long data)
                else
                        chip->tmode = SPI_TMOD_TO;
 
-               //cr0 &= ~SPI_TMOD_MASK;
-               //cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
 
                cr0 &= ~(0x3 << SPI_MODE_OFFSET);               
                cr0 &= ~(0x3 << SPI_TMOD_OFFSET);
@@ -715,13 +715,16 @@ static void pump_transfers(unsigned long data)
                dws->transfer_handler = interrupt_transfer;
        }
 
+
        /*
         * Reprogram registers only if
         *      1. chip select changes
         *      2. clk_div is changed
         *      3. control value changes
         */
-       if (dw_readw(dws, SPIM_CTRLR0) != cr0 || cs_change || clk_div || imask) {
+       //if (dw_readw(dws, SPIM_CTRLR0) != cr0 || cs_change || clk_div || imask)               
+       if(dws->tx || dws->rx)
+       {
                spi_enable_chip(dws, 0);
                if (dw_readl(dws, SPIM_CTRLR0) != cr0)
                        dw_writel(dws, SPIM_CTRLR0, cr0);
@@ -743,15 +746,42 @@ static void pump_transfers(unsigned long data)
                spi_mask_intr(dws, 0xff);
                if (imask)
                        spi_umask_intr(dws, imask);
-       
+
+               /* setup DMA related registers */
+               if(dws->dma_mapped)
+               {
+                       /* Set the interrupt mask; for DMA mode just disable all interrupts */
+                       spi_mask_intr(dws, 0xff);               
+                       if(dws->tx)
+                       {
+                               dma_ctrl |= SPI_DMACR_TX_ENABLE;                
+                               dw_writew(dws, SPIM_DMATDLR, 8);
+                               dw_writew(dws, SPIM_CTRLR1, dws->len-1);        
+                       }
+                       
+                       if (dws->rx)
+                       {
+                               dma_ctrl |= SPI_DMACR_RX_ENABLE;        
+                               dw_writew(dws, SPIM_DMARDLR, 0);                        
+                               dw_writew(dws, SPIM_CTRLR1, dws->len-1);        
+                       }
+                       dw_writew(dws, SPIM_DMACR, dma_ctrl);
+
+                       DBG_SPI("%s:dma_ctrl=0x%x\n",__func__,dw_readw(dws, SPIM_DMACR));
+                       
+               }
+               
                spi_enable_chip(dws, 1);        
                
                if (cs_change)
                        dws->prev_chip = chip;
+
+               DBG_SPI("%s:ctrl0=0x%x\n",__func__,dw_readw(dws, SPIM_CTRLR0));
        }
 
+       /* dma should be ready before spi_enable_chip */
        if (dws->dma_mapped)
-               dws->dma_ops->dma_transfer(dws, cs_change);
+       dws->dma_ops->dma_transfer(dws, cs_change); 
 
        if (chip->poll_mode)
                poll_transfer(dws);
@@ -763,50 +793,12 @@ early_exit:
        return;
 }
 
-static void pump_messages(struct work_struct *work)
-{
-       struct dw_spi *dws =
-               container_of(work, struct dw_spi, pump_messages);
-       unsigned long flags;
-
-       /* Lock queue and check for queue work */
-       spin_lock_irqsave(&dws->lock, flags);
-       if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
-               dws->busy = 0;
-               spin_unlock_irqrestore(&dws->lock, flags);
-               return;
-       }
-
-       /* Make sure we are not already running a message */
-       if (dws->cur_msg) {
-               spin_unlock_irqrestore(&dws->lock, flags);
-               return;
-       }
-
-       /* Extract head of queue */
-       dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
-       list_del_init(&dws->cur_msg->queue);
-
-       /* Initial message state*/
-       dws->cur_msg->state = START_STATE;
-       dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
-                                               struct spi_transfer,
-                                               transfer_list);
-       dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
-
-       /* Mark as busy and launch transfers */
-       tasklet_schedule(&dws->pump_transfers);
-
-       dws->busy = 1;
-       spin_unlock_irqrestore(&dws->lock, flags);
-}
-
-
 static int dw_spi_transfer_one_message(struct spi_master *master,
                                           struct spi_message *msg)
 {
        struct dw_spi *dws = spi_master_get_devdata(master);
-
+       int ret = 0;
+       
        dws->cur_msg = msg;
        /* Initial message state*/
        dws->cur_msg->state = START_STATE;
@@ -817,11 +809,28 @@ static int dw_spi_transfer_one_message(struct spi_master *master,
        /* prepare to setup the SSP, in pump_transfers, using the per
         * chip configuration */
        dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
-
+       
+       dws->dma_mapped = map_dma_buffers(dws); 
+       INIT_COMPLETION(dws->xfer_completion);
+       
        /* Mark as busy and launch transfers */
        tasklet_schedule(&dws->pump_transfers);
        
        DBG_SPI("%s:line=%d\n",__func__,__LINE__);
+       if (dws->dma_mapped)
+       {
+               ret = wait_for_completion_timeout(&dws->xfer_completion,
+                                                       msecs_to_jiffies(2000));
+               if(ret == 0)
+               {
+                       dev_err(&dws->master->dev, "dma transfer timeout\n");                   
+                       giveback(dws);
+                       return 0;
+               }
+               
+               DBG_SPI("%s:wait %d\n",__func__, ret);
+       }
+               
        return 0;
 }
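dw_spi_transfer_one_message() now maps the DMA buffers up front, re-arms dws->xfer_completion and, for DMA transfers, blocks until the DMA callbacks signal completion or a two-second timeout expires. Below is a condensed, self-contained sketch of that wait pattern; the struct and function names are simplified stand-ins, not the driver's own.

/* condensed sketch of the completion handshake, stand-in names */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct xfer_ctx {
        struct completion done;         /* init_completion() at setup time */
};

static int submit_and_wait(struct xfer_ctx *ctx)
{
        unsigned long left;

        INIT_COMPLETION(ctx->done);     /* reinit_completion() on newer kernels */
        /* ... kick off the DMA transfer here ... */
        left = wait_for_completion_timeout(&ctx->done, msecs_to_jiffies(2000));
        return left ? 0 : -ETIMEDOUT;   /* zero jiffies left means timeout */
}

/* called from the DMA callback once both directions have finished */
static void xfer_finished(struct xfer_ctx *ctx)
{
        complete(&ctx->done);
}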
 
@@ -953,7 +962,7 @@ static void spi_hw_init(struct dw_spi *dws)
                dw_writew(dws, SPIM_TXFTLR, 0);
        }
        
-       spi_enable_chip(dws, 1);
+       //spi_enable_chip(dws, 1);
        flush(dws);
        DBG_SPI("%s:fifo_len=%d\n",__func__, dws->fifo_len);
 }
@@ -975,7 +984,8 @@ int dw_spi_add_host(struct dw_spi *dws)
        dws->type = SSI_MOTO_SPI;
        dws->prev_chip = NULL;
        dws->dma_inited = 0;
-       dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
+       dws->tx_dma_addr = (dma_addr_t)(dws->paddr + SPIM_TXDR);        
+       dws->rx_dma_addr = (dma_addr_t)(dws->paddr + SPIM_RXDR);
        snprintf(dws->name, sizeof(dws->name), "dw_spi%d",
                        dws->bus_num);
 
@@ -1006,7 +1016,7 @@ int dw_spi_add_host(struct dw_spi *dws)
        if (dws->dma_ops && dws->dma_ops->dma_init) {
                ret = dws->dma_ops->dma_init(dws);
                if (ret) {
-                       dev_warn(&master->dev, "DMA init failed\n");
+                       dev_warn(&master->dev, "DMA init failed, ret=%d\n", ret);
                        dws->dma_inited = 0;
                }
        }
index 3225a98d62214619ab1764e33f385dd9f2c6cdd1..0bb9e5753d32b585d4478fceeef5730b2bb445a6 100755 (executable)
@@ -3,6 +3,7 @@
 
 #include <linux/io.h>
 #include <linux/scatterlist.h>
+#include <linux/dmaengine.h>
 
 
 #if 1
 #define SPI_CLEAR_INT_RXOI      (1 << 2)
 #define SPI_CLEAR_INT_TXOI      (1 << 3)
 
+#define SUSPND    (1<<0)
+#define SPIBUSY   (1<<1)
+#define RXBUSY    (1<<2)
+#define TXBUSY    (1<<3)
 
-#if 0
-
-
-/* Bit fields in CTRLR0 */
-#define SPI_DFS_OFFSET                 0
-
-#define SPI_FRF_OFFSET                 4
-#define SPI_FRF_SPI                    0x0
-#define SPI_FRF_SSP                    0x1
-#define SPI_FRF_MICROWIRE              0x2
-#define SPI_FRF_RESV                   0x3
-
-#define SPI_MODE_OFFSET                        6
-#define SPI_SCPH_OFFSET                        6
-#define SPI_SCOL_OFFSET                        7
-
-#define SPI_TMOD_OFFSET                        8
-#define SPI_TMOD_MASK                  (0x3 << SPI_TMOD_OFFSET)
-#define        SPI_TMOD_TR                     0x0             /* xmit & recv */
-#define SPI_TMOD_TO                    0x1             /* xmit only */
-#define SPI_TMOD_RO                    0x2             /* recv only */
-#define SPI_TMOD_EPROMREAD             0x3             /* eeprom read mode */
-
-#define SPI_SLVOE_OFFSET               10
-#define SPI_SRL_OFFSET                 11
-#define SPI_CFS_OFFSET                 12
-
-/* Bit fields in SR, 7 bits */
-#define SR_MASK                                0x7f            /* cover 7 bits */
-#define SR_BUSY                                (1 << 0)
-#define SR_TF_NOT_FULL                 (1 << 1)
-#define SR_TF_EMPT                     (1 << 2)
-#define SR_RF_NOT_EMPT                 (1 << 3)
-#define SR_RF_FULL                     (1 << 4)
-#define SR_TX_ERR                      (1 << 5)
-#define SR_DCOL                                (1 << 6)
-
-/* Bit fields in ISR, IMR, RISR, 7 bits */
-#define SPI_INT_TXEI                   (1 << 0)
-#define SPI_INT_TXOI                   (1 << 1)
-#define SPI_INT_RXUI                   (1 << 2)
-#define SPI_INT_RXOI                   (1 << 3)
-#define SPI_INT_RXFI                   (1 << 4)
-#define SPI_INT_MSTI                   (1 << 5)
-
-/* Bit fields in DMACR */
-#define SPI_DMACR_TX_ENABLE     (1 << 1)
-#define SPI_DMACR_RX_ENABLE     (1 << 0)
-
-/* Bit fields in ICR */
-#define SPI_CLEAR_INT_ALL       (1<< 0)
-#define SPI_CLEAR_INT_RXUI      (1 << 1)
-#define SPI_CLEAR_INT_RXOI      (1 << 2)
-#define SPI_CLEAR_INT_TXOI      (1 << 3)
-
-
-/* TX RX interrupt level threshold, max can be 256 */
-#define SPI_INT_THRESHOLD              16
-#endif
 
 enum dw_ssi_type {
        SSI_MOTO_SPI = 0,
@@ -225,7 +171,7 @@ struct dw_spi {
        void                    *tx_end;
        void                    *rx;
        void                    *rx_end;
-       int                     dma_mapped;
+       int                             dma_mapped;
        dma_addr_t              rx_dma;
        dma_addr_t              tx_dma;
        size_t                  rx_map_len;
@@ -234,6 +180,14 @@ struct dw_spi {
        u8                      max_bits_per_word;      /* maxim is 16b */
        u32                     dma_width;
        int                     cs_change;
+       void                    *tx_buffer;
+       void                    *rx_buffer;
+       dma_addr_t              rx_dma_init;
+       dma_addr_t              tx_dma_init;
+       dma_cookie_t            rx_cookie;
+       dma_cookie_t            tx_cookie;
+       int                     state;
+       struct completion       xfer_completion;
        irqreturn_t             (*transfer_handler)(struct dw_spi *dws);
        void                    (*cs_control)(struct dw_spi *dws, u32 cs, u8 flag);
 
@@ -245,7 +199,8 @@ struct dw_spi {
        struct scatterlist      rx_sgl;
        int                     dma_chan_done;
        struct device           *dma_dev;
-       dma_addr_t              dma_addr; /* phy address of the Data register */
+       dma_addr_t              tx_dma_addr; /* phy address of the Data register */     
+       dma_addr_t              rx_dma_addr; /* phy address of the Data register */
        struct dw_spi_dma_ops   *dma_ops;
        void                    *dma_priv; /* platform relate info */
        
@@ -341,7 +296,7 @@ struct dw_spi_chip {
        u8 poll_mode;   /* 0 for contoller polling mode */
        u8 type;        /* SPI/SSP/Micrwire */
        u8 enable_dma;
-       void (*cs_control)(u32 command);
+       void (*cs_control)(struct dw_spi *dws, u32 cs, u8 flag);
 };
 
 extern int dw_spi_add_host(struct dw_spi *dws);
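The new RXBUSY/TXBUSY flags together with the xfer_completion member implement a "whichever DMA direction finishes last signals completion" handshake between the two callbacks. A reduced sketch of that pattern is shown below; the context struct is a simplified stand-in for struct dw_spi, and the flag check sits inside the lock here for brevity.

/* reduced sketch of the last-one-out-signals pattern, stand-in types */
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define RXBUSY    (1<<2)
#define TXBUSY    (1<<3)

struct dma_xfer_state {
        spinlock_t              lock;
        int                     state;          /* RXBUSY | TXBUSY while running */
        struct completion       done;
};

static void dma_direction_done(struct dma_xfer_state *s, int dir_flag)
{
        unsigned long flags;
        bool last;

        spin_lock_irqsave(&s->lock, flags);
        s->state &= ~dir_flag;                          /* this direction finished */
        last = !(s->state & (RXBUSY | TXBUSY));         /* was it the last one? */
        spin_unlock_irqrestore(&s->lock, flags);

        if (last)
                complete(&s->done);                     /* wake the waiting thread */
}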
index 653396a52acca3fd2cde46675b2df49f707543c1..750d598f2b3e56e73cfac491f1489adfd7e921e5 100755 (executable)
@@ -39,6 +39,8 @@
 #include "spi-rockchip-core.h"
 
 #ifdef CONFIG_SPI_ROCKCHIP_DMA
+#define DMA_BUFFER_SIZE (PAGE_SIZE<<4)
+
 
 struct spi_dma_slave {
        struct dma_chan *ch;
@@ -52,155 +54,286 @@ struct spi_dma {
        struct spi_dma_slave    dmas_rx;
 };
 
-static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
+static void printk_transfer_data(struct dw_spi *dws, char *buf, int len)
 {
-       struct dw_spi *dws = param;
-       int ret = 0;
-       ret = dws->parent_dev && (&dws->parent_dev == chan->device->dev);
+       int i = 0;
+       for(i=0; i<len; i++)
+               DBG_SPI("0x%02x,",*buf++);
 
-       printk("%s:ret=%d\n",__func__, ret);
-       return ret;
-}
+       DBG_SPI("\n");
 
+}
 
 static int mid_spi_dma_init(struct dw_spi *dws)
 {
        struct spi_dma *dw_dma = dws->dma_priv;
        struct spi_dma_slave *rxs, *txs;
        dma_cap_mask_t mask;
-
-       /*
-        * Get pci device for DMA controller, currently it could only
-        * be the DMA controller of either Moorestown or Medfield
-        */
-       //dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
-       //if (!dws->dmac)
-       //      dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
-
-       dma_cap_zero(mask);
-       dma_cap_set(DMA_SLAVE, mask);
+       
+       DBG_SPI("%s:start\n",__func__);
 
        /* 1. Init rx channel */
-       dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
+       dws->rxchan = dma_request_slave_channel(dws->parent_dev, "rx");
        if (!dws->rxchan)
+       {
+               dev_err(dws->parent_dev, "Failed to get RX DMA channel\n");
                goto err_exit;
+       }
+       
+       DBG_SPI("%s:rx_chan_id=%d\n",__func__,dws->rxchan->chan_id);
+       
        rxs = &dw_dma->dmas_rx;
-       //rxs->hs_mode = LNW_DMA_HW_HS;
-       //rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
        dws->rxchan->private = rxs;
 
        /* 2. Init tx channel */
-       dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
-       if (!dws->txchan)
-               goto free_rxchan;
+       dws->txchan = dma_request_slave_channel(dws->parent_dev, "tx");
+       if (!dws->txchan)
+       {
+               dev_err(dws->parent_dev, "Failed to get TX DMA channel\n");
+               goto free_rxchan;
+       }
        txs = &dw_dma->dmas_tx;
-       //txs->hs_mode = LNW_DMA_HW_HS;
-       //txs->cfg_mode = LNW_DMA_MEM_TO_PER;
        dws->txchan->private = txs;
+       
+       DBG_SPI("%s:tx_chan_id=%d\n",__func__,dws->txchan->chan_id);
 
        dws->dma_inited = 1;
+
+       DBG_SPI("%s:line=%d\n",__func__,__LINE__);
        return 0;
 
-       free_rxchan:
+free_rxchan:
        dma_release_channel(dws->rxchan);
-       err_exit:
+err_exit:
        return -1;
 
 }
 
 static void mid_spi_dma_exit(struct dw_spi *dws)
 {
+       DBG_SPI("%s:start\n",__func__);
        dma_release_channel(dws->txchan);
        dma_release_channel(dws->rxchan);
 }
 
-/*
- * dws->dma_chan_done is cleared before the dma transfer starts,
- * callback for rx/tx channel will each increment it by 1.
- * Reaching 2 means the whole spi transaction is done.
- */
-static void dw_spi_dma_done(void *arg)
+
+static void dw_spi_dma_rxcb(void *arg)
 {
        struct dw_spi *dws = arg;
+       unsigned long flags;
+       struct dma_tx_state             state;
+       int                             dma_status;
+
+       dma_sync_single_for_device(dws->rxchan->device->dev, dws->rx_dma,
+                                  dws->len, DMA_FROM_DEVICE);
+       
+       dma_status = dmaengine_tx_status(dws->rxchan, dws->rx_cookie, &state);
+       
+       DBG_SPI("%s:dma_status=0x%x\n", __FUNCTION__, dma_status);
+       
+       spin_lock_irqsave(&dws->lock, flags);           
+       if (dma_status == DMA_SUCCESS)
+               dws->state &= ~RXBUSY;
+       else
+               dev_err(&dws->master->dev, "error:rx dma_status=%x\n", dma_status);
+
+       //copy data from dma to transfer buf
+       if(dws->cur_transfer && (dws->cur_transfer->rx_buf != NULL))
+       {
+               memcpy(dws->cur_transfer->rx_buf, dws->rx_buffer, dws->cur_transfer->len);
+               
+               DBG_SPI("dma rx:");
+               printk_transfer_data(dws, dws->cur_transfer->rx_buf, dws->cur_transfer->len);
+       }
+       
+       spin_unlock_irqrestore(&dws->lock, flags);
+       
+       /* If the other direction is also done */
+       if (!(dws->state & TXBUSY))
+       {
+               complete(&dws->xfer_completion);        
+               DBG_SPI("%s:complete\n", __FUNCTION__);         
+               //the DMA path must not lose the transfer-done interrupt
+               dw_spi_xfer_done(dws);
+       }
 
-       if (++dws->dma_chan_done != 2)
-               return;
-       dw_spi_xfer_done(dws);
 }
 
+static void dw_spi_dma_txcb(void *arg)
+{
+       struct dw_spi *dws = arg;
+       unsigned long flags;
+       struct dma_tx_state             state;
+       int                             dma_status;
+
+       dma_sync_single_for_device(dws->txchan->device->dev, dws->tx_dma,
+                                  dws->len, DMA_TO_DEVICE);
+       
+       dma_status = dmaengine_tx_status(dws->txchan, dws->tx_cookie, &state);
+       
+       DBG_SPI("%s:dma_status=0x%x\n", __FUNCTION__, dma_status);
+       DBG_SPI("dma tx:");
+       printk_transfer_data(dws, (char *)dws->cur_transfer->tx_buf, dws->cur_transfer->len);
+       
+       spin_lock_irqsave(&dws->lock, flags);
+       
+       if (dma_status == DMA_SUCCESS)
+               dws->state &= ~TXBUSY;
+       else    
+               dev_err(&dws->master->dev, "error:tx dma_status=%x\n", dma_status);     
+
+       spin_unlock_irqrestore(&dws->lock, flags);
+       
+       /* If the other direction is also done */
+       if (!(dws->state & RXBUSY)) 
+       {
+               complete(&dws->xfer_completion);                
+               DBG_SPI("%s:complete\n", __FUNCTION__);
+
+               //the DMA path must not lose the transfer-done interrupt
+               dw_spi_xfer_done(dws);
+       }
+
+}
+
+
 static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
 {
        struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
        struct dma_chan *txchan, *rxchan;
        struct dma_slave_config txconf, rxconf;
        u16 dma_ctrl = 0;
+       int ret = 0;
+       
+       enum dma_slave_buswidth width;
+
+       DBG_SPI("%s:cs_change=%d\n",__func__,cs_change);
+       
+       //fall back to the driver's own DMA bounce buffer when cur_transfer->tx_dma or cur_transfer->rx_dma is not set
+       if((dws->cur_transfer->tx_buf) && dws->dma_mapped && (!dws->cur_transfer->tx_dma))
+       {
+               //printk("%s:warning tx_dma is %p\n",__func__, (int *)dws->tx_dma);
+               memcpy(dws->tx_buffer, dws->cur_transfer->tx_buf, dws->cur_transfer->len);              
+               dws->tx_dma = dws->tx_dma_init;
+       }
 
-       /* 1. setup DMA related registers */
-       if (cs_change) {
-               spi_enable_chip(dws, 0);
-               dw_writew(dws, SPIM_DMARDLR, 0xf);
-               dw_writew(dws, SPIM_DMATDLR, 0x10);
-               if (dws->tx_dma)
-                       dma_ctrl |= 0x2;
-               if (dws->rx_dma)
-                       dma_ctrl |= 0x1;
-               dw_writew(dws, SPIM_DMACR, dma_ctrl);
-               spi_enable_chip(dws, 1);
+       if((dws->cur_transfer->rx_buf) && dws->dma_mapped && (!dws->cur_transfer->rx_dma))
+       {               
+               //printk("%s:warning rx_dma is %p\n",__func__, (int *)dws->rx_dma);
+               dws->rx_dma = dws->rx_dma_init;
        }
 
+       
+       if (dws->tx)
+               dws->state |= TXBUSY;   
+       if (dws->rx)
+               dws->state |= RXBUSY;
+
+       
+       switch (dws->n_bytes) {
+       case 1:
+               width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+               break;
+       case 2:
+               width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+               break;
+       default:
+               width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+               break;
+       }
+               
        dws->dma_chan_done = 0;
+       
+       if (dws->tx)
        txchan = dws->txchan;
+       
+       if (dws->rx)
        rxchan = dws->rxchan;
+       
+       if (dws->tx)
+       {
+               /* 2. Prepare the TX dma transfer */
+               txconf.direction = DMA_MEM_TO_DEV;
+               txconf.dst_addr = dws->tx_dma_addr;
+               txconf.dst_maxburst = dws->dma_width;
+               //txconf.src_addr_width = width;
+               txconf.dst_addr_width = width;
+               //txconf.device_fc = false;
+
+               ret = dmaengine_slave_config(txchan, &txconf);
+               if (ret) {
+                       dev_warn(dws->parent_dev, "TX DMA slave config failed\n");
+                       return -1;
+               }
+               
+               memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
+               dws->tx_sgl.dma_address = dws->tx_dma;
+               dws->tx_sgl.length = dws->len;
+
+               txdesc = dmaengine_prep_slave_sg(txchan,
+                                       &dws->tx_sgl,
+                                       1,
+                                       DMA_MEM_TO_DEV,
+                                       DMA_PREP_INTERRUPT);
+               
+               txdesc->callback = dw_spi_dma_txcb;
+               txdesc->callback_param = dws;
+
+               DBG_SPI("%s:dst_addr=0x%p,tx_dma=0x%p,len=%d,burst=%d,width=%d\n",__func__,(int *)dws->tx_dma_addr, (int *)dws->tx_dma, dws->len,dws->dma_width, width);
+       }
 
-       /* 2. Prepare the TX dma transfer */
-       txconf.direction = DMA_MEM_TO_DEV;
-       txconf.dst_addr = dws->dma_addr;
-       txconf.dst_maxburst = 0x03;//LNW_DMA_MSIZE_16;
-       txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-       txconf.device_fc = false;
-
-       txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
-                                      (unsigned long) &txconf);
-
-       memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
-       dws->tx_sgl.dma_address = dws->tx_dma;
-       dws->tx_sgl.length = dws->len;
-
-       txdesc = dmaengine_prep_slave_sg(txchan,
-                               &dws->tx_sgl,
-                               1,
-                               DMA_MEM_TO_DEV,
-                               DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
-       txdesc->callback = dw_spi_dma_done;
-       txdesc->callback_param = dws;
-
-       /* 3. Prepare the RX dma transfer */
-       rxconf.direction = DMA_DEV_TO_MEM;
-       rxconf.src_addr = dws->dma_addr;
-       rxconf.src_maxburst = 0x03; //LNW_DMA_MSIZE_16;
-       rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
-       rxconf.device_fc = false;
-
-       rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
-                                      (unsigned long) &rxconf);
-
-       memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
-       dws->rx_sgl.dma_address = dws->rx_dma;
-       dws->rx_sgl.length = dws->len;
-
-       rxdesc = dmaengine_prep_slave_sg(rxchan,
-                               &dws->rx_sgl,
-                               1,
-                               DMA_DEV_TO_MEM,
-                               DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
-       rxdesc->callback = dw_spi_dma_done;
-       rxdesc->callback_param = dws;
-
-       /* rx must be started before tx due to spi instinct */
-       rxdesc->tx_submit(rxdesc);
-       txdesc->tx_submit(txdesc);
+       if (dws->rx)
+       {
+               /* 3. Prepare the RX dma transfer */
+               rxconf.direction = DMA_DEV_TO_MEM;
+               rxconf.src_addr = dws->rx_dma_addr;
+               rxconf.src_maxburst = dws->dma_width; 
+               //rxconf.dst_addr_width = width;
+               rxconf.src_addr_width = width;
+               //rxconf.device_fc = false;
+
+               ret = dmaengine_slave_config(rxchan, &rxconf);
+               if (ret) {
+                       dev_warn(dws->parent_dev, "RX DMA slave config failed\n");
+                       return -1;
+               }
+
+               memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
+               dws->rx_sgl.dma_address = dws->rx_dma;
+               dws->rx_sgl.length = dws->len;                          
+
+               rxdesc = dmaengine_prep_slave_sg(rxchan,
+                                       &dws->rx_sgl,
+                                       1,
+                                       DMA_DEV_TO_MEM,
+                                       DMA_PREP_INTERRUPT);
+               rxdesc->callback = dw_spi_dma_rxcb;
+               rxdesc->callback_param = dws;
+               
+               DBG_SPI("%s:src_addr=0x%p,rx_dma=0x%p,len=%d,burst=%d,width=%d\n",__func__, (int *)dws->rx_dma_addr, (int *)dws->rx_dma, dws->len, dws->dma_width, width);
+       }
+       
+       /* rx must be started before tx due to spi instinct */  
+       if (dws->rx)
+       {               
+               dws->rx_cookie = dmaengine_submit(rxdesc);
+               dma_sync_single_for_device(rxchan->device->dev, dws->rx_dma,
+                                  dws->len, DMA_FROM_DEVICE);
+               dma_async_issue_pending(rxchan);
+               
+               DBG_SPI("%s:rx end\n",__func__);
+       }
+       
+       if (dws->tx)
+       {               
+               dws->tx_cookie = dmaengine_submit(txdesc);
+               dma_sync_single_for_device(txchan->device->dev, dws->tx_dma,
+                                  dws->len, DMA_TO_DEVICE);
+               dma_async_issue_pending(txchan);
+               
+               DBG_SPI("%s:tx end\n",__func__);
+       }
+       
        return 0;
 }
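mid_spi_dma_transfer() above follows the standard dmaengine slave sequence for each direction: dmaengine_slave_config(), dmaengine_prep_slave_sg() on a one-entry scatterlist, attach the callback, dmaengine_submit(), then dma_async_issue_pending(). Below is a stripped-down sketch of that sequence for the TX direction only; the function name, FIFO address, burst size and bus width are placeholders rather than the driver's exact settings.

/* stripped-down TX-only sketch of the dmaengine slave sequence; placeholder values */
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int start_tx_dma(struct dma_chan *chan, struct scatterlist *sg,
                        dma_addr_t fifo_addr, dma_addr_t buf_dma, size_t len,
                        dma_async_tx_callback done_cb, void *cb_arg)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo_addr,            /* SPI TX FIFO address */
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                .dst_maxburst   = 8,
        };
        struct dma_async_tx_descriptor *desc;
        int ret;

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                return ret;

        /* sg must stay valid for the transfer, as tx_sgl does in struct dw_spi */
        sg_init_table(sg, 1);
        sg_dma_address(sg) = buf_dma;
        sg_dma_len(sg) = len;

        desc = dmaengine_prep_slave_sg(chan, sg, 1, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT);
        if (!desc)
                return -EINVAL;

        desc->callback = done_cb;
        desc->callback_param = cb_arg;

        dmaengine_submit(desc);                 /* queue the descriptor */
        dma_async_issue_pending(chan);          /* start the transfer */
        return 0;
}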
 
@@ -212,11 +345,33 @@ static struct dw_spi_dma_ops spi_dma_ops = {
 
 int dw_spi_dma_init(struct dw_spi *dws)
 {
-
+       DBG_SPI("%s:start\n",__func__);
        dws->dma_priv = kzalloc(sizeof(struct spi_dma), GFP_KERNEL);
        if (!dws->dma_priv)
                return -ENOMEM;
        dws->dma_ops = &spi_dma_ops;
+
+       dws->tx_buffer = dma_alloc_coherent(dws->parent_dev, DMA_BUFFER_SIZE, &dws->tx_dma_init, GFP_KERNEL | GFP_DMA);
+       if (!dws->tx_buffer)
+       {
+               dev_err(dws->parent_dev, "failed to allocate DMA tx buffer\n");
+               return -ENOMEM;
+       }
+
+       dws->rx_buffer = dma_alloc_coherent(dws->parent_dev, DMA_BUFFER_SIZE, &dws->rx_dma_init, GFP_KERNEL | GFP_DMA);
+       if (!dws->rx_buffer)
+       {
+               dev_err(dws->parent_dev, "failed to allocate DMA rx buffer\n");
+               dma_free_coherent(dws->parent_dev, DMA_BUFFER_SIZE, dws->tx_buffer, dws->tx_dma_init);
+               return -ENOMEM;
+       }
+
+       memset(dws->tx_buffer, 0, DMA_BUFFER_SIZE);
+       memset(dws->rx_buffer, 0, DMA_BUFFER_SIZE);
+
+       dws->state = 0;
+       
+       init_completion(&dws->xfer_completion);
+       
        return 0;
 }
 #endif
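dw_spi_dma_init() above now allocates two coherent bounce buffers that back transfers whose spi_transfer carries no DMA address. The patch does not show a matching release; a minimal sketch of what a teardown helper could look like is given below, assuming it is called from the driver's exit path. The helper name is hypothetical, while the fields are the ones this patch adds to struct dw_spi.

/* hypothetical teardown counterpart of dw_spi_dma_init(), not part of the patch */
static void dw_spi_dma_buffers_release(struct dw_spi *dws)
{
        if (dws->tx_buffer)
                dma_free_coherent(dws->parent_dev, DMA_BUFFER_SIZE,
                                  dws->tx_buffer, dws->tx_dma_init);
        if (dws->rx_buffer)
                dma_free_coherent(dws->parent_dev, DMA_BUFFER_SIZE,
                                  dws->rx_buffer, dws->rx_dma_init);
}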
index a242e45ef239e8a9e063fe5f6ebbf1f11e666878..627101a209d0441d007813efea83c193f7bc7511 100755 (executable)
@@ -46,11 +46,11 @@ static struct spi_test_data *g_spi_test_data[MAX_SPI_BUS_NUM];
 static struct dw_spi_chip spi_test_chip[] = {\r
 {\r
        //.poll_mode = 1,\r
-       //.enable_dma = 1,\r
+       .enable_dma = 1,\r
 },\r
 {\r
        //.poll_mode = 1,\r
-       //.enable_dma = 1,\r
+       .enable_dma = 1,\r
 },\r
 \r
 };\r
@@ -153,7 +153,7 @@ static ssize_t spi_test_write(struct file *file,
                        break;\r
        }\r
 \r
-       for(i=0; i<1; i++)\r
+       for(i=0; i<100; i++)\r
        {\r
                ret = spi_write(spi, txbuf, 256);\r
                ret = spi_read(spi, rxbuf, 256);\r
index 95dec8e69e01871379a90d121fefd68d390632b0..38aa5d2fe5db9b84e610a22f65d143322ddc776d 100755 (executable)
@@ -83,7 +83,7 @@ static struct rockchip_spi_info *rockchip_spi_parse_dt(struct device *dev)
        }
 
        if (of_property_read_u32(dev->of_node, "max-freq", &temp)) {
-               dev_warn(dev, "fail to get max-freq\n");
+               dev_warn(dev, "failed to get max-freq, defaulting to %dHz\n", SPI_MAX_FREQUENCY);
                info->spi_freq = SPI_MAX_FREQUENCY;
        } else {
                info->spi_freq = temp;
@@ -254,6 +254,8 @@ static int rockchip_spi_remove(struct platform_device *pdev)
        dw_spi_remove_host(&sdd->dws);
        iounmap(sdd->dws.regs);
        kfree(sdd);
+
+       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -336,7 +338,7 @@ static int __init rockchip_spi_init(void)
 {
        return platform_driver_probe(&rockchip_spi_driver, rockchip_spi_probe);
 }
-subsys_initcall(rockchip_spi_init);
+module_init(rockchip_spi_init);
 
 static void __exit rockchip_spi_exit(void)
 {