/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8
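
/*
 * DMA is used only when both a Tx and an Rx channel are available; the
 * optional platform ->enable() callback, when provided, is expected to
 * gate DMA on the controller side.
 */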
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (!host->chan_tx || !host->chan_rx)
		return;

	if (host->dma->enable)
		host->dma->enable(host, enable);
}
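
/*
 * Abort both directions: disable DMA, terminate any in-flight
 * descriptors on whichever channels exist, then re-enable DMA for the
 * next request.
 */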
void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	tmio_mmc_enable_dma(host, false);

	if (host->chan_rx)
		dmaengine_terminate_all(host->chan_rx);
	if (host->chan_tx)
		dmaengine_terminate_all(host->chan_tx);

	tmio_mmc_enable_dma(host, true);
}
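
/*
 * Set up a device-to-memory transfer for the current request. Every sg
 * element length must be a multiple of the controller alignment; a
 * single unaligned element within one page is redirected through the
 * bounce buffer, anything else falls back to PIO.
 */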
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, host->sg_len);
}
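
/*
 * Memory-to-device counterpart of the Rx path above. The only extra
 * step is copying the payload into the bounce buffer (under a
 * kmap_atomic mapping) when the single sg element is unaligned.
 */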
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	dma_cookie_t cookie;
	int ret, i;
	bool aligned = true, multiple = true;
	unsigned int align = (1 << host->pdata->alignment_shift) - 1;

	for_each_sg(sg, sg_tmp, host->sg_len, i) {
		if (sg_tmp->offset & align)
			aligned = false;
		if (sg_tmp->length & align) {
			multiple = false;
			break;
		}
	}

	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
			  (align & PAGE_MASK))) || !multiple) {
		ret = -EINVAL;
		goto pio;
	}

	if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
		host->force_pio = true;
		return;
	}

	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

	/* The only sg element can be unaligned, use our bounce buffer then */
	if (!aligned) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
		host->sg_ptr = &host->bounce_sg;
		sg = host->sg_ptr;
	}

	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
	if (ret > 0)
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_CTRL_ACK);

	if (desc) {
		cookie = dmaengine_submit(desc);
		if (cookie < 0) {
			desc = NULL;
			ret = cookie;
		}
	}
	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
		__func__, host->sg_len, ret, cookie, host->mrq);

pio:
	if (!desc) {
		/* DMA failed, fall back to PIO */
		tmio_mmc_enable_dma(host, false);
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(&host->pdev->dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
	}

	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}
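
/*
 * Dispatch the prepared mmc_data to the Rx or Tx path, but only if the
 * corresponding channel was successfully requested.
 */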
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
			struct mmc_data *data)
{
	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			tmio_mmc_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			tmio_mmc_start_dma_tx(host);
	}
}
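
/*
 * Issue bottom half: pick the channel matching the current data
 * direction under the host lock, re-arm the DATAEND interrupt, then
 * kick the engine with the pending descriptor.
 */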
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
	struct dma_chan *chan = NULL;

	spin_lock_irq(&host->lock);

	if (host && host->data) {
		if (host->data->flags & MMC_DATA_READ)
			chan = host->chan_rx;
		else
			chan = host->chan_tx;
	}

	spin_unlock_irq(&host->lock);

	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

	if (chan)
		dma_async_issue_pending(chan);
}
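
/*
 * Completion bottom half: unmap the scatterlist in the direction it was
 * mapped and hand the finished data phase back to the core IRQ handler.
 */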
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

	spin_lock_irq(&host->lock);

	if (!host->data)
		goto out;

	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}
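
/*
 * Acquire and configure the Tx/Rx slave channels, allocate the bounce
 * page and register the two tasklets. Channels come either from DT or
 * from the platform filter data; failure at any step releases whatever
 * was already taken, leaving the host in PIO-only mode.
 */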
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (!host->dma || (!host->pdev->dev.of_node &&
		(!host->dma->chan_priv_tx || !host->dma->chan_priv_rx)))
		return;

	if (!host->chan_tx && !host->chan_rx) {
		struct resource *res = platform_get_resource(host->pdev,
							     IORESOURCE_MEM, 0);
		struct dma_slave_config cfg = {};
		dma_cap_mask_t mask;
		int ret;

		if (!res)
			return;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		host->chan_tx = dma_request_slave_channel_compat(mask,
					host->dma->filter, host->dma->chan_priv_tx,
					&host->pdev->dev, "tx");
		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
			host->chan_tx);

		if (!host->chan_tx)
			return;

		if (host->dma->chan_priv_tx)
			cfg.slave_id = host->dma->slave_id_tx;
		cfg.direction = DMA_MEM_TO_DEV;
		cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
		cfg.dst_addr_width = host->dma->dma_buswidth;
		if (!cfg.dst_addr_width)
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_addr = 0;
		ret = dmaengine_slave_config(host->chan_tx, &cfg);
		if (ret < 0)
			goto ecfgtx;

		host->chan_rx = dma_request_slave_channel_compat(mask,
					host->dma->filter, host->dma->chan_priv_rx,
					&host->pdev->dev, "rx");
		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
			host->chan_rx);

		if (!host->chan_rx)
			goto ereqrx;

		if (host->dma->chan_priv_rx)
			cfg.slave_id = host->dma->slave_id_rx;
		cfg.direction = DMA_DEV_TO_MEM;
		cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
		cfg.src_addr_width = host->dma->dma_buswidth;
		if (!cfg.src_addr_width)
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr = 0;
		ret = dmaengine_slave_config(host->chan_rx, &cfg);
		if (ret < 0)
			goto ecfgrx;

		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!host->bounce_buf)
			goto ebouncebuf;

		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
	}

	tmio_mmc_enable_dma(host, true);

	return;

ebouncebuf:
ecfgrx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
ereqrx:
ecfgtx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}
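
/*
 * Tear-down counterpart of tmio_mmc_request_dma(): clear each channel
 * pointer before releasing it so late users see DMA as gone, then free
 * the bounce page.
 */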
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
	if (host->bounce_buf) {
		free_pages((unsigned long)host->bounce_buf, 0);
		host->bounce_buf = NULL;
	}
}