/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Copyright (C) 2014 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
/*
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};
/*
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 */
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));
/*
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};
#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)
/*
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};
#define RCAR_DMAC_DESCS_PER_PAGE \
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
	sizeof(struct rcar_dmac_xfer_chunk))
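
/*
 * Note: descriptors and transfer chunks are allocated a full page at a time
 * (see rcar_dmac_desc_alloc() and rcar_dmac_xfer_chunk_alloc()). A given page
 * only ever holds objects of one of the two types, which is why descs[] and
 * chunks[] can share the storage that follows the node field.
 */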
/*
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @src_xfer_size: size (in bytes) of hardware transfers on the source side
 * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
 * @src_slave_addr: slave source memory address
 * @dst_slave_addr: slave destination memory address
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of completed descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;

	unsigned int src_xfer_size;
	unsigned int dst_xfer_size;
	dma_addr_t src_slave_addr;
	dma_addr_t dst_slave_addr;
	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};
#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)
/*
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;

	unsigned long modules[256 / BITS_PER_LONG];
};
#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)
191 /* -----------------------------------------------------------------------------
195 #define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i))
197 #define RCAR_DMAISTA 0x0020
198 #define RCAR_DMASEC 0x0030
199 #define RCAR_DMAOR 0x0060
200 #define RCAR_DMAOR_PRI_FIXED (0 << 8)
201 #define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
202 #define RCAR_DMAOR_AE (1 << 2)
203 #define RCAR_DMAOR_DME (1 << 0)
204 #define RCAR_DMACHCLR 0x0080
205 #define RCAR_DMADPSEC 0x00a0
207 #define RCAR_DMASAR 0x0000
208 #define RCAR_DMADAR 0x0004
209 #define RCAR_DMATCR 0x0008
210 #define RCAR_DMATCR_MASK 0x00ffffff
211 #define RCAR_DMATSR 0x0028
212 #define RCAR_DMACHCR 0x000c
213 #define RCAR_DMACHCR_CAE (1 << 31)
214 #define RCAR_DMACHCR_CAIE (1 << 30)
215 #define RCAR_DMACHCR_DPM_DISABLED (0 << 28)
216 #define RCAR_DMACHCR_DPM_ENABLED (1 << 28)
217 #define RCAR_DMACHCR_DPM_REPEAT (2 << 28)
218 #define RCAR_DMACHCR_DPM_INFINITE (3 << 28)
219 #define RCAR_DMACHCR_RPT_SAR (1 << 27)
220 #define RCAR_DMACHCR_RPT_DAR (1 << 26)
221 #define RCAR_DMACHCR_RPT_TCR (1 << 25)
222 #define RCAR_DMACHCR_DPB (1 << 22)
223 #define RCAR_DMACHCR_DSE (1 << 19)
224 #define RCAR_DMACHCR_DSIE (1 << 18)
225 #define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3))
226 #define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3))
227 #define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3))
228 #define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3))
229 #define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3))
230 #define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3))
231 #define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3))
232 #define RCAR_DMACHCR_DM_FIXED (0 << 14)
233 #define RCAR_DMACHCR_DM_INC (1 << 14)
234 #define RCAR_DMACHCR_DM_DEC (2 << 14)
235 #define RCAR_DMACHCR_SM_FIXED (0 << 12)
236 #define RCAR_DMACHCR_SM_INC (1 << 12)
237 #define RCAR_DMACHCR_SM_DEC (2 << 12)
238 #define RCAR_DMACHCR_RS_AUTO (4 << 8)
239 #define RCAR_DMACHCR_RS_DMARS (8 << 8)
240 #define RCAR_DMACHCR_IE (1 << 2)
241 #define RCAR_DMACHCR_TE (1 << 1)
242 #define RCAR_DMACHCR_DE (1 << 0)
243 #define RCAR_DMATCRB 0x0018
244 #define RCAR_DMATSRB 0x0038
245 #define RCAR_DMACHCRB 0x001c
246 #define RCAR_DMACHCRB_DCNT(n) ((n) << 24)
247 #define RCAR_DMACHCRB_DPTR_MASK (0xff << 16)
248 #define RCAR_DMACHCRB_DPTR_SHIFT 16
249 #define RCAR_DMACHCRB_DRST (1 << 15)
250 #define RCAR_DMACHCRB_DTS (1 << 8)
251 #define RCAR_DMACHCRB_SLM_NORMAL (0 << 4)
252 #define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4)
253 #define RCAR_DMACHCRB_PRI(n) ((n) << 0)
254 #define RCAR_DMARS 0x0040
255 #define RCAR_DMABUFCR 0x0048
256 #define RCAR_DMABUFCR_MBU(n) ((n) << 16)
257 #define RCAR_DMABUFCR_ULB(n) ((n) << 0)
258 #define RCAR_DMADPBASE 0x0050
259 #define RCAR_DMADPBASE_MASK 0xfffffff0
260 #define RCAR_DMADPBASE_SEL (1 << 0)
261 #define RCAR_DMADPCR 0x0054
262 #define RCAR_DMADPCR_DIPT(n) ((n) << 24)
263 #define RCAR_DMAFIXSAR 0x0010
264 #define RCAR_DMAFIXDAR 0x0014
265 #define RCAR_DMAFIXDPBASE 0x0060
267 /* Hardcode the MEMCPY transfer size to 4 bytes. */
268 #define RCAR_DMAC_MEMCPY_XFER_SIZE 4
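/*
 * Note: the memcpy alignment advertised to the dmaengine core (copy_align in
 * rcar_dmac_probe()) is derived from this value, so MEMCPY sources and
 * destinations only need to be 4-byte aligned.
 */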
/* -----------------------------------------------------------------------------
 * Device access
 */
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->iomem + reg);
	else
		writel(data, dmac->iomem + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->iomem + reg);
	else
		return readl(dmac->iomem + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}
/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */

static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
}
317 static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
319 struct rcar_dmac_desc *desc = chan->desc.running;
320 u32 chcr = desc->chcr;
322 WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));
324 if (chan->mid_rid >= 0)
325 rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
327 if (desc->hwdescs.use) {
328 struct rcar_dmac_xfer_chunk *chunk;
330 dev_dbg(chan->chan.device->dev,
331 "chan%u: queue desc %p: %u@%pad\n",
332 chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
334 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
335 rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
336 desc->hwdescs.dma >> 32);
338 rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
339 (desc->hwdescs.dma & 0xfffffff0) |
341 rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
342 RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
346 * Errata: When descriptor memory is accessed through an IOMMU
347 * the DMADAR register isn't initialized automatically from the
348 * first descriptor at beginning of transfer by the DMAC like it
349 * should. Initialize it manually with the destination address
350 * of the first chunk.
352 chunk = list_first_entry(&desc->chunks,
353 struct rcar_dmac_xfer_chunk, node);
354 rcar_dmac_chan_write(chan, RCAR_DMADAR,
355 chunk->dst_addr & 0xffffffff);
358 * Program the descriptor stage interrupt to occur after the end
359 * of the first stage.
361 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));
363 chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
364 | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;
367 * If the descriptor isn't cyclic enable normal descriptor mode
368 * and the transfer completion interrupt.
371 chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
373 * If the descriptor is cyclic and has a callback enable the
374 * descriptor stage interrupt in infinite repeat mode.
376 else if (desc->async_tx.callback)
377 chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
379 * Otherwise just select infinite repeat mode without any
383 chcr |= RCAR_DMACHCR_DPM_INFINITE;
385 struct rcar_dmac_xfer_chunk *chunk = desc->running;
387 dev_dbg(chan->chan.device->dev,
388 "chan%u: queue chunk %p: %u@%pad -> %pad\n",
389 chan->index, chunk, chunk->size, &chunk->src_addr,
392 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
393 rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
394 chunk->src_addr >> 32);
395 rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
396 chunk->dst_addr >> 32);
398 rcar_dmac_chan_write(chan, RCAR_DMASAR,
399 chunk->src_addr & 0xffffffff);
400 rcar_dmac_chan_write(chan, RCAR_DMADAR,
401 chunk->dst_addr & 0xffffffff);
402 rcar_dmac_chan_write(chan, RCAR_DMATCR,
403 chunk->size >> desc->xfer_shift);
405 chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
408 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
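/*
 * Note: in both modes the transfer is actually started by the CHCR write
 * above: setting RCAR_DMACHCR_DE enables the channel, either in plain
 * register mode (DPM_DISABLED) or in one of the descriptor memory modes set
 * up through DMADPBASE/DMACHCRB.
 */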
static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}
/* -----------------------------------------------------------------------------
 * Descriptors submission
 */

static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
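
/*
 * Note: tx_submit only assigns a cookie and queues the descriptor on the
 * pending list; nothing touches the hardware until the client calls
 * dma_async_issue_pending(), at which point rcar_dmac_issue_pending() moves
 * the descriptor to the active list and starts the transfer.
 */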
/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */
/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irq(&chan->lock);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irq(&chan->lock);

	return 0;
}
/*
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free descriptors lists. The descriptor's chunks list will be reinitialized
 * to an empty list as a result.
 *
 * The descriptor must have been removed from the channel's lists before
 * calling this function.
 */
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
			       struct rcar_dmac_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
	list_add_tail(&desc->node, &chan->desc.free);
	spin_unlock_irqrestore(&chan->lock, flags);
}
516 static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
518 struct rcar_dmac_desc *desc, *_desc;
522 * We have to temporarily move all descriptors from the wait list to a
523 * local list as iterating over the wait list, even with
524 * list_for_each_entry_safe, isn't safe if we release the channel lock
525 * around the rcar_dmac_desc_put() call.
527 spin_lock_irq(&chan->lock);
528 list_splice_init(&chan->desc.wait, &list);
529 spin_unlock_irq(&chan->lock);
531 list_for_each_entry_safe(desc, _desc, &list, node) {
532 if (async_tx_test_ack(&desc->async_tx)) {
533 list_del(&desc->node);
534 rcar_dmac_desc_put(chan, desc);
538 if (list_empty(&list))
541 /* Put the remaining descriptors back in the wait list. */
542 spin_lock_irq(&chan->lock);
543 list_splice(&list, &chan->desc.wait);
544 spin_unlock_irq(&chan->lock);
548 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
549 * @chan: the DMA channel
551 * Locking: This function must be called in a non-atomic context.
553 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
556 static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
558 struct rcar_dmac_desc *desc;
561 /* Recycle acked descriptors before attempting allocation. */
562 rcar_dmac_desc_recycle_acked(chan);
564 spin_lock_irq(&chan->lock);
566 while (list_empty(&chan->desc.free)) {
568 * No free descriptors, allocate a page worth of them and try
569 * again, as someone else could race us to get the newly
570 * allocated descriptors. If the allocation fails return an
573 spin_unlock_irq(&chan->lock);
574 ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
577 spin_lock_irq(&chan->lock);
580 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
581 list_del(&desc->node);
583 spin_unlock_irq(&chan->lock);
589 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
590 * @chan: the DMA channel
591 * @gfp: allocation flags
593 static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
595 struct rcar_dmac_desc_page *page;
599 page = (void *)get_zeroed_page(gfp);
603 for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
604 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
606 list_add_tail(&chunk->node, &list);
609 spin_lock_irq(&chan->lock);
610 list_splice_tail(&list, &chan->desc.chunks_free);
611 list_add_tail(&page->node, &chan->desc.pages);
612 spin_unlock_irq(&chan->lock);
618 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
619 * @chan: the DMA channel
621 * Locking: This function must be called in a non-atomic context.
623 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
624 * descriptor can be allocated.
626 static struct rcar_dmac_xfer_chunk *
627 rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
629 struct rcar_dmac_xfer_chunk *chunk;
632 spin_lock_irq(&chan->lock);
634 while (list_empty(&chan->desc.chunks_free)) {
636 * No free descriptors, allocate a page worth of them and try
637 * again, as someone else could race us to get the newly
638 * allocated descriptors. If the allocation fails return an
641 spin_unlock_irq(&chan->lock);
642 ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
645 spin_lock_irq(&chan->lock);
648 chunk = list_first_entry(&chan->desc.chunks_free,
649 struct rcar_dmac_xfer_chunk, node);
650 list_del(&chunk->node);
652 spin_unlock_irq(&chan->lock);
657 static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
658 struct rcar_dmac_desc *desc, size_t size)
661 * dma_alloc_coherent() allocates memory in page size increments. To
662 * avoid reallocating the hardware descriptors when the allocated size
663 * wouldn't change align the requested size to a multiple of the page
666 size = PAGE_ALIGN(size);
668 if (desc->hwdescs.size == size)
671 if (desc->hwdescs.mem) {
672 dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
673 desc->hwdescs.mem, desc->hwdescs.dma);
674 desc->hwdescs.mem = NULL;
675 desc->hwdescs.size = 0;
681 desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
682 &desc->hwdescs.dma, GFP_NOWAIT);
683 if (!desc->hwdescs.mem)
686 desc->hwdescs.size = size;
689 static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
690 struct rcar_dmac_desc *desc)
692 struct rcar_dmac_xfer_chunk *chunk;
693 struct rcar_dmac_hw_desc *hwdesc;
695 rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));
697 hwdesc = desc->hwdescs.mem;
701 list_for_each_entry(chunk, &desc->chunks, node) {
702 hwdesc->sar = chunk->src_addr;
703 hwdesc->dar = chunk->dst_addr;
704 hwdesc->tcr = chunk->size >> desc->xfer_shift;
711 /* -----------------------------------------------------------------------------
715 static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
717 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
719 chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
720 RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
721 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
724 static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
726 struct rcar_dmac_desc *desc, *_desc;
730 spin_lock_irqsave(&chan->lock, flags);
732 /* Move all non-free descriptors to the local lists. */
733 list_splice_init(&chan->desc.pending, &descs);
734 list_splice_init(&chan->desc.active, &descs);
735 list_splice_init(&chan->desc.done, &descs);
736 list_splice_init(&chan->desc.wait, &descs);
738 chan->desc.running = NULL;
740 spin_unlock_irqrestore(&chan->lock, flags);
742 list_for_each_entry_safe(desc, _desc, &descs, node) {
743 list_del(&desc->node);
744 rcar_dmac_desc_put(chan, desc);
748 static void rcar_dmac_stop(struct rcar_dmac *dmac)
750 rcar_dmac_write(dmac, RCAR_DMAOR, 0);
753 static void rcar_dmac_abort(struct rcar_dmac *dmac)
757 /* Stop all channels. */
758 for (i = 0; i < dmac->n_channels; ++i) {
759 struct rcar_dmac_chan *chan = &dmac->channels[i];
761 /* Stop and reinitialize the channel. */
762 spin_lock(&chan->lock);
763 rcar_dmac_chan_halt(chan);
764 spin_unlock(&chan->lock);
766 rcar_dmac_chan_reinit(chan);
770 /* -----------------------------------------------------------------------------
771 * Descriptors preparation
774 static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
775 struct rcar_dmac_desc *desc)
777 static const u32 chcr_ts[] = {
778 RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
779 RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
780 RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
784 unsigned int xfer_size;
787 switch (desc->direction) {
789 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
790 | RCAR_DMACHCR_RS_DMARS;
791 xfer_size = chan->src_xfer_size;
795 chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
796 | RCAR_DMACHCR_RS_DMARS;
797 xfer_size = chan->dst_xfer_size;
802 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
803 | RCAR_DMACHCR_RS_AUTO;
804 xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
808 desc->xfer_shift = ilog2(xfer_size);
809 desc->chcr = chcr | chcr_ts[desc->xfer_shift];
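/*
 * Note: xfer_shift is the log2 of the per-beat transfer size selected above,
 * and chcr_ts[] maps that shift to the CHCR TS field encoding. TCR values are
 * programmed in these transfer units, i.e. bytes >> xfer_shift.
 */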
/*
 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g. the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM and the SG list contains a single element pointing at the
 * source buffer.
 */
822 static struct dma_async_tx_descriptor *
823 rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
824 unsigned int sg_len, dma_addr_t dev_addr,
825 enum dma_transfer_direction dir, unsigned long dma_flags,
828 struct rcar_dmac_xfer_chunk *chunk;
829 struct rcar_dmac_desc *desc;
830 struct scatterlist *sg;
831 unsigned int nchunks = 0;
832 unsigned int max_chunk_size;
833 unsigned int full_size = 0;
834 bool highmem = false;
837 desc = rcar_dmac_desc_get(chan);
841 desc->async_tx.flags = dma_flags;
842 desc->async_tx.cookie = -EBUSY;
844 desc->cyclic = cyclic;
845 desc->direction = dir;
847 rcar_dmac_chan_configure_desc(chan, desc);
849 max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
852 * Allocate and fill the transfer chunk descriptors. We own the only
853 * reference to the DMA descriptor, there's no need for locking.
855 for_each_sg(sgl, sg, sg_len, i) {
856 dma_addr_t mem_addr = sg_dma_address(sg);
857 unsigned int len = sg_dma_len(sg);
862 unsigned int size = min(len, max_chunk_size);
864 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
866 * Prevent individual transfers from crossing 4GB
869 if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
870 size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
871 if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
872 size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
875 * Check if either of the source or destination address
876 * can't be expressed in 32 bits. If so we can't use
877 * hardware descriptor lists.
879 if (dev_addr >> 32 || mem_addr >> 32)
883 chunk = rcar_dmac_xfer_chunk_get(chan);
885 rcar_dmac_desc_put(chan, desc);
889 if (dir == DMA_DEV_TO_MEM) {
890 chunk->src_addr = dev_addr;
891 chunk->dst_addr = mem_addr;
893 chunk->src_addr = mem_addr;
894 chunk->dst_addr = dev_addr;
899 dev_dbg(chan->chan.device->dev,
900 "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
901 chan->index, chunk, desc, i, sg, size, len,
902 &chunk->src_addr, &chunk->dst_addr);
905 if (dir == DMA_MEM_TO_MEM)
910 list_add_tail(&chunk->node, &desc->chunks);
915 desc->nchunks = nchunks;
916 desc->size = full_size;
919 * Use hardware descriptor lists if possible when more than one chunk
920 * needs to be transferred (otherwise they don't make much sense).
922 * The highmem check currently covers the whole transfer. As an
923 * optimization we could use descriptor lists for consecutive lowmem
924 * chunks and direct manual mode for highmem chunks. Whether the
925 * performance improvement would be significant enough compared to the
926 * additional complexity remains to be investigated.
928 desc->hwdescs.use = !highmem && nchunks > 1;
929 if (desc->hwdescs.use) {
930 if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
931 desc->hwdescs.use = false;
934 return &desc->async_tx;
937 /* -----------------------------------------------------------------------------
938 * DMA engine operations
941 static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
943 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
946 INIT_LIST_HEAD(&rchan->desc.chunks_free);
947 INIT_LIST_HEAD(&rchan->desc.pages);
949 /* Preallocate descriptors. */
950 ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
954 ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
958 return pm_runtime_get_sync(chan->device->dev);
961 static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
963 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
964 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
965 struct rcar_dmac_desc_page *page, *_page;
966 struct rcar_dmac_desc *desc;
969 /* Protect against ISR */
970 spin_lock_irq(&rchan->lock);
971 rcar_dmac_chan_halt(rchan);
972 spin_unlock_irq(&rchan->lock);
974 /* Now no new interrupts will occur */
976 if (rchan->mid_rid >= 0) {
977 /* The caller is holding dma_list_mutex */
978 clear_bit(rchan->mid_rid, dmac->modules);
979 rchan->mid_rid = -EINVAL;
982 list_splice_init(&rchan->desc.free, &list);
983 list_splice_init(&rchan->desc.pending, &list);
984 list_splice_init(&rchan->desc.active, &list);
985 list_splice_init(&rchan->desc.done, &list);
986 list_splice_init(&rchan->desc.wait, &list);
988 list_for_each_entry(desc, &list, node)
989 rcar_dmac_realloc_hwdesc(rchan, desc, 0);
991 list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
992 list_del(&page->node);
993 free_page((unsigned long)page);
996 pm_runtime_put(chan->device->dev);
999 static struct dma_async_tx_descriptor *
1000 rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
1001 dma_addr_t dma_src, size_t len, unsigned long flags)
1003 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1004 struct scatterlist sgl;
1009 sg_init_table(&sgl, 1);
1010 sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
1011 offset_in_page(dma_src));
1012 sg_dma_address(&sgl) = dma_src;
1013 sg_dma_len(&sgl) = len;
1015 return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
1016 DMA_MEM_TO_MEM, flags, false);
1019 static struct dma_async_tx_descriptor *
1020 rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1021 unsigned int sg_len, enum dma_transfer_direction dir,
1022 unsigned long flags, void *context)
1024 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1025 dma_addr_t dev_addr;
1027 /* Someone calling slave DMA on a generic channel? */
1028 if (rchan->mid_rid < 0 || !sg_len) {
1029 dev_warn(chan->device->dev,
1030 "%s: bad parameter: len=%d, id=%d\n",
1031 __func__, sg_len, rchan->mid_rid);
1035 dev_addr = dir == DMA_DEV_TO_MEM
1036 ? rchan->src_slave_addr : rchan->dst_slave_addr;
1037 return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
1041 #define RCAR_DMAC_MAX_SG_LEN 32
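
/*
 * Note: cyclic transfers are built below by splitting the buffer into
 * buf_len / period_len segments, one scatterlist entry per period, so the
 * number of periods in a single cyclic transfer is capped at
 * RCAR_DMAC_MAX_SG_LEN.
 */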
1043 static struct dma_async_tx_descriptor *
1044 rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
1045 size_t buf_len, size_t period_len,
1046 enum dma_transfer_direction dir, unsigned long flags)
1048 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1049 struct dma_async_tx_descriptor *desc;
1050 struct scatterlist *sgl;
1051 dma_addr_t dev_addr;
1052 unsigned int sg_len;
1055 /* Someone calling slave DMA on a generic channel? */
1056 if (rchan->mid_rid < 0 || buf_len < period_len) {
1057 dev_warn(chan->device->dev,
1058 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
1059 __func__, buf_len, period_len, rchan->mid_rid);
1063 sg_len = buf_len / period_len;
1064 if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
		dev_err(chan->device->dev,
			"chan%u: sg length %d exceeds limit %d",
			rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
1072 * Allocate the sg list dynamically as it would consume too much stack
1075 sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
1079 sg_init_table(sgl, sg_len);
1081 for (i = 0; i < sg_len; ++i) {
1082 dma_addr_t src = buf_addr + (period_len * i);
1084 sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
1085 offset_in_page(src));
1086 sg_dma_address(&sgl[i]) = src;
1087 sg_dma_len(&sgl[i]) = period_len;
1090 dev_addr = dir == DMA_DEV_TO_MEM
1091 ? rchan->src_slave_addr : rchan->dst_slave_addr;
1092 desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
1099 static int rcar_dmac_device_config(struct dma_chan *chan,
1100 struct dma_slave_config *cfg)
1102 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	/*
	 * We could lock this, but you shouldn't be configuring the
	 * channel while using it...
	 */
1108 rchan->src_slave_addr = cfg->src_addr;
1109 rchan->dst_slave_addr = cfg->dst_addr;
1110 rchan->src_xfer_size = cfg->src_addr_width;
1111 rchan->dst_xfer_size = cfg->dst_addr_width;
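	/*
	 * Note: the DMA_SLAVE_BUSWIDTH_* values are plain byte counts, so they
	 * can be stored directly as the hardware transfer sizes consumed by
	 * rcar_dmac_chan_configure_desc().
	 */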
1116 static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
1118 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1119 unsigned long flags;
1121 spin_lock_irqsave(&rchan->lock, flags);
1122 rcar_dmac_chan_halt(rchan);
1123 spin_unlock_irqrestore(&rchan->lock, flags);
1126 * FIXME: No new interrupt can occur now, but the IRQ thread might still
1130 rcar_dmac_chan_reinit(rchan);
1135 static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
1136 dma_cookie_t cookie)
1138 struct rcar_dmac_desc *desc = chan->desc.running;
1139 struct rcar_dmac_xfer_chunk *running = NULL;
1140 struct rcar_dmac_xfer_chunk *chunk;
1141 unsigned int residue = 0;
1142 unsigned int dptr = 0;
1148 * If the cookie doesn't correspond to the currently running transfer
1149 * then the descriptor hasn't been processed yet, and the residue is
1150 * equal to the full descriptor size.
1152 if (cookie != desc->async_tx.cookie)
1156 * In descriptor mode the descriptor running pointer is not maintained
1157 * by the interrupt handler, find the running descriptor from the
1158 * descriptor pointer field in the CHCRB register. In non-descriptor
1159 * mode just use the running descriptor pointer.
1161 if (desc->hwdescs.use) {
1162 dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1163 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1164 WARN_ON(dptr >= desc->nchunks);
1166 running = desc->running;
1169 /* Compute the size of all chunks still to be transferred. */
1170 list_for_each_entry_reverse(chunk, &desc->chunks, node) {
1171 if (chunk == running || ++dptr == desc->nchunks)
1174 residue += chunk->size;
1177 /* Add the residue for the current chunk. */
1178 residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
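	/*
	 * Note: DMATCR counts the transfer units still to be issued for the
	 * current chunk, so the remaining byte count is TCR << xfer_shift.
	 */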
1183 static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
1184 dma_cookie_t cookie,
1185 struct dma_tx_state *txstate)
1187 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1188 enum dma_status status;
1189 unsigned long flags;
1190 unsigned int residue;
1192 status = dma_cookie_status(chan, cookie, txstate);
1193 if (status == DMA_COMPLETE || !txstate)
1196 spin_lock_irqsave(&rchan->lock, flags);
1197 residue = rcar_dmac_chan_get_residue(rchan, cookie);
1198 spin_unlock_irqrestore(&rchan->lock, flags);
1200 dma_set_residue(txstate, residue);
1205 static void rcar_dmac_issue_pending(struct dma_chan *chan)
1207 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1208 unsigned long flags;
1210 spin_lock_irqsave(&rchan->lock, flags);
1212 if (list_empty(&rchan->desc.pending))
1215 /* Append the pending list to the active list. */
1216 list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
1219 * If no transfer is running pick the first descriptor from the active
1220 * list and start the transfer.
1222 if (!rchan->desc.running) {
1223 struct rcar_dmac_desc *desc;
1225 desc = list_first_entry(&rchan->desc.active,
1226 struct rcar_dmac_desc, node);
1227 rchan->desc.running = desc;
1229 rcar_dmac_chan_start_xfer(rchan);
1233 spin_unlock_irqrestore(&rchan->lock, flags);
1236 /* -----------------------------------------------------------------------------
1240 static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
1242 struct rcar_dmac_desc *desc = chan->desc.running;
1245 if (WARN_ON(!desc || !desc->cyclic)) {
1247 * This should never happen, there should always be a running
1248 * cyclic descriptor when a descriptor stage end interrupt is
1249 * triggered. Warn and return.
1254 /* Program the interrupt pointer to the next stage. */
1255 stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1256 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1257 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));
1259 return IRQ_WAKE_THREAD;
1262 static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
1264 struct rcar_dmac_desc *desc = chan->desc.running;
1265 irqreturn_t ret = IRQ_WAKE_THREAD;
1267 if (WARN_ON_ONCE(!desc)) {
1269 * This should never happen, there should always be a running
1270 * descriptor when a transfer end interrupt is triggered. Warn
1277 * The transfer end interrupt isn't generated for each chunk when using
1278 * descriptor mode. Only update the running chunk pointer in
1279 * non-descriptor mode.
1281 if (!desc->hwdescs.use) {
1283 * If we haven't completed the last transfer chunk simply move
1284 * to the next one. Only wake the IRQ thread if the transfer is
1287 if (!list_is_last(&desc->running->node, &desc->chunks)) {
1288 desc->running = list_next_entry(desc->running, node);
1295 * We've completed the last transfer chunk. If the transfer is
1296 * cyclic, move back to the first one.
1300 list_first_entry(&desc->chunks,
1301 struct rcar_dmac_xfer_chunk,
1307 /* The descriptor is complete, move it to the done list. */
1308 list_move_tail(&desc->node, &chan->desc.done);
1310 /* Queue the next descriptor, if any. */
1311 if (!list_empty(&chan->desc.active))
1312 chan->desc.running = list_first_entry(&chan->desc.active,
1313 struct rcar_dmac_desc,
1316 chan->desc.running = NULL;
1319 if (chan->desc.running)
1320 rcar_dmac_chan_start_xfer(chan);
1325 static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
1327 u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
1328 struct rcar_dmac_chan *chan = dev;
1329 irqreturn_t ret = IRQ_NONE;
1332 spin_lock(&chan->lock);
1334 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
1335 if (chcr & RCAR_DMACHCR_TE)
1336 mask |= RCAR_DMACHCR_DE;
1337 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
1339 if (chcr & RCAR_DMACHCR_DSE)
1340 ret |= rcar_dmac_isr_desc_stage_end(chan);
1342 if (chcr & RCAR_DMACHCR_TE)
1343 ret |= rcar_dmac_isr_transfer_end(chan);
1345 spin_unlock(&chan->lock);
1350 static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
1352 struct rcar_dmac_chan *chan = dev;
1353 struct rcar_dmac_desc *desc;
1355 spin_lock_irq(&chan->lock);
1357 /* For cyclic transfers notify the user after every chunk. */
1358 if (chan->desc.running && chan->desc.running->cyclic) {
1359 dma_async_tx_callback callback;
1360 void *callback_param;
1362 desc = chan->desc.running;
1363 callback = desc->async_tx.callback;
1364 callback_param = desc->async_tx.callback_param;
1367 spin_unlock_irq(&chan->lock);
1368 callback(callback_param);
1369 spin_lock_irq(&chan->lock);
1374 * Call the callback function for all descriptors on the done list and
1375 * move them to the ack wait list.
1377 while (!list_empty(&chan->desc.done)) {
1378 desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
1380 dma_cookie_complete(&desc->async_tx);
1381 list_del(&desc->node);
1383 if (desc->async_tx.callback) {
1384 spin_unlock_irq(&chan->lock);
1386 * We own the only reference to this descriptor, we can
1387 * safely dereference it without holding the channel
1390 desc->async_tx.callback(desc->async_tx.callback_param);
1391 spin_lock_irq(&chan->lock);
1394 list_add_tail(&desc->node, &chan->desc.wait);
1397 spin_unlock_irq(&chan->lock);
1399 /* Recycle all acked descriptors. */
1400 rcar_dmac_desc_recycle_acked(chan);
1405 static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
1407 struct rcar_dmac *dmac = data;
1409 if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
1413 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
1414 * abort transfers on all channels, and reinitialize the DMAC.
1416 rcar_dmac_stop(dmac);
1417 rcar_dmac_abort(dmac);
1418 rcar_dmac_init(dmac);
1423 /* -----------------------------------------------------------------------------
1424 * OF xlate and channel filter
1427 static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
1429 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
1430 struct of_phandle_args *dma_spec = arg;
	/*
	 * FIXME: Using a filter on OF platforms makes no sense. The OF xlate
	 * function knows which device it wants to allocate a channel for and
	 * would be perfectly capable of selecting the channel it wants.
	 * Forcing it to call dma_request_channel() and iterate through all
	 * channels from all controllers is just pointless.
	 */
1439 if (chan->device->device_config != rcar_dmac_device_config ||
1440 dma_spec->np != chan->device->dev->of_node)
1443 return !test_and_set_bit(dma_spec->args[0], dmac->modules);
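
/*
 * Note: the modules bitmask doubles as an allocation map: test_and_set_bit()
 * above ensures that a given MID/RID value can only be bound to one channel
 * at a time (the bit is released in rcar_dmac_free_chan_resources()).
 */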
1446 static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
1447 struct of_dma *ofdma)
1449 struct rcar_dmac_chan *rchan;
1450 struct dma_chan *chan;
1451 dma_cap_mask_t mask;
1453 if (dma_spec->args_count != 1)
1456 /* Only slave DMA channels can be allocated via DT */
1458 dma_cap_set(DMA_SLAVE, mask);
1460 chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
1464 rchan = to_rcar_dmac_chan(chan);
1465 rchan->mid_rid = dma_spec->args[0];
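
/*
 * Illustrative (hypothetical) consumer node, assuming the usual
 * #dma-cells = <1> convention where the single cell carries the MID/RID
 * value stored above:
 *
 *	uart0: serial@e6e60000 {
 *		...
 *		dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
 *		dma-names = "tx", "rx";
 *	};
 */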
1470 /* -----------------------------------------------------------------------------
1474 #ifdef CONFIG_PM_SLEEP
1475 static int rcar_dmac_sleep_suspend(struct device *dev)
1478 * TODO: Wait for the current transfer to complete and stop the device.
1483 static int rcar_dmac_sleep_resume(struct device *dev)
1485 /* TODO: Resume transfers, if any. */
1491 static int rcar_dmac_runtime_suspend(struct device *dev)
1496 static int rcar_dmac_runtime_resume(struct device *dev)
1498 struct rcar_dmac *dmac = dev_get_drvdata(dev);
1500 return rcar_dmac_init(dmac);
1504 static const struct dev_pm_ops rcar_dmac_pm = {
1505 SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
1506 SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
1510 /* -----------------------------------------------------------------------------
1514 static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
1515 struct rcar_dmac_chan *rchan,
1518 struct platform_device *pdev = to_platform_device(dmac->dev);
1519 struct dma_chan *chan = &rchan->chan;
1520 char pdev_irqname[5];
1525 rchan->index = index;
1526 rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
1527 rchan->mid_rid = -EINVAL;
1529 spin_lock_init(&rchan->lock);
1531 INIT_LIST_HEAD(&rchan->desc.free);
1532 INIT_LIST_HEAD(&rchan->desc.pending);
1533 INIT_LIST_HEAD(&rchan->desc.active);
1534 INIT_LIST_HEAD(&rchan->desc.done);
1535 INIT_LIST_HEAD(&rchan->desc.wait);
1537 /* Request the channel interrupt. */
1538 sprintf(pdev_irqname, "ch%u", index);
1539 irq = platform_get_irq_byname(pdev, pdev_irqname);
1541 dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
1545 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
1546 dev_name(dmac->dev), index);
1550 ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
1551 rcar_dmac_isr_channel_thread, 0,
1554 dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
1559 * Initialize the DMA engine channel and add it to the DMA engine
1562 chan->device = &dmac->engine;
1563 dma_cookie_init(chan);
1565 list_add_tail(&chan->device_node, &dmac->engine.channels);
1570 static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
1572 struct device_node *np = dev->of_node;
1575 ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
1577 dev_err(dev, "unable to read dma-channels property\n");
1581 if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
1582 dev_err(dev, "invalid number of channels %u\n",
1590 static int rcar_dmac_probe(struct platform_device *pdev)
1592 const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
1593 DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
1594 DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
1595 DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
1596 unsigned int channels_offset = 0;
1597 struct dma_device *engine;
1598 struct rcar_dmac *dmac;
1599 struct resource *mem;
1605 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
1609 dmac->dev = &pdev->dev;
1610 platform_set_drvdata(pdev, dmac);
1612 ret = rcar_dmac_parse_of(&pdev->dev, dmac);
1617 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be
1618 * flushed correctly, resulting in memory corruption. DMAC 0 channel 0
1619 * is connected to microTLB 0 on currently supported platforms, so we
1620 * can't use it with the IPMMU. As the IOMMU API operates at the device
1621 * level we can't disable it selectively, so ignore channel 0 for now if
1622 * the device is part of an IOMMU group.
1624 if (pdev->dev.iommu_group) {
1626 channels_offset = 1;
1629 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
1630 sizeof(*dmac->channels), GFP_KERNEL);
1631 if (!dmac->channels)
1634 /* Request resources. */
1635 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1636 dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
1637 if (IS_ERR(dmac->iomem))
1638 return PTR_ERR(dmac->iomem);
1640 irq = platform_get_irq_byname(pdev, "error");
1642 dev_err(&pdev->dev, "no error IRQ specified\n");
1646 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
1647 dev_name(dmac->dev));
1651 ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
1654 dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
1659 /* Enable runtime PM and initialize the device. */
1660 pm_runtime_enable(&pdev->dev);
1661 ret = pm_runtime_get_sync(&pdev->dev);
1663 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
1667 ret = rcar_dmac_init(dmac);
1668 pm_runtime_put(&pdev->dev);
1671 dev_err(&pdev->dev, "failed to reset device\n");
1675 /* Initialize the channels. */
1676 INIT_LIST_HEAD(&dmac->engine.channels);
1678 for (i = 0; i < dmac->n_channels; ++i) {
1679 ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
1680 i + channels_offset);
1685 /* Register the DMAC as a DMA provider for DT. */
1686 ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
	/*
	 * Register the DMA engine device.
	 *
	 * The hardcoded 4-byte MEMCPY transfer size requires 4-byte alignment,
	 * hence the copy_align value below.
	 */
1696 engine = &dmac->engine;
1697 dma_cap_set(DMA_MEMCPY, engine->cap_mask);
1698 dma_cap_set(DMA_SLAVE, engine->cap_mask);
1700 engine->dev = &pdev->dev;
1701 engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
1703 engine->src_addr_widths = widths;
1704 engine->dst_addr_widths = widths;
1705 engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1706 engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1708 engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
1709 engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
1710 engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
1711 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
1712 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
1713 engine->device_config = rcar_dmac_device_config;
1714 engine->device_terminate_all = rcar_dmac_chan_terminate_all;
1715 engine->device_tx_status = rcar_dmac_tx_status;
1716 engine->device_issue_pending = rcar_dmac_issue_pending;
1718 ret = dma_async_device_register(engine);
1725 of_dma_controller_free(pdev->dev.of_node);
1726 pm_runtime_disable(&pdev->dev);
1730 static int rcar_dmac_remove(struct platform_device *pdev)
1732 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1734 of_dma_controller_free(pdev->dev.of_node);
1735 dma_async_device_unregister(&dmac->engine);
1737 pm_runtime_disable(&pdev->dev);
1742 static void rcar_dmac_shutdown(struct platform_device *pdev)
1744 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1746 rcar_dmac_stop(dmac);
1749 static const struct of_device_id rcar_dmac_of_ids[] = {
1750 { .compatible = "renesas,rcar-dmac", },
1753 MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
1755 static struct platform_driver rcar_dmac_driver = {
1757 .pm = &rcar_dmac_pm,
1758 .name = "rcar-dmac",
1759 .of_match_table = rcar_dmac_of_ids,
1761 .probe = rcar_dmac_probe,
1762 .remove = rcar_dmac_remove,
1763 .shutdown = rcar_dmac_shutdown,
1766 module_platform_driver(rcar_dmac_driver);
1768 MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
1769 MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
1770 MODULE_LICENSE("GPL v2");