/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Copyright (C) 2014 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/**
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};

/**
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;

	size_t size;
	bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)

/**
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};

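/*
 * Descriptors and transfer chunks are carved out of full pages; the macros
 * below compute how many of each fit in the space remaining after the page's
 * list_head.
 */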
#define RCAR_DMAC_DESCS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
	sizeof(struct rcar_dmac_xfer_chunk))

/**
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @src_xfer_size: size (in bytes) of hardware transfers on the source side
 * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
 * @src_slave_addr: slave source memory address
 * @dst_slave_addr: slave destination memory address
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of completed descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;

	unsigned int src_xfer_size;
	unsigned int dst_xfer_size;
	dma_addr_t src_slave_addr;
	dma_addr_t dst_slave_addr;
	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};

#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)

/**
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;

	unsigned long modules[256 / BITS_PER_LONG];
};

#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)

/* -----------------------------------------------------------------------------
 * Registers
 */

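/*
 * RCAR_DMAC_CHAN_OFFSET() locates each channel's register bank. The registers
 * down to RCAR_DMADPSEC are global and accessed through rcar_dmac_write() and
 * rcar_dmac_read(); the registers from RCAR_DMASAR onwards are per-channel and
 * accessed through rcar_dmac_chan_write() and rcar_dmac_chan_read().
 */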
#define RCAR_DMAC_CHAN_OFFSET(i)	(0x8000 + 0x80 * (i))

#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR(n)		((n) << 16)
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4

/* -----------------------------------------------------------------------------
 * Device access
 */

static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->iomem + reg);
	else
		writel(data, dmac->iomem + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->iomem + reg);
	else
		return readl(dmac->iomem + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}

/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */

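/*
 * A channel is busy when DE is set and TE is not: a transfer has been started
 * but has not completed yet.
 */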
static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
}

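/*
 * Program the running chunk's source and destination addresses and its
 * transfer count, select the MID/RID for slave transfers, and set DE to start
 * the hardware transfer.
 */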
static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	struct rcar_dmac_xfer_chunk *chunk = desc->running;

	dev_dbg(chan->chan.device->dev,
		"chan%u: queue chunk %p: %u@%pad -> %pad\n",
		chan->index, chunk, chunk->size, &chunk->src_addr,
		&chunk->dst_addr);

	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR, chunk->src_addr >> 32);
	rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR, chunk->dst_addr >> 32);
#endif
	rcar_dmac_chan_write(chan, RCAR_DMASAR, chunk->src_addr & 0xffffffff);
	rcar_dmac_chan_write(chan, RCAR_DMADAR, chunk->dst_addr & 0xffffffff);

	if (chan->mid_rid >= 0)
		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

	rcar_dmac_chan_write(chan, RCAR_DMATCR,
			     chunk->size >> desc->xfer_shift);

	rcar_dmac_chan_write(chan, RCAR_DMACHCR, desc->chcr | RCAR_DMACHCR_DE |
			     RCAR_DMACHCR_IE);
}

static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors submission
 */

static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

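/*
 * Descriptors and transfer chunks are allocated one page at a time and kept
 * on per-channel free lists. rcar_dmac_desc_get() and
 * rcar_dmac_xfer_chunk_get() refill those lists with GFP_NOWAIT allocations
 * when they run empty.
 */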
/**
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irq(&chan->lock);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irq(&chan->lock);

	return 0;
}

/**
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free descriptors lists. The descriptor's chunks list will be reinitialized
 * to an empty list as a result.
 *
 * The descriptor must have been removed from the channel's done list before
 * calling this function.
 *
 * Locking: Must be called with the channel lock held.
 */
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
			       struct rcar_dmac_desc *desc)
{
	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
	list_add_tail(&desc->node, &chan->desc.free);
}

static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &chan->desc.wait, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			rcar_dmac_desc_put(chan, desc);
		}
	}
}

/**
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */
static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc;
	int ret;

	spin_lock_irq(&chan->lock);

	/* Recycle acked descriptors before attempting allocation. */
	rcar_dmac_desc_recycle_acked(chan);

	while (list_empty(&chan->desc.free)) {
		/*
		 * No free descriptors, allocate a page worth of them
		 * and try again, as someone else could race us to get
		 * the newly allocated descriptors. If the allocation
		 * fails return an error.
		 */
		spin_unlock_irq(&chan->lock);
		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irq(&chan->lock);
	}

	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc,
				node);
	list_del(&desc->node);

	spin_unlock_irq(&chan->lock);

	return desc;
}

/**
 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];

		list_add_tail(&chunk->node, &list);
	}

	spin_lock_irq(&chan->lock);
	list_splice_tail(&list, &chan->desc.chunks_free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irq(&chan->lock);

	return 0;
}

/**
 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
 * descriptor can be allocated.
 */
static struct rcar_dmac_xfer_chunk *
rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_xfer_chunk *chunk;
	int ret;

	spin_lock_irq(&chan->lock);

	while (list_empty(&chan->desc.chunks_free)) {
		/*
		 * No free descriptors, allocate a page worth of them
		 * and try again, as someone else could race us to get
		 * the newly allocated descriptors. If the allocation
		 * fails return an error.
		 */
		spin_unlock_irq(&chan->lock);
		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irq(&chan->lock);
	}

	chunk = list_first_entry(&chan->desc.chunks_free,
				 struct rcar_dmac_xfer_chunk, node);
	list_del(&chunk->node);

	spin_unlock_irq(&chan->lock);

	return chunk;
}

/* -----------------------------------------------------------------------------
 * Stop and reset
 */

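/*
 * Halting a channel clears DE so the hardware stops the transfer, and also
 * clears TE and IE so no further transfer-end interrupt is raised for it.
 */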
static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	chcr &= ~(RCAR_DMACHCR_IE | RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
}

static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&chan->lock, flags);

	/* Move all non-free descriptors to the local lists. */
	list_splice_init(&chan->desc.pending, &descs);
	list_splice_init(&chan->desc.active, &descs);
	list_splice_init(&chan->desc.done, &descs);
	list_splice_init(&chan->desc.wait, &descs);

	chan->desc.running = NULL;

	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &descs, node) {
		list_del(&desc->node);
		rcar_dmac_desc_put(chan, desc);
	}
}

static void rcar_dmac_stop(struct rcar_dmac *dmac)
{
	rcar_dmac_write(dmac, RCAR_DMAOR, 0);
}

static void rcar_dmac_abort(struct rcar_dmac *dmac)
{
	unsigned int i;

	/* Stop all channels. */
	for (i = 0; i < dmac->n_channels; ++i) {
		struct rcar_dmac_chan *chan = &dmac->channels[i];

		/* Stop and reinitialize the channel. */
		spin_lock(&chan->lock);
		rcar_dmac_chan_halt(chan);
		spin_unlock(&chan->lock);

		rcar_dmac_chan_reinit(chan);
	}
}

/* -----------------------------------------------------------------------------
 * Descriptors preparation
 */

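/*
 * The per-descriptor CHCR value and xfer_shift are derived from the transfer
 * direction: slave transfers use the configured source or destination slave
 * transfer size and the DMARS request line, while memory-to-memory transfers
 * auto-request with a fixed transfer size.
 */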
static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
					  struct rcar_dmac_desc *desc)
{
	static const u32 chcr_ts[] = {
		RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
		RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
		RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
		RCAR_DMACHCR_TS_64B,
	};

	unsigned int xfer_size;
	u32 chcr;

	switch (desc->direction) {
	case DMA_DEV_TO_MEM:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->src_xfer_size;
		break;

	case DMA_MEM_TO_DEV:
		chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_DMARS;
		xfer_size = chan->dst_xfer_size;
		break;

	case DMA_MEM_TO_MEM:
	default:
		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
		     | RCAR_DMACHCR_RS_AUTO;
		xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
		break;
	}

	desc->xfer_shift = ilog2(xfer_size);
	desc->chcr = chcr | chcr_ts[desc->xfer_shift];
}

/**
 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g. the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM, and the SG list contains only one element that points at
 * the source buffer.
 */
static struct dma_async_tx_descriptor *
rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, dma_addr_t dev_addr,
		       enum dma_transfer_direction dir, unsigned long dma_flags,
		       bool cyclic)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_desc *desc;
	struct scatterlist *sg;
	unsigned int max_chunk_size;
	unsigned int full_size = 0;
	unsigned int i;

	desc = rcar_dmac_desc_get(chan);
	if (!desc)
		return NULL;

	desc->async_tx.flags = dma_flags;
	desc->async_tx.cookie = -EBUSY;

	desc->cyclic = cyclic;
	desc->direction = dir;

	rcar_dmac_chan_configure_desc(chan, desc);

	max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;

	/*
	 * Allocate and fill the transfer chunk descriptors. We own the only
	 * reference to the DMA descriptor, there's no need for locking.
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t mem_addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		full_size += len;

		while (len) {
			unsigned int size = min(len, max_chunk_size);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			/*
			 * Prevent individual transfers from crossing 4GB
			 * boundaries.
			 */
			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
#endif

			chunk = rcar_dmac_xfer_chunk_get(chan);
			if (!chunk) {
				rcar_dmac_desc_put(chan, desc);
				return NULL;
			}

			if (dir == DMA_DEV_TO_MEM) {
				chunk->src_addr = dev_addr;
				chunk->dst_addr = mem_addr;
			} else {
				chunk->src_addr = mem_addr;
				chunk->dst_addr = dev_addr;
			}

			chunk->size = size;

			dev_dbg(chan->chan.device->dev,
				"chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
				chan->index, chunk, desc, i, sg, size, len,
				&chunk->src_addr, &chunk->dst_addr);

			mem_addr += size;
			if (dir == DMA_MEM_TO_MEM)
				dev_addr += size;

			len -= size;

			list_add_tail(&chunk->node, &desc->chunks);
		}
	}

	desc->size = full_size;

	return &desc->async_tx;
}

/* -----------------------------------------------------------------------------
 * DMA engine operations
 */

static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	int ret;

	INIT_LIST_HEAD(&rchan->desc.free);
	INIT_LIST_HEAD(&rchan->desc.pending);
	INIT_LIST_HEAD(&rchan->desc.active);
	INIT_LIST_HEAD(&rchan->desc.done);
	INIT_LIST_HEAD(&rchan->desc.wait);
	INIT_LIST_HEAD(&rchan->desc.chunks_free);
	INIT_LIST_HEAD(&rchan->desc.pages);

	/* Preallocate descriptors. */
	ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
	if (ret < 0)
		return -ENOMEM;

	return pm_runtime_get_sync(chan->device->dev);
}

static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct rcar_dmac_desc_page *page, *_page;

	/* Protect against ISR */
	spin_lock_irq(&rchan->lock);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irq(&rchan->lock);

	/* Now no new interrupts will occur */

	if (rchan->mid_rid >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(rchan->mid_rid, dmac->modules);
		rchan->mid_rid = -EINVAL;
	}

	list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
		list_del(&page->node);
		free_page((unsigned long)page);
	}

	pm_runtime_put(chan->device->dev);
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct scatterlist sgl;

	if (!len)
		return NULL;

	sg_init_table(&sgl, 1);
	sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sgl) = dma_src;
	sg_dma_len(&sgl) = len;

	return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
				      DMA_MEM_TO_MEM, flags, false);
}

static struct dma_async_tx_descriptor *
rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	dma_addr_t dev_addr;

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || !sg_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, rchan->mid_rid);
		return NULL;
	}

	dev_addr = dir == DMA_DEV_TO_MEM
		 ? rchan->src_slave_addr : rchan->dst_slave_addr;
	return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
				      dir, flags, false);
}

#define RCAR_DMAC_MAX_SG_LEN	32

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			  size_t buf_len, size_t period_len,
			  enum dma_transfer_direction dir, unsigned long flags)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	dma_addr_t dev_addr;
	unsigned int sg_len;
	unsigned int i;

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || buf_len < period_len) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, rchan->mid_rid);
		return NULL;
	}

	sg_len = buf_len / period_len;
	if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
		dev_err(chan->device->dev,
			"chan%u: sg length %d exceeds limit %d",
			rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
		return NULL;
	}

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; ++i) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	dev_addr = dir == DMA_DEV_TO_MEM
		 ? rchan->src_slave_addr : rchan->dst_slave_addr;
	desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
				      dir, flags, true);

	kfree(sgl);

	return desc;
}

static int rcar_dmac_device_config(struct dma_chan *chan,
				   struct dma_slave_config *cfg)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/*
	 * We could lock this, but you shouldn't be configuring the channel
	 * while using it...
	 */
	rchan->src_slave_addr = cfg->src_addr;
	rchan->dst_slave_addr = cfg->dst_addr;
	rchan->src_xfer_size = cfg->src_addr_width;
	rchan->dst_xfer_size = cfg->dst_addr_width;

	return 0;
}

static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);
	rcar_dmac_chan_halt(rchan);
	spin_unlock_irqrestore(&rchan->lock, flags);

	/*
	 * FIXME: No new interrupt can occur now, but the IRQ thread might
	 * still be running.
	 */

	rcar_dmac_chan_reinit(rchan);

	return 0;
}

static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
					       dma_cookie_t cookie)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	struct rcar_dmac_xfer_chunk *chunk;
	unsigned int residue = 0;

	if (!desc)
		return 0;

	/*
	 * If the cookie doesn't correspond to the currently running transfer
	 * then the descriptor hasn't been processed yet, and the residue is
	 * equal to the full descriptor size.
	 */
	if (cookie != desc->async_tx.cookie)
		return desc->size;

	/* Compute the size of all chunks still to be transferred. */
	list_for_each_entry_reverse(chunk, &desc->chunks, node) {
		if (chunk == desc->running)
			break;

		residue += chunk->size;
	}

	/* Add the residue for the current chunk. */
	residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;

	return residue;
}

static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *txstate)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	enum dma_status status;
	unsigned long flags;
	unsigned int residue;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&rchan->lock, flags);
	residue = rcar_dmac_chan_get_residue(rchan, cookie);
	spin_unlock_irqrestore(&rchan->lock, flags);

	dma_set_residue(txstate, residue);

	return status;
}

static void rcar_dmac_issue_pending(struct dma_chan *chan)
{
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&rchan->lock, flags);

	if (list_empty(&rchan->desc.pending))
		goto done;

	/* Append the pending list to the active list. */
	list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);

	/*
	 * If no transfer is running pick the first descriptor from the active
	 * list and start the transfer.
	 */
	if (!rchan->desc.running) {
		struct rcar_dmac_desc *desc;

		desc = list_first_entry(&rchan->desc.active,
					struct rcar_dmac_desc, node);
		rchan->desc.running = desc;

		rcar_dmac_chan_start_xfer(rchan);
	}

done:
	spin_unlock_irqrestore(&rchan->lock, flags);
}

/* -----------------------------------------------------------------------------
 * IRQ handling
 */

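/*
 * The channel interrupt is requested as a threaded IRQ: the hard handler
 * acknowledges TE and advances to the next transfer chunk or descriptor,
 * while completion and cyclic callbacks run in the IRQ thread.
 */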
static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	struct rcar_dmac_xfer_chunk *chunk;
	irqreturn_t ret = IRQ_WAKE_THREAD;

	if (WARN_ON_ONCE(!desc)) {
		/*
		 * This should never happen, there should always be
		 * a running descriptor when a transfer ends. Warn and
		 * return.
		 */
		return IRQ_NONE;
	}

	/*
	 * If we haven't completed the last transfer chunk simply move to the
	 * next one. Only wake the IRQ thread if the transfer is cyclic.
	 */
	chunk = desc->running;
	if (!list_is_last(&chunk->node, &desc->chunks)) {
		desc->running = list_next_entry(chunk, node);
		if (!desc->cyclic)
			ret = IRQ_HANDLED;
		goto done;
	}

	/*
	 * We've completed the last transfer chunk. If the transfer is cyclic,
	 * move back to the first one.
	 */
	if (desc->cyclic) {
		desc->running = list_first_entry(&desc->chunks,
						 struct rcar_dmac_xfer_chunk,
						 node);
		goto done;
	}

	/* The descriptor is complete, move it to the done list. */
	list_move_tail(&desc->node, &chan->desc.done);

	/* Queue the next descriptor, if any. */
	if (!list_empty(&chan->desc.active))
		chan->desc.running = list_first_entry(&chan->desc.active,
						      struct rcar_dmac_desc,
						      node);
	else
		chan->desc.running = NULL;

done:
	if (chan->desc.running)
		rcar_dmac_chan_start_xfer(chan);

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
{
	struct rcar_dmac_chan *chan = dev;
	irqreturn_t ret = IRQ_NONE;
	u32 chcr;

	spin_lock(&chan->lock);

	chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
	rcar_dmac_chan_write(chan, RCAR_DMACHCR,
			     chcr & ~(RCAR_DMACHCR_TE | RCAR_DMACHCR_DE));

	if (chcr & RCAR_DMACHCR_TE)
		ret |= rcar_dmac_isr_transfer_end(chan);

	spin_unlock(&chan->lock);

	return ret;
}

static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
	struct rcar_dmac_chan *chan = dev;
	struct rcar_dmac_desc *desc;

	spin_lock_irq(&chan->lock);

	/* For cyclic transfers notify the user after every chunk. */
	if (chan->desc.running && chan->desc.running->cyclic) {
		dma_async_tx_callback callback;
		void *callback_param;

		desc = chan->desc.running;
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		if (callback) {
			spin_unlock_irq(&chan->lock);
			callback(callback_param);
			spin_lock_irq(&chan->lock);
		}
	}

	/*
	 * Call the callback function for all descriptors on the done list and
	 * move them to the ack wait list.
	 */
	while (!list_empty(&chan->desc.done)) {
		desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
					node);
		dma_cookie_complete(&desc->async_tx);
		list_del(&desc->node);

		if (desc->async_tx.callback) {
			spin_unlock_irq(&chan->lock);
			/*
			 * We own the only reference to this descriptor, we can
			 * safely dereference it without holding the channel
			 * lock.
			 */
			desc->async_tx.callback(desc->async_tx.callback_param);
			spin_lock_irq(&chan->lock);
		}

		list_add_tail(&desc->node, &chan->desc.wait);
	}

	/* Recycle all acked descriptors. */
	rcar_dmac_desc_recycle_acked(chan);

	spin_unlock_irq(&chan->lock);

	return IRQ_HANDLED;
}

static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
{
	struct rcar_dmac *dmac = data;

	if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
		return IRQ_NONE;

	/*
	 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
	 * abort transfers on all channels, and reinitialize the DMAC.
	 */
	rcar_dmac_stop(dmac);
	rcar_dmac_abort(dmac);
	rcar_dmac_init(dmac);

	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

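/*
 * The single DT cell carries the MID/RID of the slave; the filter reserves it
 * in the controller-wide modules bitmask and the xlate callback then stores it
 * in the selected channel.
 */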
static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;

	/*
	 * FIXME: Using a filter on OF platforms is nonsense. The OF xlate
	 * function knows which device it wants to allocate a channel from,
	 * and would be perfectly capable of selecting the channel it wants.
	 * Forcing it to call dma_request_channel() and iterate through all
	 * channels from all controllers is just pointless.
	 */
	if (chan->device->device_config != rcar_dmac_device_config ||
	    dma_spec->np != chan->device->dev->of_node)
		return false;

	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
}

static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct rcar_dmac_chan *rchan;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
	if (!chan)
		return NULL;

	rchan = to_rcar_dmac_chan(chan);
	rchan->mid_rid = dma_spec->args[0];

	return chan;
}

/* -----------------------------------------------------------------------------
 * Power management
 */

#ifdef CONFIG_PM_SLEEP
static int rcar_dmac_sleep_suspend(struct device *dev)
{
	/*
	 * TODO: Wait for the current transfer to complete and stop the device.
	 */
	return 0;
}

static int rcar_dmac_sleep_resume(struct device *dev)
{
	/* TODO: Resume transfers, if any. */
	return 0;
}
#endif

static int rcar_dmac_runtime_suspend(struct device *dev)
{
	return 0;
}

static int rcar_dmac_runtime_resume(struct device *dev)
{
	struct rcar_dmac *dmac = dev_get_drvdata(dev);

	return rcar_dmac_init(dmac);
}

static const struct dev_pm_ops rcar_dmac_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
			   NULL)
};

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
				struct rcar_dmac_chan *rchan,
				unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct dma_chan *chan = &rchan->chan;
	char pdev_irqname[5];
	char *irqname;
	int irq;
	int ret;

	rchan->index = index;
	rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
	rchan->mid_rid = -EINVAL;

	spin_lock_init(&rchan->lock);

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (irq < 0) {
		dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
		return -ENODEV;
	}

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
					rcar_dmac_isr_channel_thread, 0,
					irqname, rchan);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
		return ret;
	}

	/*
	 * Initialize the DMA engine channel and add it to the DMA engine
	 * channels list.
	 */
	chan->device = &dmac->engine;
	dma_cookie_init(chan);

	list_add_tail(&chan->device_node, &dmac->engine.channels);

	return 0;
}

static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
		dev_err(dev, "invalid number of channels %u\n",
			dmac->n_channels);
		return -EINVAL;
	}

	return 0;
}

static int rcar_dmac_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
	struct dma_device *engine;
	struct rcar_dmac *dmac;
	struct resource *mem;
	char *irqname;
	unsigned int i;
	int irq;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);

	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources. */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(dmac->iomem))
		return PTR_ERR(dmac->iomem);

	irq = platform_get_irq_byname(pdev, "error");
	if (irq < 0) {
		dev_err(&pdev->dev, "no error IRQ specified\n");
		return -ENODEV;
	}

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
				 dev_name(dmac->dev));
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
			       irqname, dmac);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
			irq, ret);
		return ret;
	}

	/* Enable runtime PM and initialize the device. */
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
		return ret;
	}

	ret = rcar_dmac_init(dmac);
	pm_runtime_put(&pdev->dev);

	if (ret) {
		dev_err(&pdev->dev, "failed to reset device\n");
		goto error;
	}

	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	for (i = 0; i < dmac->n_channels; ++i) {
		ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
		if (ret < 0)
			goto error;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto error;

	/*
	 * Register the DMA engine device.
	 *
	 * Default transfer size of 32 bytes requires 32-byte alignment.
	 */
	engine = &dmac->engine;
	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	dma_cap_set(DMA_SLAVE, engine->cap_mask);

	engine->dev = &pdev->dev;
	engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);

	engine->src_addr_widths = widths;
	engine->dst_addr_widths = widths;
	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
	engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
	engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
	engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
	engine->device_config = rcar_dmac_device_config;
	engine->device_terminate_all = rcar_dmac_chan_terminate_all;
	engine->device_tx_status = rcar_dmac_tx_status;
	engine->device_issue_pending = rcar_dmac_issue_pending;

	ret = dma_async_device_register(engine);
	if (ret < 0)
		goto error;

	return 0;

error:
	of_dma_controller_free(pdev->dev.of_node);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int rcar_dmac_remove(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

static void rcar_dmac_shutdown(struct platform_device *pdev)
{
	struct rcar_dmac *dmac = platform_get_drvdata(pdev);

	rcar_dmac_stop(dmac);
}

static const struct of_device_id rcar_dmac_of_ids[] = {
	{ .compatible = "renesas,rcar-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);

static struct platform_driver rcar_dmac_driver = {
	.driver		= {
		.pm	= &rcar_dmac_pm,
		.name	= "rcar-dmac",
		.of_match_table = rcar_dmac_of_ids,
	},
	.probe		= rcar_dmac_probe,
	.remove		= rcar_dmac_remove,
	.shutdown	= rcar_dmac_shutdown,
};

1501 MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
1502 MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
1503 MODULE_LICENSE("GPL v2");