/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * MPC512x and MPC8308 DMA driver. It supports memory to memory data
 * transfers (tested using the dmatest module) and data transfers between
 * memory and peripheral I/O memory by means of slave scatter/gather with
 * these limitations:
 *  - chunked transfers (described by s/g lists with more than one item)
 *    are refused as long as proper support for scatter/gather is missing;
 *  - transfers on MPC8308 always start from software as this SoC appears
 *    not to have external request lines for peripheral flow control;
 *  - only peripheral devices with 4-byte FIFO access register are supported;
 *  - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
 *    source and destination addresses must be 4-byte aligned
 *    and transfer size must be aligned on (4 * maxburst) boundary.
 */
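
/*
 * A worked example of the size constraint above (illustrative numbers,
 * not taken from a hardware manual): with maxburst = 16 each burst moves
 * 16 * 4 = 64 bytes between memory and the peripheral FIFO, so a valid
 * slave transfer length is any multiple of 64 bytes; a 100-byte request
 * fails the alignment check in mpc_dma_prep_slave_sg() and is rejected.
 */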

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)	/* Error channel */
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];	/* DMA channels(0~63) priority */
};

/* Transfer Control Descriptor (TCD) */
struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;	/* Link channel for major loop complete */
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};

struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
	int				will_access_peripheral;
};

struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Settings for access to peripheral FIFO */
	dma_addr_t			src_per_paddr;
	u32				src_tcd_nunits;
	dma_addr_t			dst_per_paddr;
	u32				dst_tcd_nunits;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};
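
/*
 * Descriptor lifecycle, as implemented by the functions below: a
 * descriptor sits on exactly one of the per-channel lists at any time
 * and moves
 *	free -> prepared	in mpc_dma_prep_memcpy()/mpc_dma_prep_slave_sg(),
 *	prepared -> queued	in mpc_dma_tx_submit(),
 *	queued -> active	in mpc_dma_execute(),
 *	active -> completed	in mpc_dma_irq_process(),
 *	completed -> free	in mpc_dma_process_completed().
 */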

#define DRV_NAME	"mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
						struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor,
		 * don't mix mem-to-mem and peripheral transfer descriptors
		 * within the same 'active' list.
		 */
		if (mdesc->will_access_peripheral) {
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved, start by external request signal */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported following error(s) on channel %u:\n",
						MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/* Allocate channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Allocate DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Allocate descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n",
				i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
					node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* Try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	/* Use the widest transfer size that src, dst and len alignment allows */
	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
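
/*
 * Illustrative mem-to-mem usage through the generic dmaengine API (a
 * sketch, not part of this driver; the in-tree exercise is done by the
 * dmatest module). Channel request, DMA mapping and error handling are
 * omitted; "dst", "src" and "len" are hypothetical DMA-mapped buffers:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_CTRL_ACK);
 *	if (tx) {
 *		cookie = tx->tx_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */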

/* Prepare descriptor for a slave (peripheral <-> memory) transfer */
static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	dma_addr_t per_paddr;
	u32 tcd_nunits;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;
	struct scatterlist *sg;
	size_t len;
	int iter, i;

	/* Currently there is no proper support for scatter/gather */
	if (sg_len != 1)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		spin_lock_irqsave(&mchan->lock, iflags);

		/* Get a free descriptor */
		if (list_empty(&mchan->free)) {
			spin_unlock_irqrestore(&mchan->lock, iflags);
			/* Try to free completed descriptors */
			mpc_dma_process_completed(mdma);
			return NULL;
		}
		mdesc = list_first_entry(&mchan->free,
						struct mpc_dma_desc, node);
		list_del(&mdesc->node);

		if (direction == DMA_DEV_TO_MEM) {
			per_paddr = mchan->src_per_paddr;
			tcd_nunits = mchan->src_tcd_nunits;
		} else {
			per_paddr = mchan->dst_per_paddr;
			tcd_nunits = mchan->dst_tcd_nunits;
		}

		spin_unlock_irqrestore(&mchan->lock, iflags);

		if (per_paddr == 0 || tcd_nunits == 0)
			goto err_prep;

		mdesc->error = 0;
		mdesc->will_access_peripheral = 1;

		/* Prepare Transfer Control Descriptor for this transaction */
		tcd = mdesc->tcd;

		memset(tcd, 0, sizeof(struct mpc_dma_tcd));

		if (!IS_ALIGNED(sg_dma_address(sg), 4))
			goto err_prep;

		if (direction == DMA_DEV_TO_MEM) {
			tcd->saddr = per_paddr;
			tcd->daddr = sg_dma_address(sg);
			tcd->soff = 0;
			tcd->doff = 4;
		} else {
			tcd->saddr = sg_dma_address(sg);
			tcd->daddr = per_paddr;
			tcd->soff = 4;
			tcd->doff = 0;
		}

		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;

		len = sg_dma_len(sg);
		tcd->nbytes = tcd_nunits * 4;
		if (!IS_ALIGNED(len, tcd->nbytes))
			goto err_prep;

		iter = len / tcd->nbytes;
		if (iter >= 1 << 15) {
			/* len is too big */
			goto err_prep;
		}
		/*
		 * citer_linkch contains the high bits of iter: the major
		 * iteration count is split as iter = linkch * 512 + [bc]iter
		 * (e.g. iter == 600 gives biter == 88, biter_linkch == 1).
		 */
		tcd->biter = iter & 0x1ff;
		tcd->biter_linkch = iter >> 9;
		tcd->citer = tcd->biter;
		tcd->citer_linkch = tcd->biter_linkch;

		tcd->e_sg = 0;
		tcd->d_req = 1;

		/* Place descriptor in prepared list */
		spin_lock_irqsave(&mchan->lock, iflags);
		list_add_tail(&mdesc->node, &mchan->prepared);
		spin_unlock_irqrestore(&mchan->lock, iflags);
	}

	return &mdesc->desc;

err_prep:
	/* Put the descriptor back */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return NULL;
}

/* Control the channel: terminate all transfers or apply a slave config */
static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
							unsigned long arg)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma *mdma;
	struct dma_slave_config *cfg;
	unsigned long flags;

	mchan = dma_chan_to_mpc_dma_chan(chan);
	switch (cmd) {
	case DMA_TERMINATE_ALL:
		/* Disable channel requests */
		mdma = dma_chan_to_mpc_dma(chan);

		spin_lock_irqsave(&mchan->lock, flags);

		out_8(&mdma->regs->dmacerq, chan->chan_id);
		list_splice_tail_init(&mchan->prepared, &mchan->free);
		list_splice_tail_init(&mchan->queued, &mchan->free);
		list_splice_tail_init(&mchan->active, &mchan->free);

		spin_unlock_irqrestore(&mchan->lock, flags);

		return 0;

	case DMA_SLAVE_CONFIG:
		/*
		 * Software constraints:
		 *  - only transfers between a peripheral device and
		 *    memory are supported;
		 *  - only peripheral devices with 4-byte FIFO access register
		 *    are supported;
		 *  - minimal transfer chunk is 4 bytes and consequently
		 *    source and destination addresses must be 4-byte aligned
		 *    and transfer size must be aligned on (4 * maxburst)
		 *    boundary;
		 *  - during the transfer RAM address is being incremented by
		 *    the size of minimal transfer chunk;
		 *  - peripheral port's address is constant during the transfer.
		 */
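
		/*
		 * An illustrative client-side configuration satisfying the
		 * constraints above (a sketch, not part of this driver;
		 * "fifo_paddr" is a hypothetical 4-byte peripheral FIFO
		 * bus address):
		 *
		 *	struct dma_slave_config cfg = {
		 *		.src_addr = fifo_paddr,
		 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		 *		.src_maxburst = 16,
		 *		.dst_addr = fifo_paddr,
		 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		 *		.dst_maxburst = 16,
		 *	};
		 *
		 *	ret = dmaengine_slave_config(chan, &cfg);
		 */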

		cfg = (void *)arg;

		if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
		    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
		    !IS_ALIGNED(cfg->src_addr, 4) ||
		    !IS_ALIGNED(cfg->dst_addr, 4)) {
			return -EINVAL;
		}

		spin_lock_irqsave(&mchan->lock, flags);

		mchan->src_per_paddr = cfg->src_addr;
		mchan->src_tcd_nunits = cfg->src_maxburst;
		mchan->dst_per_paddr = cfg->dst_addr;
		mchan->dst_tcd_nunits = cfg->dst_maxburst;

		/* Apply defaults */
		if (mchan->src_tcd_nunits == 0)
			mchan->src_tcd_nunits = 1;
		if (mchan->dst_tcd_nunits == 0)
			mchan->dst_tcd_nunits = 1;

		spin_unlock_irqrestore(&mchan->lock, flags);

		return 0;

	default:
		/* Unknown command */
		break;
	}

	return -ENXIO;
}

static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;
	u8 chancnt;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		dev_err(dev, "Memory exhausted!\n");
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (mdma->irq2 == NO_IRQ) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_control = mpc_dma_device_control;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	if (mdma->is_mpc8308)
		chancnt = MPC8308_DMACHAN_MAX;
	else
		chancnt = MPC512x_DMACHAN_MAX;

	for (i = 0; i < chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	/* Register with OF helpers for DMA lookups (nonfatal) */
	if (dev->of_node) {
		retval = of_dma_controller_register(dev->of_node,
						of_dma_xlate_by_chan_id, mdma);
		if (retval)
			dev_warn(dev, "Could not register for OF lookup\n");
	}

	return 0;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}

static const struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};

static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= mpc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table	= mpc_dma_match,
	},
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");