/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from the MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This is the initial version of the MPC5121 DMA driver. Only memory-to-memory
 * transfers are supported (tested using the dmatest module).
 */
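/*
 * Usage sketch (illustrative only, not part of this driver): clients
 * reach this engine through the generic dmaengine DMA_MEMCPY API. The
 * calls below are standard dmaengine entry points; dst_dma, src_dma
 * and len are placeholder values for the example.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst_dma, src_dma,
 *						   len, DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	(then wait for completion via dma_async_is_tx_complete() or a
 *	 tx->callback, and release the channel with dma_release_channel())
 */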
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"
/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64
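/*
 * Note: each channel allocates one coherent buffer of
 * MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd) bytes (64 TCDs of
 * 32 bytes each, i.e. 2 KiB) in mpc_dma_alloc_chan_resources() below.
 */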
/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000
/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64
/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)
/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)	/* Error channel */
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)
#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)
#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05
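/*
 * The TSIZE values encode the transfer width as a power of two
 * (width = 1 << TSIZE): 0x00 = 1 byte, 0x01 = 2, 0x02 = 4, 0x04 = 16,
 * 0x05 = 32 bytes. The 8-byte encoding (0x03) is unused by this driver.
 */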
/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];	/* DMA channels(0~63) priority */
};
/* Transfer Control Descriptor */
struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;	/* Link channel for major loop complete */
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};
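/*
 * Each in-memory TCD above is laid out exactly like the 32-byte
 * hardware TCD array at MPC_DMA_TCD_OFFSET, so a prepared descriptor
 * can be copied into the controller verbatim with memcpy_toio() (see
 * mpc_dma_execute() below).
 */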
struct mpc_dma_desc {
	struct dma_async_tx_descriptor desc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	int error;
	struct list_head node;
};
struct mpc_dma_chan {
	struct dma_chan chan;
	struct list_head free;
	struct list_head prepared;
	struct list_head queued;
	struct list_head active;
	struct list_head completed;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;

	/* Lock for this structure */
	spinlock_t lock;
};
struct mpc_dma {
	struct dma_device dma;
	struct tasklet_struct tasklet;
	struct mpc_dma_chan channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem *regs;
	struct mpc_dma_tcd __iomem *tcd;
	int irq;
	int irq2;
	uint error_status;
	int is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t error_status_lock;
};
#define DRV_NAME	"mpc512x_dma"
/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}
/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}
/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met while calling mpc_dma_execute():
 * a) mchan->lock is acquired,
 * b) mchan->active list is empty,
 * c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	/* Move all queued descriptors to active list */
	list_splice_tail_init(&mchan->queued, &mchan->active);

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;
		if (!prev) {
			prev = mdesc;
			continue;
		}
		/* Link the previous TCD to this one by scatter/gather */
		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;
		prev = mdesc;
	}

	/* Interrupt at the end of the chain */
	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
	if (first != prev)
		mdma->tcd[cid].e_sg = 1;
	out_8(&mdma->regs->dmassrt, cid);
}
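/*
 * Chaining sketch (illustrative): with three queued descriptors A, B
 * and C, the loop above produces
 *
 *	A.dlast_sga = B.tcd_paddr, A.e_sg = 1
 *	B.dlast_sga = C.tcd_paddr, B.e_sg = 1, B.start = 1
 *	C.start = 1, C.int_maj = 1
 *
 * Only A is copied into the hardware TCD; the controller reloads B and
 * C itself through the scatter/gather address as each major loop ends.
 */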
/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	/* Process channels from the highest set bit down */
	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}
/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}
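/*
 * Everything beyond status handling (error reporting, client callbacks,
 * descriptor recycling) is deferred from hard-IRQ context to the
 * tasklet scheduled above.
 */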
/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}
/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported the following error(s) on channel %u:\n",
						MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	/* Handle completed descriptors */
	mpc_dma_process_completed(mdma);
}
/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}
/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}
/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	mchan->tcd = NULL;
	mchan->tcd_paddr = 0;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}
/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}
/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
								node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	/* Pick the widest transfer size the alignment allows */
	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
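/*
 * Worked example (illustrative): copying 4096 bytes between 32-byte
 * aligned buffers takes the first branch above, so the request becomes
 * a single TCD with ssize = dsize = MPC_DMA_TSIZE_32, soff = doff = 32,
 * nbytes = 4096 and citer = biter = 1, i.e. one major iteration whose
 * minor loop moves all 4096 bytes in 32-byte bursts.
 */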
static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (mdma->irq2 == NO_IRQ) {
			dev_err(dev, "Error mapping IRQ!\n");
			return -EINVAL;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		return retval;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		return -EBUSY;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		return -ENOMEM;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs) + MPC_DMA_TCD_OFFSET);

	retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		return -EINVAL;
	}

	if (mdma->is_mpc8308) {
		retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0, DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			return -EINVAL;
		}
	}

	spin_lock_init(&mdma->error_status_lock);
	dma = &mdma->dma;
	dma->dev = dev;
	if (mdma->is_mpc8308)
		dma->chancnt = MPC8308_DMACHAN_MAX;
	else
		dma->chancnt = MPC512x_DMACHAN_MAX;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	for (i = 0; i < dma->chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}
	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval) {
		devm_free_irq(dev, mdma->irq, mdma);
		irq_dispose_mapping(mdma->irq);
	}

	return retval;
}
static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&mdma->dma);
	devm_free_irq(dev, mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}
static struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};
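/*
 * Example device tree node (illustrative only; the exact reg and
 * interrupts values depend on the SoC dtsi, here modeled on the
 * MPC5121):
 *
 *	dma@14000 {
 *		compatible = "fsl,mpc5121-dma";
 *		reg = <0x14000 0x1800>;
 *		interrupts = <65 0x8>;
 *	};
 */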
static struct platform_driver mpc_dma_driver = {
	.probe = mpc_dma_probe,
	.remove = mpc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = mpc_dma_match,
	},
};
module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");