/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DCSR(n)		(0x0000 + ((n) << 2))
#define DALGN(n)	0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD(n)		(0x020c + ((n) << 4))

#define PXA_DCSR_RUN		BIT(31)	/* Run Bit (read / write) */
#define PXA_DCSR_NODESC		BIT(30)	/* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define PXA_DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR		BIT(0)	/* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN	BIT(28)	/* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define PXA_DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST		BIT(10)	/* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)	/* Stop (read / write) */

#define PXA_DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define PXA_DCMD_ENDIAN		BIT(18)	/* Device Endian-ness. */
#define PXA_DCMD_BURST8		(1 << 16)	/* 8 byte burst */
#define PXA_DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define PXA_DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define PXA_DCMD_WIDTH1		(1 << 14)	/* 1 byte width */
#define PXA_DCMD_WIDTH2		(2 << 14)	/* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4		(3 << 14)	/* 4 byte width (Word) */
#define PXA_DCMD_LENGTH		0x01fff	/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	(PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))

struct pxad_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
	struct virt_dma_desc	vd;		/* Virtual descriptor */
	int			nb_desc;	/* Number of hw. descriptors */
	size_t			len;		/* Number of bytes xfered */
	dma_addr_t		first;		/* First descriptor's addr */

	/* At least one descriptor has an src/dst address not multiple of 8 */
	bool			misaligned;
	bool			cyclic;
	struct dma_pool		*desc_pool;	/* Channel's used allocator */

	struct pxad_desc_hw	*hw_desc[];	/* DMA coherent descriptors */
};

struct pxad_phy {
	int			idx;
	void __iomem		*base;
	struct pxad_chan	*vchan;
};

struct pxad_chan {
	struct virt_dma_chan	vc;		/* Virtual channel */
	u32			drcmr;		/* Requestor of the channel */
	enum pxad_chan_prio	prio;		/* Required priority of phy */
	/*
	 * At least one desc_sw in submitted or issued transfers on this channel
	 * has one address such as: addr % 8 != 0. This implies the DALGN
	 * setting on the phy.
	 */
	bool			misaligned;
	struct dma_slave_config	cfg;		/* Runtime config */

	/* protected by vc->lock */
	struct pxad_phy		*phy;
	struct dma_pool		*desc_pool;	/* Descriptors pool */
};

struct pxad_device {
	struct dma_device		slave;
	int				nr_chans;
	void __iomem			*base;
	struct pxad_phy			*phys;
	spinlock_t			phy_lock;	/* Phy association */
};

#define tx_to_pxad_desc(tx)					\
	container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)					\
	container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)					\
	container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)				\
	container_of((_vd), struct pxad_desc_sw, vd)

#define _phy_readl_relaxed(phy, _reg)					\
	readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)					\
	({								\
		u32 _v;							\
		_v = readl_relaxed((phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): readl(%s): 0x%08x\n", __func__, #_reg,	\
			 _v);						\
		_v;							\
	})
#define phy_writel(phy, val, _reg)					\
	do {								\
		writel((val), (phy)->base + _reg((phy)->idx));		\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel(0x%08x, %s)\n",			\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
#define phy_writel_relaxed(phy, val, _reg)				\
	do {								\
		writel_relaxed((val), (phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel_relaxed(0x%08x, %s)\n",		\
			 __func__, (u32)(val), #_reg);			\
	} while (0)

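/*
 * Requestor lines are mapped through the DRCMR registers: the first bank of
 * lines lives at offset 0x100 and the remaining lines at offset 0x1000, one
 * 32-bit register per requestor.
 */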
static unsigned int pxad_drcmr(unsigned int line)
{
	if (line < 64)
		return 0x100 + line * 4;
	return 0x1000 + line * 4;
}

static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
	int prio, i;
	struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
	struct pxad_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */
	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
		for (i = 0; i < pdev->nr_chans; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phys[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	dev_dbg(&pchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, found,
		found ? found->idx : -1);

	return found;
}

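/*
 * pxad_free_phy - unmap the requestor line in DRCMR and give the physical
 * channel back to the free pool, under the device's phy_lock.
 */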
static void pxad_free_phy(struct pxad_chan *chan)
{
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	unsigned long flags;
	u32 reg;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): freeing\n", __func__);
	if (!chan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = pxad_drcmr(chan->drcmr);
	writel_relaxed(0, chan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	chan->phy->vchan = NULL;
	chan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

static bool is_chan_running(struct pxad_chan *chan)
{
	u32 dcsr;
	struct pxad_phy *phy = chan->phy;

	if (!phy)
		return false;
	dcsr = phy_readl_relaxed(phy, DCSR);
	return dcsr & PXA_DCSR_RUN;
}

static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
	u32 dalgn;

	BUG_ON(!chan->phy);
	dalgn = phy_readl_relaxed(chan->phy, DALGN);
	return dalgn & (BIT(chan->phy->idx));
}

static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
		phy, phy->idx, misaligned);

	reg = pxad_drcmr(phy->vchan->drcmr);
	writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = phy_readl_relaxed(phy, DALGN);
	if (misaligned)
		dalgn |= BIT(phy->idx);
	else
		dalgn &= ~BIT(phy->idx);
	phy_writel_relaxed(phy, dalgn, DALGN);

	phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
		   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}

static void phy_disable(struct pxad_phy *phy)
{
	u32 dcsr;

	if (!phy)
		return;

	dcsr = phy_readl_relaxed(phy, DCSR);
	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
	phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}

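/*
 * pxad_launch_chan - acquire a physical channel if the virtual channel does
 * not hold one yet, program the first hardware descriptor address into DDADR
 * and start the transfer.
 */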
static void pxad_launch_chan(struct pxad_chan *chan,
			     struct pxad_desc_sw *desc)
{
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): desc=%p\n", __func__, desc);
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(&chan->vc.chan.dev->device,
				"%s(): no free dma channel\n", __func__);
			return;
		}
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	phy_writel(chan->phy, desc->first, DDADR);
	phy_enable(chan->phy, chan->misaligned);
}

static void set_updater_desc(struct pxad_desc_sw *sw_desc,
			     unsigned long flags)
{
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];
	dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

	updater->ddadr = DDADR_STOP;
	updater->dsadr = dma;
	updater->dtadr = dma + 8;
	updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
		(PXA_DCMD_LENGTH & sizeof(u32));
	if (flags & DMA_PREP_INTERRUPT)
		updater->dcmd |= PXA_DCMD_ENDIRQEN;
}

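/*
 * The "updater" is the last hardware descriptor of every transfer: when it
 * runs, it copies its own ddadr field (DDADR_STOP) over its own dtadr field.
 * Completion is therefore detected by dtadr no longer holding its initial
 * dsadr + 8 value.
 */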
static bool is_desc_completed(struct virt_dma_desc *vd)
{
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];

	return updater->dtadr != (updater->dsadr + 8);
}

static void pxad_desc_chain(struct virt_dma_desc *vd1,
			    struct virt_dma_desc *vd2)
{
	struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
	struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
	dma_addr_t dma_to_chain;

	dma_to_chain = desc2->first;
	desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}

static bool pxad_try_hotchain(struct virt_dma_chan *vc,
			      struct virt_dma_desc *vd)
{
	struct virt_dma_desc *vd_last_issued = NULL;
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);

	/*
	 * Attempt to hot chain the tx if the phy is still running. This is
	 * considered successful only if either the channel is still running
	 * after the chaining, or if the chained transfer is completed after
	 * having been hot chained.
	 * A change of alignment is not allowed, and forbids hotchaining.
	 */
	if (is_chan_running(chan)) {
		BUG_ON(list_empty(&vc->desc_issued));

		if (!is_running_chan_misaligned(chan) &&
		    to_pxad_sw_desc(vd)->misaligned)
			return false;

		vd_last_issued = list_entry(vc->desc_issued.prev,
					    struct virt_dma_desc, node);
		pxad_desc_chain(vd_last_issued, vd);
		if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
			return true;
	}

	return false;
}

static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);

	if (!(dint & BIT(phy->idx)))
		return PXA_DCSR_RUN;

	/* clear the channel's interrupt status */
	dcsr = phy_readl_relaxed(phy, DCSR);
	phy_writel(phy, dcsr, DCSR);
	if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
		dev_warn(&phy->vchan->vc.chan.dev->device,
			 "%s(chan=%p): PXA_DCSR_BUSERR\n",
			 __func__, &phy->vchan);

	return dcsr & ~PXA_DCSR_RUN;
}

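/*
 * Per-channel interrupt: complete every issued descriptor whose updater has
 * already run, and if the channel stopped while descriptors are still issued,
 * relaunch it with the next pending transfer.
 */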
static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
	struct pxad_phy *phy = dev_id;
	struct pxad_chan *chan = phy->vchan;
	struct virt_dma_desc *vd, *tmp;
	unsigned int dcsr;
	unsigned long flags;

	BUG_ON(!chan);

	dcsr = clear_chan_irq(phy);
	if (dcsr & PXA_DCSR_RUN)
		return IRQ_NONE;

	spin_lock_irqsave(&chan->vc.lock, flags);
	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): checking txd %p[%x]: completed=%d\n",
			__func__, vd, vd->tx.cookie, is_desc_completed(vd));
		if (is_desc_completed(vd)) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		} else {
			break;
		}
	}

	if (dcsr & PXA_DCSR_STOPSTATE) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
			__func__,
			list_empty(&chan->vc.desc_submitted),
			list_empty(&chan->vc.desc_issued));
		phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

		if (list_empty(&chan->vc.desc_issued)) {
			chan->misaligned =
				!list_empty(&chan->vc.desc_submitted);
		} else {
			vd = list_first_entry(&chan->vc.desc_issued,
					      struct virt_dma_desc, node);
			pxad_launch_chan(chan, to_pxad_sw_desc(vd));
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
	struct pxad_device *pdev = dev_id;
	struct pxad_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret = IRQ_NONE;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phys[i];
		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

static int pxad_alloc_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
					  pdev->slave.dev,
					  sizeof(struct pxad_desc_hw),
					  __alignof__(struct pxad_desc_hw),
					  0);
	if (!chan->desc_pool) {
		dev_err(&chan->vc.chan.dev->device,
			"%s(): unable to allocate descriptor pool\n",
			__func__);
		return -ENOMEM;
	}

	return 1;
}

static void pxad_free_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	vchan_free_chan_resources(&chan->vc);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static void pxad_free_desc(struct virt_dma_desc *vd)
{
	int i;
	dma_addr_t dma;
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

	BUG_ON(sw_desc->nb_desc == 0);
	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
		if (i > 0)
			dma = sw_desc->hw_desc[i - 1]->ddadr;
		else
			dma = sw_desc->first;
		dma_pool_free(sw_desc->desc_pool,
			      sw_desc->hw_desc[i], dma);
	}
	sw_desc->nb_desc = 0;
	kfree(sw_desc);
}

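/*
 * Allocate a software descriptor and its nb_hw_desc hardware descriptors from
 * the channel's dma_pool, chaining each hardware descriptor to the next one
 * through its ddadr field.
 */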
static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
	struct pxad_desc_sw *sw_desc;
	dma_addr_t dma;
	int i;

	sw_desc = kzalloc(sizeof(*sw_desc) +
			  nb_hw_desc * sizeof(struct pxad_desc_hw *),
			  GFP_NOWAIT);
	if (!sw_desc)
		return NULL;
	sw_desc->desc_pool = chan->desc_pool;

	for (i = 0; i < nb_hw_desc; i++) {
		sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
						     GFP_NOWAIT, &dma);
		if (!sw_desc->hw_desc[i]) {
			dev_err(&chan->vc.chan.dev->device,
				"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
				__func__, i, sw_desc->desc_pool);
			goto err;
		}

		if (i == 0)
			sw_desc->first = dma;
		else
			sw_desc->hw_desc[i - 1]->ddadr = dma;
		sw_desc->nb_desc++;
	}

	return sw_desc;
err:
	pxad_free_desc(&sw_desc->vd);
	return NULL;
}

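/*
 * Submission first tries to hot-chain the descriptor behind a transfer that
 * is still running; otherwise it is cold-chained to the tail of the submitted
 * list (unless that would introduce a new misalignment) and started later,
 * either from issue_pending or from the stop interrupt handler.
 */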
static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);
	struct virt_dma_desc *vd_chained = NULL,
		*vd = container_of(tx, struct virt_dma_desc, tx);
	dma_cookie_t cookie;
	unsigned long flags;

	set_updater_desc(to_pxad_sw_desc(vd), tx->flags);

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
		list_move_tail(&vd->node, &vc->desc_issued);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): txd %p[%x]: submitted (hot linked)\n",
			__func__, vd, cookie);
		goto out;
	}

	/*
	 * Fallback to placing the tx in the submitted queue
	 */
	if (!list_empty(&vc->desc_submitted)) {
		vd_chained = list_entry(vc->desc_submitted.prev,
					struct virt_dma_desc, node);
		/*
		 * Only chain the descriptors if no new misalignment is
		 * introduced. If a new misalignment is chained, let the channel
		 * stop, and be relaunched in misalign mode from the irq
		 * handler.
		 */
		if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
			pxad_desc_chain(vd_chained, vd);
		else
			vd_chained = NULL;
	}
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]: submitted (%s linked)\n",
		__func__, vd, cookie, vd_chained ? "cold" : "not");
	list_move_tail(&vd->node, &vc->desc_submitted);
	chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;

out:
	spin_unlock_irqrestore(&vc->lock, flags);
	return cookie;
}

static void pxad_issue_pending(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct virt_dma_desc *vd_first;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (list_empty(&chan->vc.desc_submitted))
		goto out;

	vd_first = list_first_entry(&chan->vc.desc_submitted,
				    struct virt_dma_desc, node);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie);

	vchan_issue_pending(&chan->vc);
	if (!pxad_try_hotchain(&chan->vc, vd_first))
		pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static inline struct dma_async_tx_descriptor *
pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
	     unsigned long tx_flags)
{
	struct dma_async_tx_descriptor *tx;
	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);

	tx = vchan_tx_prep(vc, vd, tx_flags);
	tx->tx_submit = pxad_tx_submit;
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
		vc, vd, vd->tx.cookie,
		tx_flags);

	return tx;
}

static void pxad_get_config(struct pxad_chan *chan,
			    enum dma_transfer_direction dir,
			    u32 *dcmd, u32 *dev_src, u32 *dev_dst)
{
	u32 maxburst = 0, dev_addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	*dcmd = 0;
	if (chan->cfg.direction == DMA_DEV_TO_MEM) {
		maxburst = chan->cfg.src_maxburst;
		width = chan->cfg.src_addr_width;
		dev_addr = chan->cfg.src_addr;
		*dev_src = dev_addr;
		*dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
	}
	if (chan->cfg.direction == DMA_MEM_TO_DEV) {
		maxburst = chan->cfg.dst_maxburst;
		width = chan->cfg.dst_addr_width;
		dev_addr = chan->cfg.dst_addr;
		*dev_dst = dev_addr;
		*dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
	}
	if (chan->cfg.direction == DMA_MEM_TO_MEM)
		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
			PXA_DCMD_INCSRCADDR;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n",
		__func__, dev_addr, maxburst, width, dir);

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		*dcmd |= PXA_DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		*dcmd |= PXA_DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		*dcmd |= PXA_DCMD_WIDTH4;

	if (maxburst == 8)
		*dcmd |= PXA_DCMD_BURST8;
	else if (maxburst == 16)
		*dcmd |= PXA_DCMD_BURST16;
	else if (maxburst == 32)
		*dcmd |= PXA_DCMD_BURST32;

	/* FIXME: drivers should be ported over to use the filter
	 * function. Once that's done, the following two lines can
	 * be removed.
	 */
	if (chan->cfg.slave_id)
		chan->drcmr = chan->cfg.slave_id;
}

static struct dma_async_tx_descriptor *
pxad_prep_memcpy(struct dma_chan *dchan,
		 dma_addr_t dma_dst, dma_addr_t dma_src,
		 size_t len, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw *hw_desc;
	u32 dcmd;
	unsigned int i, nb_desc = 0;
	size_t copy;

	if (!dchan || !len)
		return NULL;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
		__func__, (unsigned long)dma_dst, (unsigned long)dma_src,
		len, flags);
	pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);

	nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->len = len;

	if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
	    !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
		sw_desc->misaligned = true;

	i = 0;
	do {
		hw_desc = sw_desc->hw_desc[i++];
		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
		hw_desc->dsadr = dma_src;
		hw_desc->dtadr = dma_dst;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		   unsigned int sg_len, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0, i, j = 0;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dir=%d flags=%lx\n", __func__, dir, flags);

	for_each_sg(sgl, sg, sg_len, i)
		nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		dma = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		sw_desc->len += avail;

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (dma & 0x7)
				sw_desc->misaligned = true;

			sw_desc->hw_desc[j]->dcmd =
				dcmd | (PXA_DCMD_LENGTH & len);
			sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
			sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;

			dma += len;
			avail -= len;
		} while (avail);
	}
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

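/*
 * Cyclic transfers are built as one hardware descriptor per period: the
 * period length must fit in a single descriptor and respect the controller's
 * alignment, and every period raises an end-of-transfer interrupt.
 */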
static struct dma_async_tx_descriptor *
pxad_prep_dma_cyclic(struct dma_chan *dchan,
		     dma_addr_t buf_addr, size_t len, size_t period_len,
		     enum dma_transfer_direction dir, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw **phw_desc;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0;

	if (!dchan || !len || !period_len)
		return NULL;
	if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
		dev_err(&chan->vc.chan.dev->device,
			"Unsupported direction for cyclic DMA\n");
		return NULL;
	}
	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
	    !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
		__func__, (unsigned long)buf_addr, len, period_len, dir, flags);

	nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
	nb_desc *= DIV_ROUND_UP(len, period_len);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->cyclic = true;
	sw_desc->len = len;

	phw_desc = sw_desc->hw_desc;
	dma = buf_addr;
	do {
		phw_desc[0]->dsadr = dsadr ? dsadr : dma;
		phw_desc[0]->dtadr = dtadr ? dtadr : dma;
		phw_desc[0]->dcmd = dcmd;
		phw_desc++;
		dma += period_len;
		len -= period_len;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static int pxad_config(struct dma_chan *dchan,
		       struct dma_slave_config *cfg)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	if (!dchan)
		return -EINVAL;

	chan->cfg = *cfg;

	return 0;
}

static int pxad_terminate_all(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	struct virt_dma_desc *vd = NULL;
	unsigned long flags;
	struct pxad_phy *phy;
	LIST_HEAD(head);

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vchan %p: terminate all\n", __func__, &chan->vc);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);

	list_for_each_entry(vd, &head, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): cancelling txd %p[%x] (completed=%d)", __func__,
			vd, vd->tx.cookie, is_desc_completed(vd));
	}

	phy = chan->phy;
	if (phy) {
		phy_disable(chan->phy);
		chan->phy = NULL;
		spin_lock(&pdev->phy_lock);
		phy->vchan = NULL;
		spin_unlock(&pdev->phy_lock);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}

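/*
 * Residue is computed by reading the current source (or target) address from
 * the phy and summing the bytes of every hardware descriptor that has not
 * been reached yet, walking the descriptor chain of the cookie's transfer.
 */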
static unsigned int pxad_residue(struct pxad_chan *chan,
				 dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pxad_desc_sw *sw_desc = NULL;
	struct pxad_desc_hw *hw_desc = NULL;
	u32 curr, start, len, end, residue = 0;
	unsigned long flags;
	bool passed = false;
	int i;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vd = vchan_find_desc(&chan->vc, cookie);
	if (!vd)
		goto out;

	sw_desc = to_pxad_sw_desc(vd);
	if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
		curr = phy_readl_relaxed(chan->phy, DSADR);
	else
		curr = phy_readl_relaxed(chan->phy, DTADR);

	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
		hw_desc = sw_desc->hw_desc[i];
		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
			start = hw_desc->dsadr;
		else
			start = hw_desc->dtadr;
		len = hw_desc->dcmd & PXA_DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor
		 * which lies inside the boundaries of the curr
		 * pointer. All descriptors that occur in the list
		 * _after_ we found that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */
		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}
	}
	if (!passed)
		residue = sw_desc->len;

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x] sw_desc=%p: %d\n",
		__func__, vd, cookie, sw_desc, residue);
	return residue;
}

static enum dma_status pxad_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(txstate && (ret != DMA_ERROR)))
		dma_set_residue(txstate, pxad_residue(chan, cookie));

	return ret;
}

static void pxad_free_channels(struct dma_device *dmadev)
{
	struct pxad_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static int pxad_remove(struct platform_device *op)
{
	struct pxad_device *pdev = platform_get_drvdata(op);

	pxad_free_channels(&pdev->slave);
	dma_async_device_unregister(&pdev->slave);
	return 0;
}

static int pxad_init_phys(struct platform_device *op,
			  struct pxad_device *pdev,
			  unsigned int nb_phy_chans)
{
	int irq0, irq, nr_irq = 0, i, ret = 0;
	struct pxad_phy *phy;

	irq0 = platform_get_irq(op, 0);
	if (irq0 < 0)
		return irq0;

	pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
				  sizeof(pdev->phys[0]), GFP_KERNEL);
	if (!pdev->phys)
		return -ENOMEM;

	for (i = 0; i < nb_phy_chans; i++)
		if (platform_get_irq(op, i) > 0)
			nr_irq++;

	for (i = 0; i < nb_phy_chans; i++) {
		phy = &pdev->phys[i];
		phy->base = pdev->base;
		phy->idx = i;
		irq = platform_get_irq(op, i);
		if ((nr_irq > 1) && (irq > 0))
			ret = devm_request_irq(&op->dev, irq,
					       pxad_chan_handler,
					       IRQF_SHARED, "pxa-dma", phy);
		if ((nr_irq == 1) && (i == 0))
			ret = devm_request_irq(&op->dev, irq0,
					       pxad_int_handler,
					       IRQF_SHARED, "pxa-dma", pdev);
		if (ret) {
			dev_err(pdev->slave.dev,
				"%s(): can't request irq %d:%d\n", __func__,
				irq, ret);
			return ret;
		}
	}

	return 0;
}

static const struct of_device_id pxad_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, pxad_dt_ids);

static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pxad_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan)
		return NULL;

	to_pxad_chan(chan)->drcmr = dma_spec->args[0];
	to_pxad_chan(chan)->prio = dma_spec->args[1];

	return chan;
}

static int pxad_init_dmadev(struct platform_device *op,
			    struct pxad_device *pdev,
			    unsigned int nr_phy_chans)
{
	int ret;
	unsigned int i;
	struct pxad_chan *c;

	pdev->nr_chans = nr_phy_chans;
	INIT_LIST_HEAD(&pdev->slave.channels);
	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
	pdev->slave.device_tx_status = pxad_tx_status;
	pdev->slave.device_issue_pending = pxad_issue_pending;
	pdev->slave.device_config = pxad_config;
	pdev->slave.device_terminate_all = pxad_terminate_all;

	if (op->dev.coherent_dma_mask)
		dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
	else
		dma_set_mask(&op->dev, DMA_BIT_MASK(32));

	ret = pxad_init_phys(op, pdev, nr_phy_chans);
	if (ret)
		return ret;

	for (i = 0; i < nr_phy_chans; i++) {
		c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
		if (!c)
			return -ENOMEM;
		c->vc.desc_free = pxad_free_desc;
		vchan_init(&c->vc, &pdev->slave);
	}

	return dma_async_device_register(&pdev->slave);
}

static int pxad_probe(struct platform_device *op)
{
	struct pxad_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int ret, dma_channels = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(pxad_dt_ids, &op->dev);
	if (of_id)
		of_property_read_u32(op->dev.of_node, "#dma-channels",
				     &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channel */

	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
	pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
	pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
	pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;

	pdev->slave.copy_align = PDMA_ALIGNMENT;
	pdev->slave.src_addr_widths = widths;
	pdev->slave.dst_addr_widths = widths;
	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	pdev->slave.dev = &op->dev;
	ret = pxad_init_dmadev(op, pdev, dma_channels);
	if (ret) {
		dev_err(pdev->slave.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 pxad_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(pdev->slave.dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id pxad_id_table[] = {
	{ "pxa-dma", },
	{ },
};

static struct platform_driver pxad_driver = {
	.driver		= {
		.name	= "pxa-dma",
		.of_match_table = pxad_dt_ids,
	},
	.id_table	= pxad_id_table,
	.probe		= pxad_probe,
	.remove		= pxad_remove,
};

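/*
 * Legacy (non device-tree) clients acquire a channel through this filter. A
 * minimal sketch, assuming struct pxad_param from <linux/dma/pxa-dma.h>; the
 * requestor number 25 is purely illustrative:
 *
 *	dma_cap_mask_t mask;
 *	struct pxad_param param = { .prio = PXAD_PRIO_LOWEST, .drcmr = 25 };
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pxad_filter_fn, &param);
 *
 * The filter binds the requestor line and priority to the virtual channel.
 */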
bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	struct pxad_chan *c = to_pxad_chan(chan);
	struct pxad_param *p = param;

	if (chan->device->dev->driver != &pxad_driver.driver)
		return false;

	c->drcmr = p->drcmr;
	c->prio = p->prio;

	return true;
}
EXPORT_SYMBOL_GPL(pxad_filter_fn);

module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");