dw_dmac: allocate dma descriptors from DMA_COHERENT memory
[firefly-linux-kernel-4.4.55.git] drivers/dma/dw_dmac.c
1 /*
2  * Core driver for the Synopsys DesignWare DMA Controller
3  *
4  * Copyright (C) 2007-2008 Atmel Corporation
5  * Copyright (C) 2010-2011 ST Microelectronics
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11
12 #include <linux/bitops.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/dmaengine.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dmapool.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/io.h>
21 #include <linux/of.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/slab.h>
26
27 #include "dw_dmac_regs.h"
28 #include "dmaengine.h"
29
30 /*
31  * This supports the Synopsys "DesignWare AHB Central DMA Controller",
32  * This supports the Synopsys "DesignWare AHB Central DMA Controller"
33  * (DW_ahb_dmac), which is used with various AMBA 2.0 systems (not all
34  * information beyond what licensees probably provide.
35  *
36  * The driver has currently been tested only with the Atmel AT32AP7000,
37  * which does not support descriptor writeback.
38  */
39
40 static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
41 {
42         return slave ? slave->dst_master : 0;
43 }
44
45 static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
46 {
47         return slave ? slave->src_master : 1;
48 }
49
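/*
 * Default CTL_LO value for a channel: burst sizes come from the slave
 * configuration for slave transfers (MSIZE_16 otherwise), LLP updates are
 * enabled on both source and destination, and the source/destination AHB
 * masters are taken from the channel's dw_dma_slave data.
 */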
50 #define DWC_DEFAULT_CTLLO(_chan) ({                             \
51                 struct dw_dma_slave *__slave = (_chan->private);        \
52                 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);       \
53                 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
54                 bool _is_slave = is_slave_direction(_dwc->direction);   \
55                 int _dms = dwc_get_dms(__slave);                \
56                 int _sms = dwc_get_sms(__slave);                \
57                 u8 _smsize = _is_slave ? _sconfig->src_maxburst :       \
58                         DW_DMA_MSIZE_16;                        \
59                 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :       \
60                         DW_DMA_MSIZE_16;                        \
61                                                                 \
62                 (DWC_CTLL_DST_MSIZE(_dmsize)                    \
63                  | DWC_CTLL_SRC_MSIZE(_smsize)                  \
64                  | DWC_CTLL_LLP_D_EN                            \
65                  | DWC_CTLL_LLP_S_EN                            \
66                  | DWC_CTLL_DMS(_dms)                           \
67                  | DWC_CTLL_SMS(_sms));                         \
68         })
69
70 /*
71  * Number of descriptors to allocate for each channel. This should be
72  * made configurable somehow; preferably, the clients (at least the
73  * ones using slave transfers) should be able to give us a hint.
74  */
75 #define NR_DESCS_PER_CHANNEL    64
76
77 #define SRC_MASTER      0
78 #define DST_MASTER      1
79
80 static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
81 {
82         struct dw_dma *dw = to_dw_dma(chan->device);
83         struct dw_dma_slave *dws = chan->private;
84
85         if (master == SRC_MASTER)
86                 return dw->data_width[dwc_get_sms(dws)];
87         else if (master == DST_MASTER)
88                 return dw->data_width[dwc_get_dms(dws)];
89
90         return 0;
91 }
92
93 /*----------------------------------------------------------------------*/
94
95 static struct device *chan2dev(struct dma_chan *chan)
96 {
97         return &chan->dev->device;
98 }
99 static struct device *chan2parent(struct dma_chan *chan)
100 {
101         return chan->dev->device.parent;
102 }
103
104 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
105 {
106         return to_dw_desc(dwc->active_list.next);
107 }
108
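/*
 * Grab the first descriptor on the channel's free list that the client has
 * already ACKed; returns NULL if none is available.
 */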
109 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
110 {
111         struct dw_desc *desc, *_desc;
112         struct dw_desc *ret = NULL;
113         unsigned int i = 0;
114         unsigned long flags;
115
116         spin_lock_irqsave(&dwc->lock, flags);
117         list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
118                 i++;
119                 if (async_tx_test_ack(&desc->txd)) {
120                         list_del(&desc->desc_node);
121                         ret = desc;
122                         break;
123                 }
124                 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
125         }
126         spin_unlock_irqrestore(&dwc->lock, flags);
127
128         dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
129
130         return ret;
131 }
132
133 /*
134  * Move a descriptor, including any children, to the free list.
135  * `desc' must not be on any lists.
136  */
137 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
138 {
139         unsigned long flags;
140
141         if (desc) {
142                 struct dw_desc *child;
143
144                 spin_lock_irqsave(&dwc->lock, flags);
145                 list_for_each_entry(child, &desc->tx_list, desc_node)
146                         dev_vdbg(chan2dev(&dwc->chan),
147                                         "moving child desc %p to freelist\n",
148                                         child);
149                 list_splice_init(&desc->tx_list, &dwc->free_list);
150                 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
151                 list_add(&desc->desc_node, &dwc->free_list);
152                 spin_unlock_irqrestore(&dwc->lock, flags);
153         }
154 }
155
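/*
 * One-time channel setup: program CFG_LO/CFG_HI (from the slave data when
 * present, otherwise from the configured slave_id) and unmask this
 * channel's transfer and error interrupts.
 */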
156 static void dwc_initialize(struct dw_dma_chan *dwc)
157 {
158         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
159         struct dw_dma_slave *dws = dwc->chan.private;
160         u32 cfghi = DWC_CFGH_FIFO_MODE;
161         u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
162
163         if (dwc->initialized)
164                 return;
165
166         if (dws) {
167                 /*
168                  * We need controller-specific data to set up slave
169                  * transfers.
170                  */
171                 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
172
173                 cfghi = dws->cfg_hi;
174                 cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
175         } else {
176                 if (dwc->direction == DMA_MEM_TO_DEV)
177                         cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
178                 else if (dwc->direction == DMA_DEV_TO_MEM)
179                         cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
180         }
181
182         channel_writel(dwc, CFG_LO, cfglo);
183         channel_writel(dwc, CFG_HI, cfghi);
184
185         /* Enable interrupts */
186         channel_set_bit(dw, MASK.XFER, dwc->mask);
187         channel_set_bit(dw, MASK.ERROR, dwc->mask);
188
189         dwc->initialized = true;
190 }
191
192 /*----------------------------------------------------------------------*/
193
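/*
 * Despite the name, this returns the number of trailing zero bits in v
 * (capped at 3), i.e. the largest transfer-width exponent that the
 * address/length alignment allows.
 */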
194 static inline unsigned int dwc_fast_fls(unsigned long long v)
195 {
196         /*
197          * We can be a lot more clever here, but this should take care
198          * of the most common optimization.
199          */
200         if (!(v & 7))
201                 return 3;
202         else if (!(v & 3))
203                 return 2;
204         else if (!(v & 1))
205                 return 1;
206         return 0;
207 }
208
209 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
210 {
211         dev_err(chan2dev(&dwc->chan),
212                 "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
213                 channel_readl(dwc, SAR),
214                 channel_readl(dwc, DAR),
215                 channel_readl(dwc, LLP),
216                 channel_readl(dwc, CTL_HI),
217                 channel_readl(dwc, CTL_LO));
218 }
219
220 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
221 {
222         channel_clear_bit(dw, CH_EN, dwc->mask);
223         while (dma_readl(dw, CH_EN) & dwc->mask)
224                 cpu_relax();
225 }
226
227 /*----------------------------------------------------------------------*/
228
229 /* Perform single block transfer */
230 static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
231                                        struct dw_desc *desc)
232 {
233         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
234         u32             ctllo;
235
236         /* Software emulation of LLP mode relies on interrupts to continue
237          * multi-block transfer. */
238         ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
239
240         channel_writel(dwc, SAR, desc->lli.sar);
241         channel_writel(dwc, DAR, desc->lli.dar);
242         channel_writel(dwc, CTL_LO, ctllo);
243         channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
244         channel_set_bit(dw, CH_EN, dwc->mask);
245
246         /* Move pointer to next descriptor */
247         dwc->tx_node_active = dwc->tx_node_active->next;
248 }
249
250 /* Called with dwc->lock held and bh disabled */
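/*
 * Start the transfer described by @first. Channels without hardware
 * multi-block (LLP) support fall back to software emulation: only the
 * first block is programmed here, the remaining ones are issued one at a
 * time from the interrupt tasklet.
 */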
251 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
252 {
253         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
254         unsigned long   was_soft_llp;
255
256         /* ASSERT:  channel is idle */
257         if (dma_readl(dw, CH_EN) & dwc->mask) {
258                 dev_err(chan2dev(&dwc->chan),
259                         "BUG: Attempted to start non-idle channel\n");
260                 dwc_dump_chan_regs(dwc);
261
262                 /* The tasklet will hopefully advance the queue... */
263                 return;
264         }
265
266         if (dwc->nollp) {
267                 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
268                                                 &dwc->flags);
269                 if (was_soft_llp) {
270                         dev_err(chan2dev(&dwc->chan),
271                                 "BUG: Attempted to start new LLP transfer "
272                                 "inside ongoing one\n");
273                         return;
274                 }
275
276                 dwc_initialize(dwc);
277
278                 dwc->tx_list = &first->tx_list;
279                 dwc->tx_node_active = &first->tx_list;
280
281                 dwc_do_single_block(dwc, first);
282
283                 return;
284         }
285
286         dwc_initialize(dwc);
287
288         channel_writel(dwc, LLP, first->txd.phys);
289         channel_writel(dwc, CTL_LO,
290                         DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
291         channel_writel(dwc, CTL_HI, 0);
292         channel_set_bit(dw, CH_EN, dwc->mask);
293 }
294
295 /*----------------------------------------------------------------------*/
296
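/*
 * Retire a finished descriptor: complete its cookie, ACK it and its
 * children, move everything back to the free list, unmap the buffers of
 * memory-to-memory transfers and finally, outside the lock, run the client
 * callback if one was requested.
 */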
297 static void
298 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
299                 bool callback_required)
300 {
301         dma_async_tx_callback           callback = NULL;
302         void                            *param = NULL;
303         struct dma_async_tx_descriptor  *txd = &desc->txd;
304         struct dw_desc                  *child;
305         unsigned long                   flags;
306
307         dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
308
309         spin_lock_irqsave(&dwc->lock, flags);
310         dma_cookie_complete(txd);
311         if (callback_required) {
312                 callback = txd->callback;
313                 param = txd->callback_param;
314         }
315
316         /* async_tx_ack */
317         list_for_each_entry(child, &desc->tx_list, desc_node)
318                 async_tx_ack(&child->txd);
319         async_tx_ack(&desc->txd);
320
321         list_splice_init(&desc->tx_list, &dwc->free_list);
322         list_move(&desc->desc_node, &dwc->free_list);
323
324         if (!is_slave_direction(dwc->direction)) {
325                 struct device *parent = chan2parent(&dwc->chan);
326                 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
327                         if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
328                                 dma_unmap_single(parent, desc->lli.dar,
329                                                 desc->len, DMA_FROM_DEVICE);
330                         else
331                                 dma_unmap_page(parent, desc->lli.dar,
332                                                 desc->len, DMA_FROM_DEVICE);
333                 }
334                 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
335                         if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
336                                 dma_unmap_single(parent, desc->lli.sar,
337                                                 desc->len, DMA_TO_DEVICE);
338                         else
339                                 dma_unmap_page(parent, desc->lli.sar,
340                                                 desc->len, DMA_TO_DEVICE);
341                 }
342         }
343
344         spin_unlock_irqrestore(&dwc->lock, flags);
345
346         if (callback)
347                 callback(param);
348 }
349
350 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
351 {
352         struct dw_desc *desc, *_desc;
353         LIST_HEAD(list);
354         unsigned long flags;
355
356         spin_lock_irqsave(&dwc->lock, flags);
357         if (dma_readl(dw, CH_EN) & dwc->mask) {
358                 dev_err(chan2dev(&dwc->chan),
359                         "BUG: XFER bit set, but channel not idle!\n");
360
361                 /* Try to continue after resetting the channel... */
362                 dwc_chan_disable(dw, dwc);
363         }
364
365         /*
366          * Submit queued descriptors ASAP, i.e. before we go through
367          * the completed ones.
368          */
369         list_splice_init(&dwc->active_list, &list);
370         if (!list_empty(&dwc->queue)) {
371                 list_move(dwc->queue.next, &dwc->active_list);
372                 dwc_dostart(dwc, dwc_first_active(dwc));
373         }
374
375         spin_unlock_irqrestore(&dwc->lock, flags);
376
377         list_for_each_entry_safe(desc, _desc, &list, desc_node)
378                 dwc_descriptor_complete(dwc, desc, true);
379 }
380
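/*
 * Walk the active list and retire every descriptor the hardware has moved
 * past, using the current LLP register value to spot the one still in
 * flight. Once everything has completed, start whatever is waiting on the
 * queue.
 */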
381 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
382 {
383         dma_addr_t llp;
384         struct dw_desc *desc, *_desc;
385         struct dw_desc *child;
386         u32 status_xfer;
387         unsigned long flags;
388
389         spin_lock_irqsave(&dwc->lock, flags);
390         llp = channel_readl(dwc, LLP);
391         status_xfer = dma_readl(dw, RAW.XFER);
392
393         if (status_xfer & dwc->mask) {
394                 /* Everything we've submitted is done */
395                 dma_writel(dw, CLEAR.XFER, dwc->mask);
396                 spin_unlock_irqrestore(&dwc->lock, flags);
397
398                 dwc_complete_all(dw, dwc);
399                 return;
400         }
401
402         if (list_empty(&dwc->active_list)) {
403                 spin_unlock_irqrestore(&dwc->lock, flags);
404                 return;
405         }
406
407         dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
408                         (unsigned long long)llp);
409
410         list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
411                 /* check first descriptor's addr */
412                 if (desc->txd.phys == llp) {
413                         spin_unlock_irqrestore(&dwc->lock, flags);
414                         return;
415                 }
416
417                 /* check first descriptor's llp */
418                 if (desc->lli.llp == llp) {
419                         /* This one is currently in progress */
420                         spin_unlock_irqrestore(&dwc->lock, flags);
421                         return;
422                 }
423
424                 list_for_each_entry(child, &desc->tx_list, desc_node)
425                         if (child->lli.llp == llp) {
426                                 /* Currently in progress */
427                                 spin_unlock_irqrestore(&dwc->lock, flags);
428                                 return;
429                         }
430
431                 /*
432                  * No descriptors so far seem to be in progress, i.e.
433                  * this one must be done.
434                  */
435                 spin_unlock_irqrestore(&dwc->lock, flags);
436                 dwc_descriptor_complete(dwc, desc, true);
437                 spin_lock_irqsave(&dwc->lock, flags);
438         }
439
440         dev_err(chan2dev(&dwc->chan),
441                 "BUG: All descriptors done, but channel not idle!\n");
442
443         /* Try to continue after resetting the channel... */
444         dwc_chan_disable(dw, dwc);
445
446         if (!list_empty(&dwc->queue)) {
447                 list_move(dwc->queue.next, &dwc->active_list);
448                 dwc_dostart(dwc, dwc_first_active(dwc));
449         }
450         spin_unlock_irqrestore(&dwc->lock, flags);
451 }
452
453 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
454 {
455         dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
456                  lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
457 }
458
459 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
460 {
461         struct dw_desc *bad_desc;
462         struct dw_desc *child;
463         unsigned long flags;
464
465         dwc_scan_descriptors(dw, dwc);
466
467         spin_lock_irqsave(&dwc->lock, flags);
468
469         /*
470          * The descriptor currently at the head of the active list is
471          * borked. Since we don't have any way to report errors, we'll
472          * just have to scream loudly and try to carry on.
473          */
474         bad_desc = dwc_first_active(dwc);
475         list_del_init(&bad_desc->desc_node);
476         list_move(dwc->queue.next, dwc->active_list.prev);
477
478         /* Clear the error flag and try to restart the controller */
479         dma_writel(dw, CLEAR.ERROR, dwc->mask);
480         if (!list_empty(&dwc->active_list))
481                 dwc_dostart(dwc, dwc_first_active(dwc));
482
483         /*
484          * WARN may seem harsh, but since this only happens
485          * when someone submits a bad physical address in a
486          * descriptor, we should consider ourselves lucky that the
487          * controller flagged an error instead of scribbling over
488          * random memory locations.
489          */
490         dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
491                                        "  cookie: %d\n", bad_desc->txd.cookie);
492         dwc_dump_lli(dwc, &bad_desc->lli);
493         list_for_each_entry(child, &bad_desc->tx_list, desc_node)
494                 dwc_dump_lli(dwc, &child->lli);
495
496         spin_unlock_irqrestore(&dwc->lock, flags);
497
498         /* Pretend the descriptor completed successfully */
499         dwc_descriptor_complete(dwc, bad_desc, true);
500 }
501
502 /* --------------------- Cyclic DMA API extensions -------------------- */
503
504 inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
505 {
506         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
507         return channel_readl(dwc, SAR);
508 }
509 EXPORT_SYMBOL(dw_dma_get_src_addr);
510
511 inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
512 {
513         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
514         return channel_readl(dwc, DAR);
515 }
516 EXPORT_SYMBOL(dw_dma_get_dst_addr);
517
518 /* Called from the tasklet with transfer/error interrupts masked; takes dwc->lock as needed */
519 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
520                 u32 status_err, u32 status_xfer)
521 {
522         unsigned long flags;
523
524         if (dwc->mask) {
525                 void (*callback)(void *param);
526                 void *callback_param;
527
528                 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
529                                 channel_readl(dwc, LLP));
530
531                 callback = dwc->cdesc->period_callback;
532                 callback_param = dwc->cdesc->period_callback_param;
533
534                 if (callback)
535                         callback(callback_param);
536         }
537
538         /*
539          * Error and transfer complete are highly unlikely, and will most
540          * likely be due to a configuration error by the user.
541          */
542         if (unlikely(status_err & dwc->mask) ||
543                         unlikely(status_xfer & dwc->mask)) {
544                 int i;
545
546                 dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
547                                 "interrupt, stopping DMA transfer\n",
548                                 status_xfer ? "xfer" : "error");
549
550                 spin_lock_irqsave(&dwc->lock, flags);
551
552                 dwc_dump_chan_regs(dwc);
553
554                 dwc_chan_disable(dw, dwc);
555
556                 /* make sure DMA does not restart by loading a new list */
557                 channel_writel(dwc, LLP, 0);
558                 channel_writel(dwc, CTL_LO, 0);
559                 channel_writel(dwc, CTL_HI, 0);
560
561                 dma_writel(dw, CLEAR.ERROR, dwc->mask);
562                 dma_writel(dw, CLEAR.XFER, dwc->mask);
563
564                 for (i = 0; i < dwc->cdesc->periods; i++)
565                         dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
566
567                 spin_unlock_irqrestore(&dwc->lock, flags);
568         }
569 }
570
571 /* ------------------------------------------------------------------------- */
572
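/*
 * Bottom half: for each channel dispatch cyclic handling, error handling
 * or descriptor scanning based on the raw status bits, then re-enable the
 * interrupts that the hard IRQ handler masked.
 */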
573 static void dw_dma_tasklet(unsigned long data)
574 {
575         struct dw_dma *dw = (struct dw_dma *)data;
576         struct dw_dma_chan *dwc;
577         u32 status_xfer;
578         u32 status_err;
579         int i;
580
581         status_xfer = dma_readl(dw, RAW.XFER);
582         status_err = dma_readl(dw, RAW.ERROR);
583
584         dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
585
586         for (i = 0; i < dw->dma.chancnt; i++) {
587                 dwc = &dw->chan[i];
588                 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
589                         dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
590                 else if (status_err & (1 << i))
591                         dwc_handle_error(dw, dwc);
592                 else if (status_xfer & (1 << i)) {
593                         unsigned long flags;
594
595                         spin_lock_irqsave(&dwc->lock, flags);
596                         if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
597                                 if (dwc->tx_node_active != dwc->tx_list) {
598                                         struct dw_desc *desc =
599                                                 to_dw_desc(dwc->tx_node_active);
600
601                                         dma_writel(dw, CLEAR.XFER, dwc->mask);
602
603                                         dwc_do_single_block(dwc, desc);
604
605                                         spin_unlock_irqrestore(&dwc->lock, flags);
606                                         continue;
607                                 }
608                                 /* we are done here */
609                                 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
610                         }
611                         spin_unlock_irqrestore(&dwc->lock, flags);
612
613                         dwc_scan_descriptors(dw, dwc);
614                 }
615         }
616
617         /*
618          * Re-enable interrupts.
619          */
620         channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
621         channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
622 }
623
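/*
 * Hard IRQ handler: mask the transfer and error interrupts and defer the
 * real work to the tasklet.
 */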
624 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
625 {
626         struct dw_dma *dw = dev_id;
627         u32 status;
628
629         dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
630                         dma_readl(dw, STATUS_INT));
631
632         /*
633          * Just disable the interrupts. We'll turn them back on in the
634          * softirq handler.
635          */
636         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
637         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
638
639         status = dma_readl(dw, STATUS_INT);
640         if (status) {
641                 dev_err(dw->dma.dev,
642                         "BUG: Unexpected interrupts pending: 0x%x\n",
643                         status);
644
645                 /* Try to recover */
646                 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
647                 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
648                 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
649                 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
650         }
651
652         tasklet_schedule(&dw->tasklet);
653
654         return IRQ_HANDLED;
655 }
656
657 /*----------------------------------------------------------------------*/
658
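/*
 * Assign a cookie and either start the descriptor right away (channel
 * idle) or park it on the software queue until the active list drains.
 */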
659 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
660 {
661         struct dw_desc          *desc = txd_to_dw_desc(tx);
662         struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
663         dma_cookie_t            cookie;
664         unsigned long           flags;
665
666         spin_lock_irqsave(&dwc->lock, flags);
667         cookie = dma_cookie_assign(tx);
668
669         /*
670          * REVISIT: We should attempt to chain as many descriptors as
671          * possible, perhaps even appending to those already submitted
672          * for DMA. But this is hard to do in a race-free manner.
673          */
674         if (list_empty(&dwc->active_list)) {
675                 dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
676                                 desc->txd.cookie);
677                 list_add_tail(&desc->desc_node, &dwc->active_list);
678                 dwc_dostart(dwc, dwc_first_active(dwc));
679         } else {
680                 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
681                                 desc->txd.cookie);
682
683                 list_add_tail(&desc->desc_node, &dwc->queue);
684         }
685
686         spin_unlock_irqrestore(&dwc->lock, flags);
687
688         return cookie;
689 }
690
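/*
 * Prepare a memory-to-memory copy: pick the widest transfer width allowed
 * by the masters' data widths and the src/dst/len alignment, then chain as
 * many block_size-limited descriptors as needed to cover the whole length.
 */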
691 static struct dma_async_tx_descriptor *
692 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
693                 size_t len, unsigned long flags)
694 {
695         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
696         struct dw_desc          *desc;
697         struct dw_desc          *first;
698         struct dw_desc          *prev;
699         size_t                  xfer_count;
700         size_t                  offset;
701         unsigned int            src_width;
702         unsigned int            dst_width;
703         unsigned int            data_width;
704         u32                     ctllo;
705
706         dev_vdbg(chan2dev(chan),
707                         "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
708                         (unsigned long long)dest, (unsigned long long)src,
709                         len, flags);
710
711         if (unlikely(!len)) {
712                 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
713                 return NULL;
714         }
715
716         dwc->direction = DMA_MEM_TO_MEM;
717
718         data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
719                            dwc_get_data_width(chan, DST_MASTER));
720
721         src_width = dst_width = min_t(unsigned int, data_width,
722                                       dwc_fast_fls(src | dest | len));
723
724         ctllo = DWC_DEFAULT_CTLLO(chan)
725                         | DWC_CTLL_DST_WIDTH(dst_width)
726                         | DWC_CTLL_SRC_WIDTH(src_width)
727                         | DWC_CTLL_DST_INC
728                         | DWC_CTLL_SRC_INC
729                         | DWC_CTLL_FC_M2M;
730         prev = first = NULL;
731
732         for (offset = 0; offset < len; offset += xfer_count << src_width) {
733                 xfer_count = min_t(size_t, (len - offset) >> src_width,
734                                            dwc->block_size);
735
736                 desc = dwc_desc_get(dwc);
737                 if (!desc)
738                         goto err_desc_get;
739
740                 desc->lli.sar = src + offset;
741                 desc->lli.dar = dest + offset;
742                 desc->lli.ctllo = ctllo;
743                 desc->lli.ctlhi = xfer_count;
744
745                 if (!first) {
746                         first = desc;
747                 } else {
748                         prev->lli.llp = desc->txd.phys;
749                         list_add_tail(&desc->desc_node,
750                                         &first->tx_list);
751                 }
752                 prev = desc;
753         }
754
755         if (flags & DMA_PREP_INTERRUPT)
756                 /* Trigger interrupt after last block */
757                 prev->lli.ctllo |= DWC_CTLL_INT_EN;
758
759         prev->lli.llp = 0;
760         first->txd.flags = flags;
761         first->len = len;
762
763         return &first->txd;
764
765 err_desc_get:
766         dwc_desc_put(dwc, first);
767         return NULL;
768 }
769
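/*
 * Prepare a slave scatter-gather transfer: the peripheral side uses the
 * register width and fixed address from the slave config, while each SG
 * entry is split into descriptors no larger than the channel's block_size.
 */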
770 static struct dma_async_tx_descriptor *
771 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
772                 unsigned int sg_len, enum dma_transfer_direction direction,
773                 unsigned long flags, void *context)
774 {
775         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
776         struct dma_slave_config *sconfig = &dwc->dma_sconfig;
777         struct dw_desc          *prev;
778         struct dw_desc          *first;
779         u32                     ctllo;
780         dma_addr_t              reg;
781         unsigned int            reg_width;
782         unsigned int            mem_width;
783         unsigned int            data_width;
784         unsigned int            i;
785         struct scatterlist      *sg;
786         size_t                  total_len = 0;
787
788         dev_vdbg(chan2dev(chan), "%s\n", __func__);
789
790         if (unlikely(!is_slave_direction(direction) || !sg_len))
791                 return NULL;
792
793         dwc->direction = direction;
794
795         prev = first = NULL;
796
797         switch (direction) {
798         case DMA_MEM_TO_DEV:
799                 reg_width = __fls(sconfig->dst_addr_width);
800                 reg = sconfig->dst_addr;
801                 ctllo = (DWC_DEFAULT_CTLLO(chan)
802                                 | DWC_CTLL_DST_WIDTH(reg_width)
803                                 | DWC_CTLL_DST_FIX
804                                 | DWC_CTLL_SRC_INC);
805
806                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
807                         DWC_CTLL_FC(DW_DMA_FC_D_M2P);
808
809                 data_width = dwc_get_data_width(chan, SRC_MASTER);
810
811                 for_each_sg(sgl, sg, sg_len, i) {
812                         struct dw_desc  *desc;
813                         u32             len, dlen, mem;
814
815                         mem = sg_dma_address(sg);
816                         len = sg_dma_len(sg);
817
818                         mem_width = min_t(unsigned int,
819                                           data_width, dwc_fast_fls(mem | len));
820
821 slave_sg_todev_fill_desc:
822                         desc = dwc_desc_get(dwc);
823                         if (!desc) {
824                                 dev_err(chan2dev(chan),
825                                         "not enough descriptors available\n");
826                                 goto err_desc_get;
827                         }
828
829                         desc->lli.sar = mem;
830                         desc->lli.dar = reg;
831                         desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
832                         if ((len >> mem_width) > dwc->block_size) {
833                                 dlen = dwc->block_size << mem_width;
834                                 mem += dlen;
835                                 len -= dlen;
836                         } else {
837                                 dlen = len;
838                                 len = 0;
839                         }
840
841                         desc->lli.ctlhi = dlen >> mem_width;
842
843                         if (!first) {
844                                 first = desc;
845                         } else {
846                                 prev->lli.llp = desc->txd.phys;
847                                 list_add_tail(&desc->desc_node,
848                                                 &first->tx_list);
849                         }
850                         prev = desc;
851                         total_len += dlen;
852
853                         if (len)
854                                 goto slave_sg_todev_fill_desc;
855                 }
856                 break;
857         case DMA_DEV_TO_MEM:
858                 reg_width = __fls(sconfig->src_addr_width);
859                 reg = sconfig->src_addr;
860                 ctllo = (DWC_DEFAULT_CTLLO(chan)
861                                 | DWC_CTLL_SRC_WIDTH(reg_width)
862                                 | DWC_CTLL_DST_INC
863                                 | DWC_CTLL_SRC_FIX);
864
865                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
866                         DWC_CTLL_FC(DW_DMA_FC_D_P2M);
867
868                 data_width = dwc_get_data_width(chan, DST_MASTER);
869
870                 for_each_sg(sgl, sg, sg_len, i) {
871                         struct dw_desc  *desc;
872                         u32             len, dlen, mem;
873
874                         mem = sg_dma_address(sg);
875                         len = sg_dma_len(sg);
876
877                         mem_width = min_t(unsigned int,
878                                           data_width, dwc_fast_fls(mem | len));
879
880 slave_sg_fromdev_fill_desc:
881                         desc = dwc_desc_get(dwc);
882                         if (!desc) {
883                                 dev_err(chan2dev(chan),
884                                                 "not enough descriptors available\n");
885                                 goto err_desc_get;
886                         }
887
888                         desc->lli.sar = reg;
889                         desc->lli.dar = mem;
890                         desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
891                         if ((len >> reg_width) > dwc->block_size) {
892                                 dlen = dwc->block_size << reg_width;
893                                 mem += dlen;
894                                 len -= dlen;
895                         } else {
896                                 dlen = len;
897                                 len = 0;
898                         }
899                         desc->lli.ctlhi = dlen >> reg_width;
900
901                         if (!first) {
902                                 first = desc;
903                         } else {
904                                 prev->lli.llp = desc->txd.phys;
905                                 list_add_tail(&desc->desc_node,
906                                                 &first->tx_list);
907                         }
908                         prev = desc;
909                         total_len += dlen;
910
911                         if (len)
912                                 goto slave_sg_fromdev_fill_desc;
913                 }
914                 break;
915         default:
916                 return NULL;
917         }
918
919         if (flags & DMA_PREP_INTERRUPT)
920                 /* Trigger interrupt after last block */
921                 prev->lli.ctllo |= DWC_CTLL_INT_EN;
922
923         prev->lli.llp = 0;
924         first->len = total_len;
925
926         return &first->txd;
927
928 err_desc_get:
929         dwc_desc_put(dwc, first);
930         return NULL;
931 }
932
933 /*
934  * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
935  * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
936  *
937  * NOTE: burst size 2 is not supported by the controller.
938  *
939  * This can be done by finding the most significant bit set: fls(n) - 2.
940  */
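/*
 * For example, a requested maxburst of 8 gives fls(8) - 2 = 2 and 16 gives
 * fls(16) - 2 = 3, while a maxburst of 1 (or 0) maps to register value 0.
 */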
941 static inline void convert_burst(u32 *maxburst)
942 {
943         if (*maxburst > 1)
944                 *maxburst = fls(*maxburst) - 2;
945         else
946                 *maxburst = 0;
947 }
948
949 static int
950 set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
951 {
952         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
953
954         /* Check if chan will be configured for slave transfers */
955         if (!is_slave_direction(sconfig->direction))
956                 return -EINVAL;
957
958         memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
959         dwc->direction = sconfig->direction;
960
961         convert_burst(&dwc->dma_sconfig.src_maxburst);
962         convert_burst(&dwc->dma_sconfig.dst_maxburst);
963
964         return 0;
965 }
966
967 static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
968 {
969         u32 cfglo = channel_readl(dwc, CFG_LO);
970
971         channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
972         while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
973                 cpu_relax();
974
975         dwc->paused = true;
976 }
977
978 static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
979 {
980         u32 cfglo = channel_readl(dwc, CFG_LO);
981
982         channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
983
984         dwc->paused = false;
985 }
986
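/*
 * dmaengine control entry point: DMA_PAUSE/DMA_RESUME toggle channel
 * suspend, DMA_TERMINATE_ALL disables the channel and flushes both lists
 * without running callbacks, and DMA_SLAVE_CONFIG is handled by
 * set_runtime_config().
 */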
987 static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
988                        unsigned long arg)
989 {
990         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
991         struct dw_dma           *dw = to_dw_dma(chan->device);
992         struct dw_desc          *desc, *_desc;
993         unsigned long           flags;
994         LIST_HEAD(list);
995
996         if (cmd == DMA_PAUSE) {
997                 spin_lock_irqsave(&dwc->lock, flags);
998
999                 dwc_chan_pause(dwc);
1000
1001                 spin_unlock_irqrestore(&dwc->lock, flags);
1002         } else if (cmd == DMA_RESUME) {
1003                 if (!dwc->paused)
1004                         return 0;
1005
1006                 spin_lock_irqsave(&dwc->lock, flags);
1007
1008                 dwc_chan_resume(dwc);
1009
1010                 spin_unlock_irqrestore(&dwc->lock, flags);
1011         } else if (cmd == DMA_TERMINATE_ALL) {
1012                 spin_lock_irqsave(&dwc->lock, flags);
1013
1014                 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
1015
1016                 dwc_chan_disable(dw, dwc);
1017
1018                 dwc_chan_resume(dwc);
1019
1020                 /* active_list entries will end up before queued entries */
1021                 list_splice_init(&dwc->queue, &list);
1022                 list_splice_init(&dwc->active_list, &list);
1023
1024                 spin_unlock_irqrestore(&dwc->lock, flags);
1025
1026                 /* Flush all pending and queued descriptors */
1027                 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1028                         dwc_descriptor_complete(dwc, desc, false);
1029         } else if (cmd == DMA_SLAVE_CONFIG) {
1030                 return set_runtime_config(chan, (struct dma_slave_config *)arg);
1031         } else {
1032                 return -ENXIO;
1033         }
1034
1035         return 0;
1036 }
1037
1038 static enum dma_status
1039 dwc_tx_status(struct dma_chan *chan,
1040               dma_cookie_t cookie,
1041               struct dma_tx_state *txstate)
1042 {
1043         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1044         enum dma_status         ret;
1045
1046         ret = dma_cookie_status(chan, cookie, txstate);
1047         if (ret != DMA_SUCCESS) {
1048                 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1049
1050                 ret = dma_cookie_status(chan, cookie, txstate);
1051         }
1052
1053         if (ret != DMA_SUCCESS)
1054                 dma_set_residue(txstate, dwc_first_active(dwc)->len);
1055
1056         if (dwc->paused)
1057                 return DMA_PAUSED;
1058
1059         return ret;
1060 }
1061
1062 static void dwc_issue_pending(struct dma_chan *chan)
1063 {
1064         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1065
1066         if (!list_empty(&dwc->queue))
1067                 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1068 }
1069
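/*
 * Allocate the per-channel descriptors. Per this change they are taken
 * from the controller's dma_pool in DMA-coherent memory, so the hardware
 * can follow the LLP chain through desc->txd.phys without any extra cache
 * maintenance.
 */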
1070 static int dwc_alloc_chan_resources(struct dma_chan *chan)
1071 {
1072         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1073         struct dw_dma           *dw = to_dw_dma(chan->device);
1074         struct dw_desc          *desc;
1075         int                     i;
1076         unsigned long           flags;
1077
1078         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1079
1080         /* ASSERT:  channel is idle */
1081         if (dma_readl(dw, CH_EN) & dwc->mask) {
1082                 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1083                 return -EIO;
1084         }
1085
1086         dma_cookie_init(chan);
1087
1088         /*
1089          * NOTE: some controllers may have additional features that we
1090          * need to initialize here, like "scatter-gather" (which
1091          * doesn't mean what you think it means), and status writeback.
1092          */
1093
1094         spin_lock_irqsave(&dwc->lock, flags);
1095         i = dwc->descs_allocated;
1096         while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
1097                 dma_addr_t phys;
1098
1099                 spin_unlock_irqrestore(&dwc->lock, flags);
1100
1101                 desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
1102                 if (!desc)
1103                         goto err_desc_alloc;
1104
1105                 memset(desc, 0, sizeof(struct dw_desc));
1106
1107                 INIT_LIST_HEAD(&desc->tx_list);
1108                 dma_async_tx_descriptor_init(&desc->txd, chan);
1109                 desc->txd.tx_submit = dwc_tx_submit;
1110                 desc->txd.flags = DMA_CTRL_ACK;
1111                 desc->txd.phys = phys;
1112
1113                 dwc_desc_put(dwc, desc);
1114
1115                 spin_lock_irqsave(&dwc->lock, flags);
1116                 i = ++dwc->descs_allocated;
1117         }
1118
1119         spin_unlock_irqrestore(&dwc->lock, flags);
1120
1121         dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1122
1123         return i;
1124
1125 err_desc_alloc:
1126         dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
1127
1128         return i;
1129 }
1130
1131 static void dwc_free_chan_resources(struct dma_chan *chan)
1132 {
1133         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1134         struct dw_dma           *dw = to_dw_dma(chan->device);
1135         struct dw_desc          *desc, *_desc;
1136         unsigned long           flags;
1137         LIST_HEAD(list);
1138
1139         dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1140                         dwc->descs_allocated);
1141
1142         /* ASSERT:  channel is idle */
1143         BUG_ON(!list_empty(&dwc->active_list));
1144         BUG_ON(!list_empty(&dwc->queue));
1145         BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1146
1147         spin_lock_irqsave(&dwc->lock, flags);
1148         list_splice_init(&dwc->free_list, &list);
1149         dwc->descs_allocated = 0;
1150         dwc->initialized = false;
1151
1152         /* Disable interrupts */
1153         channel_clear_bit(dw, MASK.XFER, dwc->mask);
1154         channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1155
1156         spin_unlock_irqrestore(&dwc->lock, flags);
1157
1158         list_for_each_entry_safe(desc, _desc, &list, desc_node) {
1159                 dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1160                 dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
1161         }
1162
1163         dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1164 }
1165
1166 bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
1167 {
1168         struct dw_dma *dw = to_dw_dma(chan->device);
1169         static struct dw_dma *last_dw;
1170         static char *last_bus_id;
1171         int i = -1;
1172
1173         /*
1174          * The dmaengine framework calls this routine for every channel of
1175          * every DMA controller until true is returned. If the 'param' bus_id
1176          * is not registered with this controller (dw), there is no point in
1177          * running the check below for the remaining channels of dw.
1178          *
1179          * Do this by caching the parameters of the last failure: if dw and
1180          * param are the same, i.e. we are retrying the same controller on a
1181          * different channel, return false right away.
1182          */
1183         if ((last_dw == dw) && (last_bus_id == param))
1184                 return false;
1185         /*
1186          * Return true:
1187          * - if dw_dma's platform data carries no slave info, then any DMA
1188          *   controller is fine for the transfer;
1189          * - or if param is NULL.
1190          */
1191         if (!dw->sd || !param)
1192                 return true;
1193
1194         while (++i < dw->sd_count) {
1195                 if (!strcmp(dw->sd[i].bus_id, param)) {
1196                         chan->private = &dw->sd[i];
1197                         last_dw = NULL;
1198                         last_bus_id = NULL;
1199
1200                         return true;
1201                 }
1202         }
1203
1204         last_dw = dw;
1205         last_bus_id = param;
1206         return false;
1207 }
1208 EXPORT_SYMBOL(dw_dma_generic_filter);
1209
1210 /* --------------------- Cyclic DMA API extensions -------------------- */
1211
1212 /**
1213  * dw_dma_cyclic_start - start the cyclic DMA transfer
1214  * @chan: the DMA channel to start
1215  *
1216  * Must be called with soft interrupts disabled. Returns zero on success or
1217  * -errno on failure.
1218  */
1219 int dw_dma_cyclic_start(struct dma_chan *chan)
1220 {
1221         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1222         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1223         unsigned long           flags;
1224
1225         if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1226                 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
1227                 return -ENODEV;
1228         }
1229
1230         spin_lock_irqsave(&dwc->lock, flags);
1231
1232         /* assert channel is idle */
1233         if (dma_readl(dw, CH_EN) & dwc->mask) {
1234                 dev_err(chan2dev(&dwc->chan),
1235                         "BUG: Attempted to start non-idle channel\n");
1236                 dwc_dump_chan_regs(dwc);
1237                 spin_unlock_irqrestore(&dwc->lock, flags);
1238                 return -EBUSY;
1239         }
1240
1241         dma_writel(dw, CLEAR.ERROR, dwc->mask);
1242         dma_writel(dw, CLEAR.XFER, dwc->mask);
1243
1244         /* setup DMAC channel registers */
1245         channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
1246         channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
1247         channel_writel(dwc, CTL_HI, 0);
1248
1249         channel_set_bit(dw, CH_EN, dwc->mask);
1250
1251         spin_unlock_irqrestore(&dwc->lock, flags);
1252
1253         return 0;
1254 }
1255 EXPORT_SYMBOL(dw_dma_cyclic_start);
1256
1257 /**
1258  * dw_dma_cyclic_stop - stop the cyclic DMA transfer
1259  * @chan: the DMA channel to stop
1260  *
1261  * Must be called with soft interrupts disabled.
1262  */
1263 void dw_dma_cyclic_stop(struct dma_chan *chan)
1264 {
1265         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1266         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1267         unsigned long           flags;
1268
1269         spin_lock_irqsave(&dwc->lock, flags);
1270
1271         dwc_chan_disable(dw, dwc);
1272
1273         spin_unlock_irqrestore(&dwc->lock, flags);
1274 }
1275 EXPORT_SYMBOL(dw_dma_cyclic_stop);
1276
1277 /**
1278  * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
1279  * @chan: the DMA channel to prepare
1280  * @buf_addr: physical DMA address where the buffer starts
1281  * @buf_len: total number of bytes for the entire buffer
1282  * @period_len: number of bytes for each period
1283  * @direction: transfer direction, to or from device
1284  *
1285  * Must be called before trying to start the transfer. Returns a valid struct
1286  * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
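 *
 * A typical client first configures the channel (dmaengine_slave_config()
 * or the DMA_SLAVE_CONFIG control), then calls dw_dma_cyclic_prep()
 * followed by dw_dma_cyclic_start(); dw_dma_cyclic_stop() and
 * dw_dma_cyclic_free() tear the transfer down again.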
1287  */
1288 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1289                 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
1290                 enum dma_transfer_direction direction)
1291 {
1292         struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
1293         struct dma_slave_config         *sconfig = &dwc->dma_sconfig;
1294         struct dw_cyclic_desc           *cdesc;
1295         struct dw_cyclic_desc           *retval = NULL;
1296         struct dw_desc                  *desc;
1297         struct dw_desc                  *last = NULL;
1298         unsigned long                   was_cyclic;
1299         unsigned int                    reg_width;
1300         unsigned int                    periods;
1301         unsigned int                    i;
1302         unsigned long                   flags;
1303
1304         spin_lock_irqsave(&dwc->lock, flags);
1305         if (dwc->nollp) {
1306                 spin_unlock_irqrestore(&dwc->lock, flags);
1307                 dev_dbg(chan2dev(&dwc->chan),
1308                                 "channel doesn't support LLP transfers\n");
1309                 return ERR_PTR(-EINVAL);
1310         }
1311
1312         if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
1313                 spin_unlock_irqrestore(&dwc->lock, flags);
1314                 dev_dbg(chan2dev(&dwc->chan),
1315                                 "queue and/or active list are not empty\n");
1316                 return ERR_PTR(-EBUSY);
1317         }
1318
1319         was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1320         spin_unlock_irqrestore(&dwc->lock, flags);
1321         if (was_cyclic) {
1322                 dev_dbg(chan2dev(&dwc->chan),
1323                                 "channel already prepared for cyclic DMA\n");
1324                 return ERR_PTR(-EBUSY);
1325         }
1326
1327         retval = ERR_PTR(-EINVAL);
1328
1329         if (unlikely(!is_slave_direction(direction)))
1330                 goto out_err;
1331
1332         dwc->direction = direction;
1333
1334         if (direction == DMA_MEM_TO_DEV)
1335                 reg_width = __ffs(sconfig->dst_addr_width);
1336         else
1337                 reg_width = __ffs(sconfig->src_addr_width);
1338
1339         periods = buf_len / period_len;
1340
1341         /* Check for too big/unaligned periods and unaligned DMA buffer. */
1342         if (period_len > (dwc->block_size << reg_width))
1343                 goto out_err;
1344         if (unlikely(period_len & ((1 << reg_width) - 1)))
1345                 goto out_err;
1346         if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1347                 goto out_err;
1348
1349         retval = ERR_PTR(-ENOMEM);
1350
1351         if (periods > NR_DESCS_PER_CHANNEL)
1352                 goto out_err;
1353
1354         cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1355         if (!cdesc)
1356                 goto out_err;
1357
1358         cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1359         if (!cdesc->desc)
1360                 goto out_err_alloc;
1361
1362         for (i = 0; i < periods; i++) {
1363                 desc = dwc_desc_get(dwc);
1364                 if (!desc)
1365                         goto out_err_desc_get;
1366
1367                 switch (direction) {
1368                 case DMA_MEM_TO_DEV:
1369                         desc->lli.dar = sconfig->dst_addr;
1370                         desc->lli.sar = buf_addr + (period_len * i);
1371                         desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1372                                         | DWC_CTLL_DST_WIDTH(reg_width)
1373                                         | DWC_CTLL_SRC_WIDTH(reg_width)
1374                                         | DWC_CTLL_DST_FIX
1375                                         | DWC_CTLL_SRC_INC
1376                                         | DWC_CTLL_INT_EN);
1377
1378                         desc->lli.ctllo |= sconfig->device_fc ?
1379                                 DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1380                                 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
1381
1382                         break;
1383                 case DMA_DEV_TO_MEM:
1384                         desc->lli.dar = buf_addr + (period_len * i);
1385                         desc->lli.sar = sconfig->src_addr;
1386                         desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1387                                         | DWC_CTLL_SRC_WIDTH(reg_width)
1388                                         | DWC_CTLL_DST_WIDTH(reg_width)
1389                                         | DWC_CTLL_DST_INC
1390                                         | DWC_CTLL_SRC_FIX
1391                                         | DWC_CTLL_INT_EN);
1392
1393                         desc->lli.ctllo |= sconfig->device_fc ?
1394                                 DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1395                                 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
1396
1397                         break;
1398                 default:
1399                         break;
1400                 }
1401
1402                 desc->lli.ctlhi = (period_len >> reg_width);
1403                 cdesc->desc[i] = desc;
1404
1405                 if (last)
1406                         last->lli.llp = desc->txd.phys;
1407
1408                 last = desc;
1409         }
1410
1411         /* let's make a cyclic list */
1412         last->lli.llp = cdesc->desc[0]->txd.phys;
1413
1414         dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
1415                         "period %zu periods %d\n", (unsigned long long)buf_addr,
1416                         buf_len, period_len, periods);
1417
1418         cdesc->periods = periods;
1419         dwc->cdesc = cdesc;
1420
1421         return cdesc;
1422
1423 out_err_desc_get:
1424         while (i--)
1425                 dwc_desc_put(dwc, cdesc->desc[i]);
1426 out_err_alloc:
1427         kfree(cdesc);
1428 out_err:
1429         clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1430         return (struct dw_cyclic_desc *)retval;
1431 }
1432 EXPORT_SYMBOL(dw_dma_cyclic_prep);
1433
1434 /**
1435  * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
1436  * @chan: the DMA channel to free
1437  */
1438 void dw_dma_cyclic_free(struct dma_chan *chan)
1439 {
1440         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1441         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1442         struct dw_cyclic_desc   *cdesc = dwc->cdesc;
1443         int                     i;
1444         unsigned long           flags;
1445
1446         dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
1447
1448         if (!cdesc)
1449                 return;
1450
1451         spin_lock_irqsave(&dwc->lock, flags);
1452
1453         dwc_chan_disable(dw, dwc);
1454
1455         dma_writel(dw, CLEAR.ERROR, dwc->mask);
1456         dma_writel(dw, CLEAR.XFER, dwc->mask);
1457
1458         spin_unlock_irqrestore(&dwc->lock, flags);
1459
1460         for (i = 0; i < cdesc->periods; i++)
1461                 dwc_desc_put(dwc, cdesc->desc[i]);
1462
1463         kfree(cdesc->desc);
1464         kfree(cdesc);
1465
        dwc->cdesc = NULL;

1466         clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1467 }
1468 EXPORT_SYMBOL(dw_dma_cyclic_free);
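
/*
 * Illustrative sketch, not part of the driver: how a client that owns a
 * private channel might drive the cyclic API above.  The channel, buffer and
 * length names (my_chan, buf_dma, BUF_LEN, PERIOD_LEN) are hypothetical.
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(my_chan, buf_dma, BUF_LEN, PERIOD_LEN,
 *				   DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *
 *	dw_dma_cyclic_start(my_chan);
 *	...
 *	dw_dma_cyclic_stop(my_chan);
 *	dw_dma_cyclic_free(my_chan);
 */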
1469
1470 /*----------------------------------------------------------------------*/
1471
1472 static void dw_dma_off(struct dw_dma *dw)
1473 {
1474         int i;
1475
1476         dma_writel(dw, CFG, 0);
1477
1478         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1479         channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1480         channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1481         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1482
1483         while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1484                 cpu_relax();
1485
1486         for (i = 0; i < dw->dma.chancnt; i++)
1487                 dw->chan[i].initialized = false;
1488 }
1489
1490 #ifdef CONFIG_OF
1491 static struct dw_dma_platform_data *
1492 dw_dma_parse_dt(struct platform_device *pdev)
1493 {
1494         struct device_node *sn, *cn, *np = pdev->dev.of_node;
1495         struct dw_dma_platform_data *pdata;
1496         struct dw_dma_slave *sd;
1497         u32 tmp, arr[4];
1498
1499         if (!np) {
1500                 dev_err(&pdev->dev, "Missing DT data\n");
1501                 return NULL;
1502         }
1503
1504         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1505         if (!pdata)
1506                 return NULL;
1507
1508         if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels))
1509                 return NULL;
1510
1511         if (of_property_read_bool(np, "is_private"))
1512                 pdata->is_private = true;
1513
1514         if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
1515                 pdata->chan_allocation_order = (unsigned char)tmp;
1516
1517         if (!of_property_read_u32(np, "chan_priority", &tmp))
1518                 pdata->chan_priority = tmp;
1519
1520         if (!of_property_read_u32(np, "block_size", &tmp))
1521                 pdata->block_size = tmp;
1522
1523         if (!of_property_read_u32(np, "nr_masters", &tmp)) {
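                /* the driver handles at most four AHB masters */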
1524                 if (tmp > 4)
1525                         return NULL;
1526
1527                 pdata->nr_masters = tmp;
1528         }
1529
1530         if (!of_property_read_u32_array(np, "data_width", arr,
1531                                 pdata->nr_masters))
1532                 for (tmp = 0; tmp < pdata->nr_masters; tmp++)
1533                         pdata->data_width[tmp] = arr[tmp];
1534
1535         /* parse slave data */
1536         sn = of_find_node_by_name(np, "slave_info");
1537         if (!sn)
1538                 return pdata;
1539
1540         /* calculate number of slaves */
1541         tmp = of_get_child_count(sn);
1542         if (!tmp)
1543                 return NULL;
1544
1545         sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL);
1546         if (!sd)
1547                 return NULL;
1548
1549         pdata->sd = sd;
1550         pdata->sd_count = tmp;
1551
1552         for_each_child_of_node(sn, cn) {
1553                 sd->dma_dev = &pdev->dev;
1554                 of_property_read_string(cn, "bus_id", &sd->bus_id);
1555                 of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi);
1556                 of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo);
1557                 if (!of_property_read_u32(cn, "src_master", &tmp))
1558                         sd->src_master = tmp;
1559
1560                 if (!of_property_read_u32(cn, "dst_master", &tmp))
1561                         sd->dst_master = tmp;
1562                 sd++;
1563         }
1564
1565         return pdata;
1566 }
1567 #else
1568 static inline struct dw_dma_platform_data *
1569 dw_dma_parse_dt(struct platform_device *pdev)
1570 {
1571         return NULL;
1572 }
1573 #endif
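
/*
 * Illustrative sketch, not part of the driver: platform data a board file
 * could register so that the dev_get_platdata() lookup in dw_probe() succeeds
 * on a non-DT platform.  The values are hypothetical; the fields mirror the
 * DT properties handled by dw_dma_parse_dt() above.
 *
 *	static struct dw_dma_slave board_dma_slave[] = {
 *		{
 *			.bus_id		= "uart0_tx",
 *			.cfg_hi		= 0,
 *			.cfg_lo		= 0,
 *			.src_master	= 0,
 *			.dst_master	= 1,
 *		},
 *	};
 *
 *	static struct dw_dma_platform_data board_dma_pdata = {
 *		.nr_channels		= 8,
 *		.is_private		= true,
 *		.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
 *		.chan_priority		= CHAN_PRIORITY_ASCENDING,
 *		.block_size		= 4095,
 *		.nr_masters		= 2,
 *		.data_width		= { 3, 3 },
 *		.sd			= board_dma_slave,
 *		.sd_count		= ARRAY_SIZE(board_dma_slave),
 *	};
 */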
1574
1575 static int dw_probe(struct platform_device *pdev)
1576 {
1577         struct dw_dma_platform_data *pdata;
1578         struct resource         *io;
1579         struct dw_dma           *dw;
1580         size_t                  size;
1581         void __iomem            *regs;
1582         bool                    autocfg;
1583         unsigned int            dw_params;
1584         unsigned int            nr_channels;
1585         unsigned int            max_blk_size = 0;
1586         int                     irq;
1587         int                     err;
1588         int                     i;
1589
1590         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1591         if (!io)
1592                 return -EINVAL;
1593
1594         irq = platform_get_irq(pdev, 0);
1595         if (irq < 0)
1596                 return irq;
1597
1598         regs = devm_request_and_ioremap(&pdev->dev, io);
1599         if (!regs)
1600                 return -EBUSY;
1601
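        /* Optional hardware parameter registers describe how the controller
         * was configured at synthesis time; when present they let the driver
         * auto-detect channels, masters, data widths and block sizes. */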
1602         dw_params = dma_read_byaddr(regs, DW_PARAMS);
1603         autocfg = dw_params >> DW_PARAMS_EN & 0x1;
1604
1605         pdata = dev_get_platdata(&pdev->dev);
1606         if (!pdata)
1607                 pdata = dw_dma_parse_dt(pdev);
1608
1609         if (!pdata && autocfg) {
1610                 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1611                 if (!pdata)
1612                         return -ENOMEM;
1613
1614                 /* Fill platform data with the default values */
1615                 pdata->is_private = true;
1616                 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1617                 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
1618         } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
1619                 return -EINVAL;
1620
1621         if (autocfg)
1622                 nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
1623         else
1624                 nr_channels = pdata->nr_channels;
1625
1626         size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
1627         dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
1628         if (!dw)
1629                 return -ENOMEM;
1630
1631         dw->clk = devm_clk_get(&pdev->dev, "hclk");
1632         if (IS_ERR(dw->clk))
1633                 return PTR_ERR(dw->clk);
1634         clk_prepare_enable(dw->clk);
1635
1636         dw->regs = regs;
1637         dw->sd = pdata->sd;
1638         dw->sd_count = pdata->sd_count;
1639
1640         /* get hardware configuration parameters */
1641         if (autocfg) {
1642                 max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
1643
1644                 dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1645                 for (i = 0; i < dw->nr_masters; i++) {
1646                         dw->data_width[i] =
1647                                 (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
1648                 }
1649         } else {
1650                 dw->nr_masters = pdata->nr_masters;
1651                 memcpy(dw->data_width, pdata->data_width, sizeof(pdata->data_width));
1652         }
1653
1654         /* Calculate all channel mask before DMA setup */
1655         dw->all_chan_mask = (1 << nr_channels) - 1;
1656
1657         /* force dma off, just in case */
1658         dw_dma_off(dw);
1659
1660         /* disable BLOCK interrupts as well */
1661         channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1662
1663         err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
1664                                "dw_dmac", dw);
1665         if (err)
1666                 return err;
1667
1668         platform_set_drvdata(pdev, dw);
1669
1670         /* create a pool of consistent memory blocks for hardware descriptors */
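        /* The DMA controller reads the LLIs straight from memory, so the
         * descriptors must live in coherent memory, not kmalloc()ed memory. */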
1671         dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
1672                                          sizeof(struct dw_desc), 4, 0);
1673         if (!dw->desc_pool) {
1674                 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1675                 return -ENOMEM;
1676         }
1677
1678         tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1679
1680         INIT_LIST_HEAD(&dw->dma.channels);
1681         for (i = 0; i < nr_channels; i++) {
1682                 struct dw_dma_chan      *dwc = &dw->chan[i];
1683                 int                     r = nr_channels - i - 1;
1684
1685                 dwc->chan.device = &dw->dma;
1686                 dma_cookie_init(&dwc->chan);
1687                 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1688                         list_add_tail(&dwc->chan.device_node,
1689                                         &dw->dma.channels);
1690                 else
1691                         list_add(&dwc->chan.device_node, &dw->dma.channels);
1692
1693                 /* 7 is highest priority & 0 is lowest. */
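                /* e.g. with 8 channels, CHAN_PRIORITY_ASCENDING gives channel
                 * 0 priority 7 (highest) and channel 7 priority 0 */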
1694                 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1695                         dwc->priority = r;
1696                 else
1697                         dwc->priority = i;
1698
1699                 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1700                 spin_lock_init(&dwc->lock);
1701                 dwc->mask = 1 << i;
1702
1703                 INIT_LIST_HEAD(&dwc->active_list);
1704                 INIT_LIST_HEAD(&dwc->queue);
1705                 INIT_LIST_HEAD(&dwc->free_list);
1706
1707                 channel_clear_bit(dw, CH_EN, dwc->mask);
1708
1709                 dwc->direction = DMA_TRANS_NONE;
1710
1711                 /* hardware configuration */
1712                 if (autocfg) {
1713                         unsigned int dwc_params;
1714
1715                         dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
1716                                                      DWC_PARAMS);
1717
1718                         /* Decode maximum block size for given channel. The
1719                          * stored 4 bit value represents blocks from 0x00 for 3
1720                          * up to 0x0a for 4095. */
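                        /* e.g. 0x0 -> (4 << 0) - 1 = 3,
                         *      0xa -> (4 << 10) - 1 = 4095 */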
1721                         dwc->block_size =
1722                                 (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
1723                         dwc->nollp =
1724                                 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1725                 } else {
1726                         dwc->block_size = pdata->block_size;
1727
1728                         /* Check if channel supports multi block transfer */
1729                         channel_writel(dwc, LLP, 0xfffffffc);
1730                         dwc->nollp =
1731                                 (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
1732                         channel_writel(dwc, LLP, 0);
1733                 }
1734         }
1735
1736         /* Clear all interrupts on all channels. */
1737         dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1738         dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1739         dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1740         dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1741         dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1742
1743         dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1744         dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1745         if (pdata->is_private)
1746                 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1747         dw->dma.dev = &pdev->dev;
1748         dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1749         dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1750
1751         dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1752
1753         dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1754         dw->dma.device_control = dwc_control;
1755
1756         dw->dma.device_tx_status = dwc_tx_status;
1757         dw->dma.device_issue_pending = dwc_issue_pending;
1758
1759         dma_writel(dw, CFG, DW_CFG_DMA_EN);
1760
1761         dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
1762                  nr_channels);
1763
1764         dma_async_device_register(&dw->dma);
1765
1766         return 0;
1767 }
1768
1769 static int dw_remove(struct platform_device *pdev)
1770 {
1771         struct dw_dma           *dw = platform_get_drvdata(pdev);
1772         struct dw_dma_chan      *dwc, *_dwc;
1773
1774         dw_dma_off(dw);
1775         dma_async_device_unregister(&dw->dma);
1776
1777         tasklet_kill(&dw->tasklet);
1778
1779         list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1780                         chan.device_node) {
1781                 list_del(&dwc->chan.device_node);
1782                 channel_clear_bit(dw, CH_EN, dwc->mask);
1783         }
1784
1785         return 0;
1786 }
1787
1788 static void dw_shutdown(struct platform_device *pdev)
1789 {
1790         struct dw_dma   *dw = platform_get_drvdata(pdev);
1791
1792         dw_dma_off(dw);
1793         clk_disable_unprepare(dw->clk);
1794 }
1795
1796 static int dw_suspend_noirq(struct device *dev)
1797 {
1798         struct platform_device *pdev = to_platform_device(dev);
1799         struct dw_dma   *dw = platform_get_drvdata(pdev);
1800
1801         dw_dma_off(dw);
1802         clk_disable_unprepare(dw->clk);
1803
1804         return 0;
1805 }
1806
1807 static int dw_resume_noirq(struct device *dev)
1808 {
1809         struct platform_device *pdev = to_platform_device(dev);
1810         struct dw_dma   *dw = platform_get_drvdata(pdev);
1811
1812         clk_prepare_enable(dw->clk);
1813         dma_writel(dw, CFG, DW_CFG_DMA_EN);
1814
1815         return 0;
1816 }
1817
1818 static const struct dev_pm_ops dw_dev_pm_ops = {
1819         .suspend_noirq = dw_suspend_noirq,
1820         .resume_noirq = dw_resume_noirq,
1821         .freeze_noirq = dw_suspend_noirq,
1822         .thaw_noirq = dw_resume_noirq,
1823         .restore_noirq = dw_resume_noirq,
1824         .poweroff_noirq = dw_suspend_noirq,
1825 };
1826
1827 #ifdef CONFIG_OF
1828 static const struct of_device_id dw_dma_id_table[] = {
1829         { .compatible = "snps,dma-spear1340" },
1830         {}
1831 };
1832 MODULE_DEVICE_TABLE(of, dw_dma_id_table);
1833 #endif
1834
1835 static struct platform_driver dw_driver = {
1836         .probe          = dw_probe,
1837         .remove         = dw_remove,
1838         .shutdown       = dw_shutdown,
1839         .driver = {
1840                 .name   = "dw_dmac",
1841                 .pm     = &dw_dev_pm_ops,
1842                 .of_match_table = of_match_ptr(dw_dma_id_table),
1843         },
1844 };
1845
1846 static int __init dw_init(void)
1847 {
1848         return platform_driver_register(&dw_driver);
1849 }
1850 subsys_initcall(dw_init);
1851
1852 static void __exit dw_exit(void)
1853 {
1854         platform_driver_unregister(&dw_driver);
1855 }
1856 module_exit(dw_exit);
1857
1858 MODULE_LICENSE("GPL v2");
1859 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1860 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1861 MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");