1 /*
2  * Core driver for the Synopsys DesignWare DMA Controller
3  *
4  * Copyright (C) 2007-2008 Atmel Corporation
5  * Copyright (C) 2010-2011 ST Microelectronics
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11
12 #include <linux/bitops.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/dmaengine.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dmapool.h>
18 #include <linux/init.h>
19 #include <linux/interrupt.h>
20 #include <linux/io.h>
21 #include <linux/of.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/slab.h>
26
27 #include "dw_dmac_regs.h"
28 #include "dmaengine.h"
29
30 /*
31  * This supports the Synopsys "DesignWare AHB Central DMA Controller",
32  * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
33  * of which use ARM any more).  See the "Databook" from Synopsys for
34  * information beyond what licensees probably provide.
35  *
36  * The driver has currently been tested only with the Atmel AT32AP7000,
37  * which does not support descriptor writeback.
38  */
39
40 static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
41 {
42         return slave ? slave->dst_master : 0;
43 }
44
45 static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
46 {
47         return slave ? slave->src_master : 1;
48 }
49
50 #define SRC_MASTER      0
51 #define DST_MASTER      1
52
53 static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
54 {
55         struct dw_dma *dw = to_dw_dma(chan->device);
56         struct dw_dma_slave *dws = chan->private;
57         unsigned int m;
58
59         if (master == SRC_MASTER)
60                 m = dwc_get_sms(dws);
61         else
62                 m = dwc_get_dms(dws);
63
64         return min_t(unsigned int, dw->nr_masters - 1, m);
65 }
66
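/*
 * Default CTL_LO value for a channel: burst sizes come from the slave
 * config for peripheral transfers (DW_DMA_MSIZE_16 for memory-to-memory),
 * LLP updates are enabled on both ends, and the source/destination AHB
 * masters come from the slave data, clamped to the controller's nr_masters.
 */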
67 #define DWC_DEFAULT_CTLLO(_chan) ({                             \
68                 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);       \
69                 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
70                 bool _is_slave = is_slave_direction(_dwc->direction);   \
71                 int _dms = dwc_get_master(_chan, DST_MASTER);           \
72                 int _sms = dwc_get_master(_chan, SRC_MASTER);           \
73                 u8 _smsize = _is_slave ? _sconfig->src_maxburst :       \
74                         DW_DMA_MSIZE_16;                        \
75                 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :       \
76                         DW_DMA_MSIZE_16;                        \
77                                                                 \
78                 (DWC_CTLL_DST_MSIZE(_dmsize)                    \
79                  | DWC_CTLL_SRC_MSIZE(_smsize)                  \
80                  | DWC_CTLL_LLP_D_EN                            \
81                  | DWC_CTLL_LLP_S_EN                            \
82                  | DWC_CTLL_DMS(_dms)                           \
83                  | DWC_CTLL_SMS(_sms));                         \
84         })
85
86 /*
87  * Number of descriptors to allocate for each channel. This should be
88  * made configurable somehow; preferably, the clients (at least the
89  * ones using slave transfers) should be able to give us a hint.
90  */
91 #define NR_DESCS_PER_CHANNEL    64
92
93 static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
94 {
95         struct dw_dma *dw = to_dw_dma(chan->device);
96
97         return dw->data_width[dwc_get_master(chan, master)];
98 }
99
100 /*----------------------------------------------------------------------*/
101
102 static struct device *chan2dev(struct dma_chan *chan)
103 {
104         return &chan->dev->device;
105 }
106 static struct device *chan2parent(struct dma_chan *chan)
107 {
108         return chan->dev->device.parent;
109 }
110
111 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
112 {
113         return to_dw_desc(dwc->active_list.next);
114 }
115
116 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
117 {
118         struct dw_desc *desc, *_desc;
119         struct dw_desc *ret = NULL;
120         unsigned int i = 0;
121         unsigned long flags;
122
123         spin_lock_irqsave(&dwc->lock, flags);
124         list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
125                 i++;
126                 if (async_tx_test_ack(&desc->txd)) {
127                         list_del(&desc->desc_node);
128                         ret = desc;
129                         break;
130                 }
131                 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
132         }
133         spin_unlock_irqrestore(&dwc->lock, flags);
134
135         dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
136
137         return ret;
138 }
139
140 /*
141  * Move a descriptor, including any children, to the free list.
142  * `desc' must not be on any lists.
143  */
144 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
145 {
146         unsigned long flags;
147
148         if (desc) {
149                 struct dw_desc *child;
150
151                 spin_lock_irqsave(&dwc->lock, flags);
152                 list_for_each_entry(child, &desc->tx_list, desc_node)
153                         dev_vdbg(chan2dev(&dwc->chan),
154                                         "moving child desc %p to freelist\n",
155                                         child);
156                 list_splice_init(&desc->tx_list, &dwc->free_list);
157                 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
158                 list_add(&desc->desc_node, &dwc->free_list);
159                 spin_unlock_irqrestore(&dwc->lock, flags);
160         }
161 }
162
163 static void dwc_initialize(struct dw_dma_chan *dwc)
164 {
165         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
166         struct dw_dma_slave *dws = dwc->chan.private;
167         u32 cfghi = DWC_CFGH_FIFO_MODE;
168         u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
169
170         if (dwc->initialized == true)
171                 return;
172
173         if (dws) {
174                 /*
175                  * We need controller-specific data to set up slave
176                  * transfers.
177                  */
178                 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
179
180                 cfghi = dws->cfg_hi;
181                 cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
182         } else {
183                 if (dwc->direction == DMA_MEM_TO_DEV)
184                         cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
185                 else if (dwc->direction == DMA_DEV_TO_MEM)
186                         cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
187         }
188
189         channel_writel(dwc, CFG_LO, cfglo);
190         channel_writel(dwc, CFG_HI, cfghi);
191
192         /* Enable interrupts */
193         channel_set_bit(dw, MASK.XFER, dwc->mask);
194         channel_set_bit(dw, MASK.ERROR, dwc->mask);
195
196         dwc->initialized = true;
197 }
198
199 /*----------------------------------------------------------------------*/
200
201 static inline unsigned int dwc_fast_fls(unsigned long long v)
202 {
203         /*
204          * Return the number of trailing zero bits in v, capped at 3: the
205          * widest transfer-width exponent for which v is still aligned.
206          */
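        /*
         * Example: src = 0x1000, dest = 0x2004, len = 0x40 gives
         * v = 0x3044 and a result of 2, so 32-bit accesses are the widest
         * that keep all three values aligned.
         */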
207         if (!(v & 7))
208                 return 3;
209         else if (!(v & 3))
210                 return 2;
211         else if (!(v & 1))
212                 return 1;
213         return 0;
214 }
215
216 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
217 {
218         dev_err(chan2dev(&dwc->chan),
219                 "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
220                 channel_readl(dwc, SAR),
221                 channel_readl(dwc, DAR),
222                 channel_readl(dwc, LLP),
223                 channel_readl(dwc, CTL_HI),
224                 channel_readl(dwc, CTL_LO));
225 }
226
227 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
228 {
229         channel_clear_bit(dw, CH_EN, dwc->mask);
230         while (dma_readl(dw, CH_EN) & dwc->mask)
231                 cpu_relax();
232 }
233
234 /*----------------------------------------------------------------------*/
235
236 /* Perform single block transfer */
237 static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
238                                        struct dw_desc *desc)
239 {
240         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
241         u32             ctllo;
242
243         /* Software emulation of LLP mode relies on interrupts to continue
244          * a multi-block transfer. */
245         ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
246
247         channel_writel(dwc, SAR, desc->lli.sar);
248         channel_writel(dwc, DAR, desc->lli.dar);
249         channel_writel(dwc, CTL_LO, ctllo);
250         channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
251         channel_set_bit(dw, CH_EN, dwc->mask);
252
253         /* Move pointer to next descriptor */
254         dwc->tx_node_active = dwc->tx_node_active->next;
255 }
256
257 /* Called with dwc->lock held and bh disabled */
258 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
259 {
260         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
261         unsigned long   was_soft_llp;
262
263         /* ASSERT:  channel is idle */
264         if (dma_readl(dw, CH_EN) & dwc->mask) {
265                 dev_err(chan2dev(&dwc->chan),
266                         "BUG: Attempted to start non-idle channel\n");
267                 dwc_dump_chan_regs(dwc);
268
269                 /* The tasklet will hopefully advance the queue... */
270                 return;
271         }
272
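        /*
         * Channels without hardware LLP support fall back to "soft LLP":
         * program one block at a time and let the transfer-complete
         * interrupt path (dwc_scan_descriptors) kick off the next block.
         */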
273         if (dwc->nollp) {
274                 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
275                                                 &dwc->flags);
276                 if (was_soft_llp) {
277                         dev_err(chan2dev(&dwc->chan),
278                                 "BUG: Attempted to start new LLP transfer "
279                                 "inside ongoing one\n");
280                         return;
281                 }
282
283                 dwc_initialize(dwc);
284
285                 dwc->tx_list = &first->tx_list;
286                 dwc->tx_node_active = &first->tx_list;
287
288                 dwc_do_single_block(dwc, first);
289
290                 return;
291         }
292
293         dwc_initialize(dwc);
294
295         channel_writel(dwc, LLP, first->txd.phys);
296         channel_writel(dwc, CTL_LO,
297                         DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
298         channel_writel(dwc, CTL_HI, 0);
299         channel_set_bit(dw, CH_EN, dwc->mask);
300 }
301
302 /*----------------------------------------------------------------------*/
303
304 static void
305 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
306                 bool callback_required)
307 {
308         dma_async_tx_callback           callback = NULL;
309         void                            *param = NULL;
310         struct dma_async_tx_descriptor  *txd = &desc->txd;
311         struct dw_desc                  *child;
312         unsigned long                   flags;
313
314         dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
315
316         spin_lock_irqsave(&dwc->lock, flags);
317         dma_cookie_complete(txd);
318         if (callback_required) {
319                 callback = txd->callback;
320                 param = txd->callback_param;
321         }
322
323         /* Mark the descriptor and its children ACKed so they can be reused */
324         list_for_each_entry(child, &desc->tx_list, desc_node)
325                 async_tx_ack(&child->txd);
326         async_tx_ack(&desc->txd);
327
328         list_splice_init(&desc->tx_list, &dwc->free_list);
329         list_move(&desc->desc_node, &dwc->free_list);
330
331         if (!is_slave_direction(dwc->direction)) {
332                 struct device *parent = chan2parent(&dwc->chan);
333                 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
334                         if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
335                                 dma_unmap_single(parent, desc->lli.dar,
336                                                 desc->len, DMA_FROM_DEVICE);
337                         else
338                                 dma_unmap_page(parent, desc->lli.dar,
339                                                 desc->len, DMA_FROM_DEVICE);
340                 }
341                 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
342                         if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
343                                 dma_unmap_single(parent, desc->lli.sar,
344                                                 desc->len, DMA_TO_DEVICE);
345                         else
346                                 dma_unmap_page(parent, desc->lli.sar,
347                                                 desc->len, DMA_TO_DEVICE);
348                 }
349         }
350
351         spin_unlock_irqrestore(&dwc->lock, flags);
352
353         if (callback)
354                 callback(param);
355 }
356
357 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
358 {
359         struct dw_desc *desc, *_desc;
360         LIST_HEAD(list);
361         unsigned long flags;
362
363         spin_lock_irqsave(&dwc->lock, flags);
364         if (dma_readl(dw, CH_EN) & dwc->mask) {
365                 dev_err(chan2dev(&dwc->chan),
366                         "BUG: XFER bit set, but channel not idle!\n");
367
368                 /* Try to continue after resetting the channel... */
369                 dwc_chan_disable(dw, dwc);
370         }
371
372         /*
373          * Submit queued descriptors ASAP, i.e. before we go through
374          * the completed ones.
375          */
376         list_splice_init(&dwc->active_list, &list);
377         if (!list_empty(&dwc->queue)) {
378                 list_move(dwc->queue.next, &dwc->active_list);
379                 dwc_dostart(dwc, dwc_first_active(dwc));
380         }
381
382         spin_unlock_irqrestore(&dwc->lock, flags);
383
384         list_for_each_entry_safe(desc, _desc, &list, desc_node)
385                 dwc_descriptor_complete(dwc, desc, true);
386 }
387
388 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
389 {
390         dma_addr_t llp;
391         struct dw_desc *desc, *_desc;
392         struct dw_desc *child;
393         u32 status_xfer;
394         unsigned long flags;
395
396         spin_lock_irqsave(&dwc->lock, flags);
397         llp = channel_readl(dwc, LLP);
398         status_xfer = dma_readl(dw, RAW.XFER);
399
400         if (status_xfer & dwc->mask) {
401                 /* Everything we've submitted is done */
402                 dma_writel(dw, CLEAR.XFER, dwc->mask);
403
404                 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
405                         if (dwc->tx_node_active != dwc->tx_list) {
406                                 desc = to_dw_desc(dwc->tx_node_active);
407
408                                 /* Submit next block */
409                                 dwc_do_single_block(dwc, desc);
410                                 spin_unlock_irqrestore(&dwc->lock, flags);
411
412                                 return;
413                         }
414                         /* We are done here */
415                         clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
416                 }
417                 spin_unlock_irqrestore(&dwc->lock, flags);
418
419                 dwc_complete_all(dw, dwc);
420                 return;
421         }
422
423         if (list_empty(&dwc->active_list)) {
424                 spin_unlock_irqrestore(&dwc->lock, flags);
425                 return;
426         }
427
428         if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
429                 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
430                 spin_unlock_irqrestore(&dwc->lock, flags);
431                 return;
432         }
433
434         dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
435                         (unsigned long long)llp);
436
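        /*
         * Walk the active list: the LLP register holds the address of the
         * lli the hardware will fetch next, so every descriptor passed
         * before a match below must already be complete.
         */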
437         list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
438                 /* Check this descriptor's own address */
439                 if (desc->txd.phys == llp) {
440                         spin_unlock_irqrestore(&dwc->lock, flags);
441                         return;
442                 }
443
444                 /* Check this descriptor's llp pointer */
445                 if (desc->lli.llp == llp) {
446                         /* This one is currently in progress */
447                         spin_unlock_irqrestore(&dwc->lock, flags);
448                         return;
449                 }
450
451                 list_for_each_entry(child, &desc->tx_list, desc_node)
452                         if (child->lli.llp == llp) {
453                                 /* Currently in progress */
454                                 spin_unlock_irqrestore(&dwc->lock, flags);
455                                 return;
456                         }
457
458                 /*
459                  * No descriptors so far seem to be in progress, i.e.
460                  * this one must be done.
461                  */
462                 spin_unlock_irqrestore(&dwc->lock, flags);
463                 dwc_descriptor_complete(dwc, desc, true);
464                 spin_lock_irqsave(&dwc->lock, flags);
465         }
466
467         dev_err(chan2dev(&dwc->chan),
468                 "BUG: All descriptors done, but channel not idle!\n");
469
470         /* Try to continue after resetting the channel... */
471         dwc_chan_disable(dw, dwc);
472
473         if (!list_empty(&dwc->queue)) {
474                 list_move(dwc->queue.next, &dwc->active_list);
475                 dwc_dostart(dwc, dwc_first_active(dwc));
476         }
477         spin_unlock_irqrestore(&dwc->lock, flags);
478 }
479
480 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
481 {
482         dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
483                  lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
484 }
485
486 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
487 {
488         struct dw_desc *bad_desc;
489         struct dw_desc *child;
490         unsigned long flags;
491
492         dwc_scan_descriptors(dw, dwc);
493
494         spin_lock_irqsave(&dwc->lock, flags);
495
496         /*
497          * The descriptor currently at the head of the active list is
498          * borked. Since we don't have any way to report errors, we'll
499          * just have to scream loudly and try to carry on.
500          */
501         bad_desc = dwc_first_active(dwc);
502         list_del_init(&bad_desc->desc_node);
503         list_move(dwc->queue.next, dwc->active_list.prev);
504
505         /* Clear the error flag and try to restart the controller */
506         dma_writel(dw, CLEAR.ERROR, dwc->mask);
507         if (!list_empty(&dwc->active_list))
508                 dwc_dostart(dwc, dwc_first_active(dwc));
509
510         /*
511          * WARN may seem harsh, but since this only happens
512          * when someone submits a bad physical address in a
513          * descriptor, we should consider ourselves lucky that the
514          * controller flagged an error instead of scribbling over
515          * random memory locations.
516          */
517         dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
518                                        "  cookie: %d\n", bad_desc->txd.cookie);
519         dwc_dump_lli(dwc, &bad_desc->lli);
520         list_for_each_entry(child, &bad_desc->tx_list, desc_node)
521                 dwc_dump_lli(dwc, &child->lli);
522
523         spin_unlock_irqrestore(&dwc->lock, flags);
524
525         /* Pretend the descriptor completed successfully */
526         dwc_descriptor_complete(dwc, bad_desc, true);
527 }
528
529 /* --------------------- Cyclic DMA API extensions -------------------- */
530
531 inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
532 {
533         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
534         return channel_readl(dwc, SAR);
535 }
536 EXPORT_SYMBOL(dw_dma_get_src_addr);
537
538 inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
539 {
540         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
541         return channel_readl(dwc, DAR);
542 }
543 EXPORT_SYMBOL(dw_dma_get_dst_addr);
544
545 /* called with dwc->lock held and all DMAC interrupts disabled */
546 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
547                 u32 status_err, u32 status_xfer)
548 {
549         unsigned long flags;
550
551         if (dwc->mask) {
552                 void (*callback)(void *param);
553                 void *callback_param;
554
555                 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
556                                 channel_readl(dwc, LLP));
557
558                 callback = dwc->cdesc->period_callback;
559                 callback_param = dwc->cdesc->period_callback_param;
560
561                 if (callback)
562                         callback(callback_param);
563         }
564
565         /*
566          * Error and transfer-complete interrupts are highly unlikely here
567          * and will most likely be due to a configuration error by the user.
568          */
569         if (unlikely(status_err & dwc->mask) ||
570                         unlikely(status_xfer & dwc->mask)) {
571                 int i;
572
573                 dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
574                                 "interrupt, stopping DMA transfer\n",
575                                 status_xfer ? "xfer" : "error");
576
577                 spin_lock_irqsave(&dwc->lock, flags);
578
579                 dwc_dump_chan_regs(dwc);
580
581                 dwc_chan_disable(dw, dwc);
582
583                 /* make sure DMA does not restart by loading a new list */
584                 channel_writel(dwc, LLP, 0);
585                 channel_writel(dwc, CTL_LO, 0);
586                 channel_writel(dwc, CTL_HI, 0);
587
588                 dma_writel(dw, CLEAR.ERROR, dwc->mask);
589                 dma_writel(dw, CLEAR.XFER, dwc->mask);
590
591                 for (i = 0; i < dwc->cdesc->periods; i++)
592                         dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
593
594                 spin_unlock_irqrestore(&dwc->lock, flags);
595         }
596 }
597
598 /* ------------------------------------------------------------------------- */
599
600 static void dw_dma_tasklet(unsigned long data)
601 {
602         struct dw_dma *dw = (struct dw_dma *)data;
603         struct dw_dma_chan *dwc;
604         u32 status_xfer;
605         u32 status_err;
606         int i;
607
608         status_xfer = dma_readl(dw, RAW.XFER);
609         status_err = dma_readl(dw, RAW.ERROR);
610
611         dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
612
613         for (i = 0; i < dw->dma.chancnt; i++) {
614                 dwc = &dw->chan[i];
615                 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
616                         dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
617                 else if (status_err & (1 << i))
618                         dwc_handle_error(dw, dwc);
619                 else if (status_xfer & (1 << i))
620                         dwc_scan_descriptors(dw, dwc);
621         }
622
623         /*
624          * Re-enable interrupts.
625          */
626         channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
627         channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
628 }
629
630 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
631 {
632         struct dw_dma *dw = dev_id;
633         u32 status;
634
635         dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
636                         dma_readl(dw, STATUS_INT));
637
638         /*
639          * Just disable the interrupts. We'll turn them back on in the
640          * softirq handler.
641          */
642         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
643         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
644
645         status = dma_readl(dw, STATUS_INT);
646         if (status) {
647                 dev_err(dw->dma.dev,
648                         "BUG: Unexpected interrupts pending: 0x%x\n",
649                         status);
650
651                 /* Try to recover */
652                 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
653                 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
654                 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
655                 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
656         }
657
658         tasklet_schedule(&dw->tasklet);
659
660         return IRQ_HANDLED;
661 }
662
663 /*----------------------------------------------------------------------*/
664
665 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
666 {
667         struct dw_desc          *desc = txd_to_dw_desc(tx);
668         struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
669         dma_cookie_t            cookie;
670         unsigned long           flags;
671
672         spin_lock_irqsave(&dwc->lock, flags);
673         cookie = dma_cookie_assign(tx);
674
675         /*
676          * REVISIT: We should attempt to chain as many descriptors as
677          * possible, perhaps even appending to those already submitted
678          * for DMA. But this is hard to do in a race-free manner.
679          */
680         if (list_empty(&dwc->active_list)) {
681                 dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
682                                 desc->txd.cookie);
683                 list_add_tail(&desc->desc_node, &dwc->active_list);
684                 dwc_dostart(dwc, dwc_first_active(dwc));
685         } else {
686                 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
687                                 desc->txd.cookie);
688
689                 list_add_tail(&desc->desc_node, &dwc->queue);
690         }
691
692         spin_unlock_irqrestore(&dwc->lock, flags);
693
694         return cookie;
695 }
696
697 static struct dma_async_tx_descriptor *
698 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
699                 size_t len, unsigned long flags)
700 {
701         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
702         struct dw_desc          *desc;
703         struct dw_desc          *first;
704         struct dw_desc          *prev;
705         size_t                  xfer_count;
706         size_t                  offset;
707         unsigned int            src_width;
708         unsigned int            dst_width;
709         unsigned int            data_width;
710         u32                     ctllo;
711
712         dev_vdbg(chan2dev(chan),
713                         "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
714                         (unsigned long long)dest, (unsigned long long)src,
715                         len, flags);
716
717         if (unlikely(!len)) {
718                 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
719                 return NULL;
720         }
721
722         dwc->direction = DMA_MEM_TO_MEM;
723
724         data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
725                            dwc_get_data_width(chan, DST_MASTER));
726
727         src_width = dst_width = min_t(unsigned int, data_width,
728                                       dwc_fast_fls(src | dest | len));
729
730         ctllo = DWC_DEFAULT_CTLLO(chan)
731                         | DWC_CTLL_DST_WIDTH(dst_width)
732                         | DWC_CTLL_SRC_WIDTH(src_width)
733                         | DWC_CTLL_DST_INC
734                         | DWC_CTLL_SRC_INC
735                         | DWC_CTLL_FC_M2M;
736         prev = first = NULL;
737
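        /*
         * Chop the transfer into hardware blocks: each lli covers at most
         * dwc->block_size items of (1 << src_width) bytes so the block
         * transfer size programmed into CTL_HI never overflows.
         */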
738         for (offset = 0; offset < len; offset += xfer_count << src_width) {
739                 xfer_count = min_t(size_t, (len - offset) >> src_width,
740                                            dwc->block_size);
741
742                 desc = dwc_desc_get(dwc);
743                 if (!desc)
744                         goto err_desc_get;
745
746                 desc->lli.sar = src + offset;
747                 desc->lli.dar = dest + offset;
748                 desc->lli.ctllo = ctllo;
749                 desc->lli.ctlhi = xfer_count;
750
751                 if (!first) {
752                         first = desc;
753                 } else {
754                         prev->lli.llp = desc->txd.phys;
755                         list_add_tail(&desc->desc_node,
756                                         &first->tx_list);
757                 }
758                 prev = desc;
759         }
760
761         if (flags & DMA_PREP_INTERRUPT)
762                 /* Trigger interrupt after last block */
763                 prev->lli.ctllo |= DWC_CTLL_INT_EN;
764
765         prev->lli.llp = 0;
766         first->txd.flags = flags;
767         first->len = len;
768
769         return &first->txd;
770
771 err_desc_get:
772         dwc_desc_put(dwc, first);
773         return NULL;
774 }
775
776 static struct dma_async_tx_descriptor *
777 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
778                 unsigned int sg_len, enum dma_transfer_direction direction,
779                 unsigned long flags, void *context)
780 {
781         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
782         struct dma_slave_config *sconfig = &dwc->dma_sconfig;
783         struct dw_desc          *prev;
784         struct dw_desc          *first;
785         u32                     ctllo;
786         dma_addr_t              reg;
787         unsigned int            reg_width;
788         unsigned int            mem_width;
789         unsigned int            data_width;
790         unsigned int            i;
791         struct scatterlist      *sg;
792         size_t                  total_len = 0;
793
794         dev_vdbg(chan2dev(chan), "%s\n", __func__);
795
796         if (unlikely(!is_slave_direction(direction) || !sg_len))
797                 return NULL;
798
799         dwc->direction = direction;
800
801         prev = first = NULL;
802
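        /*
         * Each scatterlist entry below may be split across several llis
         * when its length exceeds dwc->block_size items of the transfer
         * width in use.
         */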
803         switch (direction) {
804         case DMA_MEM_TO_DEV:
805                 reg_width = __fls(sconfig->dst_addr_width);
806                 reg = sconfig->dst_addr;
807                 ctllo = (DWC_DEFAULT_CTLLO(chan)
808                                 | DWC_CTLL_DST_WIDTH(reg_width)
809                                 | DWC_CTLL_DST_FIX
810                                 | DWC_CTLL_SRC_INC);
811
812                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
813                         DWC_CTLL_FC(DW_DMA_FC_D_M2P);
814
815                 data_width = dwc_get_data_width(chan, SRC_MASTER);
816
817                 for_each_sg(sgl, sg, sg_len, i) {
818                         struct dw_desc  *desc;
819                         u32             len, dlen, mem;
820
821                         mem = sg_dma_address(sg);
822                         len = sg_dma_len(sg);
823
824                         mem_width = min_t(unsigned int,
825                                           data_width, dwc_fast_fls(mem | len));
826
827 slave_sg_todev_fill_desc:
828                         desc = dwc_desc_get(dwc);
829                         if (!desc) {
830                                 dev_err(chan2dev(chan),
831                                         "not enough descriptors available\n");
832                                 goto err_desc_get;
833                         }
834
835                         desc->lli.sar = mem;
836                         desc->lli.dar = reg;
837                         desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
838                         if ((len >> mem_width) > dwc->block_size) {
839                                 dlen = dwc->block_size << mem_width;
840                                 mem += dlen;
841                                 len -= dlen;
842                         } else {
843                                 dlen = len;
844                                 len = 0;
845                         }
846
847                         desc->lli.ctlhi = dlen >> mem_width;
848
849                         if (!first) {
850                                 first = desc;
851                         } else {
852                                 prev->lli.llp = desc->txd.phys;
853                                 list_add_tail(&desc->desc_node,
854                                                 &first->tx_list);
855                         }
856                         prev = desc;
857                         total_len += dlen;
858
859                         if (len)
860                                 goto slave_sg_todev_fill_desc;
861                 }
862                 break;
863         case DMA_DEV_TO_MEM:
864                 reg_width = __fls(sconfig->src_addr_width);
865                 reg = sconfig->src_addr;
866                 ctllo = (DWC_DEFAULT_CTLLO(chan)
867                                 | DWC_CTLL_SRC_WIDTH(reg_width)
868                                 | DWC_CTLL_DST_INC
869                                 | DWC_CTLL_SRC_FIX);
870
871                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
872                         DWC_CTLL_FC(DW_DMA_FC_D_P2M);
873
874                 data_width = dwc_get_data_width(chan, DST_MASTER);
875
876                 for_each_sg(sgl, sg, sg_len, i) {
877                         struct dw_desc  *desc;
878                         u32             len, dlen, mem;
879
880                         mem = sg_dma_address(sg);
881                         len = sg_dma_len(sg);
882
883                         mem_width = min_t(unsigned int,
884                                           data_width, dwc_fast_fls(mem | len));
885
886 slave_sg_fromdev_fill_desc:
887                         desc = dwc_desc_get(dwc);
888                         if (!desc) {
889                                 dev_err(chan2dev(chan),
890                                                 "not enough descriptors available\n");
891                                 goto err_desc_get;
892                         }
893
894                         desc->lli.sar = reg;
895                         desc->lli.dar = mem;
896                         desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
897                         if ((len >> reg_width) > dwc->block_size) {
898                                 dlen = dwc->block_size << reg_width;
899                                 mem += dlen;
900                                 len -= dlen;
901                         } else {
902                                 dlen = len;
903                                 len = 0;
904                         }
905                         desc->lli.ctlhi = dlen >> reg_width;
906
907                         if (!first) {
908                                 first = desc;
909                         } else {
910                                 prev->lli.llp = desc->txd.phys;
911                                 list_add_tail(&desc->desc_node,
912                                                 &first->tx_list);
913                         }
914                         prev = desc;
915                         total_len += dlen;
916
917                         if (len)
918                                 goto slave_sg_fromdev_fill_desc;
919                 }
920                 break;
921         default:
922                 return NULL;
923         }
924
925         if (flags & DMA_PREP_INTERRUPT)
926                 /* Trigger interrupt after last block */
927                 prev->lli.ctllo |= DWC_CTLL_INT_EN;
928
929         prev->lli.llp = 0;
930         first->len = total_len;
931
932         return &first->txd;
933
934 err_desc_get:
935         dwc_desc_put(dwc, first);
936         return NULL;
937 }
938
939 /*
940  * Fix sconfig's burst size according to dw_dmac. We need to convert it as:
941  * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
942  *
943  * NOTE: burst size 2 is not supported by the controller.
944  *
945  * For n > 1 this is done by taking the most significant set bit: fls(n) - 2.
946  */
947 static inline void convert_burst(u32 *maxburst)
948 {
949         if (*maxburst > 1)
950                 *maxburst = fls(*maxburst) - 2;
951         else
952                 *maxburst = 0;
953 }
954
955 static int
956 set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
957 {
958         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
959
960         /* Check if chan will be configured for slave transfers */
961         if (!is_slave_direction(sconfig->direction))
962                 return -EINVAL;
963
964         memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
965         dwc->direction = sconfig->direction;
966
967         convert_burst(&dwc->dma_sconfig.src_maxburst);
968         convert_burst(&dwc->dma_sconfig.dst_maxburst);
969
970         return 0;
971 }
972
973 static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
974 {
975         u32 cfglo = channel_readl(dwc, CFG_LO);
976
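        /* Suspend the channel and wait for its FIFO to drain */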
977         channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
978         while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
979                 cpu_relax();
980
981         dwc->paused = true;
982 }
983
984 static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
985 {
986         u32 cfglo = channel_readl(dwc, CFG_LO);
987
988         channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
989
990         dwc->paused = false;
991 }
992
993 static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
994                        unsigned long arg)
995 {
996         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
997         struct dw_dma           *dw = to_dw_dma(chan->device);
998         struct dw_desc          *desc, *_desc;
999         unsigned long           flags;
1000         LIST_HEAD(list);
1001
1002         if (cmd == DMA_PAUSE) {
1003                 spin_lock_irqsave(&dwc->lock, flags);
1004
1005                 dwc_chan_pause(dwc);
1006
1007                 spin_unlock_irqrestore(&dwc->lock, flags);
1008         } else if (cmd == DMA_RESUME) {
1009                 if (!dwc->paused)
1010                         return 0;
1011
1012                 spin_lock_irqsave(&dwc->lock, flags);
1013
1014                 dwc_chan_resume(dwc);
1015
1016                 spin_unlock_irqrestore(&dwc->lock, flags);
1017         } else if (cmd == DMA_TERMINATE_ALL) {
1018                 spin_lock_irqsave(&dwc->lock, flags);
1019
1020                 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
1021
1022                 dwc_chan_disable(dw, dwc);
1023
1024                 dwc_chan_resume(dwc);
1025
1026                 /* active_list entries will end up before queued entries */
1027                 list_splice_init(&dwc->queue, &list);
1028                 list_splice_init(&dwc->active_list, &list);
1029
1030                 spin_unlock_irqrestore(&dwc->lock, flags);
1031
1032                 /* Flush all pending and queued descriptors */
1033                 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1034                         dwc_descriptor_complete(dwc, desc, false);
1035         } else if (cmd == DMA_SLAVE_CONFIG) {
1036                 return set_runtime_config(chan, (struct dma_slave_config *)arg);
1037         } else {
1038                 return -ENXIO;
1039         }
1040
1041         return 0;
1042 }
1043
1044 static enum dma_status
1045 dwc_tx_status(struct dma_chan *chan,
1046               dma_cookie_t cookie,
1047               struct dma_tx_state *txstate)
1048 {
1049         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1050         enum dma_status         ret;
1051
1052         ret = dma_cookie_status(chan, cookie, txstate);
1053         if (ret != DMA_SUCCESS) {
1054                 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1055
1056                 ret = dma_cookie_status(chan, cookie, txstate);
1057         }
1058
1059         if (ret != DMA_SUCCESS)
1060                 dma_set_residue(txstate, dwc_first_active(dwc)->len);
1061
1062         if (dwc->paused)
1063                 return DMA_PAUSED;
1064
1065         return ret;
1066 }
1067
1068 static void dwc_issue_pending(struct dma_chan *chan)
1069 {
1070         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1071
1072         if (!list_empty(&dwc->queue))
1073                 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1074 }
1075
1076 static int dwc_alloc_chan_resources(struct dma_chan *chan)
1077 {
1078         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1079         struct dw_dma           *dw = to_dw_dma(chan->device);
1080         struct dw_desc          *desc;
1081         int                     i;
1082         unsigned long           flags;
1083
1084         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1085
1086         /* ASSERT:  channel is idle */
1087         if (dma_readl(dw, CH_EN) & dwc->mask) {
1088                 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1089                 return -EIO;
1090         }
1091
1092         dma_cookie_init(chan);
1093
1094         /*
1095          * NOTE: some controllers may have additional features that we
1096          * need to initialize here, like "scatter-gather" (which
1097          * doesn't mean what you think it means), and status writeback.
1098          */
1099
1100         spin_lock_irqsave(&dwc->lock, flags);
1101         i = dwc->descs_allocated;
1102         while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
1103                 dma_addr_t phys;
1104
1105                 spin_unlock_irqrestore(&dwc->lock, flags);
1106
1107                 desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
1108                 if (!desc)
1109                         goto err_desc_alloc;
1110
1111                 memset(desc, 0, sizeof(struct dw_desc));
1112
1113                 INIT_LIST_HEAD(&desc->tx_list);
1114                 dma_async_tx_descriptor_init(&desc->txd, chan);
1115                 desc->txd.tx_submit = dwc_tx_submit;
1116                 desc->txd.flags = DMA_CTRL_ACK;
1117                 desc->txd.phys = phys;
1118
1119                 dwc_desc_put(dwc, desc);
1120
1121                 spin_lock_irqsave(&dwc->lock, flags);
1122                 i = ++dwc->descs_allocated;
1123         }
1124
1125         spin_unlock_irqrestore(&dwc->lock, flags);
1126
1127         dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1128
1129         return i;
1130
1131 err_desc_alloc:
1132         dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
1133
1134         return i;
1135 }
1136
1137 static void dwc_free_chan_resources(struct dma_chan *chan)
1138 {
1139         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1140         struct dw_dma           *dw = to_dw_dma(chan->device);
1141         struct dw_desc          *desc, *_desc;
1142         unsigned long           flags;
1143         LIST_HEAD(list);
1144
1145         dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1146                         dwc->descs_allocated);
1147
1148         /* ASSERT:  channel is idle */
1149         BUG_ON(!list_empty(&dwc->active_list));
1150         BUG_ON(!list_empty(&dwc->queue));
1151         BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1152
1153         spin_lock_irqsave(&dwc->lock, flags);
1154         list_splice_init(&dwc->free_list, &list);
1155         dwc->descs_allocated = 0;
1156         dwc->initialized = false;
1157
1158         /* Disable interrupts */
1159         channel_clear_bit(dw, MASK.XFER, dwc->mask);
1160         channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1161
1162         spin_unlock_irqrestore(&dwc->lock, flags);
1163
1164         list_for_each_entry_safe(desc, _desc, &list, desc_node) {
1165                 dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1166                 dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
1167         }
1168
1169         dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1170 }
1171
1172 bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
1173 {
1174         struct dw_dma *dw = to_dw_dma(chan->device);
1175         static struct dw_dma *last_dw;
1176         static char *last_bus_id;
1177         int i = -1;
1178
1179         /*
1180          * The dmaengine framework calls this routine for every channel of
1181          * every DMA controller until true is returned. If the 'param' bus_id
1182          * is not registered with a given controller (dw), there is no point
1183          * in re-running the lookup below for the remaining channels of dw.
1184          *
1185          * Short-circuit this by remembering the parameters of the last
1186          * failure: if dw and param match the previous miss (i.e. we are just
1187          * trying another channel of the same dw), return false right away.
1188          */
1189         if ((last_dw == dw) && (last_bus_id == param))
1190                 return false;
1191         /*
1192          * Return true:
1193          * - if dw_dma's platform data is not filled with slave info, in which
1194          *   case any DMA controller is fine for the transfer, or
1195          * - if param is NULL.
1196          */
1197         if (!dw->sd || !param)
1198                 return true;
1199
1200         while (++i < dw->sd_count) {
1201                 if (!strcmp(dw->sd[i].bus_id, param)) {
1202                         chan->private = &dw->sd[i];
1203                         last_dw = NULL;
1204                         last_bus_id = NULL;
1205
1206                         return true;
1207                 }
1208         }
1209
1210         last_dw = dw;
1211         last_bus_id = param;
1212         return false;
1213 }
1214 EXPORT_SYMBOL(dw_dma_generic_filter);
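
/*
 * Usage sketch (illustrative only): a client driver would typically hand
 * this filter to dma_request_channel() together with the bus_id string it
 * expects to find in the slave_info platform data or device tree node.
 * The "uart0_tx" id and the cfg variable below are made-up examples.
 *
 *	struct dma_slave_config cfg = { ... };
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_generic_filter, "uart0_tx");
 *	if (chan)
 *		dmaengine_slave_config(chan, &cfg);
 */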
1215
1216 /* --------------------- Cyclic DMA API extensions -------------------- */
1217
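/*
 * Typical call sequence for this extension API (a sketch; the callback
 * names are hypothetical and the channel is assumed to have been obtained
 * and configured through the usual dmaengine calls):
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (!IS_ERR(cdesc)) {
 *		cdesc->period_callback = my_period_done;
 *		cdesc->period_callback_param = my_data;
 *		dw_dma_cyclic_start(chan);
 *	}
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */
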
1218 /**
1219  * dw_dma_cyclic_start - start the cyclic DMA transfer
1220  * @chan: the DMA channel to start
1221  *
1222  * Must be called with soft interrupts disabled. Returns zero on success or
1223  * -errno on failure.
1224  */
1225 int dw_dma_cyclic_start(struct dma_chan *chan)
1226 {
1227         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1228         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1229         unsigned long           flags;
1230
1231         if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1232                 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
1233                 return -ENODEV;
1234         }
1235
1236         spin_lock_irqsave(&dwc->lock, flags);
1237
1238         /* assert channel is idle */
1239         if (dma_readl(dw, CH_EN) & dwc->mask) {
1240                 dev_err(chan2dev(&dwc->chan),
1241                         "BUG: Attempted to start non-idle channel\n");
1242                 dwc_dump_chan_regs(dwc);
1243                 spin_unlock_irqrestore(&dwc->lock, flags);
1244                 return -EBUSY;
1245         }
1246
1247         dma_writel(dw, CLEAR.ERROR, dwc->mask);
1248         dma_writel(dw, CLEAR.XFER, dwc->mask);
1249
1250         /* setup DMAC channel registers */
1251         channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
1252         channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
1253         channel_writel(dwc, CTL_HI, 0);
1254
1255         channel_set_bit(dw, CH_EN, dwc->mask);
1256
1257         spin_unlock_irqrestore(&dwc->lock, flags);
1258
1259         return 0;
1260 }
1261 EXPORT_SYMBOL(dw_dma_cyclic_start);
1262
1263 /**
1264  * dw_dma_cyclic_stop - stop the cyclic DMA transfer
1265  * @chan: the DMA channel to stop
1266  *
1267  * Must be called with soft interrupts disabled.
1268  */
1269 void dw_dma_cyclic_stop(struct dma_chan *chan)
1270 {
1271         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1272         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1273         unsigned long           flags;
1274
1275         spin_lock_irqsave(&dwc->lock, flags);
1276
1277         dwc_chan_disable(dw, dwc);
1278
1279         spin_unlock_irqrestore(&dwc->lock, flags);
1280 }
1281 EXPORT_SYMBOL(dw_dma_cyclic_stop);
1282
1283 /**
1284  * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
1285  * @chan: the DMA channel to prepare
1286  * @buf_addr: physical DMA address where the buffer starts
1287  * @buf_len: total number of bytes for the entire buffer
1288  * @period_len: number of bytes for each period
1289  * @direction: transfer direction, to or from device
1290  *
1291  * Must be called before trying to start the transfer. Returns a valid struct
1292  * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
1293  */
1294 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1295                 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
1296                 enum dma_transfer_direction direction)
1297 {
1298         struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
1299         struct dma_slave_config         *sconfig = &dwc->dma_sconfig;
1300         struct dw_cyclic_desc           *cdesc;
1301         struct dw_cyclic_desc           *retval = NULL;
1302         struct dw_desc                  *desc;
1303         struct dw_desc                  *last = NULL;
1304         unsigned long                   was_cyclic;
1305         unsigned int                    reg_width;
1306         unsigned int                    periods;
1307         unsigned int                    i;
1308         unsigned long                   flags;
1309
1310         spin_lock_irqsave(&dwc->lock, flags);
1311         if (dwc->nollp) {
1312                 spin_unlock_irqrestore(&dwc->lock, flags);
1313                 dev_dbg(chan2dev(&dwc->chan),
1314                                 "channel doesn't support LLP transfers\n");
1315                 return ERR_PTR(-EINVAL);
1316         }
1317
1318         if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
1319                 spin_unlock_irqrestore(&dwc->lock, flags);
1320                 dev_dbg(chan2dev(&dwc->chan),
1321                                 "queue and/or active list are not empty\n");
1322                 return ERR_PTR(-EBUSY);
1323         }
1324
1325         was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1326         spin_unlock_irqrestore(&dwc->lock, flags);
1327         if (was_cyclic) {
1328                 dev_dbg(chan2dev(&dwc->chan),
1329                                 "channel already prepared for cyclic DMA\n");
1330                 return ERR_PTR(-EBUSY);
1331         }
1332
1333         retval = ERR_PTR(-EINVAL);
1334
1335         if (unlikely(!is_slave_direction(direction)))
1336                 goto out_err;
1337
1338         dwc->direction = direction;
1339
1340         if (direction == DMA_MEM_TO_DEV)
1341                 reg_width = __ffs(sconfig->dst_addr_width);
1342         else
1343                 reg_width = __ffs(sconfig->src_addr_width);
1344
1345         periods = buf_len / period_len;
1346
1347         /* Check for too big/unaligned periods and unaligned DMA buffer. */
1348         if (period_len > (dwc->block_size << reg_width))
1349                 goto out_err;
1350         if (unlikely(period_len & ((1 << reg_width) - 1)))
1351                 goto out_err;
1352         if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1353                 goto out_err;
1354
1355         retval = ERR_PTR(-ENOMEM);
1356
1357         if (periods > NR_DESCS_PER_CHANNEL)
1358                 goto out_err;
1359
1360         cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1361         if (!cdesc)
1362                 goto out_err;
1363
1364         cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1365         if (!cdesc->desc)
1366                 goto out_err_alloc;
1367
1368         for (i = 0; i < periods; i++) {
1369                 desc = dwc_desc_get(dwc);
1370                 if (!desc)
1371                         goto out_err_desc_get;
1372
1373                 switch (direction) {
1374                 case DMA_MEM_TO_DEV:
1375                         desc->lli.dar = sconfig->dst_addr;
1376                         desc->lli.sar = buf_addr + (period_len * i);
1377                         desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1378                                         | DWC_CTLL_DST_WIDTH(reg_width)
1379                                         | DWC_CTLL_SRC_WIDTH(reg_width)
1380                                         | DWC_CTLL_DST_FIX
1381                                         | DWC_CTLL_SRC_INC
1382                                         | DWC_CTLL_INT_EN);
1383
1384                         desc->lli.ctllo |= sconfig->device_fc ?
1385                                 DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1386                                 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
1387
1388                         break;
1389                 case DMA_DEV_TO_MEM:
1390                         desc->lli.dar = buf_addr + (period_len * i);
1391                         desc->lli.sar = sconfig->src_addr;
1392                         desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1393                                         | DWC_CTLL_SRC_WIDTH(reg_width)
1394                                         | DWC_CTLL_DST_WIDTH(reg_width)
1395                                         | DWC_CTLL_DST_INC
1396                                         | DWC_CTLL_SRC_FIX
1397                                         | DWC_CTLL_INT_EN);
1398
1399                         desc->lli.ctllo |= sconfig->device_fc ?
1400                                 DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1401                                 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
1402
1403                         break;
1404                 default:
1405                         break;
1406                 }
1407
1408                 desc->lli.ctlhi = (period_len >> reg_width);
1409                 cdesc->desc[i] = desc;
1410
1411                 if (last)
1412                         last->lli.llp = desc->txd.phys;
1413
1414                 last = desc;
1415         }
1416
1417         /* Make the list cyclic: link the last descriptor back to the first */
1418         last->lli.llp = cdesc->desc[0]->txd.phys;
1419
1420         dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
1421                         "period %zu periods %d\n", (unsigned long long)buf_addr,
1422                         buf_len, period_len, periods);
1423
1424         cdesc->periods = periods;
1425         dwc->cdesc = cdesc;
1426
1427         return cdesc;
1428
1429 out_err_desc_get:
1430         while (i--)
1431                 dwc_desc_put(dwc, cdesc->desc[i]);
1432 out_err_alloc:
1433         kfree(cdesc);
1434 out_err:
1435         clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1436         return (struct dw_cyclic_desc *)retval;
1437 }
1438 EXPORT_SYMBOL(dw_dma_cyclic_prep);
1439
1440 /**
1441  * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
1442  * @chan: the DMA channel to free
1443  */
1444 void dw_dma_cyclic_free(struct dma_chan *chan)
1445 {
1446         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1447         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1448         struct dw_cyclic_desc   *cdesc = dwc->cdesc;
1449         int                     i;
1450         unsigned long           flags;
1451
1452         dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
1453
1454         if (!cdesc)
1455                 return;
1456
1457         spin_lock_irqsave(&dwc->lock, flags);
1458
1459         dwc_chan_disable(dw, dwc);
1460
1461         dma_writel(dw, CLEAR.ERROR, dwc->mask);
1462         dma_writel(dw, CLEAR.XFER, dwc->mask);
1463
1464         spin_unlock_irqrestore(&dwc->lock, flags);
1465
1466         for (i = 0; i < cdesc->periods; i++)
1467                 dwc_desc_put(dwc, cdesc->desc[i]);
1468
1469         kfree(cdesc->desc);
1470         kfree(cdesc);
1471
1472         clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1473 }
1474 EXPORT_SYMBOL(dw_dma_cyclic_free);
1475
1476 /*----------------------------------------------------------------------*/
1477
1478 static void dw_dma_off(struct dw_dma *dw)
1479 {
1480         int i;
1481
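        /*
         * Disable the controller globally, mask all per-channel
         * interrupts and wait for the enable bit to clear; channels
         * have to be re-initialized before they are used again.
         */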
1482         dma_writel(dw, CFG, 0);
1483
1484         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1485         channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1486         channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1487         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1488
1489         while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1490                 cpu_relax();
1491
1492         for (i = 0; i < dw->dma.chancnt; i++)
1493                 dw->chan[i].initialized = false;
1494 }
1495
1496 #ifdef CONFIG_OF
1497 static struct dw_dma_platform_data *
1498 dw_dma_parse_dt(struct platform_device *pdev)
1499 {
1500         struct device_node *sn, *cn, *np = pdev->dev.of_node;
1501         struct dw_dma_platform_data *pdata;
1502         struct dw_dma_slave *sd;
1503         u32 tmp, arr[4];
1504
1505         if (!np) {
1506                 dev_err(&pdev->dev, "Missing DT data\n");
1507                 return NULL;
1508         }
1509
1510         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1511         if (!pdata)
1512                 return NULL;
1513
1514         if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels))
1515                 return NULL;
1516
1517         if (of_property_read_bool(np, "is_private"))
1518                 pdata->is_private = true;
1519
1520         if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
1521                 pdata->chan_allocation_order = (unsigned char)tmp;
1522
1523         if (!of_property_read_u32(np, "chan_priority", &tmp))
1524                 pdata->chan_priority = tmp;
1525
1526         if (!of_property_read_u32(np, "block_size", &tmp))
1527                 pdata->block_size = tmp;
1528
1529         if (!of_property_read_u32(np, "nr_masters", &tmp)) {
1530                 if (tmp > 4)
1531                         return NULL;
1532
1533                 pdata->nr_masters = tmp;
1534         }
1535
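        /*
         * data_width[] holds the per-master bus width, encoded as log2
         * of the width in bytes (the same encoding autocfg derives from
         * DW_PARAMS).
         */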
1536         if (!of_property_read_u32_array(np, "data_width", arr,
1537                                 pdata->nr_masters))
1538                 for (tmp = 0; tmp < pdata->nr_masters; tmp++)
1539                         pdata->data_width[tmp] = arr[tmp];
1540
1541         /* parse slave data */
1542         sn = of_find_node_by_name(np, "slave_info");
1543         if (!sn)
1544                 return pdata;
1545
1546         /* calculate number of slaves */
1547         tmp = of_get_child_count(sn);
1548         if (!tmp)
1549                 return NULL;
1550
1551         sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL);
1552         if (!sd)
1553                 return NULL;
1554
1555         pdata->sd = sd;
1556         pdata->sd_count = tmp;
1557
1558         for_each_child_of_node(sn, cn) {
1559                 sd->dma_dev = &pdev->dev;
1560                 of_property_read_string(cn, "bus_id", &sd->bus_id);
1561                 of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi);
1562                 of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo);
1563                 if (!of_property_read_u32(cn, "src_master", &tmp))
1564                         sd->src_master = tmp;
1565
1566                 if (!of_property_read_u32(cn, "dst_master", &tmp))
1567                         sd->dst_master = tmp;
1568                 sd++;
1569         }
1570
1571         return pdata;
1572 }
1573 #else
1574 static inline struct dw_dma_platform_data *
1575 dw_dma_parse_dt(struct platform_device *pdev)
1576 {
1577         return NULL;
1578 }
1579 #endif
1580
1581 static int dw_probe(struct platform_device *pdev)
1582 {
1583         struct dw_dma_platform_data *pdata;
1584         struct resource         *io;
1585         struct dw_dma           *dw;
1586         size_t                  size;
1587         void __iomem            *regs;
1588         bool                    autocfg;
1589         unsigned int            dw_params;
1590         unsigned int            nr_channels;
1591         unsigned int            max_blk_size = 0;
1592         int                     irq;
1593         int                     err;
1594         int                     i;
1595
1596         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1597         if (!io)
1598                 return -EINVAL;
1599
1600         irq = platform_get_irq(pdev, 0);
1601         if (irq < 0)
1602                 return irq;
1603
1604         regs = devm_request_and_ioremap(&pdev->dev, io);
1605         if (!regs)
1606                 return -EBUSY;
1607
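        /*
         * If the controller was synthesized with the parameter registers
         * enabled (DW_PARAMS_EN), channel count, number of masters and
         * data widths can be read back from hardware and platform data
         * becomes optional.
         */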
1608         dw_params = dma_read_byaddr(regs, DW_PARAMS);
1609         autocfg = dw_params >> DW_PARAMS_EN & 0x1;
1610
1611         pdata = dev_get_platdata(&pdev->dev);
1612         if (!pdata)
1613                 pdata = dw_dma_parse_dt(pdev);
1614
1615         if (!pdata && autocfg) {
1616                 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1617                 if (!pdata)
1618                         return -ENOMEM;
1619
1620                 /* Fill platform data with the default values */
1621                 pdata->is_private = true;
1622                 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1623                 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
1624         } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
1625                 return -EINVAL;
1626
1627         if (autocfg)
1628                 nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
1629         else
1630                 nr_channels = pdata->nr_channels;
1631
1632         size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
1633         dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
1634         if (!dw)
1635                 return -ENOMEM;
1636
1637         dw->clk = devm_clk_get(&pdev->dev, "hclk");
1638         if (IS_ERR(dw->clk))
1639                 return PTR_ERR(dw->clk);
1640         clk_prepare_enable(dw->clk);
1641
1642         dw->regs = regs;
1643         dw->sd = pdata->sd;
1644         dw->sd_count = pdata->sd_count;
1645
1646         /* get hardware configuration parameters */
1647         if (autocfg) {
1648                 max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
1649
1650                 dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1651                 for (i = 0; i < dw->nr_masters; i++) {
1652                         dw->data_width[i] =
1653                                 (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
1654                 }
1655         } else {
1656                 dw->nr_masters = pdata->nr_masters;
1657                 memcpy(dw->data_width, pdata->data_width, sizeof(pdata->data_width));
1658         }
1659
1660         /* Calculate all channel mask before DMA setup */
1661         dw->all_chan_mask = (1 << nr_channels) - 1;
1662
1663         /* force dma off, just in case */
1664         dw_dma_off(dw);
1665
1666         /* disable BLOCK interrupts as well */
1667         channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1668
1669         err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
1670                                "dw_dmac", dw);
1671         if (err)
1672                 return err;
1673
1674         platform_set_drvdata(pdev, dw);
1675
1676         /* create a pool of consistent memory blocks for hardware descriptors */
1677         dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
1678                                          sizeof(struct dw_desc), 4, 0);
1679         if (!dw->desc_pool) {
1680                 dev_err(&pdev->dev, "No memory for descriptor DMA pool\n");
1681                 return -ENOMEM;
1682         }
1683
1684         tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1685
1686         INIT_LIST_HEAD(&dw->dma.channels);
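        /*
         * Set up per-channel state: DMA cookie, position in the channel
         * list according to the requested allocation order, priority,
         * register block and interrupt mask bit.
         */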
1687         for (i = 0; i < nr_channels; i++) {
1688                 struct dw_dma_chan      *dwc = &dw->chan[i];
1689                 int                     r = nr_channels - i - 1;
1690
1691                 dwc->chan.device = &dw->dma;
1692                 dma_cookie_init(&dwc->chan);
1693                 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1694                         list_add_tail(&dwc->chan.device_node,
1695                                         &dw->dma.channels);
1696                 else
1697                         list_add(&dwc->chan.device_node, &dw->dma.channels);
1698
1699                 /* 7 is highest priority & 0 is lowest. */
1700                 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1701                         dwc->priority = r;
1702                 else
1703                         dwc->priority = i;
1704
1705                 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1706                 spin_lock_init(&dwc->lock);
1707                 dwc->mask = 1 << i;
1708
1709                 INIT_LIST_HEAD(&dwc->active_list);
1710                 INIT_LIST_HEAD(&dwc->queue);
1711                 INIT_LIST_HEAD(&dwc->free_list);
1712
1713                 channel_clear_bit(dw, CH_EN, dwc->mask);
1714
1715                 dwc->direction = DMA_TRANS_NONE;
1716
1717                 /* hardware configuration */
1718                 if (autocfg) {
1719                         unsigned int dwc_params;
1720
1721                         dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
1722                                                      DWC_PARAMS);
1723
1724                         /* Decode maximum block size for given channel. The
1725                          * stored 4 bit value represents blocks from 0x00 for 3
1726                          * up to 0x0a for 4095. */
1727                         dwc->block_size =
1728                                 (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
1729                         dwc->nollp =
1730                                 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1731                 } else {
1732                         dwc->block_size = pdata->block_size;
1733
1734                         /* Check whether the channel supports multi-block (LLP) transfers */
1735                         channel_writel(dwc, LLP, 0xfffffffc);
1736                         dwc->nollp =
1737                                 (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
1738                         channel_writel(dwc, LLP, 0);
1739                 }
1740         }
1741
1742         /* Clear all interrupts on all channels. */
1743         dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1744         dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1745         dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1746         dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1747         dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1748
1749         dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1750         dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1751         if (pdata->is_private)
1752                 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1753         dw->dma.dev = &pdev->dev;
1754         dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1755         dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1756
1757         dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1758
1759         dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1760         dw->dma.device_control = dwc_control;
1761
1762         dw->dma.device_tx_status = dwc_tx_status;
1763         dw->dma.device_issue_pending = dwc_issue_pending;
1764
1765         dma_writel(dw, CFG, DW_CFG_DMA_EN);
1766
1767         dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
1768                  nr_channels);
1769
1770         dma_async_device_register(&dw->dma);
1771
1772         return 0;
1773 }
1774
1775 static int dw_remove(struct platform_device *pdev)
1776 {
1777         struct dw_dma           *dw = platform_get_drvdata(pdev);
1778         struct dw_dma_chan      *dwc, *_dwc;
1779
1780         dw_dma_off(dw);
1781         dma_async_device_unregister(&dw->dma);
1782
1783         tasklet_kill(&dw->tasklet);
1784
1785         list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1786                         chan.device_node) {
1787                 list_del(&dwc->chan.device_node);
1788                 channel_clear_bit(dw, CH_EN, dwc->mask);
1789         }
1790
1791         return 0;
1792 }
1793
1794 static void dw_shutdown(struct platform_device *pdev)
1795 {
1796         struct dw_dma   *dw = platform_get_drvdata(pdev);
1797
1798         dw_dma_off(dw);
1799         clk_disable_unprepare(dw->clk);
1800 }
1801
1802 static int dw_suspend_noirq(struct device *dev)
1803 {
1804         struct platform_device *pdev = to_platform_device(dev);
1805         struct dw_dma   *dw = platform_get_drvdata(pdev);
1806
1807         dw_dma_off(dw);
1808         clk_disable_unprepare(dw->clk);
1809
1810         return 0;
1811 }
1812
1813 static int dw_resume_noirq(struct device *dev)
1814 {
1815         struct platform_device *pdev = to_platform_device(dev);
1816         struct dw_dma   *dw = platform_get_drvdata(pdev);
1817
1818         clk_prepare_enable(dw->clk);
1819         dma_writel(dw, CFG, DW_CFG_DMA_EN);
1820
1821         return 0;
1822 }
1823
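/*
 * The same noirq handlers serve suspend and hibernation: the controller is
 * quiesced and its clock gated on the way down, then re-clocked and globally
 * re-enabled on the way back up.
 */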
1824 static const struct dev_pm_ops dw_dev_pm_ops = {
1825         .suspend_noirq = dw_suspend_noirq,
1826         .resume_noirq = dw_resume_noirq,
1827         .freeze_noirq = dw_suspend_noirq,
1828         .thaw_noirq = dw_resume_noirq,
1829         .restore_noirq = dw_resume_noirq,
1830         .poweroff_noirq = dw_suspend_noirq,
1831 };
1832
1833 #ifdef CONFIG_OF
1834 static const struct of_device_id dw_dma_id_table[] = {
1835         { .compatible = "snps,dma-spear1340" },
1836         {}
1837 };
1838 MODULE_DEVICE_TABLE(of, dw_dma_id_table);
1839 #endif
1840
1841 static struct platform_driver dw_driver = {
1842         .probe          = dw_probe,
1843         .remove         = dw_remove,
1844         .shutdown       = dw_shutdown,
1845         .driver = {
1846                 .name   = "dw_dmac",
1847                 .pm     = &dw_dev_pm_ops,
1848                 .of_match_table = of_match_ptr(dw_dma_id_table),
1849         },
1850 };
1851
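/*
 * Register at subsys_initcall time so the DMA engine is available before
 * client drivers that depend on it start probing.
 */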
1852 static int __init dw_init(void)
1853 {
1854         return platform_driver_register(&dw_driver);
1855 }
1856 subsys_initcall(dw_init);
1857
1858 static void __exit dw_exit(void)
1859 {
1860         platform_driver_unregister(&dw_driver);
1861 }
1862 module_exit(dw_exit);
1863
1864 MODULE_LICENSE("GPL v2");
1865 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1866 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1867 MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");