dma: dw_dmac: clear suspend bit during termination
drivers/dma/dw_dmac.c
1 /*
2  * Core driver for the Synopsys DesignWare DMA Controller
3  *
4  * Copyright (C) 2007-2008 Atmel Corporation
5  * Copyright (C) 2010-2011 ST Microelectronics
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11
12 #include <linux/bitops.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/dmaengine.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/init.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/of.h>
21 #include <linux/mm.h>
22 #include <linux/module.h>
23 #include <linux/platform_device.h>
24 #include <linux/slab.h>
25
26 #include "dw_dmac_regs.h"
27 #include "dmaengine.h"
28
29 /*
30  * This supports the Synopsys "DesignWare AHB Central DMA Controller",
31  * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
32  * of which use ARM any more).  See the "Databook" from Synopsys for
33  * information beyond what licensees probably provide.
34  *
35  * The driver has currently been tested only with the Atmel AT32AP7000,
36  * which does not support descriptor writeback.
37  */
38
39 static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
40 {
41         return slave ? slave->dst_master : 0;
42 }
43
44 static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
45 {
46         return slave ? slave->src_master : 1;
47 }
48
49 #define DWC_DEFAULT_CTLLO(_chan) ({                             \
50                 struct dw_dma_slave *__slave = (_chan->private);        \
51                 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);       \
52                 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
53                 bool _is_slave = is_slave_direction(_dwc->direction);   \
54                 int _dms = dwc_get_dms(__slave);                \
55                 int _sms = dwc_get_sms(__slave);                \
56                 u8 _smsize = _is_slave ? _sconfig->src_maxburst :       \
57                         DW_DMA_MSIZE_16;                        \
58                 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :       \
59                         DW_DMA_MSIZE_16;                        \
60                                                                 \
61                 (DWC_CTLL_DST_MSIZE(_dmsize)                    \
62                  | DWC_CTLL_SRC_MSIZE(_smsize)                  \
63                  | DWC_CTLL_LLP_D_EN                            \
64                  | DWC_CTLL_LLP_S_EN                            \
65                  | DWC_CTLL_DMS(_dms)                           \
66                  | DWC_CTLL_SMS(_sms));                         \
67         })
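
/*
 * Note: DWC_DEFAULT_CTLLO() only fills in the fields common to every
 * transfer type (burst sizes, LLP enables, master selects).  The
 * per-direction bits (transfer widths, address increment/fix modes and
 * the flow-control field) are OR'ed in by the individual prep routines
 * below, e.g. for memcpy:
 *
 *	ctllo = DWC_DEFAULT_CTLLO(chan)
 *			| DWC_CTLL_DST_WIDTH(dst_width)
 *			| DWC_CTLL_SRC_WIDTH(src_width)
 *			| DWC_CTLL_DST_INC
 *			| DWC_CTLL_SRC_INC
 *			| DWC_CTLL_FC_M2M;
 */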
68
69 /*
70  * Number of descriptors to allocate for each channel. This should be
71  * made configurable somehow; preferably, the clients (at least the
72  * ones using slave transfers) should be able to give us a hint.
73  */
74 #define NR_DESCS_PER_CHANNEL    64
75
76 #define SRC_MASTER      0
77 #define DST_MASTER      1
78
79 static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
80 {
81         struct dw_dma *dw = to_dw_dma(chan->device);
82         struct dw_dma_slave *dws = chan->private;
83
84         if (master == SRC_MASTER)
85                 return dw->data_width[dwc_get_sms(dws)];
86         else if (master == DST_MASTER)
87                 return dw->data_width[dwc_get_dms(dws)];
88
89         return 0;
90 }
91
92 /*----------------------------------------------------------------------*/
93
94 /*
95  * Because we're not relying on writeback from the controller (it may not
96  * even be configured into the core!) we don't need to use dma_pool.  These
97  * descriptors -- and associated data -- are cacheable.  We do need to make
98  * sure their dcache entries are written back before handing them off to
99  * the controller, though.
100  */
101
102 static struct device *chan2dev(struct dma_chan *chan)
103 {
104         return &chan->dev->device;
105 }
106 static struct device *chan2parent(struct dma_chan *chan)
107 {
108         return chan->dev->device.parent;
109 }
110
111 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
112 {
113         return to_dw_desc(dwc->active_list.next);
114 }
115
116 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
117 {
118         struct dw_desc *desc, *_desc;
119         struct dw_desc *ret = NULL;
120         unsigned int i = 0;
121         unsigned long flags;
122
123         spin_lock_irqsave(&dwc->lock, flags);
124         list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
125                 i++;
126                 if (async_tx_test_ack(&desc->txd)) {
127                         list_del(&desc->desc_node);
128                         ret = desc;
129                         break;
130                 }
131                 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
132         }
133         spin_unlock_irqrestore(&dwc->lock, flags);
134
135         dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
136
137         return ret;
138 }
139
140 static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
141 {
142         struct dw_desc  *child;
143
144         list_for_each_entry(child, &desc->tx_list, desc_node)
145                 dma_sync_single_for_cpu(chan2parent(&dwc->chan),
146                                 child->txd.phys, sizeof(child->lli),
147                                 DMA_TO_DEVICE);
148         dma_sync_single_for_cpu(chan2parent(&dwc->chan),
149                         desc->txd.phys, sizeof(desc->lli),
150                         DMA_TO_DEVICE);
151 }
152
153 /*
154  * Move a descriptor, including any children, to the free list.
155  * `desc' must not be on any lists.
156  */
157 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
158 {
159         unsigned long flags;
160
161         if (desc) {
162                 struct dw_desc *child;
163
164                 dwc_sync_desc_for_cpu(dwc, desc);
165
166                 spin_lock_irqsave(&dwc->lock, flags);
167                 list_for_each_entry(child, &desc->tx_list, desc_node)
168                         dev_vdbg(chan2dev(&dwc->chan),
169                                         "moving child desc %p to freelist\n",
170                                         child);
171                 list_splice_init(&desc->tx_list, &dwc->free_list);
172                 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
173                 list_add(&desc->desc_node, &dwc->free_list);
174                 spin_unlock_irqrestore(&dwc->lock, flags);
175         }
176 }
177
178 static void dwc_initialize(struct dw_dma_chan *dwc)
179 {
180         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
181         struct dw_dma_slave *dws = dwc->chan.private;
182         u32 cfghi = DWC_CFGH_FIFO_MODE;
183         u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
184
185         if (dwc->initialized)
186                 return;
187
188         if (dws) {
189                 /*
190                  * We need controller-specific data to set up slave
191                  * transfers.
192                  */
193                 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
194
195                 cfghi = dws->cfg_hi;
196                 cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
197         } else {
198                 if (dwc->direction == DMA_MEM_TO_DEV)
199                         cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
200                 else if (dwc->direction == DMA_DEV_TO_MEM)
201                         cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
202         }
203
204         channel_writel(dwc, CFG_LO, cfglo);
205         channel_writel(dwc, CFG_HI, cfghi);
206
207         /* Enable interrupts */
208         channel_set_bit(dw, MASK.XFER, dwc->mask);
209         channel_set_bit(dw, MASK.ERROR, dwc->mask);
210
211         dwc->initialized = true;
212 }
213
214 /*----------------------------------------------------------------------*/
215
216 static inline unsigned int dwc_fast_fls(unsigned long long v)
217 {
218         /*
219          * We can be a lot more clever here, but this should take care
220          * of the most common optimization.
221          */
222         if (!(v & 7))
223                 return 3;
224         else if (!(v & 3))
225                 return 2;
226         else if (!(v & 1))
227                 return 1;
228         return 0;
229 }
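
/*
 * dwc_fast_fls() is fed "src | dest | len" by the prep routines, so the
 * result is the widest transfer (as a power-of-two shift, capped at 3)
 * that all three values are aligned to.  For example 0x1000 | 0x2000 |
 * 0x40 = 0x3040 has the low three bits clear and yields 3 (64-bit
 * transfers), while any odd address or length forces 0 (byte transfers).
 */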
230
231 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
232 {
233         dev_err(chan2dev(&dwc->chan),
234                 "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
235                 channel_readl(dwc, SAR),
236                 channel_readl(dwc, DAR),
237                 channel_readl(dwc, LLP),
238                 channel_readl(dwc, CTL_HI),
239                 channel_readl(dwc, CTL_LO));
240 }
241
242 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
243 {
244         channel_clear_bit(dw, CH_EN, dwc->mask);
245         while (dma_readl(dw, CH_EN) & dwc->mask)
246                 cpu_relax();
247 }
248
249 /*----------------------------------------------------------------------*/
250
251 /* Perform single block transfer */
252 static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
253                                        struct dw_desc *desc)
254 {
255         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
256         u32             ctllo;
257
258         /* Software emulation of LLP mode relies on interrupts to continue
259          * the multi-block transfer. */
260         ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
261
262         channel_writel(dwc, SAR, desc->lli.sar);
263         channel_writel(dwc, DAR, desc->lli.dar);
264         channel_writel(dwc, CTL_LO, ctllo);
265         channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
266         channel_set_bit(dw, CH_EN, dwc->mask);
267
268         /* Move pointer to next descriptor */
269         dwc->tx_node_active = dwc->tx_node_active->next;
270 }
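
/*
 * Software LLP emulation: on channels without hardware LLP support,
 * dwc_dostart() below issues only the first block through
 * dwc_do_single_block() and sets DW_DMA_IS_SOFT_LLP.  Every block
 * completes with an XFER interrupt (DWC_CTLL_INT_EN is forced above),
 * and the tasklet then either programs the next node from
 * tx_node_active or, once the list is exhausted, clears the flag and
 * scans for completion.
 */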
271
272 /* Called with dwc->lock held and bh disabled */
273 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
274 {
275         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
276         unsigned long   was_soft_llp;
277
278         /* ASSERT:  channel is idle */
279         if (dma_readl(dw, CH_EN) & dwc->mask) {
280                 dev_err(chan2dev(&dwc->chan),
281                         "BUG: Attempted to start non-idle channel\n");
282                 dwc_dump_chan_regs(dwc);
283
284                 /* The tasklet will hopefully advance the queue... */
285                 return;
286         }
287
288         if (dwc->nollp) {
289                 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
290                                                 &dwc->flags);
291                 if (was_soft_llp) {
292                         dev_err(chan2dev(&dwc->chan),
293                                 "BUG: Attempted to start new LLP transfer "
294                                 "inside ongoing one\n");
295                         return;
296                 }
297
298                 dwc_initialize(dwc);
299
300                 dwc->tx_list = &first->tx_list;
301                 dwc->tx_node_active = &first->tx_list;
302
303                 dwc_do_single_block(dwc, first);
304
305                 return;
306         }
307
308         dwc_initialize(dwc);
309
310         channel_writel(dwc, LLP, first->txd.phys);
311         channel_writel(dwc, CTL_LO,
312                         DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
313         channel_writel(dwc, CTL_HI, 0);
314         channel_set_bit(dw, CH_EN, dwc->mask);
315 }
316
317 /*----------------------------------------------------------------------*/
318
319 static void
320 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
321                 bool callback_required)
322 {
323         dma_async_tx_callback           callback = NULL;
324         void                            *param = NULL;
325         struct dma_async_tx_descriptor  *txd = &desc->txd;
326         struct dw_desc                  *child;
327         unsigned long                   flags;
328
329         dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
330
331         spin_lock_irqsave(&dwc->lock, flags);
332         dma_cookie_complete(txd);
333         if (callback_required) {
334                 callback = txd->callback;
335                 param = txd->callback_param;
336         }
337
338         dwc_sync_desc_for_cpu(dwc, desc);
339
340         /* async_tx_ack */
341         list_for_each_entry(child, &desc->tx_list, desc_node)
342                 async_tx_ack(&child->txd);
343         async_tx_ack(&desc->txd);
344
345         list_splice_init(&desc->tx_list, &dwc->free_list);
346         list_move(&desc->desc_node, &dwc->free_list);
347
348         if (!is_slave_direction(dwc->direction)) {
349                 struct device *parent = chan2parent(&dwc->chan);
350                 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
351                         if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
352                                 dma_unmap_single(parent, desc->lli.dar,
353                                                 desc->len, DMA_FROM_DEVICE);
354                         else
355                                 dma_unmap_page(parent, desc->lli.dar,
356                                                 desc->len, DMA_FROM_DEVICE);
357                 }
358                 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
359                         if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
360                                 dma_unmap_single(parent, desc->lli.sar,
361                                                 desc->len, DMA_TO_DEVICE);
362                         else
363                                 dma_unmap_page(parent, desc->lli.sar,
364                                                 desc->len, DMA_TO_DEVICE);
365                 }
366         }
367
368         spin_unlock_irqrestore(&dwc->lock, flags);
369
370         if (callback)
371                 callback(param);
372 }
373
374 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
375 {
376         struct dw_desc *desc, *_desc;
377         LIST_HEAD(list);
378         unsigned long flags;
379
380         spin_lock_irqsave(&dwc->lock, flags);
381         if (dma_readl(dw, CH_EN) & dwc->mask) {
382                 dev_err(chan2dev(&dwc->chan),
383                         "BUG: XFER bit set, but channel not idle!\n");
384
385                 /* Try to continue after resetting the channel... */
386                 dwc_chan_disable(dw, dwc);
387         }
388
389         /*
390          * Submit queued descriptors ASAP, i.e. before we go through
391          * the completed ones.
392          */
393         list_splice_init(&dwc->active_list, &list);
394         if (!list_empty(&dwc->queue)) {
395                 list_move(dwc->queue.next, &dwc->active_list);
396                 dwc_dostart(dwc, dwc_first_active(dwc));
397         }
398
399         spin_unlock_irqrestore(&dwc->lock, flags);
400
401         list_for_each_entry_safe(desc, _desc, &list, desc_node)
402                 dwc_descriptor_complete(dwc, desc, true);
403 }
404
405 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
406 {
407         dma_addr_t llp;
408         struct dw_desc *desc, *_desc;
409         struct dw_desc *child;
410         u32 status_xfer;
411         unsigned long flags;
412
413         spin_lock_irqsave(&dwc->lock, flags);
414         llp = channel_readl(dwc, LLP);
415         status_xfer = dma_readl(dw, RAW.XFER);
416
417         if (status_xfer & dwc->mask) {
418                 /* Everything we've submitted is done */
419                 dma_writel(dw, CLEAR.XFER, dwc->mask);
420                 spin_unlock_irqrestore(&dwc->lock, flags);
421
422                 dwc_complete_all(dw, dwc);
423                 return;
424         }
425
426         if (list_empty(&dwc->active_list)) {
427                 spin_unlock_irqrestore(&dwc->lock, flags);
428                 return;
429         }
430
431         dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
432                         (unsigned long long)llp);
433
434         list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
435                 /* Check the first descriptor's address */
436                 if (desc->txd.phys == llp) {
437                         spin_unlock_irqrestore(&dwc->lock, flags);
438                         return;
439                 }
440
441                 /* Check the first descriptor's llp */
442                 if (desc->lli.llp == llp) {
443                         /* This one is currently in progress */
444                         spin_unlock_irqrestore(&dwc->lock, flags);
445                         return;
446                 }
447
448                 list_for_each_entry(child, &desc->tx_list, desc_node)
449                         if (child->lli.llp == llp) {
450                                 /* Currently in progress */
451                                 spin_unlock_irqrestore(&dwc->lock, flags);
452                                 return;
453                         }
454
455                 /*
456                  * No descriptors so far seem to be in progress, i.e.
457                  * this one must be done.
458                  */
459                 spin_unlock_irqrestore(&dwc->lock, flags);
460                 dwc_descriptor_complete(dwc, desc, true);
461                 spin_lock_irqsave(&dwc->lock, flags);
462         }
463
464         dev_err(chan2dev(&dwc->chan),
465                 "BUG: All descriptors done, but channel not idle!\n");
466
467         /* Try to continue after resetting the channel... */
468         dwc_chan_disable(dw, dwc);
469
470         if (!list_empty(&dwc->queue)) {
471                 list_move(dwc->queue.next, &dwc->active_list);
472                 dwc_dostart(dwc, dwc_first_active(dwc));
473         }
474         spin_unlock_irqrestore(&dwc->lock, flags);
475 }
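
/*
 * In other words: dwc_scan_descriptors() compares each active
 * descriptor's own address and its lli.llp against the channel's LLP
 * register.  Descriptors the hardware has already moved past must have
 * completed and are retired via dwc_descriptor_complete(); the first one
 * still referenced by LLP stops the scan.
 */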
476
477 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
478 {
479         dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
480                  lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
481 }
482
483 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
484 {
485         struct dw_desc *bad_desc;
486         struct dw_desc *child;
487         unsigned long flags;
488
489         dwc_scan_descriptors(dw, dwc);
490
491         spin_lock_irqsave(&dwc->lock, flags);
492
493         /*
494          * The descriptor currently at the head of the active list is
495          * borked. Since we don't have any way to report errors, we'll
496          * just have to scream loudly and try to carry on.
497          */
498         bad_desc = dwc_first_active(dwc);
499         list_del_init(&bad_desc->desc_node);
500         list_move(dwc->queue.next, dwc->active_list.prev);
501
502         /* Clear the error flag and try to restart the controller */
503         dma_writel(dw, CLEAR.ERROR, dwc->mask);
504         if (!list_empty(&dwc->active_list))
505                 dwc_dostart(dwc, dwc_first_active(dwc));
506
507         /*
508          * WARN may seem harsh, but since this only happens
509          * when someone submits a bad physical address in a
510          * descriptor, we should consider ourselves lucky that the
511          * controller flagged an error instead of scribbling over
512          * random memory locations.
513          */
514         dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
515                                        "  cookie: %d\n", bad_desc->txd.cookie);
516         dwc_dump_lli(dwc, &bad_desc->lli);
517         list_for_each_entry(child, &bad_desc->tx_list, desc_node)
518                 dwc_dump_lli(dwc, &child->lli);
519
520         spin_unlock_irqrestore(&dwc->lock, flags);
521
522         /* Pretend the descriptor completed successfully */
523         dwc_descriptor_complete(dwc, bad_desc, true);
524 }
525
526 /* --------------------- Cyclic DMA API extensions -------------------- */
527
528 inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
529 {
530         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
531         return channel_readl(dwc, SAR);
532 }
533 EXPORT_SYMBOL(dw_dma_get_src_addr);
534
535 inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
536 {
537         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
538         return channel_readl(dwc, DAR);
539 }
540 EXPORT_SYMBOL(dw_dma_get_dst_addr);
541
542 /* called with dwc->lock held and all DMAC interrupts disabled */
543 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
544                 u32 status_err, u32 status_xfer)
545 {
546         unsigned long flags;
547
548         if (dwc->mask) {
549                 void (*callback)(void *param);
550                 void *callback_param;
551
552                 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
553                                 channel_readl(dwc, LLP));
554
555                 callback = dwc->cdesc->period_callback;
556                 callback_param = dwc->cdesc->period_callback_param;
557
558                 if (callback)
559                         callback(callback_param);
560         }
561
562         /*
563          * Error and transfer complete are highly unlikely, and will most
564          * likely be due to a configuration error by the user.
565          */
566         if (unlikely(status_err & dwc->mask) ||
567                         unlikely(status_xfer & dwc->mask)) {
568                 int i;
569
570                 dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
571                                 "interrupt, stopping DMA transfer\n",
572                                 status_xfer ? "xfer" : "error");
573
574                 spin_lock_irqsave(&dwc->lock, flags);
575
576                 dwc_dump_chan_regs(dwc);
577
578                 dwc_chan_disable(dw, dwc);
579
580                 /* make sure DMA does not restart by loading a new list */
581                 channel_writel(dwc, LLP, 0);
582                 channel_writel(dwc, CTL_LO, 0);
583                 channel_writel(dwc, CTL_HI, 0);
584
585                 dma_writel(dw, CLEAR.ERROR, dwc->mask);
586                 dma_writel(dw, CLEAR.XFER, dwc->mask);
587
588                 for (i = 0; i < dwc->cdesc->periods; i++)
589                         dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
590
591                 spin_unlock_irqrestore(&dwc->lock, flags);
592         }
593 }
594
595 /* ------------------------------------------------------------------------- */
596
597 static void dw_dma_tasklet(unsigned long data)
598 {
599         struct dw_dma *dw = (struct dw_dma *)data;
600         struct dw_dma_chan *dwc;
601         u32 status_xfer;
602         u32 status_err;
603         int i;
604
605         status_xfer = dma_readl(dw, RAW.XFER);
606         status_err = dma_readl(dw, RAW.ERROR);
607
608         dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
609
610         for (i = 0; i < dw->dma.chancnt; i++) {
611                 dwc = &dw->chan[i];
612                 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
613                         dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
614                 else if (status_err & (1 << i))
615                         dwc_handle_error(dw, dwc);
616                 else if (status_xfer & (1 << i)) {
617                         unsigned long flags;
618
619                         spin_lock_irqsave(&dwc->lock, flags);
620                         if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
621                                 if (dwc->tx_node_active != dwc->tx_list) {
622                                         struct dw_desc *desc =
623                                                 to_dw_desc(dwc->tx_node_active);
624
625                                         dma_writel(dw, CLEAR.XFER, dwc->mask);
626
627                                         dwc_do_single_block(dwc, desc);
628
629                                         spin_unlock_irqrestore(&dwc->lock, flags);
630                                         continue;
631                                 }
632                                 /* we are done here */
633                                 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
634                         }
635                         spin_unlock_irqrestore(&dwc->lock, flags);
636
637                         dwc_scan_descriptors(dw, dwc);
638                 }
639         }
640
641         /*
642          * Re-enable interrupts.
643          */
644         channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
645         channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
646 }
647
648 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
649 {
650         struct dw_dma *dw = dev_id;
651         u32 status;
652
653         dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
654                         dma_readl(dw, STATUS_INT));
655
656         /*
657          * Just disable the interrupts. We'll turn them back on in the
658          * softirq handler.
659          */
660         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
661         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
662
663         status = dma_readl(dw, STATUS_INT);
664         if (status) {
665                 dev_err(dw->dma.dev,
666                         "BUG: Unexpected interrupts pending: 0x%x\n",
667                         status);
668
669                 /* Try to recover */
670                 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
671                 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
672                 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
673                 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
674         }
675
676         tasklet_schedule(&dw->tasklet);
677
678         return IRQ_HANDLED;
679 }
680
681 /*----------------------------------------------------------------------*/
682
683 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
684 {
685         struct dw_desc          *desc = txd_to_dw_desc(tx);
686         struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
687         dma_cookie_t            cookie;
688         unsigned long           flags;
689
690         spin_lock_irqsave(&dwc->lock, flags);
691         cookie = dma_cookie_assign(tx);
692
693         /*
694          * REVISIT: We should attempt to chain as many descriptors as
695          * possible, perhaps even appending to those already submitted
696          * for DMA. But this is hard to do in a race-free manner.
697          */
698         if (list_empty(&dwc->active_list)) {
699                 dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
700                                 desc->txd.cookie);
701                 list_add_tail(&desc->desc_node, &dwc->active_list);
702                 dwc_dostart(dwc, dwc_first_active(dwc));
703         } else {
704                 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
705                                 desc->txd.cookie);
706
707                 list_add_tail(&desc->desc_node, &dwc->queue);
708         }
709
710         spin_unlock_irqrestore(&dwc->lock, flags);
711
712         return cookie;
713 }
714
715 static struct dma_async_tx_descriptor *
716 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
717                 size_t len, unsigned long flags)
718 {
719         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
720         struct dw_desc          *desc;
721         struct dw_desc          *first;
722         struct dw_desc          *prev;
723         size_t                  xfer_count;
724         size_t                  offset;
725         unsigned int            src_width;
726         unsigned int            dst_width;
727         unsigned int            data_width;
728         u32                     ctllo;
729
730         dev_vdbg(chan2dev(chan),
731                         "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
732                         (unsigned long long)dest, (unsigned long long)src,
733                         len, flags);
734
735         if (unlikely(!len)) {
736                 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
737                 return NULL;
738         }
739
740         dwc->direction = DMA_MEM_TO_MEM;
741
742         data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
743                            dwc_get_data_width(chan, DST_MASTER));
744
745         src_width = dst_width = min_t(unsigned int, data_width,
746                                       dwc_fast_fls(src | dest | len));
747
748         ctllo = DWC_DEFAULT_CTLLO(chan)
749                         | DWC_CTLL_DST_WIDTH(dst_width)
750                         | DWC_CTLL_SRC_WIDTH(src_width)
751                         | DWC_CTLL_DST_INC
752                         | DWC_CTLL_SRC_INC
753                         | DWC_CTLL_FC_M2M;
754         prev = first = NULL;
755
756         for (offset = 0; offset < len; offset += xfer_count << src_width) {
757                 xfer_count = min_t(size_t, (len - offset) >> src_width,
758                                            dwc->block_size);
759
760                 desc = dwc_desc_get(dwc);
761                 if (!desc)
762                         goto err_desc_get;
763
764                 desc->lli.sar = src + offset;
765                 desc->lli.dar = dest + offset;
766                 desc->lli.ctllo = ctllo;
767                 desc->lli.ctlhi = xfer_count;
768
769                 if (!first) {
770                         first = desc;
771                 } else {
772                         prev->lli.llp = desc->txd.phys;
773                         dma_sync_single_for_device(chan2parent(chan),
774                                         prev->txd.phys, sizeof(prev->lli),
775                                         DMA_TO_DEVICE);
776                         list_add_tail(&desc->desc_node,
777                                         &first->tx_list);
778                 }
779                 prev = desc;
780         }
781
782
783         if (flags & DMA_PREP_INTERRUPT)
784                 /* Trigger interrupt after last block */
785                 prev->lli.ctllo |= DWC_CTLL_INT_EN;
786
787         prev->lli.llp = 0;
788         dma_sync_single_for_device(chan2parent(chan),
789                         prev->txd.phys, sizeof(prev->lli),
790                         DMA_TO_DEVICE);
791
792         first->txd.flags = flags;
793         first->len = len;
794
795         return &first->txd;
796
797 err_desc_get:
798         dwc_desc_put(dwc, first);
799         return NULL;
800 }
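
/*
 * Block-splitting arithmetic used above, for reference: assuming e.g.
 * src_width = 2 (32-bit items) and block_size = 4095, each lli moves at
 * most 4095 << 2 = 16380 bytes, so a 64 KiB copy needs five descriptors
 * chained through lli.llp, with only the last one raising an interrupt
 * (when DMA_PREP_INTERRUPT is set).
 */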
801
802 static struct dma_async_tx_descriptor *
803 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
804                 unsigned int sg_len, enum dma_transfer_direction direction,
805                 unsigned long flags, void *context)
806 {
807         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
808         struct dma_slave_config *sconfig = &dwc->dma_sconfig;
809         struct dw_desc          *prev;
810         struct dw_desc          *first;
811         u32                     ctllo;
812         dma_addr_t              reg;
813         unsigned int            reg_width;
814         unsigned int            mem_width;
815         unsigned int            data_width;
816         unsigned int            i;
817         struct scatterlist      *sg;
818         size_t                  total_len = 0;
819
820         dev_vdbg(chan2dev(chan), "%s\n", __func__);
821
822         if (unlikely(!is_slave_direction(direction) || !sg_len))
823                 return NULL;
824
825         dwc->direction = direction;
826
827         prev = first = NULL;
828
829         switch (direction) {
830         case DMA_MEM_TO_DEV:
831                 reg_width = __fls(sconfig->dst_addr_width);
832                 reg = sconfig->dst_addr;
833                 ctllo = (DWC_DEFAULT_CTLLO(chan)
834                                 | DWC_CTLL_DST_WIDTH(reg_width)
835                                 | DWC_CTLL_DST_FIX
836                                 | DWC_CTLL_SRC_INC);
837
838                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
839                         DWC_CTLL_FC(DW_DMA_FC_D_M2P);
840
841                 data_width = dwc_get_data_width(chan, SRC_MASTER);
842
843                 for_each_sg(sgl, sg, sg_len, i) {
844                         struct dw_desc  *desc;
845                         u32             len, dlen, mem;
846
847                         mem = sg_dma_address(sg);
848                         len = sg_dma_len(sg);
849
850                         mem_width = min_t(unsigned int,
851                                           data_width, dwc_fast_fls(mem | len));
852
853 slave_sg_todev_fill_desc:
854                         desc = dwc_desc_get(dwc);
855                         if (!desc) {
856                                 dev_err(chan2dev(chan),
857                                         "not enough descriptors available\n");
858                                 goto err_desc_get;
859                         }
860
861                         desc->lli.sar = mem;
862                         desc->lli.dar = reg;
863                         desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
864                         if ((len >> mem_width) > dwc->block_size) {
865                                 dlen = dwc->block_size << mem_width;
866                                 mem += dlen;
867                                 len -= dlen;
868                         } else {
869                                 dlen = len;
870                                 len = 0;
871                         }
872
873                         desc->lli.ctlhi = dlen >> mem_width;
874
875                         if (!first) {
876                                 first = desc;
877                         } else {
878                                 prev->lli.llp = desc->txd.phys;
879                                 dma_sync_single_for_device(chan2parent(chan),
880                                                 prev->txd.phys,
881                                                 sizeof(prev->lli),
882                                                 DMA_TO_DEVICE);
883                                 list_add_tail(&desc->desc_node,
884                                                 &first->tx_list);
885                         }
886                         prev = desc;
887                         total_len += dlen;
888
889                         if (len)
890                                 goto slave_sg_todev_fill_desc;
891                 }
892                 break;
893         case DMA_DEV_TO_MEM:
894                 reg_width = __fls(sconfig->src_addr_width);
895                 reg = sconfig->src_addr;
896                 ctllo = (DWC_DEFAULT_CTLLO(chan)
897                                 | DWC_CTLL_SRC_WIDTH(reg_width)
898                                 | DWC_CTLL_DST_INC
899                                 | DWC_CTLL_SRC_FIX);
900
901                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
902                         DWC_CTLL_FC(DW_DMA_FC_D_P2M);
903
904                 data_width = dwc_get_data_width(chan, DST_MASTER);
905
906                 for_each_sg(sgl, sg, sg_len, i) {
907                         struct dw_desc  *desc;
908                         u32             len, dlen, mem;
909
910                         mem = sg_dma_address(sg);
911                         len = sg_dma_len(sg);
912
913                         mem_width = min_t(unsigned int,
914                                           data_width, dwc_fast_fls(mem | len));
915
916 slave_sg_fromdev_fill_desc:
917                         desc = dwc_desc_get(dwc);
918                         if (!desc) {
919                                 dev_err(chan2dev(chan),
920                                                 "not enough descriptors available\n");
921                                 goto err_desc_get;
922                         }
923
924                         desc->lli.sar = reg;
925                         desc->lli.dar = mem;
926                         desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
927                         if ((len >> reg_width) > dwc->block_size) {
928                                 dlen = dwc->block_size << reg_width;
929                                 mem += dlen;
930                                 len -= dlen;
931                         } else {
932                                 dlen = len;
933                                 len = 0;
934                         }
935                         desc->lli.ctlhi = dlen >> reg_width;
936
937                         if (!first) {
938                                 first = desc;
939                         } else {
940                                 prev->lli.llp = desc->txd.phys;
941                                 dma_sync_single_for_device(chan2parent(chan),
942                                                 prev->txd.phys,
943                                                 sizeof(prev->lli),
944                                                 DMA_TO_DEVICE);
945                                 list_add_tail(&desc->desc_node,
946                                                 &first->tx_list);
947                         }
948                         prev = desc;
949                         total_len += dlen;
950
951                         if (len)
952                                 goto slave_sg_fromdev_fill_desc;
953                 }
954                 break;
955         default:
956                 return NULL;
957         }
958
959         if (flags & DMA_PREP_INTERRUPT)
960                 /* Trigger interrupt after last block */
961                 prev->lli.ctllo |= DWC_CTLL_INT_EN;
962
963         prev->lli.llp = 0;
964         dma_sync_single_for_device(chan2parent(chan),
965                         prev->txd.phys, sizeof(prev->lli),
966                         DMA_TO_DEVICE);
967
968         first->len = total_len;
969
970         return &first->txd;
971
972 err_desc_get:
973         dwc_desc_put(dwc, first);
974         return NULL;
975 }
976
977 /*
978  * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
979  * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
980  *
981  * NOTE: burst size 2 is not supported by the controller.
982  *
983  * This is done by finding the most significant set bit: fls(n) - 2 (e.g. fls(16) - 2 = 3).
984  */
985 static inline void convert_burst(u32 *maxburst)
986 {
987         if (*maxburst > 1)
988                 *maxburst = fls(*maxburst) - 2;
989         else
990                 *maxburst = 0;
991 }
992
993 static int
994 set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
995 {
996         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
997
998         /* Check if chan will be configured for slave transfers */
999         if (!is_slave_direction(sconfig->direction))
1000                 return -EINVAL;
1001
1002         memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
1003         dwc->direction = sconfig->direction;
1004
1005         convert_burst(&dwc->dma_sconfig.src_maxburst);
1006         convert_burst(&dwc->dma_sconfig.dst_maxburst);
1007
1008         return 0;
1009 }
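
/*
 * For illustration, a slave client reaches set_runtime_config() through
 * the generic dmaengine wrapper (the peripheral FIFO address below is
 * hypothetical); a dst_maxburst of 16 ends up as 3 after convert_burst():
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= per_fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */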
1010
1011 static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
1012 {
1013         u32 cfglo = channel_readl(dwc, CFG_LO);
1014
1015         channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
1016         while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
1017                 cpu_relax();
1018
1019         dwc->paused = true;
1020 }
1021
1022 static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
1023 {
1024         u32 cfglo = channel_readl(dwc, CFG_LO);
1025
1026         channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
1027
1028         dwc->paused = false;
1029 }
1030
1031 static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1032                        unsigned long arg)
1033 {
1034         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1035         struct dw_dma           *dw = to_dw_dma(chan->device);
1036         struct dw_desc          *desc, *_desc;
1037         unsigned long           flags;
1038         LIST_HEAD(list);
1039
1040         if (cmd == DMA_PAUSE) {
1041                 spin_lock_irqsave(&dwc->lock, flags);
1042
1043                 dwc_chan_pause(dwc);
1044
1045                 spin_unlock_irqrestore(&dwc->lock, flags);
1046         } else if (cmd == DMA_RESUME) {
1047                 if (!dwc->paused)
1048                         return 0;
1049
1050                 spin_lock_irqsave(&dwc->lock, flags);
1051
1052                 dwc_chan_resume(dwc);
1053
1054                 spin_unlock_irqrestore(&dwc->lock, flags);
1055         } else if (cmd == DMA_TERMINATE_ALL) {
1056                 spin_lock_irqsave(&dwc->lock, flags);
1057
1058                 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
1059
1060                 dwc_chan_disable(dw, dwc);
1061
1062                 dwc_chan_resume(dwc);
1063
1064                 /* active_list entries will end up before queued entries */
1065                 list_splice_init(&dwc->queue, &list);
1066                 list_splice_init(&dwc->active_list, &list);
1067
1068                 spin_unlock_irqrestore(&dwc->lock, flags);
1069
1070                 /* Flush all pending and queued descriptors */
1071                 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1072                         dwc_descriptor_complete(dwc, desc, false);
1073         } else if (cmd == DMA_SLAVE_CONFIG) {
1074                 return set_runtime_config(chan, (struct dma_slave_config *)arg);
1075         } else {
1076                 return -ENXIO;
1077         }
1078
1079         return 0;
1080 }
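
/*
 * Clients do not call dwc_control() directly; the dmaengine wrappers end
 * up here via chan->device->device_control, roughly:
 *
 *	dmaengine_pause(chan);			DMA_PAUSE
 *	dmaengine_resume(chan);			DMA_RESUME
 *	dmaengine_terminate_all(chan);		DMA_TERMINATE_ALL
 *	dmaengine_slave_config(chan, &cfg);	DMA_SLAVE_CONFIG
 *
 * Note that DMA_TERMINATE_ALL also calls dwc_chan_resume(), so a channel
 * that was left suspended by an earlier DMA_PAUSE does not keep the
 * CH_SUSP bit set after termination.
 */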
1081
1082 static enum dma_status
1083 dwc_tx_status(struct dma_chan *chan,
1084               dma_cookie_t cookie,
1085               struct dma_tx_state *txstate)
1086 {
1087         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1088         enum dma_status         ret;
1089
1090         ret = dma_cookie_status(chan, cookie, txstate);
1091         if (ret != DMA_SUCCESS) {
1092                 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1093
1094                 ret = dma_cookie_status(chan, cookie, txstate);
1095         }
1096
1097         if (ret != DMA_SUCCESS)
1098                 dma_set_residue(txstate, dwc_first_active(dwc)->len);
1099
1100         if (dwc->paused)
1101                 return DMA_PAUSED;
1102
1103         return ret;
1104 }
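
/*
 * A client polls completion with the standard helper (sketch only):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *
 * DMA_PAUSED is reported while the channel is suspended; for a transfer
 * still in flight this driver reports the length of the first active
 * descriptor as the residue.
 */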
1105
1106 static void dwc_issue_pending(struct dma_chan *chan)
1107 {
1108         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1109
1110         if (!list_empty(&dwc->queue))
1111                 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1112 }
1113
1114 static int dwc_alloc_chan_resources(struct dma_chan *chan)
1115 {
1116         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1117         struct dw_dma           *dw = to_dw_dma(chan->device);
1118         struct dw_desc          *desc;
1119         int                     i;
1120         unsigned long           flags;
1121         int                     ret;
1122
1123         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1124
1125         /* ASSERT:  channel is idle */
1126         if (dma_readl(dw, CH_EN) & dwc->mask) {
1127                 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1128                 return -EIO;
1129         }
1130
1131         dma_cookie_init(chan);
1132
1133         /*
1134          * NOTE: some controllers may have additional features that we
1135          * need to initialize here, like "scatter-gather" (which
1136          * doesn't mean what you think it means), and status writeback.
1137          */
1138
1139         spin_lock_irqsave(&dwc->lock, flags);
1140         i = dwc->descs_allocated;
1141         while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
1142                 spin_unlock_irqrestore(&dwc->lock, flags);
1143
1144                 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
1145                 if (!desc)
1146                         goto err_desc_alloc;
1147
1148                 INIT_LIST_HEAD(&desc->tx_list);
1149                 dma_async_tx_descriptor_init(&desc->txd, chan);
1150                 desc->txd.tx_submit = dwc_tx_submit;
1151                 desc->txd.flags = DMA_CTRL_ACK;
1152                 desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
1153                                 sizeof(desc->lli), DMA_TO_DEVICE);
1154                 ret = dma_mapping_error(chan2parent(chan), desc->txd.phys);
1155                 if (ret)
1156                         goto err_desc_alloc;
1157
1158                 dwc_desc_put(dwc, desc);
1159
1160                 spin_lock_irqsave(&dwc->lock, flags);
1161                 i = ++dwc->descs_allocated;
1162         }
1163
1164         spin_unlock_irqrestore(&dwc->lock, flags);
1165
1166         dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1167
1168         return i;
1169
1170 err_desc_alloc:
1171         kfree(desc);
1172
1173         dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
1174
1175         return i;
1176 }
1177
1178 static void dwc_free_chan_resources(struct dma_chan *chan)
1179 {
1180         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1181         struct dw_dma           *dw = to_dw_dma(chan->device);
1182         struct dw_desc          *desc, *_desc;
1183         unsigned long           flags;
1184         LIST_HEAD(list);
1185
1186         dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1187                         dwc->descs_allocated);
1188
1189         /* ASSERT:  channel is idle */
1190         BUG_ON(!list_empty(&dwc->active_list));
1191         BUG_ON(!list_empty(&dwc->queue));
1192         BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1193
1194         spin_lock_irqsave(&dwc->lock, flags);
1195         list_splice_init(&dwc->free_list, &list);
1196         dwc->descs_allocated = 0;
1197         dwc->initialized = false;
1198
1199         /* Disable interrupts */
1200         channel_clear_bit(dw, MASK.XFER, dwc->mask);
1201         channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1202
1203         spin_unlock_irqrestore(&dwc->lock, flags);
1204
1205         list_for_each_entry_safe(desc, _desc, &list, desc_node) {
1206                 dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1207                 dma_unmap_single(chan2parent(chan), desc->txd.phys,
1208                                 sizeof(desc->lli), DMA_TO_DEVICE);
1209                 kfree(desc);
1210         }
1211
1212         dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1213 }
1214
1215 bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
1216 {
1217         struct dw_dma *dw = to_dw_dma(chan->device);
1218         static struct dw_dma *last_dw;
1219         static char *last_bus_id;
1220         int i = -1;
1221
1222         /*
1223          * The dmaengine framework calls this routine for every channel of
1224          * every DMA controller until true is returned. If the 'param' bus_id
1225          * is not registered with a given controller (dw), there is no need
1226          * to run the search below for the remaining channels of that dw.
1227          *
1228          * This is handled by caching the parameters of the last failure: if
1229          * dw and param match the previous attempt, i.e. we are retrying the
1230          * same dw with a different channel, return false right away.
1231          */
1232         if ((last_dw == dw) && (last_bus_id == param))
1233                 return false;
1234         /*
1235          * Return true:
1236          * - if dw_dma's platform data carries no slave info, in which case
1237          *   any DMA controller is fine for the transfer, or
1238          * - if param is NULL.
1239          */
1240         if (!dw->sd || !param)
1241                 return true;
1242
1243         while (++i < dw->sd_count) {
1244                 if (!strcmp(dw->sd[i].bus_id, param)) {
1245                         chan->private = &dw->sd[i];
1246                         last_dw = NULL;
1247                         last_bus_id = NULL;
1248
1249                         return true;
1250                 }
1251         }
1252
1253         last_dw = dw;
1254         last_bus_id = param;
1255         return false;
1256 }
1257 EXPORT_SYMBOL(dw_dma_generic_filter);
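
/*
 * Typical use from a client driver, with a made-up "uart0_tx" bus_id that
 * would have to match one of the dw_dma_slave entries in the platform
 * data (sketch only):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_generic_filter, "uart0_tx");
 *	if (!chan)
 *		return -ENODEV;
 */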
1258
1259 /* --------------------- Cyclic DMA API extensions -------------------- */
1260
1261 /**
1262  * dw_dma_cyclic_start - start the cyclic DMA transfer
1263  * @chan: the DMA channel to start
1264  *
1265  * Must be called with soft interrupts disabled. Returns zero on success or
1266  * -errno on failure.
1267  */
1268 int dw_dma_cyclic_start(struct dma_chan *chan)
1269 {
1270         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1271         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1272         unsigned long           flags;
1273
1274         if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1275                 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
1276                 return -ENODEV;
1277         }
1278
1279         spin_lock_irqsave(&dwc->lock, flags);
1280
1281         /* assert channel is idle */
1282         if (dma_readl(dw, CH_EN) & dwc->mask) {
1283                 dev_err(chan2dev(&dwc->chan),
1284                         "BUG: Attempted to start non-idle channel\n");
1285                 dwc_dump_chan_regs(dwc);
1286                 spin_unlock_irqrestore(&dwc->lock, flags);
1287                 return -EBUSY;
1288         }
1289
1290         dma_writel(dw, CLEAR.ERROR, dwc->mask);
1291         dma_writel(dw, CLEAR.XFER, dwc->mask);
1292
1293         /* setup DMAC channel registers */
1294         channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
1295         channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
1296         channel_writel(dwc, CTL_HI, 0);
1297
1298         channel_set_bit(dw, CH_EN, dwc->mask);
1299
1300         spin_unlock_irqrestore(&dwc->lock, flags);
1301
1302         return 0;
1303 }
1304 EXPORT_SYMBOL(dw_dma_cyclic_start);
1305
1306 /**
1307  * dw_dma_cyclic_stop - stop the cyclic DMA transfer
1308  * @chan: the DMA channel to stop
1309  *
1310  * Must be called with soft interrupts disabled.
1311  */
1312 void dw_dma_cyclic_stop(struct dma_chan *chan)
1313 {
1314         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1315         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1316         unsigned long           flags;
1317
1318         spin_lock_irqsave(&dwc->lock, flags);
1319
1320         dwc_chan_disable(dw, dwc);
1321
1322         spin_unlock_irqrestore(&dwc->lock, flags);
1323 }
1324 EXPORT_SYMBOL(dw_dma_cyclic_stop);
1325
1326 /**
1327  * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
1328  * @chan: the DMA channel to prepare
1329  * @buf_addr: physical DMA address where the buffer starts
1330  * @buf_len: total number of bytes for the entire buffer
1331  * @period_len: number of bytes for each period
1332  * @direction: transfer direction, to or from device
1333  *
1334  * Must be called before trying to start the transfer. Returns a valid struct
1335  * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
1336  */
1337 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1338                 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
1339                 enum dma_transfer_direction direction)
1340 {
1341         struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
1342         struct dma_slave_config         *sconfig = &dwc->dma_sconfig;
1343         struct dw_cyclic_desc           *cdesc;
1344         struct dw_cyclic_desc           *retval = NULL;
1345         struct dw_desc                  *desc;
1346         struct dw_desc                  *last = NULL;
1347         unsigned long                   was_cyclic;
1348         unsigned int                    reg_width;
1349         unsigned int                    periods;
1350         unsigned int                    i;
1351         unsigned long                   flags;
1352
1353         spin_lock_irqsave(&dwc->lock, flags);
1354         if (dwc->nollp) {
1355                 spin_unlock_irqrestore(&dwc->lock, flags);
1356                 dev_dbg(chan2dev(&dwc->chan),
1357                                 "channel doesn't support LLP transfers\n");
1358                 return ERR_PTR(-EINVAL);
1359         }
1360
1361         if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
1362                 spin_unlock_irqrestore(&dwc->lock, flags);
1363                 dev_dbg(chan2dev(&dwc->chan),
1364                                 "queue and/or active list are not empty\n");
1365                 return ERR_PTR(-EBUSY);
1366         }
1367
1368         was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1369         spin_unlock_irqrestore(&dwc->lock, flags);
1370         if (was_cyclic) {
1371                 dev_dbg(chan2dev(&dwc->chan),
1372                                 "channel already prepared for cyclic DMA\n");
1373                 return ERR_PTR(-EBUSY);
1374         }
1375
1376         retval = ERR_PTR(-EINVAL);
1377
1378         if (unlikely(!is_slave_direction(direction)))
1379                 goto out_err;
1380
1381         dwc->direction = direction;
1382
1383         if (direction == DMA_MEM_TO_DEV)
1384                 reg_width = __ffs(sconfig->dst_addr_width);
1385         else
1386                 reg_width = __ffs(sconfig->src_addr_width);
1387
1388         periods = buf_len / period_len;
1389
1390         /* Check for too big/unaligned periods and unaligned DMA buffer. */
1391         if (period_len > (dwc->block_size << reg_width))
1392                 goto out_err;
1393         if (unlikely(period_len & ((1 << reg_width) - 1)))
1394                 goto out_err;
1395         if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1396                 goto out_err;
1397
1398         retval = ERR_PTR(-ENOMEM);
1399
1400         if (periods > NR_DESCS_PER_CHANNEL)
1401                 goto out_err;
1402
1403         cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1404         if (!cdesc)
1405                 goto out_err;
1406
1407         cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1408         if (!cdesc->desc)
1409                 goto out_err_alloc;
1410
1411         for (i = 0; i < periods; i++) {
1412                 desc = dwc_desc_get(dwc);
1413                 if (!desc)
1414                         goto out_err_desc_get;
1415
1416                 switch (direction) {
1417                 case DMA_MEM_TO_DEV:
1418                         desc->lli.dar = sconfig->dst_addr;
1419                         desc->lli.sar = buf_addr + (period_len * i);
1420                         desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1421                                         | DWC_CTLL_DST_WIDTH(reg_width)
1422                                         | DWC_CTLL_SRC_WIDTH(reg_width)
1423                                         | DWC_CTLL_DST_FIX
1424                                         | DWC_CTLL_SRC_INC
1425                                         | DWC_CTLL_INT_EN);
1426
1427                         desc->lli.ctllo |= sconfig->device_fc ?
1428                                 DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1429                                 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
1430
1431                         break;
1432                 case DMA_DEV_TO_MEM:
1433                         desc->lli.dar = buf_addr + (period_len * i);
1434                         desc->lli.sar = sconfig->src_addr;
1435                         desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1436                                         | DWC_CTLL_SRC_WIDTH(reg_width)
1437                                         | DWC_CTLL_DST_WIDTH(reg_width)
1438                                         | DWC_CTLL_DST_INC
1439                                         | DWC_CTLL_SRC_FIX
1440                                         | DWC_CTLL_INT_EN);
1441
1442                         desc->lli.ctllo |= sconfig->device_fc ?
1443                                 DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1444                                 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
1445
1446                         break;
1447                 default:
1448                         break;
1449                 }
1450
1451                 desc->lli.ctlhi = (period_len >> reg_width);
1452                 cdesc->desc[i] = desc;
1453
1454                 if (last) {
1455                         last->lli.llp = desc->txd.phys;
1456                         dma_sync_single_for_device(chan2parent(chan),
1457                                         last->txd.phys, sizeof(last->lli),
1458                                         DMA_TO_DEVICE);
1459                 }
1460
1461                 last = desc;
1462         }
1463
1464         /* Let's make a cyclic list */
1465         last->lli.llp = cdesc->desc[0]->txd.phys;
1466         dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
1467                         sizeof(last->lli), DMA_TO_DEVICE);
1468
1469         dev_dbg(chan2dev(&dwc->chan),
1470                         "cyclic prepared buf 0x%llx len %zu period %zu periods %d\n",
1471                         (unsigned long long)buf_addr, buf_len, period_len, periods);
1472
1473         cdesc->periods = periods;
1474         dwc->cdesc = cdesc;
1475
1476         return cdesc;
1477
1478 out_err_desc_get:
1479         while (i--)
1480                 dwc_desc_put(dwc, cdesc->desc[i]);
1481 out_err_alloc:
        kfree(cdesc->desc);
1482         kfree(cdesc);
1483 out_err:
1484         clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1485         return (struct dw_cyclic_desc *)retval;
1486 }
1487 EXPORT_SYMBOL(dw_dma_cyclic_prep);
1488
1489 /**
1490  * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
1491  * @chan: the DMA channel to free
1492  */
1493 void dw_dma_cyclic_free(struct dma_chan *chan)
1494 {
1495         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1496         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1497         struct dw_cyclic_desc   *cdesc = dwc->cdesc;
1498         int                     i;
1499         unsigned long           flags;
1500
1501         dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
1502
1503         if (!cdesc)
1504                 return;
1505
1506         spin_lock_irqsave(&dwc->lock, flags);
1507
1508         dwc_chan_disable(dw, dwc);
1509
1510         dma_writel(dw, CLEAR.ERROR, dwc->mask);
1511         dma_writel(dw, CLEAR.XFER, dwc->mask);
1512
1513         spin_unlock_irqrestore(&dwc->lock, flags);
1514
1515         for (i = 0; i < cdesc->periods; i++)
1516                 dwc_desc_put(dwc, cdesc->desc[i]);
1517
1518         kfree(cdesc->desc);
1519         kfree(cdesc);
1520
1521         clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1522 }
1523 EXPORT_SYMBOL(dw_dma_cyclic_free);
1524
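/*
 * Illustrative use of the cyclic API from a client driver. This is only
 * a sketch, not code from this file: it assumes the channel has already
 * been requested through dmaengine and configured with a suitable
 * dma_slave_config, and buf_dma, PERIOD_LEN, my_period_done and my_dev
 * are hypothetical names. dw_dma_cyclic_start()/dw_dma_cyclic_stop() are
 * the companion helpers exported by this driver, and the optional
 * period_callback fields are invoked once per completed period:
 *
 *	struct dw_cyclic_desc *cdesc;
 *	int ret;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, 4 * PERIOD_LEN,
 *				   PERIOD_LEN, DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *
 *	cdesc->period_callback = my_period_done;
 *	cdesc->period_callback_param = my_dev;
 *
 *	ret = dw_dma_cyclic_start(chan);
 *	if (ret) {
 *		dw_dma_cyclic_free(chan);
 *		return ret;
 *	}
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */
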
1525 /*----------------------------------------------------------------------*/
1526
1527 static void dw_dma_off(struct dw_dma *dw)
1528 {
1529         int i;
1530
1531         dma_writel(dw, CFG, 0);
1532
1533         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1534         channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1535         channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1536         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1537
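        /* Wait for the controller to actually report itself disabled. */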
1538         while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1539                 cpu_relax();
1540
1541         for (i = 0; i < dw->dma.chancnt; i++)
1542                 dw->chan[i].initialized = false;
1543 }
1544
1545 #ifdef CONFIG_OF
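/*
 * Build platform data from an optional device tree node. The property
 * names below are exactly the ones this parser reads; the fragment is
 * only an illustrative sketch (node name, address and values are made
 * up, not taken from a real board file):
 *
 *	dma@fc000000 {
 *		compatible = "snps,dma-spear1340";
 *		nr_channels = <8>;
 *		chan_allocation_order = <1>;
 *		chan_priority = <1>;
 *		block_size = <0xfff>;
 *		nr_masters = <2>;
 *		data_width = <3 3>;
 *	};
 *
 * An optional slave_info node may carry one child per slave with
 * bus_id, cfg_hi, cfg_lo, src_master and dst_master properties.
 */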
1546 static struct dw_dma_platform_data *
1547 dw_dma_parse_dt(struct platform_device *pdev)
1548 {
1549         struct device_node *sn, *cn, *np = pdev->dev.of_node;
1550         struct dw_dma_platform_data *pdata;
1551         struct dw_dma_slave *sd;
1552         u32 tmp, arr[4];
1553
1554         if (!np) {
1555                 dev_err(&pdev->dev, "Missing DT data\n");
1556                 return NULL;
1557         }
1558
1559         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1560         if (!pdata)
1561                 return NULL;
1562
1563         if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels))
1564                 return NULL;
1565
1566         if (of_property_read_bool(np, "is_private"))
1567                 pdata->is_private = true;
1568
1569         if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
1570                 pdata->chan_allocation_order = (unsigned char)tmp;
1571
1572         if (!of_property_read_u32(np, "chan_priority", &tmp))
1573                 pdata->chan_priority = tmp;
1574
1575         if (!of_property_read_u32(np, "block_size", &tmp))
1576                 pdata->block_size = tmp;
1577
1578         if (!of_property_read_u32(np, "nr_masters", &tmp)) {
1579                 if (tmp > 4)
1580                         return NULL;
1581
1582                 pdata->nr_masters = tmp;
1583         }
1584
1585         if (!of_property_read_u32_array(np, "data_width", arr,
1586                                 pdata->nr_masters))
1587                 for (tmp = 0; tmp < pdata->nr_masters; tmp++)
1588                         pdata->data_width[tmp] = arr[tmp];
1589
1590         /* parse slave data */
1591         sn = of_find_node_by_name(np, "slave_info");
1592         if (!sn)
1593                 return pdata;
1594
1595         /* calculate number of slaves */
1596         tmp = of_get_child_count(sn);
1597         if (!tmp)
1598                 return NULL;
1599
1600         sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL);
1601         if (!sd)
1602                 return NULL;
1603
1604         pdata->sd = sd;
1605         pdata->sd_count = tmp;
1606
1607         for_each_child_of_node(sn, cn) {
1608                 sd->dma_dev = &pdev->dev;
1609                 of_property_read_string(cn, "bus_id", &sd->bus_id);
1610                 of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi);
1611                 of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo);
1612                 if (!of_property_read_u32(cn, "src_master", &tmp))
1613                         sd->src_master = tmp;
1614
1615                 if (!of_property_read_u32(cn, "dst_master", &tmp))
1616                         sd->dst_master = tmp;
1617                 sd++;
1618         }
1619
1620         return pdata;
1621 }
1622 #else
1623 static inline struct dw_dma_platform_data *
1624 dw_dma_parse_dt(struct platform_device *pdev)
1625 {
1626         return NULL;
1627 }
1628 #endif
1629
1630 static int dw_probe(struct platform_device *pdev)
1631 {
1632         struct dw_dma_platform_data *pdata;
1633         struct resource         *io;
1634         struct dw_dma           *dw;
1635         size_t                  size;
1636         void __iomem            *regs;
1637         bool                    autocfg;
1638         unsigned int            dw_params;
1639         unsigned int            nr_channels;
1640         unsigned int            max_blk_size = 0;
1641         int                     irq;
1642         int                     err;
1643         int                     i;
1644
1645         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1646         if (!io)
1647                 return -EINVAL;
1648
1649         irq = platform_get_irq(pdev, 0);
1650         if (irq < 0)
1651                 return irq;
1652
1653         regs = devm_request_and_ioremap(&pdev->dev, io);
1654         if (!regs)
1655                 return -EBUSY;
1656
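        /*
         * The controller can optionally be synthesized with encoded
         * parameter registers; if DW_PARAMS says they are present, most
         * of the configuration can be read back from the hardware
         * instead of being supplied via platform data or DT.
         */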
1657         dw_params = dma_read_byaddr(regs, DW_PARAMS);
1658         autocfg = dw_params >> DW_PARAMS_EN & 0x1;
1659
1660         pdata = dev_get_platdata(&pdev->dev);
1661         if (!pdata)
1662                 pdata = dw_dma_parse_dt(pdev);
1663
1664         if (!pdata && autocfg) {
1665                 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1666                 if (!pdata)
1667                         return -ENOMEM;
1668
1669                 /* Fill platform data with the default values */
1670                 pdata->is_private = true;
1671                 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1672                 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
1673         } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
1674                 return -EINVAL;
1675
1676         if (autocfg)
1677                 nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
1678         else
1679                 nr_channels = pdata->nr_channels;
1680
1681         size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
1682         dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
1683         if (!dw)
1684                 return -ENOMEM;
1685
1686         dw->clk = devm_clk_get(&pdev->dev, "hclk");
1687         if (IS_ERR(dw->clk))
1688                 return PTR_ERR(dw->clk);
1689         clk_prepare_enable(dw->clk);
1690
1691         dw->regs = regs;
1692         dw->sd = pdata->sd;
1693         dw->sd_count = pdata->sd_count;
1694
1695         /* get hardware configuration parameters */
1696         if (autocfg) {
1697                 max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
1698
1699                 dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1700                 for (i = 0; i < dw->nr_masters; i++) {
1701                         dw->data_width[i] =
1702                                 (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
1703                 }
1704         } else {
1705                 dw->nr_masters = pdata->nr_masters;
1706                 memcpy(dw->data_width, pdata->data_width, sizeof(pdata->data_width));
1707         }
1708
1709         /* Calculate all channel mask before DMA setup */
1710         dw->all_chan_mask = (1 << nr_channels) - 1;
1711
1712         /* force dma off, just in case */
1713         dw_dma_off(dw);
1714
1715         /* Disable BLOCK interrupts as well; only the cyclic API uses them */
1716         channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1717
1718         err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
1719                                "dw_dmac", dw);
1720         if (err) {
                clk_disable_unprepare(dw->clk);
1721                 return err;
        }
1722
1723         platform_set_drvdata(pdev, dw);
1724
1725         tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1726
1727         INIT_LIST_HEAD(&dw->dma.channels);
1728         for (i = 0; i < nr_channels; i++) {
1729                 struct dw_dma_chan      *dwc = &dw->chan[i];
1730                 int                     r = nr_channels - i - 1;
1731
1732                 dwc->chan.device = &dw->dma;
1733                 dma_cookie_init(&dwc->chan);
1734                 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1735                         list_add_tail(&dwc->chan.device_node,
1736                                         &dw->dma.channels);
1737                 else
1738                         list_add(&dwc->chan.device_node, &dw->dma.channels);
1739
1740                 /* 7 is highest priority & 0 is lowest. */
1741                 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1742                         dwc->priority = r;
1743                 else
1744                         dwc->priority = i;
1745
1746                 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1747                 spin_lock_init(&dwc->lock);
1748                 dwc->mask = 1 << i;
1749
1750                 INIT_LIST_HEAD(&dwc->active_list);
1751                 INIT_LIST_HEAD(&dwc->queue);
1752                 INIT_LIST_HEAD(&dwc->free_list);
1753
1754                 channel_clear_bit(dw, CH_EN, dwc->mask);
1755
1756                 dwc->direction = DMA_TRANS_NONE;
1757
1758                 /* hardware configuration */
1759                 if (autocfg) {
1760                         unsigned int dwc_params;
1761
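                        /*
                         * The per-channel parameter words appear to be
                         * stored in reverse channel order, hence the use
                         * of r rather than i as the index here.
                         */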
1762                         dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
1763                                                      DWC_PARAMS);
1764
1765                         /* Decode maximum block size for given channel. The
1766                          * stored 4 bit value represents blocks from 0x00 for 3
1767                          * up to 0x0a for 4095. */
1768                         dwc->block_size =
1769                                 (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
1770                         dwc->nollp =
1771                                 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1772                 } else {
1773                         dwc->block_size = pdata->block_size;
1774
1775                         /* Check if channel supports multi block transfer */
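                        /*
                         * Channels synthesized without LLP support
                         * hard-wire the LLP register to zero, so a
                         * nonzero write that reads back as zero means
                         * no multi-block transfers on this channel.
                         */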
1776                         channel_writel(dwc, LLP, 0xfffffffc);
1777                         dwc->nollp =
1778                                 (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
1779                         channel_writel(dwc, LLP, 0);
1780                 }
1781         }
1782
1783         /* Clear all interrupts on all channels. */
1784         dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1785         dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1786         dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1787         dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1788         dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1789
1790         dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1791         dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1792         if (pdata->is_private)
1793                 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1794         dw->dma.dev = &pdev->dev;
1795         dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1796         dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1797
1798         dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1799
1800         dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1801         dw->dma.device_control = dwc_control;
1802
1803         dw->dma.device_tx_status = dwc_tx_status;
1804         dw->dma.device_issue_pending = dwc_issue_pending;
1805
1806         dma_writel(dw, CFG, DW_CFG_DMA_EN);
1807
1808         dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
1809                  nr_channels);
1810
1811         dma_async_device_register(&dw->dma);
1812
1813         return 0;
1814 }
1815
1816 static int dw_remove(struct platform_device *pdev)
1817 {
1818         struct dw_dma           *dw = platform_get_drvdata(pdev);
1819         struct dw_dma_chan      *dwc, *_dwc;
1820
1821         dw_dma_off(dw);
1822         dma_async_device_unregister(&dw->dma);
1823
1824         tasklet_kill(&dw->tasklet);
1825
1826         list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1827                         chan.device_node) {
1828                 list_del(&dwc->chan.device_node);
1829                 channel_clear_bit(dw, CH_EN, dwc->mask);
1830         }
1831
1832         return 0;
1833 }
1834
1835 static void dw_shutdown(struct platform_device *pdev)
1836 {
1837         struct dw_dma   *dw = platform_get_drvdata(pdev);
1838
1839         dw_dma_off(dw);
1840         clk_disable_unprepare(dw->clk);
1841 }
1842
1843 static int dw_suspend_noirq(struct device *dev)
1844 {
1845         struct platform_device *pdev = to_platform_device(dev);
1846         struct dw_dma   *dw = platform_get_drvdata(pdev);
1847
1848         dw_dma_off(dw);
1849         clk_disable_unprepare(dw->clk);
1850
1851         return 0;
1852 }
1853
1854 static int dw_resume_noirq(struct device *dev)
1855 {
1856         struct platform_device *pdev = to_platform_device(dev);
1857         struct dw_dma   *dw = platform_get_drvdata(pdev);
1858
1859         clk_prepare_enable(dw->clk);
1860         dma_writel(dw, CFG, DW_CFG_DMA_EN);
1861
1862         return 0;
1863 }
1864
1865 static const struct dev_pm_ops dw_dev_pm_ops = {
1866         .suspend_noirq = dw_suspend_noirq,
1867         .resume_noirq = dw_resume_noirq,
1868         .freeze_noirq = dw_suspend_noirq,
1869         .thaw_noirq = dw_resume_noirq,
1870         .restore_noirq = dw_resume_noirq,
1871         .poweroff_noirq = dw_suspend_noirq,
1872 };
1873
1874 #ifdef CONFIG_OF
1875 static const struct of_device_id dw_dma_id_table[] = {
1876         { .compatible = "snps,dma-spear1340" },
1877         {}
1878 };
1879 MODULE_DEVICE_TABLE(of, dw_dma_id_table);
1880 #endif
1881
1882 static struct platform_driver dw_driver = {
1883         .probe          = dw_probe,
1884         .remove         = dw_remove,
1885         .shutdown       = dw_shutdown,
1886         .driver = {
1887                 .name   = "dw_dmac",
1888                 .pm     = &dw_dev_pm_ops,
1889                 .of_match_table = of_match_ptr(dw_dma_id_table),
1890         },
1891 };
1892
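/*
 * Register at subsys_initcall time so that the DMA channels are
 * available before the client drivers that depend on them are probed.
 */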
1893 static int __init dw_init(void)
1894 {
1895         return platform_driver_register(&dw_driver);
1896 }
1897 subsys_initcall(dw_init);
1898
1899 static void __exit dw_exit(void)
1900 {
1901         platform_driver_unregister(&dw_driver);
1902 }
1903 module_exit(dw_exit);
1904
1905 MODULE_LICENSE("GPL v2");
1906 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1907 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1908 MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");