drivers/dma/sa11x0-dma.c
1 /*
2  * SA11x0 DMAengine support
3  *
4  * Copyright (C) 2012 Russell King
5  *   Derived in part from arch/arm/mach-sa1100/dma.c,
6  *   Copyright (C) 2000, 2001 by Nicolas Pitre
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 #include <linux/sched.h>
13 #include <linux/device.h>
14 #include <linux/dmaengine.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <linux/sa11x0-dma.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23
24 #include "virt-dma.h"
25
26 #define NR_PHY_CHAN     6
27 #define DMA_ALIGN       3
28 #define DMA_MAX_SIZE    0x1fff
29 #define DMA_CHUNK_SIZE  0x1000
30
31 #define DMA_DDAR        0x00
32 #define DMA_DCSR_S      0x04
33 #define DMA_DCSR_C      0x08
34 #define DMA_DCSR_R      0x0c
35 #define DMA_DBSA        0x10
36 #define DMA_DBTA        0x14
37 #define DMA_DBSB        0x18
38 #define DMA_DBTB        0x1c
39 #define DMA_SIZE        0x20
40
41 #define DCSR_RUN        (1 << 0)
42 #define DCSR_IE         (1 << 1)
43 #define DCSR_ERROR      (1 << 2)
44 #define DCSR_DONEA      (1 << 3)
45 #define DCSR_STRTA      (1 << 4)
46 #define DCSR_DONEB      (1 << 5)
47 #define DCSR_STRTB      (1 << 6)
48 #define DCSR_BIU        (1 << 7)
49
50 #define DDAR_RW         (1 << 0)        /* 0 = W, 1 = R */
51 #define DDAR_E          (1 << 1)        /* 0 = LE, 1 = BE */
52 #define DDAR_BS         (1 << 2)        /* 0 = BS4, 1 = BS8 */
53 #define DDAR_DW         (1 << 3)        /* 0 = 8b, 1 = 16b */
54 #define DDAR_Ser0UDCTr  (0x0 << 4)
55 #define DDAR_Ser0UDCRc  (0x1 << 4)
56 #define DDAR_Ser1SDLCTr (0x2 << 4)
57 #define DDAR_Ser1SDLCRc (0x3 << 4)
58 #define DDAR_Ser1UARTTr (0x4 << 4)
59 #define DDAR_Ser1UARTRc (0x5 << 4)
60 #define DDAR_Ser2ICPTr  (0x6 << 4)
61 #define DDAR_Ser2ICPRc  (0x7 << 4)
62 #define DDAR_Ser3UARTTr (0x8 << 4)
63 #define DDAR_Ser3UARTRc (0x9 << 4)
64 #define DDAR_Ser4MCP0Tr (0xa << 4)
65 #define DDAR_Ser4MCP0Rc (0xb << 4)
66 #define DDAR_Ser4MCP1Tr (0xc << 4)
67 #define DDAR_Ser4MCP1Rc (0xd << 4)
68 #define DDAR_Ser4SSPTr  (0xe << 4)
69 #define DDAR_Ser4SSPRc  (0xf << 4)
70
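/*
 * A note on the register set above, as used by the rest of this driver:
 * each physical channel has one DDAR (device address and attribute
 * register) and two buffer descriptors, DBSA/DBTA (buffer A start address
 * and transfer count) and DBSB/DBTB (buffer B).  DCSR is accessed through
 * separate set (DMA_DCSR_S), clear (DMA_DCSR_C) and read (DMA_DCSR_R)
 * addresses.  The engine ping-pongs between buffers A and B; DCSR_BIU
 * indicates which buffer the hardware is currently working on, while
 * STRTA/STRTB and DONEA/DONEB track per-buffer start and completion state.
 */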
71 struct sa11x0_dma_sg {
72         u32                     addr;
73         u32                     len;
74 };
75
76 struct sa11x0_dma_desc {
77         struct virt_dma_desc    vd;
78
79         u32                     ddar;
80         size_t                  size;
81
82         unsigned                sglen;
83         struct sa11x0_dma_sg    sg[0];
84 };
85
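/*
 * sg[0] is a flexible array member: a descriptor is allocated in one shot
 * with room for all of its hardware segments, along the lines of what
 * sa11x0_dma_prep_slave_sg() does below:
 *
 *	txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
 */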
86 struct sa11x0_dma_phy;
87
88 struct sa11x0_dma_chan {
89         struct virt_dma_chan    vc;
90
91         /* protected by c->vc.lock */
92         struct sa11x0_dma_phy   *phy;
93         enum dma_status         status;
94
95         /* protected by d->lock */
96         struct list_head        node;
97
98         u32                     ddar;
99         const char              *name;
100 };
101
102 struct sa11x0_dma_phy {
103         void __iomem            *base;
104         struct sa11x0_dma_dev   *dev;
105         unsigned                num;
106
107         struct sa11x0_dma_chan  *vchan;
108
109         /* Protected by c->vc.lock */
110         unsigned                sg_load;
111         struct sa11x0_dma_desc  *txd_load;
112         unsigned                sg_done;
113         struct sa11x0_dma_desc  *txd_done;
114 #ifdef CONFIG_PM_SLEEP
115         u32                     dbs[2];
116         u32                     dbt[2];
117         u32                     dcsr;
118 #endif
119 };
120
121 struct sa11x0_dma_dev {
122         struct dma_device       slave;
123         void __iomem            *base;
124         spinlock_t              lock;
125         struct tasklet_struct   task;
126         struct list_head        chan_pending;
127         struct sa11x0_dma_phy   phy[NR_PHY_CHAN];
128 };
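/*
 * One virtual channel (sa11x0_dma_chan) is registered per entry of the
 * chan_desc[] table below, i.e. per DMA device request line, while only
 * NR_PHY_CHAN physical channels exist.  Virtual channels with work to do
 * queue themselves on chan_pending; the tasklet hands free physical
 * channels to pending virtual channels in FIFO order, and a physical
 * channel is released again once its virtual channel has no further
 * descriptors to load.
 */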
129
130 static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
131 {
132         return container_of(chan, struct sa11x0_dma_chan, vc.chan);
133 }
134
135 static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
136 {
137         return container_of(dmadev, struct sa11x0_dma_dev, slave);
138 }
139
140 static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
141 {
142         struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
143
144         return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
145 }
146
147 static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
148 {
149         kfree(container_of(vd, struct sa11x0_dma_desc, vd));
150 }
151
152 static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
153 {
154         list_del(&txd->vd.node);
155         p->txd_load = txd;
156         p->sg_load = 0;
157
158         dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
159                 p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
160 }
161
162 static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
163         struct sa11x0_dma_chan *c)
164 {
165         struct sa11x0_dma_desc *txd = p->txd_load;
166         struct sa11x0_dma_sg *sg;
167         void __iomem *base = p->base;
168         unsigned dbsx, dbtx;
169         u32 dcsr;
170
171         if (!txd)
172                 return;
173
174         dcsr = readl_relaxed(base + DMA_DCSR_R);
175
176         /* Don't try to load the next transfer if both buffers are started */
177         if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
178                 return;
179
180         if (p->sg_load == txd->sglen) {
181                 struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
182
183                 /*
184                  * We have reached the end of the current descriptor.
185                  * Peek at the next descriptor, and if compatible with
186                  * the current, start processing it.
187                  */
188                 if (txn && txn->ddar == txd->ddar) {
189                         txd = txn;
190                         sa11x0_dma_start_desc(p, txn);
191                 } else {
192                         p->txd_load = NULL;
193                         return;
194                 }
195         }
196
197         sg = &txd->sg[p->sg_load++];
198
199         /* Select buffer to load according to channel status */
200         if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
201             ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
202                 dbsx = DMA_DBSA;
203                 dbtx = DMA_DBTA;
204                 dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
205         } else {
206                 dbsx = DMA_DBSB;
207                 dbtx = DMA_DBTB;
208                 dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
209         }
210
211         writel_relaxed(sg->addr, base + dbsx);
212         writel_relaxed(sg->len, base + dbtx);
213         writel(dcsr, base + DMA_DCSR_S);
214
215         dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
216                 p->num, dcsr,
217                 'A' + (dbsx == DMA_DBSB), sg->addr,
218                 'A' + (dbtx == DMA_DBTB), sg->len);
219 }
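/*
 * Buffer selection in sa11x0_dma_start_sg(): buffer A is loaded either
 * when buffer B is in use and has been started (so A is idle), or when
 * buffer A is the current buffer but has not yet been started; in every
 * other case buffer B is loaded.  This keeps the idle half of the A/B
 * pair filled ahead of the hardware.
 */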
220
221 static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
222         struct sa11x0_dma_chan *c)
223 {
224         struct sa11x0_dma_desc *txd = p->txd_done;
225
226         if (++p->sg_done == txd->sglen) {
227                 vchan_cookie_complete(&txd->vd);
228
229                 p->sg_done = 0;
230                 p->txd_done = p->txd_load;
231
232                 if (!p->txd_done)
233                         tasklet_schedule(&p->dev->task);
234         }
235
236         sa11x0_dma_start_sg(p, c);
237 }
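/*
 * Completion model: each DONEA/DONEB interrupt retires exactly one
 * scatter entry of p->txd_done.  The descriptor's cookie completes once
 * all of its entries are done, and sa11x0_dma_start_sg() is called
 * straight away so the buffer that just finished is reloaded while the
 * other buffer is (normally) still transferring.  If nothing further is
 * loaded, the tasklet is scheduled so the physical channel can be
 * reassigned.
 */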
238
239 static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
240 {
241         struct sa11x0_dma_phy *p = dev_id;
242         struct sa11x0_dma_dev *d = p->dev;
243         struct sa11x0_dma_chan *c;
244         u32 dcsr;
245
246         dcsr = readl_relaxed(p->base + DMA_DCSR_R);
247         if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
248                 return IRQ_NONE;
249
250         /* Clear reported status bits */
251         writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
252                 p->base + DMA_DCSR_C);
253
254         dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);
255
256         if (dcsr & DCSR_ERROR) {
257                 dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
258                         p->num, dcsr,
259                         readl_relaxed(p->base + DMA_DDAR),
260                         readl_relaxed(p->base + DMA_DBSA),
261                         readl_relaxed(p->base + DMA_DBTA),
262                         readl_relaxed(p->base + DMA_DBSB),
263                         readl_relaxed(p->base + DMA_DBTB));
264         }
265
266         c = p->vchan;
267         if (c) {
268                 unsigned long flags;
269
270                 spin_lock_irqsave(&c->vc.lock, flags);
271                 /*
272                  * Now that we're holding the lock, check that the vchan
273                  * really is associated with this pchan before touching the
274                  * hardware.  This should always succeed, because we won't
275                  * change p->vchan or c->phy while the channel is actively
276                  * transferring.
277                  */
278                 if (c->phy == p) {
279                         if (dcsr & DCSR_DONEA)
280                                 sa11x0_dma_complete(p, c);
281                         if (dcsr & DCSR_DONEB)
282                                 sa11x0_dma_complete(p, c);
283                 }
284                 spin_unlock_irqrestore(&c->vc.lock, flags);
285         }
286
287         return IRQ_HANDLED;
288 }
289
290 static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
291 {
292         struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);
293
294         /* If the issued list is empty, we have no further txds to process */
295         if (txd) {
296                 struct sa11x0_dma_phy *p = c->phy;
297
298                 sa11x0_dma_start_desc(p, txd);
299                 p->txd_done = txd;
300                 p->sg_done = 0;
301
302                 /* The channel should not have any transfers started */
303                 WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
304                                       (DCSR_STRTA | DCSR_STRTB));
305
306                 /* Clear the run and start bits before changing DDAR */
307                 writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
308                                p->base + DMA_DCSR_C);
309                 writel_relaxed(txd->ddar, p->base + DMA_DDAR);
310
311                 /* Try to start both buffers */
312                 sa11x0_dma_start_sg(p, c);
313                 sa11x0_dma_start_sg(p, c);
314         }
315 }
316
317 static void sa11x0_dma_tasklet(unsigned long arg)
318 {
319         struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
320         struct sa11x0_dma_phy *p;
321         struct sa11x0_dma_chan *c;
322         unsigned pch, pch_alloc = 0;
323
324         dev_dbg(d->slave.dev, "tasklet enter\n");
325
326         list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
327                 spin_lock_irq(&c->vc.lock);
328                 p = c->phy;
329                 if (p && !p->txd_done) {
330                         sa11x0_dma_start_txd(c);
331                         if (!p->txd_done) {
332                                 /* No current txd associated with this channel */
333                                 dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
334
335                                 /* Mark this channel free */
336                                 c->phy = NULL;
337                                 p->vchan = NULL;
338                         }
339                 }
340                 spin_unlock_irq(&c->vc.lock);
341         }
342
343         spin_lock_irq(&d->lock);
344         for (pch = 0; pch < NR_PHY_CHAN; pch++) {
345                 p = &d->phy[pch];
346
347                 if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
348                         c = list_first_entry(&d->chan_pending,
349                                 struct sa11x0_dma_chan, node);
350                         list_del_init(&c->node);
351
352                         pch_alloc |= 1 << pch;
353
354                         /* Mark this channel allocated */
355                         p->vchan = c;
356
357                         dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
358                 }
359         }
360         spin_unlock_irq(&d->lock);
361
362         for (pch = 0; pch < NR_PHY_CHAN; pch++) {
363                 if (pch_alloc & (1 << pch)) {
364                         p = &d->phy[pch];
365                         c = p->vchan;
366
367                         spin_lock_irq(&c->vc.lock);
368                         c->phy = p;
369
370                         sa11x0_dma_start_txd(c);
371                         spin_unlock_irq(&c->vc.lock);
372                 }
373         }
374
375         dev_dbg(d->slave.dev, "tasklet exit\n");
376 }
377
378
379 static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
380 {
381         return 0;
382 }
383
384 static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
385 {
386         struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
387         struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
388         unsigned long flags;
389
390         spin_lock_irqsave(&d->lock, flags);
391         list_del_init(&c->node);
392         spin_unlock_irqrestore(&d->lock, flags);
393
394         vchan_free_chan_resources(&c->vc);
395 }
396
397 static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
398 {
399         unsigned reg;
400         u32 dcsr;
401
402         dcsr = readl_relaxed(p->base + DMA_DCSR_R);
403
404         if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
405             (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
406                 reg = DMA_DBSA;
407         else
408                 reg = DMA_DBSB;
409
410         return readl_relaxed(p->base + reg);
411 }
412
413 static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
414         dma_cookie_t cookie, struct dma_tx_state *state)
415 {
416         struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
417         struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
418         struct sa11x0_dma_phy *p;
419         struct sa11x0_dma_desc *txd;
420         unsigned long flags;
421         enum dma_status ret;
422         size_t bytes = 0;
423
424         ret = dma_cookie_status(&c->vc.chan, cookie, state);
425         if (ret == DMA_SUCCESS)
426                 return ret;
427
428         spin_lock_irqsave(&c->vc.lock, flags);
429         p = c->phy;
430         ret = c->status;
431         if (p) {
432                 dma_addr_t addr = sa11x0_dma_pos(p);
433
434                 dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
435
436                 txd = p->txd_done;
437                 if (txd) {
438                         unsigned i;
439
440                         for (i = 0; i < txd->sglen; i++) {
441                                 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
442                                         i, txd->sg[i].addr, txd->sg[i].len);
443                                 if (addr >= txd->sg[i].addr &&
444                                     addr < txd->sg[i].addr + txd->sg[i].len) {
445                                         unsigned len;
446
447                                         len = txd->sg[i].len -
448                                                 (addr - txd->sg[i].addr);
449                                         dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
450                                                 i, len);
451                                         bytes += len;
452                                         i++;
453                                         break;
454                                 }
455                         }
456                         for (; i < txd->sglen; i++) {
457                                 dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
458                                         i, txd->sg[i].addr, txd->sg[i].len);
459                                 bytes += txd->sg[i].len;
460                         }
461                 }
462                 if (txd != p->txd_load && p->txd_load)
463                         bytes += p->txd_load->size;
464         }
465         list_for_each_entry(txd, &c->vc.desc_issued, vd.node) {
466                 bytes += txd->size;
467         }
468         spin_unlock_irqrestore(&c->vc.lock, flags);
469
470         if (state)
471                 state->residue = bytes;
472
473         dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
474
475         return ret;
476 }
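/*
 * Residue accounting above: for the descriptor currently being completed,
 * the hardware position from sa11x0_dma_pos() is matched against its
 * scatter entries and the bytes not yet transferred are counted; a
 * descriptor that is loaded but not yet being completed contributes its
 * full size, as does every descriptor still sitting on the issued list.
 */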
477
478 /*
479  * Move pending txds to the issued list, and re-init pending list.
480  * If not already pending, add this channel to the list of pending
481  * channels and trigger the tasklet to run.
482  */
483 static void sa11x0_dma_issue_pending(struct dma_chan *chan)
484 {
485         struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
486         struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
487         unsigned long flags;
488
489         spin_lock_irqsave(&c->vc.lock, flags);
490         if (vchan_issue_pending(&c->vc)) {
491                 if (!c->phy) {
492                         spin_lock(&d->lock);
493                         if (list_empty(&c->node)) {
494                                 list_add_tail(&c->node, &d->chan_pending);
495                                 tasklet_schedule(&d->task);
496                                 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
497                         }
498                         spin_unlock(&d->lock);
499                 }
500         } else
501                 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
502         spin_unlock_irqrestore(&c->vc.lock, flags);
503 }
504
505 static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
506         struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
507         enum dma_transfer_direction dir, unsigned long flags, void *context)
508 {
509         struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
510         struct sa11x0_dma_desc *txd;
511         struct scatterlist *sgent;
512         unsigned i, j = sglen;
513         size_t size = 0;
514
515         /* SA11x0 channels can only operate in their native direction */
516         if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
517                 dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
518                         &c->vc, c->ddar, dir);
519                 return NULL;
520         }
521
522         /* Do not allow zero-sized txds */
523         if (sglen == 0)
524                 return NULL;
525
526         for_each_sg(sg, sgent, sglen, i) {
527                 dma_addr_t addr = sg_dma_address(sgent);
528                 unsigned int len = sg_dma_len(sgent);
529
530                 if (len > DMA_MAX_SIZE)
531                         j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
532                 if (addr & DMA_ALIGN) {
533                         dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
534                                 &c->vc, addr);
535                         return NULL;
536                 }
537         }
538
539         txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
540         if (!txd) {
541                 dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
542                 return NULL;
543         }
544
545         j = 0;
546         for_each_sg(sg, sgent, sglen, i) {
547                 dma_addr_t addr = sg_dma_address(sgent);
548                 unsigned len = sg_dma_len(sgent);
549
550                 size += len;
551
552                 do {
553                         unsigned tlen = len;
554
555                         /*
556                          * Check whether the transfer will fit.  If not, try
557                          * to split the transfer up such that we end up with
558                          * equal chunks - but make sure that we preserve the
559                          * alignment.  This avoids small segments.
560                          */
561                         if (tlen > DMA_MAX_SIZE) {
562                                 unsigned mult = DIV_ROUND_UP(tlen,
563                                         DMA_MAX_SIZE & ~DMA_ALIGN);
564
565                                 tlen = (tlen / mult) & ~DMA_ALIGN;
566                         }
567
568                         txd->sg[j].addr = addr;
569                         txd->sg[j].len = tlen;
570
571                         addr += tlen;
572                         len -= tlen;
573                         j++;
574                 } while (len);
575         }
576
577         txd->ddar = c->ddar;
578         txd->size = size;
579         txd->sglen = j;
580
581         dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
582                 &c->vc, &txd->vd, txd->size, txd->sglen);
583
584         return vchan_tx_prep(&c->vc, &txd->vd, flags);
585 }
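/*
 * Worked example of the splitting above (values follow directly from
 * DMA_MAX_SIZE = 0x1fff and DMA_ALIGN = 3): a 20480 byte (0x5000)
 * scatterlist entry does not fit in one 8191 byte hardware transfer, so
 * mult = DIV_ROUND_UP(20480, 8188) = 3 and the entry is emitted as three
 * word-aligned chunks of 6824, 6828 and 6828 bytes.  The pre-scan loop
 * reserves the same number of sa11x0_dma_sg slots before the descriptor
 * is allocated.
 */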
586
587 static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
588 {
589         u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
590         dma_addr_t addr;
591         enum dma_slave_buswidth width;
592         u32 maxburst;
593
594         if (ddar & DDAR_RW) {
595                 addr = cfg->src_addr;
596                 width = cfg->src_addr_width;
597                 maxburst = cfg->src_maxburst;
598         } else {
599                 addr = cfg->dst_addr;
600                 width = cfg->dst_addr_width;
601                 maxburst = cfg->dst_maxburst;
602         }
603
604         if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
605              width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
606             (maxburst != 4 && maxburst != 8))
607                 return -EINVAL;
608
609         if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
610                 ddar |= DDAR_DW;
611         if (maxburst == 8)
612                 ddar |= DDAR_BS;
613
614         dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
615                 &c->vc, addr, width, maxburst);
616
617         c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
618
619         return 0;
620 }
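/*
 * DDAR value produced above: bit 0 selects the direction (DDAR_RW),
 * bit 2 the burst size (4 or 8), bit 3 the bus width (8 or 16 bit),
 * bits 7:4 the device request line, bits 27:8 carry device address
 * bits 21:2 and bits 31:28 carry address bits 31:28.  Only the
 * 1/2 byte widths and 4/8 word bursts checked above are accepted;
 * anything else is rejected with -EINVAL.
 */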
621
622 static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
623         unsigned long arg)
624 {
625         struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
626         struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
627         struct sa11x0_dma_phy *p;
628         LIST_HEAD(head);
629         unsigned long flags;
630         int ret;
631
632         switch (cmd) {
633         case DMA_SLAVE_CONFIG:
634                 return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
635
636         case DMA_TERMINATE_ALL:
637                 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
638                 /* Clear the tx descriptor lists */
639                 spin_lock_irqsave(&c->vc.lock, flags);
640                 vchan_get_all_descriptors(&c->vc, &head);
641
642                 p = c->phy;
643                 if (p) {
644                         dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
645                         /* vchan is assigned to a pchan - stop the channel */
646                         writel(DCSR_RUN | DCSR_IE |
647                                 DCSR_STRTA | DCSR_DONEA |
648                                 DCSR_STRTB | DCSR_DONEB,
649                                 p->base + DMA_DCSR_C);
650
651                         if (p->txd_load) {
652                                 if (p->txd_load != p->txd_done)
653                                         list_add_tail(&p->txd_load->vd.node, &head);
654                                 p->txd_load = NULL;
655                         }
656                         if (p->txd_done) {
657                                 list_add_tail(&p->txd_done->vd.node, &head);
658                                 p->txd_done = NULL;
659                         }
660                         c->phy = NULL;
661                         spin_lock(&d->lock);
662                         p->vchan = NULL;
663                         spin_unlock(&d->lock);
664                         tasklet_schedule(&d->task);
665                 }
666                 spin_unlock_irqrestore(&c->vc.lock, flags);
667                 vchan_dma_desc_free_list(&c->vc, &head);
668                 ret = 0;
669                 break;
670
671         case DMA_PAUSE:
672                 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
673                 spin_lock_irqsave(&c->vc.lock, flags);
674                 if (c->status == DMA_IN_PROGRESS) {
675                         c->status = DMA_PAUSED;
676
677                         p = c->phy;
678                         if (p) {
679                                 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
680                         } else {
681                                 spin_lock(&d->lock);
682                                 list_del_init(&c->node);
683                                 spin_unlock(&d->lock);
684                         }
685                 }
686                 spin_unlock_irqrestore(&c->vc.lock, flags);
687                 ret = 0;
688                 break;
689
690         case DMA_RESUME:
691                 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
692                 spin_lock_irqsave(&c->vc.lock, flags);
693                 if (c->status == DMA_PAUSED) {
694                         c->status = DMA_IN_PROGRESS;
695
696                         p = c->phy;
697                         if (p) {
698                                 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
699                         } else if (!list_empty(&c->vc.desc_issued)) {
700                                 spin_lock(&d->lock);
701                                 list_add_tail(&c->node, &d->chan_pending);
702                                 spin_unlock(&d->lock);
703                         }
704                 }
705                 spin_unlock_irqrestore(&c->vc.lock, flags);
706                 ret = 0;
707                 break;
708
709         default:
710                 ret = -ENXIO;
711                 break;
712         }
713
714         return ret;
715 }
716
717 struct sa11x0_dma_channel_desc {
718         u32 ddar;
719         const char *name;
720 };
721
722 #define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
723 static const struct sa11x0_dma_channel_desc chan_desc[] = {
724         CD(Ser0UDCTr, 0),
725         CD(Ser0UDCRc, DDAR_RW),
726         CD(Ser1SDLCTr, 0),
727         CD(Ser1SDLCRc, DDAR_RW),
728         CD(Ser1UARTTr, 0),
729         CD(Ser1UARTRc, DDAR_RW),
730         CD(Ser2ICPTr, 0),
731         CD(Ser2ICPRc, DDAR_RW),
732         CD(Ser3UARTTr, 0),
733         CD(Ser3UARTRc, DDAR_RW),
734         CD(Ser4MCP0Tr, 0),
735         CD(Ser4MCP0Rc, DDAR_RW),
736         CD(Ser4MCP1Tr, 0),
737         CD(Ser4MCP1Rc, DDAR_RW),
738         CD(Ser4SSPTr, 0),
739         CD(Ser4SSPRc, DDAR_RW),
740 };
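/*
 * For reference, CD(Ser0UDCRc, DDAR_RW) expands to
 * { .ddar = DDAR_Ser0UDCRc | DDAR_RW, .name = "Ser0UDCRc" }; the string a
 * client passes to sa11x0_dma_filter_fn() is simply the stringified
 * device identifier.
 */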
741
742 static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
743         struct device *dev)
744 {
745         unsigned i;
746
747         dmadev->chancnt = ARRAY_SIZE(chan_desc);
748         INIT_LIST_HEAD(&dmadev->channels);
749         dmadev->dev = dev;
750         dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
751         dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
752         dmadev->device_control = sa11x0_dma_control;
753         dmadev->device_tx_status = sa11x0_dma_tx_status;
754         dmadev->device_issue_pending = sa11x0_dma_issue_pending;
755
756         for (i = 0; i < dmadev->chancnt; i++) {
757                 struct sa11x0_dma_chan *c;
758
759                 c = kzalloc(sizeof(*c), GFP_KERNEL);
760                 if (!c) {
761                         dev_err(dev, "no memory for channel %u\n", i);
762                         return -ENOMEM;
763                 }
764
765                 c->status = DMA_IN_PROGRESS;
766                 c->ddar = chan_desc[i].ddar;
767                 c->name = chan_desc[i].name;
768                 INIT_LIST_HEAD(&c->node);
769
770                 c->vc.desc_free = sa11x0_dma_free_desc;
771                 vchan_init(&c->vc, dmadev);
772         }
773
774         return dma_async_device_register(dmadev);
775 }
776
777 static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
778         void *data)
779 {
780         int irq = platform_get_irq(pdev, nr);
781
782         if (irq <= 0)
783                 return -ENXIO;
784
785         return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
786 }
787
788 static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
789         void *data)
790 {
791         int irq = platform_get_irq(pdev, nr);
792         if (irq > 0)
793                 free_irq(irq, data);
794 }
795
796 static void sa11x0_dma_free_channels(struct dma_device *dmadev)
797 {
798         struct sa11x0_dma_chan *c, *cn;
799
800         list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
801                 list_del(&c->vc.chan.device_node);
802                 tasklet_kill(&c->vc.task);
803                 kfree(c);
804         }
805 }
806
807 static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
808 {
809         struct sa11x0_dma_dev *d;
810         struct resource *res;
811         unsigned i;
812         int ret;
813
814         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
815         if (!res)
816                 return -ENXIO;
817
818         d = kzalloc(sizeof(*d), GFP_KERNEL);
819         if (!d) {
820                 ret = -ENOMEM;
821                 goto err_alloc;
822         }
823
824         spin_lock_init(&d->lock);
825         INIT_LIST_HEAD(&d->chan_pending);
826
827         d->base = ioremap(res->start, resource_size(res));
828         if (!d->base) {
829                 ret = -ENOMEM;
830                 goto err_ioremap;
831         }
832
833         tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);
834
835         for (i = 0; i < NR_PHY_CHAN; i++) {
836                 struct sa11x0_dma_phy *p = &d->phy[i];
837
838                 p->dev = d;
839                 p->num = i;
840                 p->base = d->base + i * DMA_SIZE;
841                 writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
842                         DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
843                         p->base + DMA_DCSR_C);
844                 writel_relaxed(0, p->base + DMA_DDAR);
845
846                 ret = sa11x0_dma_request_irq(pdev, i, p);
847                 if (ret) {
848                         while (i) {
849                                 i--;
850                                 sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
851                         }
852                         goto err_irq;
853                 }
854         }
855
856         dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
857         d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
858         ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
859         if (ret) {
860                 dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
861                         ret);
862                 goto err_slave_reg;
863         }
864
865         platform_set_drvdata(pdev, d);
866         return 0;
867
868  err_slave_reg:
869         sa11x0_dma_free_channels(&d->slave);
870         for (i = 0; i < NR_PHY_CHAN; i++)
871                 sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
872  err_irq:
873         tasklet_kill(&d->task);
874         iounmap(d->base);
875  err_ioremap:
876         kfree(d);
877  err_alloc:
878         return ret;
879 }
880
881 static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
882 {
883         struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
884         unsigned pch;
885
886         dma_async_device_unregister(&d->slave);
887
888         sa11x0_dma_free_channels(&d->slave);
889         for (pch = 0; pch < NR_PHY_CHAN; pch++)
890                 sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
891         tasklet_kill(&d->task);
892         iounmap(d->base);
893         kfree(d);
894
895         return 0;
896 }
897
898 #ifdef CONFIG_PM_SLEEP
899 static int sa11x0_dma_suspend(struct device *dev)
900 {
901         struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
902         unsigned pch;
903
904         for (pch = 0; pch < NR_PHY_CHAN; pch++) {
905                 struct sa11x0_dma_phy *p = &d->phy[pch];
906                 u32 dcsr, saved_dcsr;
907
908                 dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
909                 if (dcsr & DCSR_RUN) {
910                         writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
911                         dcsr = readl_relaxed(p->base + DMA_DCSR_R);
912                 }
913
914                 saved_dcsr &= DCSR_RUN | DCSR_IE;
915                 if (dcsr & DCSR_BIU) {
916                         p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
917                         p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
918                         p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
919                         p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
920                         saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
921                                       (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
922                 } else {
923                         p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
924                         p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
925                         p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
926                         p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
927                         saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
928                 }
929                 p->dcsr = saved_dcsr;
930
931                 writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
932         }
933
934         return 0;
935 }
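/*
 * Note on the save order above: when buffer B was in use (DCSR_BIU set),
 * the A and B register pairs are saved swapped and the STRTA/STRTB bits
 * are exchanged, so dbs[0]/dbt[0] always describe the buffer the hardware
 * would service next.  sa11x0_dma_resume() can then unconditionally
 * restore dbs[0]/dbt[0] into buffer A and dbs[1]/dbt[1] into buffer B.
 */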
936
937 static int sa11x0_dma_resume(struct device *dev)
938 {
939         struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
940         unsigned pch;
941
942         for (pch = 0; pch < NR_PHY_CHAN; pch++) {
943                 struct sa11x0_dma_phy *p = &d->phy[pch];
944                 struct sa11x0_dma_desc *txd = NULL;
945                 u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);
946
947                 WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));
948
949                 if (p->txd_done)
950                         txd = p->txd_done;
951                 else if (p->txd_load)
952                         txd = p->txd_load;
953
954                 if (!txd)
955                         continue;
956
957                 writel_relaxed(txd->ddar, p->base + DMA_DDAR);
958
959                 writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
960                 writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
961                 writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
962                 writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
963                 writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
964         }
965
966         return 0;
967 }
968 #endif
969
970 static const struct dev_pm_ops sa11x0_dma_pm_ops = {
971         .suspend_noirq = sa11x0_dma_suspend,
972         .resume_noirq = sa11x0_dma_resume,
973         .freeze_noirq = sa11x0_dma_suspend,
974         .thaw_noirq = sa11x0_dma_resume,
975         .poweroff_noirq = sa11x0_dma_suspend,
976         .restore_noirq = sa11x0_dma_resume,
977 };
978
979 static struct platform_driver sa11x0_dma_driver = {
980         .driver = {
981                 .name   = "sa11x0-dma",
982                 .owner  = THIS_MODULE,
983                 .pm     = &sa11x0_dma_pm_ops,
984         },
985         .probe          = sa11x0_dma_probe,
986         .remove         = __devexit_p(sa11x0_dma_remove),
987 };
988
989 bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
990 {
991         if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
992                 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
993                 const char *p = param;
994
995                 return !strcmp(c->name, p);
996         }
997         return false;
998 }
999 EXPORT_SYMBOL(sa11x0_dma_filter_fn);
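/*
 * Minimal client sketch (illustrative only, not part of this driver): how
 * a peripheral driver of this kernel generation might claim and configure
 * one of the channels above.  sa11x0_client_setup() and dev_fifo are
 * assumptions standing in for the real client function and device FIFO
 * address; "Ser4SSPTr" is one of the request-line names from chan_desc[].
 * A real client would include <linux/dmaengine.h> and <linux/sa11x0-dma.h>.
 */
#if 0
static struct dma_chan *sa11x0_client_setup(dma_addr_t dev_fifo)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg = {
		.dst_addr	= dev_fifo,	/* assumed device data register */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,	/* 16-bit (DDAR_DW) */
		.dst_maxburst	= 8,				/* burst of 8 (DDAR_BS) */
	};
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Match by channel name, here the Ser4 SSP transmit request line */
	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPTr");
	if (!chan)
		return NULL;

	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return NULL;
	}

	/*
	 * Transfers would then be queued with dmaengine_prep_slave_sg()
	 * (DMA_MEM_TO_DEV for a Tr channel), submitted with
	 * dmaengine_submit() and started via dma_async_issue_pending().
	 */
	return chan;
}
#endif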
1000
1001 static int __init sa11x0_dma_init(void)
1002 {
1003         return platform_driver_register(&sa11x0_dma_driver);
1004 }
1005 subsys_initcall(sa11x0_dma_init);
1006
1007 static void __exit sa11x0_dma_exit(void)
1008 {
1009         platform_driver_unregister(&sa11x0_dma_driver);
1010 }
1011 module_exit(sa11x0_dma_exit);
1012
1013 MODULE_AUTHOR("Russell King");
1014 MODULE_DESCRIPTION("SA-11x0 DMA driver");
1015 MODULE_LICENSE("GPL v2");
1016 MODULE_ALIAS("platform:sa11x0-dma");