dmaengine: omap-dma: program hardware directly
drivers/dma/omap-dma.c
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
        struct dma_device ddev;
        spinlock_t lock;
        struct tasklet_struct task;
        struct list_head pending;
        struct omap_system_dma_plat_info *plat;
};

struct omap_chan {
        struct virt_dma_chan vc;
        struct list_head node;
        struct omap_system_dma_plat_info *plat;

        struct dma_slave_config cfg;
        unsigned dma_sig;
        bool cyclic;
        bool paused;

        int dma_ch;
        struct omap_desc *desc;
        unsigned sgidx;
};

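/*
 * Each omap_sg describes one hardware transfer block: a start address
 * plus the element count (EN) and frame count (FN) programmed into the
 * CEN/CFN registers.  The total size of a block is therefore
 * ES * EN * FN bytes, where ES is the element size from omap_desc.
 */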
struct omap_sg {
        dma_addr_t addr;
        uint32_t en;            /* number of elements (24-bit) */
        uint32_t fn;            /* number of frames (16-bit) */
};

struct omap_desc {
        struct virt_dma_desc vd;
        enum dma_transfer_direction dir;
        dma_addr_t dev_addr;

        int16_t fi;             /* for OMAP_DMA_SYNC_PACKET */
        uint8_t es;             /* OMAP_DMA_DATA_TYPE_xxx */
        uint8_t sync_mode;      /* OMAP_DMA_SYNC_xxx */
        uint8_t sync_type;      /* OMAP_DMA_xxx_SYNC* */
        uint8_t periph_port;    /* Peripheral port */

        unsigned sglen;
        struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
        [OMAP_DMA_DATA_TYPE_S8] = 1,
        [OMAP_DMA_DATA_TYPE_S16] = 2,
        [OMAP_DMA_DATA_TYPE_S32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
        .filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
        return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct omap_desc, vd));
}

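/*
 * Program the hardware for one scatterlist entry: the memory-side address
 * and post-increment addressing mode, the element size in CSDP, the sync
 * mode bits in CCR (and CCR2 on OMAP1), and the EN/FN counts, then start
 * the channel.  Called with the channel's virtual-channel lock held.
 */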
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
        unsigned idx)
{
        struct omap_sg *sg = d->sg + idx;
        uint32_t val;

        if (d->dir == DMA_DEV_TO_MEM) {
                if (dma_omap1()) {
                        val = c->plat->dma_read(CSDP, c->dma_ch);
                        val &= ~(0x1f << 9);
                        val |= OMAP_DMA_PORT_EMIFF << 9;
                        c->plat->dma_write(val, CSDP, c->dma_ch);
                }

                val = c->plat->dma_read(CCR, c->dma_ch);
                val &= ~(0x03 << 14);
                val |= OMAP_DMA_AMODE_POST_INC << 14;
                c->plat->dma_write(val, CCR, c->dma_ch);

                c->plat->dma_write(sg->addr, CDSA, c->dma_ch);
                c->plat->dma_write(0, CDEI, c->dma_ch);
                c->plat->dma_write(0, CDFI, c->dma_ch);
        } else {
                if (dma_omap1()) {
                        val = c->plat->dma_read(CSDP, c->dma_ch);
                        val &= ~(0x1f << 2);
                        val |= OMAP_DMA_PORT_EMIFF << 2;
                        c->plat->dma_write(val, CSDP, c->dma_ch);
                }

                val = c->plat->dma_read(CCR, c->dma_ch);
                val &= ~(0x03 << 12);
                val |= OMAP_DMA_AMODE_POST_INC << 12;
                c->plat->dma_write(val, CCR, c->dma_ch);

                c->plat->dma_write(sg->addr, CSSA, c->dma_ch);
                c->plat->dma_write(0, CSEI, c->dma_ch);
                c->plat->dma_write(0, CSFI, c->dma_ch);
        }

        val = c->plat->dma_read(CSDP, c->dma_ch);
        val &= ~0x03;
        val |= d->es;
        c->plat->dma_write(val, CSDP, c->dma_ch);

        if (dma_omap1()) {
                val = c->plat->dma_read(CCR, c->dma_ch);
                val &= ~(1 << 5);
                if (d->sync_mode == OMAP_DMA_SYNC_FRAME)
                        val |= 1 << 5;
                c->plat->dma_write(val, CCR, c->dma_ch);

                val = c->plat->dma_read(CCR2, c->dma_ch);
                val &= ~(1 << 2);
                if (d->sync_mode == OMAP_DMA_SYNC_BLOCK)
                        val |= 1 << 2;
                c->plat->dma_write(val, CCR2, c->dma_ch);
        } else if (c->dma_sig) {
                val = c->plat->dma_read(CCR, c->dma_ch);

                /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
                val &= ~((1 << 23) | (3 << 19) | 0x1f);
                val |= (c->dma_sig & ~0x1f) << 14;
                val |= c->dma_sig & 0x1f;

                if (d->sync_mode & OMAP_DMA_SYNC_FRAME)
                        val |= 1 << 5;
                else
                        val &= ~(1 << 5);

                if (d->sync_mode & OMAP_DMA_SYNC_BLOCK)
                        val |= 1 << 18;
                else
                        val &= ~(1 << 18);

                switch (d->sync_type) {
                case OMAP_DMA_DST_SYNC_PREFETCH:
                        val &= ~(1 << 24);      /* dest synch */
                        val |= 1 << 23;         /* Prefetch */
                        break;
                case 0:
                        val &= ~(1 << 24);      /* dest synch */
                        break;
                default:
                        val |= 1 << 24;         /* source synch */
                        break;
                }
                c->plat->dma_write(val, CCR, c->dma_ch);
        }

        c->plat->dma_write(sg->en, CEN, c->dma_ch);
        c->plat->dma_write(sg->fn, CFN, c->dma_ch);

        omap_start_dma(c->dma_ch);
}

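/*
 * Take the next virtual descriptor off the channel's queue and program
 * the device-side parameters that stay fixed for the whole descriptor:
 * the peripheral port (OMAP1 only), the constant device address and the
 * frame index, then kick off the first scatterlist entry.
 */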
static void omap_dma_start_desc(struct omap_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
        struct omap_desc *d;
        uint32_t val;

        if (!vd) {
                c->desc = NULL;
                return;
        }

        list_del(&vd->node);

        c->desc = d = to_omap_dma_desc(&vd->tx);
        c->sgidx = 0;

        if (d->dir == DMA_DEV_TO_MEM) {
                if (dma_omap1()) {
                        val = c->plat->dma_read(CSDP, c->dma_ch);
                        val &= ~(0x1f << 2);
                        val |= d->periph_port << 2;
                        c->plat->dma_write(val, CSDP, c->dma_ch);
                }

                val = c->plat->dma_read(CCR, c->dma_ch);
                val &= ~(0x03 << 12);
                val |= OMAP_DMA_AMODE_CONSTANT << 12;
                c->plat->dma_write(val, CCR, c->dma_ch);

                c->plat->dma_write(d->dev_addr, CSSA, c->dma_ch);
                c->plat->dma_write(0, CSEI, c->dma_ch);
                c->plat->dma_write(d->fi, CSFI, c->dma_ch);
        } else {
                if (dma_omap1()) {
                        val = c->plat->dma_read(CSDP, c->dma_ch);
                        val &= ~(0x1f << 9);
                        val |= d->periph_port << 9;
                        c->plat->dma_write(val, CSDP, c->dma_ch);
                }

                val = c->plat->dma_read(CCR, c->dma_ch);
                val &= ~(0x03 << 14);
                val |= OMAP_DMA_AMODE_CONSTANT << 14;
                c->plat->dma_write(val, CCR, c->dma_ch);

                c->plat->dma_write(d->dev_addr, CDSA, c->dma_ch);
                c->plat->dma_write(0, CDEI, c->dma_ch);
                c->plat->dma_write(d->fi, CDFI, c->dma_ch);
        }

        omap_dma_start_sg(c, d, 0);
}

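/*
 * Interrupt callback registered with the legacy omap_request_dma() API.
 * Advances to the next scatterlist entry if one remains; otherwise the
 * descriptor is complete, so the next one (if any) is started before
 * completion is signalled.  Cyclic transfers never complete and instead
 * trigger the period callback.
 */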
static void omap_dma_callback(int ch, u16 status, void *data)
{
        struct omap_chan *c = data;
        struct omap_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        d = c->desc;
        if (d) {
                if (!c->cyclic) {
                        if (++c->sgidx < d->sglen) {
                                omap_dma_start_sg(c, d, c->sgidx);
                        } else {
                                omap_dma_start_desc(c);
                                vchan_cookie_complete(&d->vd);
                        }
                } else {
                        vchan_cyclic_callback(&d->vd);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with the 'all channels in-use' case.
 */
static void omap_dma_sched(unsigned long data)
{
        struct omap_dmadev *d = (struct omap_dmadev *)data;
        LIST_HEAD(head);

        spin_lock_irq(&d->lock);
        list_splice_tail_init(&d->pending, &head);
        spin_unlock_irq(&d->lock);

        while (!list_empty(&head)) {
                struct omap_chan *c = list_first_entry(&head,
                        struct omap_chan, node);

                spin_lock_irq(&c->vc.lock);
                list_del_init(&c->node);
                omap_dma_start_desc(c);
                spin_unlock_irq(&c->vc.lock);
        }
}

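/*
 * Channel resources come from the legacy OMAP DMA API: each virtual
 * channel claims a real channel via omap_request_dma() when a client
 * takes it, and releases it again when the client is done.
 */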
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

        return omap_request_dma(c->dma_sig, "DMA engine",
                omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);

        dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

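/*
 * Size helpers for residue reporting: an omap_sg covers en * fn elements,
 * and a descriptor's byte size is the sum over all of its entries
 * multiplied by the element size in bytes.
 */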
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
        return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
        unsigned i;
        size_t size;

        for (size = i = 0; i < d->sglen; i++)
                size += omap_dma_sg_size(&d->sg[i]);

        return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
        unsigned i;
        size_t size, es_size = es_bytes[d->es];

        for (size = i = 0; i < d->sglen; i++) {
                size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

                if (size)
                        size += this_size;
                else if (addr >= d->sg[i].addr &&
                         addr < d->sg[i].addr + this_size)
                        size += d->sg[i].addr + this_size - addr;
        }
        return size;
}

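/*
 * Report transfer status.  A descriptor still sitting on the queue has
 * its full size as the residue; for the in-flight descriptor the residue
 * is computed from the current hardware position on the memory side of
 * the transfer.
 */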
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
        } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
                struct omap_desc *d = c->desc;
                dma_addr_t pos;

                if (d->dir == DMA_MEM_TO_DEV)
                        pos = omap_get_dma_src_pos(c->dma_ch);
                else if (d->dir == DMA_DEV_TO_MEM)
                        pos = omap_get_dma_dst_pos(c->dma_ch);
                else
                        pos = 0;

                txstate->residue = omap_dma_desc_size_pos(d, pos);
        } else {
                txstate->residue = 0;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return ret;
}

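/*
 * Issue pending descriptors.  Non-cyclic transfers are deferred to the
 * tasklet so several channels can be started together; cyclic (audio)
 * transfers are started immediately to avoid latency.
 */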
static void omap_dma_issue_pending(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc) {
                /*
                 * c->cyclic is used only by audio, and in that case the
                 * DMA needs to be started without delay.
                 */
                if (!c->cyclic) {
                        struct omap_dmadev *d = to_omap_dma_dev(chan->device);
                        spin_lock(&d->lock);
                        if (list_empty(&c->node))
                                list_add_tail(&c->node, &d->pending);
                        spin_unlock(&d->lock);
                        tasklet_schedule(&d->task);
                } else {
                        omap_dma_start_desc(c);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

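/*
 * Prepare a slave scatter-gather transfer.  The slave config's bus width
 * selects the element size (ES) and maxburst the elements per frame (EN),
 * so each scatterlist segment becomes one omap_sg with
 * FN = length / (ES_bytes * EN).  As an example (hypothetical numbers):
 * a 512-byte segment with a 4-byte bus width and a maxburst of 8 gives
 * frame_bytes = 32 and FN = 16.
 */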
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct scatterlist *sgent;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned i, j = 0, es, en, frame_bytes, sync_type;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                sync_type = OMAP_DMA_SRC_SYNC;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                sync_type = OMAP_DMA_DST_SYNC;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = OMAP_DMA_DATA_TYPE_S8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = OMAP_DMA_DATA_TYPE_S16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = OMAP_DMA_DATA_TYPE_S32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->es = es;
        d->sync_mode = OMAP_DMA_SYNC_FRAME;
        d->sync_type = sync_type;
        d->periph_port = OMAP_DMA_PORT_TIPB;

        /*
         * Build our scatterlist entries: each contains the address,
         * the number of elements (EN) in each frame, and the number of
         * frames (FN).  Number of bytes for this entry = ES * EN * FN.
         *
         * Burst size translates to number of elements with frame sync.
         * Note: DMA engine defines burst to be the number of dev-width
         * transfers.
         */
        en = burst;
        frame_bytes = es_bytes[es] * en;
        for_each_sg(sgl, sgent, sglen, i) {
                d->sg[j].addr = sg_dma_address(sgent);
                d->sg[j].en = en;
                d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
                j++;
        }

        d->sglen = j;

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

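/*
 * Prepare a cyclic transfer (typically audio).  A single omap_sg covers
 * the whole buffer with EN = elements per period and FN = number of
 * periods; the channel is linked to itself with omap_dma_link_lch() so
 * the hardware loops, and the frame interrupt marks each period.
 */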
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
        void *context)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned es, sync_type;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                sync_type = OMAP_DMA_SRC_SYNC;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                sync_type = OMAP_DMA_DST_SYNC;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = OMAP_DMA_DATA_TYPE_S8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = OMAP_DMA_DATA_TYPE_S16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = OMAP_DMA_DATA_TYPE_S32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->fi = burst;
        d->es = es;
        if (burst)
                d->sync_mode = OMAP_DMA_SYNC_PACKET;
        else
                d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
        d->sync_type = sync_type;
        d->periph_port = OMAP_DMA_PORT_MPUI;
        d->sg[0].addr = buf_addr;
        d->sg[0].en = period_len / es_bytes[es];
        d->sg[0].fn = buf_len / period_len;
        d->sglen = 1;

        if (!c->cyclic) {
                c->cyclic = true;
                omap_dma_link_lch(c->dma_ch, c->dma_ch);

                if (flags & DMA_PREP_INTERRUPT)
                        omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);

                omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
        }

        if (dma_omap2plus()) {
                uint32_t val;

                val = c->plat->dma_read(CSDP, c->dma_ch);
                val |= 0x03 << 7; /* src burst mode 16 */
                val |= 0x03 << 14; /* dst burst mode 16 */
                c->plat->dma_write(val, CSDP, c->dma_ch);
        }

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}

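/*
 * 8-byte bus widths are rejected because the hardware element size
 * (OMAP_DMA_DATA_TYPE_*) only covers 1, 2 and 4 byte elements.
 */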
static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        memcpy(&c->cfg, cfg, sizeof(c->cfg));

        return 0;
}

static int omap_dma_terminate_all(struct omap_chan *c)
{
        struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /*
         * Stop DMA activity: we assume the callback will not be called
         * after omap_stop_dma() returns (even if it is, it will see that
         * c->desc is NULL and exit).
         */
        if (c->desc) {
                c->desc = NULL;
                /* Avoid stopping the DMA twice */
                if (!c->paused)
                        omap_stop_dma(c->dma_ch);
        }

        if (c->cyclic) {
                c->cyclic = false;
                c->paused = false;
                omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
        }

        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

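/*
 * Pause/resume are only allowed for cyclic transfers; they simply stop
 * and restart the channel, with c->paused guarding against doing either
 * twice.
 */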
static int omap_dma_pause(struct omap_chan *c)
{
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (!c->paused) {
                omap_stop_dma(c->dma_ch);
                c->paused = true;
        }

        return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (c->paused) {
                omap_start_dma(c->dma_ch);
                c->paused = false;
        }

        return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        unsigned long arg)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        int ret;

        switch (cmd) {
        case DMA_SLAVE_CONFIG:
                ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
                break;

        case DMA_TERMINATE_ALL:
                ret = omap_dma_terminate_all(c);
                break;

        case DMA_PAUSE:
                ret = omap_dma_pause(c);
                break;

        case DMA_RESUME:
                ret = omap_dma_resume(c);
                break;

        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

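/*
 * Create one virtual channel per DMA request line.  The real hardware
 * channel is only allocated when a client claims the virtual channel.
 */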
static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
        struct omap_chan *c;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        c->plat = od->plat;
        c->dma_sig = dma_sig;
        c->vc.desc_free = omap_dma_desc_free;
        vchan_init(&c->vc, &od->ddev);
        INIT_LIST_HEAD(&c->node);

        od->ddev.chancnt++;

        return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
        tasklet_kill(&od->task);
        while (!list_empty(&od->ddev.channels)) {
                struct omap_chan *c = list_first_entry(&od->ddev.channels,
                        struct omap_chan, vc.chan.device_node);

                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
                kfree(c);
        }
}

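/*
 * Probe registers a virtual channel for each of the 127 request lines up
 * front; the platform data (register accessors) must already be
 * available, otherwise probing is deferred.
 */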
static int omap_dma_probe(struct platform_device *pdev)
{
        struct omap_dmadev *od;
        int rc, i;

        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        od->plat = omap_get_plat_info();
        if (!od->plat)
                return -EPROBE_DEFER;

        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
        od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
        od->ddev.device_tx_status = omap_dma_tx_status;
        od->ddev.device_issue_pending = omap_dma_issue_pending;
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_control = omap_dma_control;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        INIT_LIST_HEAD(&od->pending);
        spin_lock_init(&od->lock);

        tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

        for (i = 0; i < 127; i++) {
                rc = omap_dma_chan_init(od, i);
                if (rc) {
                        omap_dma_free(od);
                        return rc;
                }
        }

        rc = dma_async_device_register(&od->ddev);
        if (rc) {
                pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
                        rc);
                omap_dma_free(od);
                return rc;
        }

        platform_set_drvdata(pdev, od);

        if (pdev->dev.of_node) {
                omap_dma_info.dma_cap = od->ddev.cap_mask;

                /* Device-tree DMA controller registration */
                rc = of_dma_controller_register(pdev->dev.of_node,
                                of_dma_simple_xlate, &omap_dma_info);
                if (rc) {
                        pr_warn("OMAP-DMA: failed to register DMA controller\n");
                        dma_async_device_unregister(&od->ddev);
                        omap_dma_free(od);
                }
        }

        dev_info(&pdev->dev, "OMAP DMA engine driver\n");

        return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
        struct omap_dmadev *od = platform_get_drvdata(pdev);

        if (pdev->dev.of_node)
                of_dma_controller_free(pdev->dev.of_node);

        dma_async_device_unregister(&od->ddev);
        omap_dma_free(od);

        return 0;
}

static const struct of_device_id omap_dma_match[] = {
        { .compatible = "ti,omap2420-sdma", },
        { .compatible = "ti,omap2430-sdma", },
        { .compatible = "ti,omap3430-sdma", },
        { .compatible = "ti,omap3630-sdma", },
        { .compatible = "ti,omap4430-sdma", },
        {},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
        .probe  = omap_dma_probe,
        .remove = omap_dma_remove,
        .driver = {
                .name = "omap-dma-engine",
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(omap_dma_match),
        },
};

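/*
 * Filter function for dma_request_channel(): matches a channel when its
 * request line (dma_sig) equals the requested signal number.
 */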
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &omap_dma_driver.driver) {
                struct omap_chan *c = to_omap_dma_chan(chan);
                unsigned req = *(unsigned *)param;

                return req == c->dma_sig;
        }
        return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
        return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
        platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");