drivers/dma/omap-dma.c (firefly-linux-kernel-4.4.55.git)
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"
#include <plat/dma.h>

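/*
 * Driver-wide state: the dmaengine device plus a lock-protected list of
 * channels with newly issued work and the tasklet that starts them.
 */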
struct omap_dmadev {
        struct dma_device ddev;
        spinlock_t lock;
        struct tasklet_struct task;
        struct list_head pending;
};

struct omap_chan {
        struct virt_dma_chan vc;
        struct list_head node;

        struct dma_slave_config cfg;
        unsigned dma_sig;
        bool cyclic;
        bool paused;

        int dma_ch;
        struct omap_desc *desc;
        unsigned sgidx;
};

struct omap_sg {
        dma_addr_t addr;
        uint32_t en;            /* number of elements (24-bit) */
        uint32_t fn;            /* number of frames (16-bit) */
};

struct omap_desc {
        struct virt_dma_desc vd;
        enum dma_transfer_direction dir;
        dma_addr_t dev_addr;

        int16_t fi;             /* for OMAP_DMA_SYNC_PACKET */
        uint8_t es;             /* OMAP_DMA_DATA_TYPE_xxx */
        uint8_t sync_mode;      /* OMAP_DMA_SYNC_xxx */
        uint8_t sync_type;      /* OMAP_DMA_xxx_SYNC* */
        uint8_t periph_port;    /* Peripheral port */

        unsigned sglen;
        struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
        [OMAP_DMA_DATA_TYPE_S8] = 1,
        [OMAP_DMA_DATA_TYPE_S16] = 2,
        [OMAP_DMA_DATA_TYPE_S32] = 4,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
        return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct omap_desc, vd));
}

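/*
 * Program one scatterlist entry: set the memory-side address, the
 * element/frame counts and sync parameters, then start the channel.
 */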
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
        unsigned idx)
{
        struct omap_sg *sg = d->sg + idx;

        if (d->dir == DMA_DEV_TO_MEM)
                omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
                        OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
        else
                omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
                        OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

        omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
                d->sync_mode, c->dma_sig, d->sync_type);

        omap_start_dma(c->dma_ch);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
        struct omap_desc *d;

        if (!vd) {
                c->desc = NULL;
                return;
        }

        list_del(&vd->node);

        c->desc = d = to_omap_dma_desc(&vd->tx);
        c->sgidx = 0;

        if (d->dir == DMA_DEV_TO_MEM)
                omap_set_dma_src_params(c->dma_ch, d->periph_port,
                        OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
        else
                omap_set_dma_dest_params(c->dma_ch, d->periph_port,
                        OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

        omap_dma_start_sg(c, d, 0);
}

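/*
 * Completion callback from the legacy OMAP DMA layer.  Under the
 * virtual-channel lock it either advances to the next scatterlist
 * entry, completes the descriptor and starts the next one, or signals
 * the cyclic callback.
 */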
static void omap_dma_callback(int ch, u16 status, void *data)
{
        struct omap_chan *c = data;
        struct omap_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        d = c->desc;
        if (d) {
                if (!c->cyclic) {
                        if (++c->sgidx < d->sglen) {
                                omap_dma_start_sg(c, d, c->sgidx);
                        } else {
                                omap_dma_start_desc(c);
                                vchan_cookie_complete(&d->vd);
                        }
                } else {
                        vchan_cyclic_callback(&d->vd);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
        struct omap_dmadev *d = (struct omap_dmadev *)data;
        LIST_HEAD(head);

        spin_lock_irq(&d->lock);
        list_splice_tail_init(&d->pending, &head);
        spin_unlock_irq(&d->lock);

        while (!list_empty(&head)) {
                struct omap_chan *c = list_first_entry(&head,
                        struct omap_chan, node);

                spin_lock_irq(&c->vc.lock);
                list_del_init(&c->node);
                omap_dma_start_desc(c);
                spin_unlock_irq(&c->vc.lock);
        }
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

        return omap_request_dma(c->dma_sig, "DMA engine",
                omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);

        dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
        return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
        unsigned i;
        size_t size;

        for (size = i = 0; i < d->sglen; i++)
                size += omap_dma_sg_size(&d->sg[i]);

        return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
        unsigned i;
        size_t size, es_size = es_bytes[d->es];

        for (size = i = 0; i < d->sglen; i++) {
                size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

                if (size)
                        size += this_size;
                else if (addr >= d->sg[i].addr &&
                         addr < d->sg[i].addr + this_size)
                        size += d->sg[i].addr + this_size - addr;
        }
        return size;
}

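/*
 * Report transfer status and residue: a descriptor still sitting in the
 * queue reports its full size, the in-flight descriptor reports the
 * bytes remaining from the current hardware position, anything else
 * reports zero.
 */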
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_SUCCESS || !txstate)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
        } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
                struct omap_desc *d = c->desc;
                dma_addr_t pos;

                if (d->dir == DMA_MEM_TO_DEV)
                        pos = omap_get_dma_src_pos(c->dma_ch);
                else if (d->dir == DMA_DEV_TO_MEM)
                        pos = omap_get_dma_dst_pos(c->dma_ch);
                else
                        pos = 0;

                txstate->residue = omap_dma_desc_size_pos(d, pos);
        } else {
                txstate->residue = 0;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc) {
                struct omap_dmadev *d = to_omap_dma_dev(chan->device);
                spin_lock(&d->lock);
                if (list_empty(&c->node))
                        list_add_tail(&c->node, &d->pending);
                spin_unlock(&d->lock);
                tasklet_schedule(&d->task);
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

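/*
 * Prepare a slave scatter-gather transfer.  The slave config supplies
 * the device address, element size and burst; each scatterlist entry
 * becomes one omap_sg with 'burst' elements per frame.
 */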
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct scatterlist *sgent;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned i, j = 0, es, en, frame_bytes, sync_type;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                sync_type = OMAP_DMA_SRC_SYNC;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                sync_type = OMAP_DMA_DST_SYNC;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = OMAP_DMA_DATA_TYPE_S8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = OMAP_DMA_DATA_TYPE_S16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = OMAP_DMA_DATA_TYPE_S32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->es = es;
        d->sync_mode = OMAP_DMA_SYNC_FRAME;
        d->sync_type = sync_type;
        d->periph_port = OMAP_DMA_PORT_TIPB;

        /*
         * Build our scatterlist entries: each contains the address,
         * the number of elements (EN) in each frame, and the number of
         * frames (FN).  Number of bytes for this entry = ES * EN * FN.
         *
         * Burst size translates to number of elements with frame sync.
         * Note: DMA engine defines burst to be the number of dev-width
         * transfers.
         */
        en = burst;
        frame_bytes = es_bytes[es] * en;
        for_each_sg(sgl, sgent, sglen, i) {
                d->sg[j].addr = sg_dma_address(sgent);
                d->sg[j].en = en;
                d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
                j++;
        }

        d->sglen = j;

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

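/*
 * Prepare a cyclic (audio-style) transfer: a single omap_sg describes
 * the whole buffer with one frame per period, and the channel is linked
 * to itself so the hardware loops, raising a frame interrupt per period.
 */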
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
        void *context)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned es, sync_type;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                sync_type = OMAP_DMA_SRC_SYNC;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                sync_type = OMAP_DMA_DST_SYNC;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = OMAP_DMA_DATA_TYPE_S8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = OMAP_DMA_DATA_TYPE_S16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = OMAP_DMA_DATA_TYPE_S32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->fi = burst;
        d->es = es;
        if (burst)
                d->sync_mode = OMAP_DMA_SYNC_PACKET;
        else
                d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
        d->sync_type = sync_type;
        d->periph_port = OMAP_DMA_PORT_MPUI;
        d->sg[0].addr = buf_addr;
        d->sg[0].en = period_len / es_bytes[es];
        d->sg[0].fn = buf_len / period_len;
        d->sglen = 1;

        if (!c->cyclic) {
                c->cyclic = true;
                omap_dma_link_lch(c->dma_ch, c->dma_ch);
                omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
                omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
        }

        if (!cpu_class_is_omap1()) {
                omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
                omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
        }

        return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
}

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        memcpy(&c->cfg, cfg, sizeof(c->cfg));

        return 0;
}

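/*
 * Abort the channel: take it off the pending list, stop the hardware if
 * a transfer is in flight, undo the cyclic self-link, and free every
 * queued descriptor.
 */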
static int omap_dma_terminate_all(struct omap_chan *c)
{
        struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /*
         * Stop DMA activity: we assume the callback will not be called
         * after omap_stop_dma() returns (even if it does, it will see
         * c->desc is NULL and exit.)
         */
        if (c->desc) {
                c->desc = NULL;
                /* Avoid stopping the dma twice */
                if (!c->paused)
                        omap_stop_dma(c->dma_ch);
        }

        if (c->cyclic) {
                c->cyclic = false;
                c->paused = false;
                omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
        }

        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (!c->paused) {
                omap_stop_dma(c->dma_ch);
                c->paused = true;
        }

        return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (c->paused) {
                omap_start_dma(c->dma_ch);
                c->paused = false;
        }

        return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        unsigned long arg)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        int ret;

        switch (cmd) {
        case DMA_SLAVE_CONFIG:
                ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
                break;

        case DMA_TERMINATE_ALL:
                ret = omap_dma_terminate_all(c);
                break;

        case DMA_PAUSE:
                ret = omap_dma_pause(c);
                break;

        case DMA_RESUME:
                ret = omap_dma_resume(c);
                break;

        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
        struct omap_chan *c;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        c->dma_sig = dma_sig;
        c->vc.desc_free = omap_dma_desc_free;
        vchan_init(&c->vc, &od->ddev);
        INIT_LIST_HEAD(&c->node);

        od->ddev.chancnt++;

        return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
        tasklet_kill(&od->task);
        while (!list_empty(&od->ddev.channels)) {
                struct omap_chan *c = list_first_entry(&od->ddev.channels,
                        struct omap_chan, vc.chan.device_node);

                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
                kfree(c);
        }
        kfree(od);
}

static int omap_dma_probe(struct platform_device *pdev)
{
        struct omap_dmadev *od;
        int rc, i;

        od = kzalloc(sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
        od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
        od->ddev.device_tx_status = omap_dma_tx_status;
        od->ddev.device_issue_pending = omap_dma_issue_pending;
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_control = omap_dma_control;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        INIT_LIST_HEAD(&od->pending);
        spin_lock_init(&od->lock);

        tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

        for (i = 0; i < 127; i++) {
                rc = omap_dma_chan_init(od, i);
                if (rc) {
                        omap_dma_free(od);
                        return rc;
                }
        }

        rc = dma_async_device_register(&od->ddev);
        if (rc) {
                pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
                        rc);
                omap_dma_free(od);
        } else {
                platform_set_drvdata(pdev, od);
        }

        dev_info(&pdev->dev, "OMAP DMA engine driver\n");

        return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
        struct omap_dmadev *od = platform_get_drvdata(pdev);

        dma_async_device_unregister(&od->ddev);
        omap_dma_free(od);

        return 0;
}

static struct platform_driver omap_dma_driver = {
        .probe  = omap_dma_probe,
        .remove = omap_dma_remove,
        .driver = {
                .name = "omap-dma-engine",
                .owner = THIS_MODULE,
        },
};

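/*
 * dma_request_channel() filter: match a channel belonging to this
 * driver whose request line equals the signal number passed in *param.
 * Client drivers typically pass this together with their request line
 * when asking the dmaengine core for a channel.
 */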
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &omap_dma_driver.driver) {
                struct omap_chan *c = to_omap_dma_chan(chan);
                unsigned req = *(unsigned *)param;

                return req == c->dma_sig;
        }
        return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static struct platform_device *pdev;

static const struct platform_device_info omap_dma_dev_info = {
        .name = "omap-dma-engine",
        .id = -1,
        .dma_mask = DMA_BIT_MASK(32),
};

static int omap_dma_init(void)
{
        int rc = platform_driver_register(&omap_dma_driver);

        if (rc == 0) {
                pdev = platform_device_register_full(&omap_dma_dev_info);
                if (IS_ERR(pdev)) {
                        platform_driver_unregister(&omap_dma_driver);
                        rc = PTR_ERR(pdev);
                }
        }
        return rc;
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
        platform_device_unregister(pdev);
        platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");