/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint8_t periph_port;	/* Peripheral port */

	unsigned sglen;
	struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

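/*
 * Program the sg entry at index 'idx' into the hardware channel and
 * start the transfer.  The memory-side parameters post-increment; the
 * constant device-side parameters were set by omap_dma_start_desc().
 */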
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
	else
		omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

	omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
		d->sync_mode, c->dma_sig, d->sync_type);

	omap_start_dma(c->dma_ch);
}

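/*
 * Take the next descriptor off the virtual channel's queue, program the
 * constant device-side address, and kick off the first sg entry.
 * Called with the channel's vc.lock held.
 */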
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_src_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
	else
		omap_set_dma_dest_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

	omap_dma_start_sg(c, d, 0);
}

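/*
 * Completion callback from the low-level omap-dma code: advance to the
 * next sg entry, or complete the descriptor and start the next one.
 * In cyclic mode (frame IRQ enabled) just signal the period callback.
 */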
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with the 'all channels in-use' case.
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

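/* Size helpers: sg sizes are tracked in elements and converted to bytes. */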
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

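/*
 * Bytes remaining from the current hardware position 'addr' to the end
 * of the descriptor: nothing is counted until the sg entry containing
 * 'addr' is found, then every subsequent entry is counted in full.
 */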
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_get_dma_src_pos(c->dma_ch);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_get_dma_dst_pos(c->dma_ch);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

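/*
 * Place the channel on the device's pending list and schedule the
 * tasklet, which does the actual omap_dma_start_desc() call.
 */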
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		struct omap_dmadev *d = to_omap_dma_dev(chan->device);
		spin_lock(&d->lock);
		if (list_empty(&c->node))
			list_add_tail(&c->node, &d->pending);
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

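/*
 * Prepare a slave scatter-gather transfer.  The slave config supplies
 * the device address, bus width, and burst size; each sg entry becomes
 * one frame-synchronised transfer of EN elements per frame * FN frames.
 */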
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_TIPB;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

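/*
 * Prepare a cyclic (e.g. audio) transfer: a single sg entry describes
 * the whole buffer, with one frame per period, and the channel is
 * linked to itself so the hardware loops until terminated.
 */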
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	if (burst)
		d->sync_mode = OMAP_DMA_SYNC_PACKET;
	else
		d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_MPUI;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	if (!c->cyclic) {
		c->cyclic = true;
		omap_dma_link_lch(c->dma_ch, c->dma_ch);
		omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
	}

	if (!cpu_class_is_omap1()) {
		omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
		omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
	}

	return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
}

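/*
 * 8-byte bus widths are rejected: the OMAP element types only go up to
 * 32 bits (OMAP_DMA_DATA_TYPE_S32).
 */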
static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

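/*
 * Stop any in-flight transfer, drop the channel from the pending list,
 * and free every queued descriptor.
 */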
static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_stop_dma() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_stop_dma(c->dma_ch);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_stop_dma(c->dma_ch);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_start_dma(c->dma_ch);
		c->paused = false;
	}

	return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;
	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;
	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;
	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
	kfree(od);
}

static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = kzalloc(sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
	} else {
		platform_set_drvdata(pdev, od);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name	= "omap-dma-engine",
		.owner	= THIS_MODULE,
	},
};

bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

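/*
 * Illustrative usage (a sketch, not part of this driver): a client
 * typically passes omap_dma_filter_fn to dma_request_channel() along
 * with the peripheral's DMA request line number.  MY_DMA_REQ_LINE
 * below is a hypothetical placeholder:
 *
 *	dma_cap_mask_t mask;
 *	unsigned int sig = MY_DMA_REQ_LINE;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */
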
static struct platform_device *pdev;

static const struct platform_device_info omap_dma_dev_info = {
	.name = "omap-dma-engine",
	.id = -1,
	.dma_mask = DMA_BIT_MASK(32),
};

static int omap_dma_init(void)
{
	int rc = platform_driver_register(&omap_dma_driver);

	if (rc == 0) {
		pdev = platform_device_register_full(&omap_dma_dev_info);
		if (IS_ERR(pdev)) {
			platform_driver_unregister(&omap_dma_driver);
			rc = PTR_ERR(pdev);
		}
	}

	return rc;
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");