/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,		/* idle, on the free list */
	DESC_PREPARED,		/* prepared, but not yet submitted */
	DESC_SUBMITTED,		/* submitted to hardware */
	DESC_COMPLETED,		/* completed, have to call callback */
	DESC_WAITING,		/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;

/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* Nothing to do if the hardware is still busy */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx),
		*last = desc;
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);
		last = chunk;

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &last->async_tx, schan->id);
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset, while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .setup_xfer() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}

static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, true);
		if (ret < 0)
			return ret;

		slave_id = schan->slave_id;
	} else {
		match = slave_id;
	}

	if (slave_id < 0 || slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, false);
	if (ret < 0) {
		clear_bit(slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = slave_id;

	return 0;
}

/*
 * This is the standard shdma filter function to be used as a replacement to the
 * "old" method, using the .private pointer. If for some reason you allocate a
 * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
 * parameter. If this filter is used, the slave driver, after calling
 * dma_request_channel(), will also have to call dmaengine_slave_config() with
 * .slave_id, .direction, and either .src_addr or .dst_addr set.
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers using these
 * services would have to provide their own filters, which would first check
 * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
 * this, and only then, in case of a match, call this common filter.
 * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
 * In that case the MID-RID value is used for slave channel filtering and is
 * passed to this function in the "arg" parameter.
 */

bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int match = (int)arg;
	int ret;

	if (match < 0)
		/* No slave requested - arbitrary channel */
		return true;

	if (!schan->dev->of_node && match >= slave_num)
		return false;

	ret = ops->set_slave(schan, match, true);
	if (ret < 0)
		return false;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);

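/*
 * Illustrative usage sketch (not part of this library): on a non-DT platform a
 * slave driver would typically pass its slave ID through this filter and then
 * configure the channel, as the comment above describes. The slave_id and
 * fifo_dma_addr values below are placeholders.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_slave_config cfg = {
 *		.slave_id	= slave_id,
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_dma_addr,
 *	};
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
 *	if (chan)
 *		dmaengine_slave_config(chan, &cfg);
 */
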
static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		ret = shdma_setup_slave(schan, slave->slave_id);
		if (ret < 0)
			goto esetslave;
	} else {
		schan->slave_id = -EINVAL;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;
		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}

static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	unsigned long flags;

	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;

			list_move(&desc->node, &schan->ld_free);

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	if (callback)
		callback(param);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of a DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}

/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the prepared descriptor on success or NULL on error
 * Locks: called with desc_lock held
 */

static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
		copy_size, *len, *src, *dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;
	new->partial = 0;

	*len = copy_size;

	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element and points at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
}

static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags);
}

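/*
 * Illustrative consumer-side sketch (not part of this library): once a channel
 * is configured, a slave driver submits scatter-gather transfers through the
 * generic dmaengine API, which ends up in shdma_prep_slave_sg() above. The
 * sgl/sg_len variables and the completion callback below are placeholders.
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (desc) {
 *		desc->callback = my_dma_complete;
 *		desc->callback_param = my_data;
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */
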
static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	struct dma_slave_config *config;
	unsigned long flags;
	int ret;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&schan->chan_lock, flags);
		ops->halt_channel(schan);

		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
			/* Record partial transfer */
			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
					struct shdma_desc, node);
			desc->partial = ops->get_partial(schan, desc);
		}

		spin_unlock_irqrestore(&schan->chan_lock, flags);

		shdma_chan_ld_cleanup(schan, true);
		break;
	case DMA_SLAVE_CONFIG:
		/*
		 * So far only .slave_id is used, but the slave drivers are
		 * encouraged to also set a transfer direction and an address.
		 */
		if (!arg)
			return -EINVAL;
		/*
		 * We could lock this, but you shouldn't be configuring the
		 * channel, while using it...
		 */
		config = (struct dma_slave_config *)arg;
		ret = shdma_setup_slave(schan, config->slave_id);
		if (ret < 0)
			return ret;
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

static void shdma_issue_pending(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
}

static enum dma_status shdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);
	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find cookie on the queue, it has been aborted and we have
	 * to report error
	 */
	if (status != DMA_SUCCESS) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}

/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
			sdesc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);

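/*
 * Illustrative sketch (not part of this library): as the comment above says,
 * a glue driver would call shdma_reset() from its error interrupt handler.
 * The my_dmae_err name below is hypothetical.
 *
 *	static irqreturn_t my_dmae_err(int irq, void *data)
 *	{
 *		struct shdma_dev *sdev = data;
 *
 *		if (!shdma_reset(sdev))
 *			return IRQ_NONE;
 *		return IRQ_HANDLED;
 *	}
 */
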
static irqreturn_t chan_irq(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	spin_lock(&schan->chan_lock);
	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
	spin_unlock(&schan->chan_lock);

	return ret;
}

static irqreturn_t chan_irqt(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}

int shdma_request_irq(struct shdma_chan *schan, int irq,
			   unsigned long flags, const char *name)
{
	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
					    chan_irqt, flags, name, schan);

	schan->irq = ret < 0 ? ret : irq;

	return schan->irq;
}
EXPORT_SYMBOL(shdma_request_irq);

void shdma_chan_probe(struct shdma_dev *sdev,
		       struct shdma_chan *schan, int id)
{
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;
	schan->id = id;

	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
	sdev->schan[sdev->dma_dev.chancnt++] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);

void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);

int shdma_init(struct device *dev, struct shdma_dev *sdev,
		    int chan_num)
{
	struct dma_device *dma_dev = &sdev->dma_dev;

	/*
	 * Require all call-backs for now, they can trivially be made optional
	 * later as required
	 */
	if (!sdev->ops ||
	    !sdev->desc_size ||
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)
		return -EINVAL;

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
	if (!sdev->schan)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_control = shdma_control;

	dma_dev->dev = dev;

	return 0;
}
EXPORT_SYMBOL(shdma_init);

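/*
 * Illustrative glue-driver sketch (not part of this library): a hardware
 * driver embeds struct shdma_dev, fills in every callback checked above, and
 * then registers its channels. All names below (my_ops, my_desc, my_chan,
 * MY_CHAN_NUM, the irq variable) are hypothetical.
 *
 *	static const struct shdma_ops my_ops = {
 *		.embedded_desc	= my_embedded_desc,
 *		.start_xfer	= my_start_xfer,
 *		.setup_xfer	= my_setup_xfer,
 *		.set_slave	= my_set_slave,
 *		.desc_setup	= my_desc_setup,
 *		.slave_addr	= my_slave_addr,
 *		.channel_busy	= my_channel_busy,
 *		.halt_channel	= my_halt_channel,
 *		.desc_completed	= my_desc_completed,
 *	};
 *
 *	sdev->ops = &my_ops;
 *	sdev->desc_size = sizeof(struct my_desc);
 *	ret = shdma_init(&pdev->dev, sdev, MY_CHAN_NUM);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	shdma_chan_probe(sdev, &my_chan->shdma_chan, id);
 *	ret = shdma_request_irq(&my_chan->shdma_chan, irq, IRQF_SHARED, "my-dma");
 */
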
void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);

static int __init shdma_enter(void)
{
	shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
				   sizeof(long), GFP_KERNEL);
	if (!shdma_slave_used)
		return -ENOMEM;
	return 0;
}
module_init(shdma_enter);

static void __exit shdma_exit(void)
{
	kfree(shdma_slave_used);
}
module_exit(shdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");