/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; the list is
 * protected by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}

	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static bool
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
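
/*
 * Example (illustrative sketch, not part of this file): a client that has
 * submitted a descriptor on a channel can busy-wait for its completion with
 * dma_sync_wait().  The tx, chan and error-handling names below are
 * hypothetical.
 */
#if 0
	dma_cookie_t cookie;

	cookie = dmaengine_submit(tx);		/* tx obtained from a prep_* call */
	dma_async_issue_pending(chan);		/* start the transfer */
	if (dma_sync_wait(chan, cookie) == DMA_ERROR)	/* spins up to 5 seconds */
		pr_err("dma copy did not complete\n");
#endif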

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
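
/*
 * Example (illustrative sketch, not part of this file): the public-channel
 * usage model described at the top of this file.  A client registers its
 * interest with dmaengine_get(), looks up a shared channel per operation
 * type, and drops its interest with dmaengine_put() when done.
 */
#if 0
	struct dma_chan *chan;

	dmaengine_get();			/* take refs on public channels */
	chan = dma_find_channel(DMA_MEMCPY);	/* per-cpu lookup, may be NULL */
	if (chan) {
		/* issue one or more operations on chan ... */
		dma_async_issue_pending(chan);
	}
	dmaengine_put();			/* allow providers to unload again */
#endif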

/**
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
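
/*
 * Example (illustrative sketch, not part of this file): requesting an
 * exclusive channel through the dma_request_channel() wrapper with an
 * optional filter function.  my_filter() and my_match_data are hypothetical.
 */
#if 0
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	/* my_filter() returns true only for a channel the caller accepts */
	chan = dma_request_channel(mask, my_filter, my_match_data);
	if (chan) {
		/* use the channel exclusively ... */
		dma_release_channel(chan);
	}
#endif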

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 */
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return NULL;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
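
/*
 * Example (illustrative sketch, not part of this file): a peripheral driver
 * asking for its DT- or ACPI-described slave channel by name from its
 * probe() routine.  The "rx" channel name and pdev are hypothetical.
 */
#if 0
	struct dma_chan *rxchan;

	rxchan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!rxchan)
		return -EPROBE_DEFER;	/* or fall back to PIO */
	/* ... configure with dmaengine_slave_config(), prep, submit ... */
	dma_release_channel(rxchan);
#endif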

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting, we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
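
/*
 * Example (illustrative sketch, not part of this file): the skeleton of a
 * provider driver registering a memcpy-capable dma_device.  The my_* and
 * pdev names are hypothetical.
 */
#if 0
	struct dma_device *dd = &my_dev->ddev;
	struct dma_chan *chan = &my_dev->chan;

	dma_cap_zero(dd->cap_mask);
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dd->device_prep_dma_memcpy	= my_prep_dma_memcpy;
	dd->device_alloc_chan_resources	= my_alloc_chan_resources;
	dd->device_free_chan_resources	= my_free_chan_resources;
	dd->device_tx_status		= my_tx_status;
	dd->device_issue_pending	= my_issue_pending;
	dd->dev				= &pdev->dev;

	INIT_LIST_HEAD(&dd->channels);
	chan->device = dd;
	list_add_tail(&chan->device_node, &dd->channels);

	rc = dma_async_device_register(dd);
#endif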

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
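
/*
 * Example (illustrative sketch, not part of this file): offloading a copy
 * between two kernel buffers on a channel obtained from dma_find_channel()
 * and waiting synchronously for the result.  The buffer names are
 * hypothetical.
 */
#if 0
	dma_cookie_t cookie;

	cookie = dma_async_memcpy_buf_to_buf(chan, dst_buf, src_buf, len);
	if (dma_submit_error(cookie))
		return -ENOMEM;		/* fall back to plain memcpy() */
	if (dma_sync_wait(chan, cookie) == DMA_ERROR)
		return -EIO;
#endif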

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
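
/*
 * Example (illustrative sketch, not part of this file): a provider driver's
 * descriptor clean-up path typically completes the cookie, runs the client
 * callback, and then kicks any dependent async_tx operations with
 * dma_run_dependencies().  The desc structure is hypothetical, and
 * dma_cookie_complete() comes from the drivers/dma/dmaengine.h helpers.
 */
#if 0
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dma_cookie_complete(txd);
	if (txd->callback)
		txd->callback(txd->callback_param);
	dma_run_dependencies(txd);
#endif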

static int __init dma_bus_init(void)
{
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);